Skip to content

Commit 87a69eb

Browse files
committed
Fix typo, defer TARGET_NODE resolution, and add code block language specifiers
- Fix typo: rescheuling -> rescheduling in demos/README.md
- Move TARGET_NODE lookup into find_target_node(), called after check_cluster()
  - Prevents kubectl errors when the cluster isn't validated yet
- Add language specifiers to code blocks (text, bash) for proper syntax highlighting

Signed-off-by: Karla Saur <[email protected]>
1 parent 5f6dcec commit 87a69eb

File tree

4 files changed

+28
-19
lines changed

4 files changed

+28
-19
lines changed

demos/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ Interactive demonstrations of NVSentinel's core capabilities that run locally on
1717

1818
## Coming Soon
1919

20-
- Pod rescheuling and restarting from checkpointing
20+
- Pod rescheduling and restarting from checkpointing
2121

2222
**Questions?** See the [main README](../README.md) or [open an issue](https://github.com/NVIDIA/NVSentinel/issues).
2323

demos/local-xid-demo/README.md

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ Welcome! This demo shows NVSentinel's core functionality running locally on your
3939
**What you'll see:** The entire workflow runs automatically from cluster creation through error injection to verification. Great for getting a quick sense of NVSentinel's capabilities, but you won't see the details of each step.
4040

4141
```bash
42-
# Run the complete demo (takes ~6-10 minutes)
42+
# Run the complete demo (takes ~5-10 minutes)
4343
make demo
4444

4545
# Clean up when done
@@ -111,7 +111,7 @@ This demo uses a **minimal NVSentinel deployment** with:
111111
- **Fault Quarantine** - Rule engine that cordons nodes on fatal errors
112112
- **MongoDB** - Event storage and change streams
113113

114-
```
114+
```text
115115
┌───────────────────────────────────────────────────────┐
116116
│ Your Laptop (KIND Cluster) │
117117
│ │
@@ -170,7 +170,7 @@ Shows the healthy cluster:
170170

171171
**Expected output:**
172172

173-
```
173+
```bash
174174
$ kubectl get nodes
175175
NAME STATUS ROLES AGE VERSION
176176
nvsentinel-demo-control-plane Ready control-plane 2m v1.31.0
@@ -222,7 +222,7 @@ Confirms the automated response:
222222

223223
**Expected output:**
224224

225-
```
225+
```bash
226226
$ kubectl get nodes
227227
NAME STATUS ROLES AGE VERSION
228228
nvsentinel-demo-control-plane Ready control-plane 12m v1.31.0

demos/local-xid-demo/scripts/02-inject-xid.sh

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -24,13 +24,7 @@ NC='\033[0m'
2424

2525
CLUSTER_NAME="nvsentinel-demo"
2626
NAMESPACE="nvsentinel"
27-
# Dynamically find the first worker node (supports clusters with 1 or more workers)
28-
TARGET_NODE=$(kubectl get nodes -o json | jq -r '.items[] | select(.metadata.name | contains("worker")) | .metadata.name' | head -1)
29-
30-
if [ -z "$TARGET_NODE" ]; then
31-
echo -e "\n${RED}[ERROR]${NC} No worker nodes found in cluster"
32-
exit 1
33-
fi
27+
TARGET_NODE=""
3428

3529
log() {
3630
echo -e "${BLUE}[INFO]${NC} $1"
@@ -65,6 +59,16 @@ check_cluster() {
6559
kubectl config use-context "kind-${CLUSTER_NAME}" > /dev/null 2>&1
6660
}
6761

62+
find_target_node() {
63+
# Dynamically find the first worker node (supports clusters with 1 or more workers)
64+
TARGET_NODE=$(kubectl get nodes -o json | jq -r '.items[] | select(.metadata.name | contains("worker")) | .metadata.name' | head -1)
65+
66+
if [ -z "$TARGET_NODE" ]; then
67+
echo -e "\n${RED}[ERROR]${NC} No worker nodes found in cluster"
68+
exit 1
69+
fi
70+
}
71+
6872
check_node_exists() {
6973
if ! kubectl get node "$TARGET_NODE" &> /dev/null; then
7074
error "Node '$TARGET_NODE' not found in cluster"

demos/local-xid-demo/scripts/03-verify-cordon.sh

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -24,13 +24,7 @@ NC='\033[0m'
2424

2525
CLUSTER_NAME="nvsentinel-demo"
2626
NAMESPACE="nvsentinel"
27-
# Dynamically find the first worker node (supports clusters with 1 or more workers)
28-
TARGET_NODE=$(kubectl get nodes -o json | jq -r '.items[] | select(.metadata.name | contains("worker")) | .metadata.name' | head -1)
29-
30-
if [ -z "$TARGET_NODE" ]; then
31-
echo -e "\n${RED}[ERROR]${NC} No worker nodes found in cluster"
32-
exit 1
33-
fi
27+
TARGET_NODE=""
3428

3529
log() {
3630
echo -e "${BLUE}[INFO]${NC} $1"
@@ -65,6 +59,16 @@ check_cluster() {
6559
kubectl config use-context "kind-${CLUSTER_NAME}" > /dev/null 2>&1
6660
}
6761

62+
find_target_node() {
63+
# Dynamically find the first worker node (supports clusters with 1 or more workers)
64+
TARGET_NODE=$(kubectl get nodes -o json | jq -r '.items[] | select(.metadata.name | contains("worker")) | .metadata.name' | head -1)
65+
66+
if [ -z "$TARGET_NODE" ]; then
67+
echo -e "\n${RED}[ERROR]${NC} No worker nodes found in cluster"
68+
exit 1
69+
fi
70+
}
71+
6872
verify_cordon() {
6973
section "Verifying Node Cordon Status"
7074

@@ -193,6 +197,7 @@ verify_cordon() {
193197

194198
main() {
195199
check_cluster
200+
find_target_node
196201
verify_cordon
197202
}
198203

0 commit comments

Comments
 (0)