Commit d49d240

Prevent flakiness in instance groups controller test
1 parent 576c1a1 commit d49d240

File tree

1 file changed: +49 −13 lines changed

pkg/instancegroups/controller_test.go

Lines changed: 49 additions & 13 deletions
@@ -2,6 +2,7 @@ package instancegroups
 
 import (
 	"context"
+	"sync"
 	"testing"
 	"time"
 
@@ -130,12 +131,14 @@ func withUnschedulable(value bool) func(node *api_v1.Node) {
 }
 
 func TestSync(t *testing.T) {
+	t.Parallel()
+
 	config := &ControllerConfig{}
 	resyncPeriod := 1 * time.Second
 	fakeKubeClient := fake.NewSimpleClientset()
 	informer := informerv1.NewNodeInformer(fakeKubeClient, resyncPeriod, utils.NewNamespaceIndexer())
 	config.NodeInformer = informer
-	fakeManager := &IGManagerFake{}
+	fakeManager := &IGManagerFake{SyncLock: &sync.Mutex{}}
 	config.IGManager = fakeManager
 	config.HasSynced = func() bool {
 		return true
@@ -155,33 +158,63 @@ func TestSync(t *testing.T) {
 	var expectedSyncedNodesCounter = 0
 	firstNode := testNode()
 	secondNode := testNode()
+	thirdNode := testNode()
 	secondNode.Name = "secondNode"
+	thirdNode.Name = "thirdNode"
 
-	// Add two nodes
+	// Freeze resyncs to ensure that second node sync won't start before third node triggers it as well
+	fakeManager.SyncLock.Lock()
 	fakeKubeClient.CoreV1().Nodes().Create(context.TODO(), firstNode, meta_v1.CreateOptions{})
-	// wait time > resync period
-	time.Sleep(2 * time.Second)
+	time.Sleep(time.Second) // ensure sync started
+
+	// Add two nodes
 	fakeKubeClient.CoreV1().Nodes().Create(context.TODO(), secondNode, meta_v1.CreateOptions{})
-	// The counter = 1 because it synced only once (for the first Create() call)
-	expectedSyncedNodesCounter += 1
+	fakeKubeClient.CoreV1().Nodes().Create(context.TODO(), thirdNode, meta_v1.CreateOptions{})
+	time.Sleep(time.Second) // ensure third node will be able to trigger sync
+	fakeManager.SyncLock.Unlock()
+	time.Sleep(2 * time.Second) // wait until both syncs complete
+	// Second and third nodes should trigger only a single sync (+1 for the first)
+	expectedSyncedNodesCounter += 2
 	verifyExpectedSyncerCount(t, fakeManager.syncedNodes, expectedSyncedNodesCounter)
 
-	// Update both nodes
-	firstNode.Annotations["key"] = "true"
-	firstNode.Spec.Unschedulable = false
-	secondNode.Annotations["key"] = "true"
+	// Freeze resyncs to ensure that second node sync won't start before third node triggers it as well
+	fakeManager.SyncLock.Lock()
+	firstNode.Spec.Unschedulable = true
 	fakeKubeClient.CoreV1().Nodes().Update(context.TODO(), firstNode, meta_v1.UpdateOptions{})
+	time.Sleep(time.Second) // ensure sync started
+
+	// Update two nodes
+	secondNode.Spec.Unschedulable = true
+	thirdNode.Spec.Unschedulable = true
 	fakeKubeClient.CoreV1().Nodes().Update(context.TODO(), secondNode, meta_v1.UpdateOptions{})
-	time.Sleep(2 * time.Second)
-	// nodes were updated
-	expectedSyncedNodesCounter += 1
+	fakeKubeClient.CoreV1().Nodes().Update(context.TODO(), thirdNode, meta_v1.UpdateOptions{})
+	time.Sleep(time.Second) // ensure third node will be able to trigger sync
+	fakeManager.SyncLock.Unlock()
+	time.Sleep(2 * time.Second) // wait until both syncs complete
+	// Second and third nodes should trigger only a single sync (+1 for the first)
+	expectedSyncedNodesCounter += 2
 	verifyExpectedSyncerCount(t, fakeManager.syncedNodes, expectedSyncedNodesCounter)
 
 	// no real update
 	fakeKubeClient.CoreV1().Nodes().Update(context.TODO(), firstNode, meta_v1.UpdateOptions{})
 	// Nothing should change
 	time.Sleep(2 * time.Second)
 	verifyExpectedSyncerCount(t, fakeManager.syncedNodes, expectedSyncedNodesCounter)
+
+	// Freeze resyncs to ensure that second node sync won't start before third node triggers it as well
+	fakeManager.SyncLock.Lock()
+	fakeKubeClient.CoreV1().Nodes().Delete(context.TODO(), firstNode.Name, meta_v1.DeleteOptions{})
+	time.Sleep(time.Second) // ensure sync started
+
+	// Delete two nodes
+	fakeKubeClient.CoreV1().Nodes().Delete(context.TODO(), secondNode.Name, meta_v1.DeleteOptions{})
+	fakeKubeClient.CoreV1().Nodes().Delete(context.TODO(), thirdNode.Name, meta_v1.DeleteOptions{})
+	time.Sleep(time.Second) // ensure third node will be able to trigger sync
+	fakeManager.SyncLock.Unlock()
+	time.Sleep(2 * time.Second) // wait until both syncs complete
+	// Second and third nodes should trigger only a single sync (+1 for the first)
+	expectedSyncedNodesCounter += 2
+	verifyExpectedSyncerCount(t, fakeManager.syncedNodes, expectedSyncedNodesCounter)
 }
 
 func verifyExpectedSyncerCount(t *testing.T, syncedNodes [][]string, expectedCount int) {
@@ -192,9 +225,12 @@ func verifyExpectedSyncerCount(t *testing.T, syncedNodes [][]string, expectedCou
 
 type IGManagerFake struct {
 	syncedNodes [][]string
+	SyncLock    sync.Locker
 }
 
 func (igmf *IGManagerFake) Sync(nodeNames []string, logger klog.Logger) error {
+	igmf.SyncLock.Lock()
+	defer igmf.SyncLock.Unlock()
 	igmf.syncedNodes = append(igmf.syncedNodes, nodeNames)
 	return nil
 }
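
The pattern behind the fix: the fake IG manager's Sync now blocks on a mutex the test controls. While the test holds the lock, the first sync parks mid-flight, so the remaining node events queue behind it and coalesce into exactly one follow-up sync, which makes the expected counter deterministic instead of racing the 1-second resync period. Below is a minimal, self-contained sketch of that lock-gated-fake pattern; all names (lockedfake, fakeManager, worker, TestCoalescedSync) and the channel-based worker are illustrative assumptions, not code from this repository.

// A sketch of the lock-gated fake pattern: the fake's Sync blocks on a
// test-controlled mutex, so the test can freeze syncing, queue several
// events, then release the lock and assert they coalesced into one batch.
// Hypothetical names throughout; not the repository's actual controller.
package lockedfake

import (
	"sync"
	"testing"
	"time"
)

type fakeManager struct {
	SyncLock      sync.Locker // held by the test to freeze Sync
	mu            sync.Mutex  // guards syncedBatches
	syncedBatches [][]string
}

func (f *fakeManager) Sync(nodes []string) {
	f.SyncLock.Lock() // parks here while the test holds the lock
	defer f.SyncLock.Unlock()
	f.mu.Lock()
	defer f.mu.Unlock()
	f.syncedBatches = append(f.syncedBatches, nodes)
}

// worker mimics a controller loop: it takes one queued event, gives other
// pending events a moment to arrive, drains them into the same batch, and
// issues a single Sync for the whole batch.
func worker(f *fakeManager, events <-chan string, done <-chan struct{}) {
	for {
		select {
		case <-done:
			return
		case first := <-events:
			batch := []string{first}
			time.Sleep(50 * time.Millisecond) // grace period for queued events
		drain:
			for {
				select {
				case n := <-events:
					batch = append(batch, n)
				default:
					break drain
				}
			}
			f.Sync(batch)
		}
	}
}

func TestCoalescedSync(t *testing.T) {
	lock := &sync.Mutex{}
	f := &fakeManager{SyncLock: lock}
	events := make(chan string, 8)
	done := make(chan struct{})
	defer close(done)
	go worker(f, events, done)

	lock.Lock()        // freeze: the first Sync will block on this
	events <- "first"  // worker picks this up and parks inside Sync
	time.Sleep(200 * time.Millisecond)
	events <- "second" // these two queue up behind the frozen Sync...
	events <- "third"  // ...and must land together in one coalesced batch
	lock.Unlock()
	time.Sleep(500 * time.Millisecond) // let both syncs finish

	f.mu.Lock()
	defer f.mu.Unlock()
	if got := len(f.syncedBatches); got != 2 {
		t.Fatalf("expected 2 sync batches (first + coalesced), got %d", got)
	}
}

Note that the sketch, like the committed test, still leans on time.Sleep for coarse ordering; the lock removes the main race (which events land in which sync batch) rather than all timing sensitivity.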
