package neg

import (
	"testing"
	"time"

	networkclient "github.com/GoogleCloudPlatform/gke-networking-api/client/network/clientset/versioned"
	nodetopologyclient "github.com/GoogleCloudPlatform/gke-networking-api/client/nodetopology/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	k8sfake "k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
	providerconfig "k8s.io/ingress-gce/pkg/apis/providerconfig/v1"
	multiprojectgce "k8s.io/ingress-gce/pkg/multiproject/gce"
	multiprojectinformers "k8s.io/ingress-gce/pkg/multiproject/informerset"
	"k8s.io/ingress-gce/pkg/neg"
	"k8s.io/ingress-gce/pkg/neg/metrics"
	syncMetrics "k8s.io/ingress-gce/pkg/neg/metrics/metricscollector"
	"k8s.io/ingress-gce/pkg/neg/syncers/labels"
	negtypes "k8s.io/ingress-gce/pkg/neg/types"
	svcnegclient "k8s.io/ingress-gce/pkg/svcneg/client/clientset/versioned"
	svcnegfake "k8s.io/ingress-gce/pkg/svcneg/client/clientset/versioned/fake"
	"k8s.io/ingress-gce/pkg/utils"
	"k8s.io/ingress-gce/pkg/utils/namer"
	"k8s.io/ingress-gce/pkg/utils/zonegetter"
	klog "k8s.io/klog/v2"
	ktesting "k8s.io/klog/v2/ktesting"
)

// TestStartNEGController_StopJoin verifies that the stop channel passed to the
// controller closes when either the global stop channel or the per-ProviderConfig
// stop channel closes. The intended join semantics are illustrated by the
// hypothetical joinedStopSketch after this test.
func TestStartNEGController_StopJoin(t *testing.T) {
	logger, _ := ktesting.NewTestContext(t)
	kubeClient := k8sfake.NewSimpleClientset()
	informers := multiprojectinformers.NewInformerSet(kubeClient, svcnegfake.NewSimpleClientset(), networkclient.Interface(nil), nodetopologyclient.Interface(nil), metav1.Duration{})

	// Start the base informers. They are not strictly required by the stubbed
	// controller, but starting them mirrors the real startup flow and ensures
	// CombinedHasSynced would report true if it were consulted. globalStop is
	// closed via t.Cleanup, so it stays open for the duration of both subtests.
	globalStop := make(chan struct{})
	t.Cleanup(func() { close(globalStop) })
	if err := informers.Start(globalStop, logger); err != nil {
		t.Fatalf("start informers: %v", err)
	}

	// Provide the required inputs for StartNEGController.
	pc := &providerconfig.ProviderConfig{ObjectMeta: metav1.ObjectMeta{Name: "pc-1"}}
	kubeSystemUID := types.UID("uid")
	rootNamer := namer.NewNamer("clusteruid", "", logger)
	l4Namer := namer.NewL4Namer(string(kubeSystemUID), rootNamer)
	lpCfg := labels.PodLabelPropagationConfig{}
	// Create a fake cloud with a valid SubnetworkURL via the multiproject helper,
	// using a minimal provider config for the GCE fake.
	gceCreator := multiprojectgce.NewGCEFake()
	pcForCloud := &providerconfig.ProviderConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "pc-1"},
		Spec: providerconfig.ProviderConfigSpec{
			ProjectID:     "test-project",
			ProjectNumber: 123,
			NetworkConfig: providerconfig.ProviderNetworkConfig{
				Network:    "net-1",
				SubnetInfo: providerconfig.ProviderConfigSubnetInfo{Subnetwork: "sub-1"},
			},
		},
	}
	cloud, err := gceCreator.GCEForProviderConfig(pcForCloud, logger)
	if err != nil {
		t.Fatalf("create fake cloud: %v", err)
	}

	// Stub newNEGController to capture the stopCh passed in and to construct a
	// minimal controller that can run without panics.
	var capturedStopCh <-chan struct{}
	orig := newNEGController
	newNEGController = func(kc kubernetes.Interface, sc svcnegclient.Interface, ec kubernetes.Interface, uid types.UID,
		ing cache.SharedIndexInformer, svc cache.SharedIndexInformer, pod cache.SharedIndexInformer, node cache.SharedIndexInformer,
		es cache.SharedIndexInformer, sn cache.SharedIndexInformer, netInf cache.SharedIndexInformer, gke cache.SharedIndexInformer, nt cache.SharedIndexInformer,
		synced func() bool, l4 namer.L4ResourcesNamer, defSP utils.ServicePort, cloud negtypes.NetworkEndpointGroupCloud, zg *zonegetter.ZoneGetter, nm negtypes.NetworkEndpointGroupNamer,
		resync time.Duration, gc time.Duration, workers int, enableRR bool, runL4 bool, nonGCP bool, dualStack bool, lp labels.PodLabelPropagationConfig,
		multiNetworking bool, ingressRegional bool, runNetLB bool, readOnly bool, enableNEGsForIngress bool,
		stopCh <-chan struct{}, l klog.Logger, negMetrics *metrics.NegMetrics, syncerMetrics *syncMetrics.SyncerMetrics) (*neg.Controller, error) {
		capturedStopCh = stopCh
		return neg.NewController(kc, sc, ec, uid, ing, svc, pod, node, es, sn, netInf, gke, nt, synced, l4, defSP, cloud, zg, nm,
			resync, gc, workers, enableRR, runL4, nonGCP, dualStack, lp, multiNetworking, ingressRegional, runNetLB, readOnly, enableNEGsForIngress, stopCh, l, negMetrics, syncerMetrics)
	}
	t.Cleanup(func() { newNEGController = orig })
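	// Note: mutating the package-level newNEGController is safe here because this
	// test does not call t.Parallel, and t.Cleanup restores the original before
	// any parallel test in the package resumes.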

	testCases := []struct {
		name          string
		closeProvider bool
	}{
		{name: "provider-stop-closes-joined", closeProvider: true},
		{name: "global-stop-closes-joined", closeProvider: false},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			// Reset the captured channel so a stale value from a previous
			// subtest cannot mask a stub that never ran.
			capturedStopCh = nil
			var joinStop <-chan struct{}
			var providerStop chan<- struct{}

			if tc.closeProvider {
				// Wire the join to the real globalStop for this subcase.
				joinStop = globalStop
				var err error
				providerStop, err = StartNEGController(informers, kubeClient, kubeClient, svcnegfake.NewSimpleClientset(), networkclient.Interface(nil), nodetopologyclient.Interface(nil), kubeSystemUID, rootNamer, l4Namer, lpCfg, cloud, joinStop, logger, pc, syncMetrics.FakeSyncerMetrics())
				if err != nil {
					t.Fatalf("StartNEGController: %v", err)
				}
				close(providerStop)
			} else {
				// Use a dedicated join channel so the shared informers keep running.
				js := make(chan struct{})
				joinStop = js
				var err error
				providerStop, err = StartNEGController(informers, kubeClient, kubeClient, svcnegfake.NewSimpleClientset(), networkclient.Interface(nil), nodetopologyclient.Interface(nil), kubeSystemUID, rootNamer, l4Namer, lpCfg, cloud, joinStop, logger, &providerconfig.ProviderConfig{ObjectMeta: metav1.ObjectMeta{Name: "pc-2"}}, syncMetrics.FakeSyncerMetrics())
				if err != nil {
					t.Fatalf("StartNEGController (2): %v", err)
				}
				close(js)
				// providerStop has not been closed in this branch, so closing it
				// here cannot panic; it also releases the join goroutine.
				defer close(providerStop)
			}

			if capturedStopCh == nil {
				t.Fatalf("capturedStopCh is nil; stub did not run")
			}
			select {
			case <-capturedStopCh:
				// ok
			case <-time.After(2 * time.Second):
				t.Fatalf("joined stopCh did not close for case %q", tc.name)
			}
		})
	}
}
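
// joinedStopSketch illustrates the stop-join contract exercised by
// TestStartNEGController_StopJoin. This is a hypothetical re-implementation for
// illustration only; the actual wiring inside StartNEGController may differ.
// It assumes nothing beyond standard channel semantics: the returned channel
// closes as soon as either input channel closes.
func joinedStopSketch(globalStop, providerStop <-chan struct{}) <-chan struct{} {
	joined := make(chan struct{})
	go func() {
		defer close(joined)
		// Whichever input closes first unblocks the select; the deferred close
		// then propagates the signal, matching the "either channel" contract.
		select {
		case <-globalStop:
		case <-providerStop:
		}
	}()
	return joined
}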

// TestStartNEGController_NilSvcNegClientErrors verifies that StartNEGController
// returns an error when the svcneg client is nil (which makes controller
// construction fail).
func TestStartNEGController_NilSvcNegClientErrors(t *testing.T) {
	t.Parallel()

	logger, _ := ktesting.NewTestContext(t)
	kubeClient := k8sfake.NewSimpleClientset()
	informers := multiprojectinformers.NewInformerSet(kubeClient, nil, networkclient.Interface(nil), nodetopologyclient.Interface(nil), metav1.Duration{})
	globalStop := make(chan struct{})
	t.Cleanup(func() { close(globalStop) })
	if err := informers.Start(globalStop, logger); err != nil {
		t.Fatalf("start informers: %v", err)
	}

	pc := &providerconfig.ProviderConfig{ObjectMeta: metav1.ObjectMeta{Name: "pc-err"}}
	kubeSystemUID := types.UID("uid")
	rootNamer := namer.NewNamer("clusteruid", "", logger)
	l4Namer := namer.NewL4Namer(string(kubeSystemUID), rootNamer)
	lpCfg := labels.PodLabelPropagationConfig{}
	gceCreator := multiprojectgce.NewGCEFake()
	pcForCloud := &providerconfig.ProviderConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "pc-err"},
		Spec: providerconfig.ProviderConfigSpec{
			ProjectID:     "test-project",
			ProjectNumber: 123,
			NetworkConfig: providerconfig.ProviderNetworkConfig{
				Network:    "net-1",
				SubnetInfo: providerconfig.ProviderConfigSubnetInfo{Subnetwork: "sub-1"},
			},
		},
	}
	cloud, err := gceCreator.GCEForProviderConfig(pcForCloud, logger)
	if err != nil {
		t.Fatalf("create fake cloud: %v", err)
	}

	// newNEGController remains the default (neg.NewController), which returns an
	// error when svcNegClient is nil.
	ch, err := StartNEGController(informers, kubeClient, kubeClient, nil /* svcneg */, networkclient.Interface(nil), nodetopologyclient.Interface(nil), kubeSystemUID, rootNamer, l4Namer, lpCfg, cloud, globalStop, logger, pc, syncMetrics.FakeSyncerMetrics())
	if err == nil {
		t.Fatalf("expected error from StartNEGController when svcNegClient is nil, got nil and channel=%v", ch)
	}
}
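
// For orientation, the shutdown usage implied by the tests above is sketched
// here. This is a hypothetical caller, not production code; the only assumption
// is the StartNEGController signature exercised in this file:
//
//	providerStop, err := StartNEGController(/* ... as in the tests above ... */)
//	if err != nil {
//		return err // presumably no controller goroutines were started
//	}
//	defer close(providerStop) // stops only this ProviderConfig's controller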