From 9d5673a5bcfd0e62892332d1355d9676623b0ba4 Mon Sep 17 00:00:00 2001 From: Khaja Omer Date: Fri, 25 Jul 2025 16:27:02 -0500 Subject: [PATCH 1/9] Make Loadbalancer struct public so other projects can import this implementation --- cloud/linode/cilium_loadbalancers.go | 26 ++++---- cloud/linode/cilium_loadbalancers_test.go | 26 ++++---- cloud/linode/cloud.go | 2 +- cloud/linode/loadbalancers.go | 54 ++++++++--------- cloud/linode/loadbalancers_test.go | 74 +++++++++++------------ cloud/linode/service_controller.go | 4 +- cloud/linode/service_controller_test.go | 8 +-- 7 files changed, 97 insertions(+), 97 deletions(-) diff --git a/cloud/linode/cilium_loadbalancers.go b/cloud/linode/cilium_loadbalancers.go index c22f7317..cd5b0690 100644 --- a/cloud/linode/cilium_loadbalancers.go +++ b/cloud/linode/cilium_loadbalancers.go @@ -75,7 +75,7 @@ var ( // getExistingSharedIPsInCluster determines the list of addresses to share on nodes by checking the // CiliumLoadBalancerIPPools created by the CCM in createCiliumLBIPPool // NOTE: Cilium CRDs must be installed for this to work -func (l *loadbalancers) getExistingSharedIPsInCluster(ctx context.Context) ([]string, error) { +func (l *Loadbalancers) getExistingSharedIPsInCluster(ctx context.Context) ([]string, error) { addrs := []string{} if err := l.retrieveCiliumClientset(); err != nil { return addrs, err @@ -94,7 +94,7 @@ func (l *loadbalancers) getExistingSharedIPsInCluster(ctx context.Context) ([]st return addrs, nil } -func (l *loadbalancers) getExistingSharedIPs(ctx context.Context, ipHolder *linodego.Instance) ([]string, error) { +func (l *Loadbalancers) getExistingSharedIPs(ctx context.Context, ipHolder *linodego.Instance) ([]string, error) { if ipHolder == nil { return nil, nil } @@ -110,7 +110,7 @@ func (l *loadbalancers) getExistingSharedIPs(ctx context.Context, ipHolder *lino } // shareIPs shares the given list of IP addresses on the given Node -func (l *loadbalancers) shareIPs(ctx context.Context, addrs []string, node *v1.Node) error { +func (l *Loadbalancers) shareIPs(ctx context.Context, addrs []string, node *v1.Node) error { nodeLinodeID, err := parseProviderID(node.Spec.ProviderID) if err != nil { return err @@ -151,7 +151,7 @@ func (l *loadbalancers) shareIPs(ctx context.Context, addrs []string, node *v1.N // perform IP sharing (via a specified node selector) have the expected IPs shared // in the event that a Node joins the cluster after the LoadBalancer Service already // exists -func (l *loadbalancers) handleIPSharing(ctx context.Context, node *v1.Node, ipHolderSuffix string) error { +func (l *Loadbalancers) handleIPSharing(ctx context.Context, node *v1.Node, ipHolderSuffix string) error { // ignore cases where the provider ID has been set if node.Spec.ProviderID == "" { klog.Info("skipping IP while providerID is unset") @@ -210,7 +210,7 @@ func (l *loadbalancers) handleIPSharing(ctx context.Context, node *v1.Node, ipHo // createSharedIP requests an additional IP that can be shared on Nodes to support // loadbalancing via Cilium LB IPAM + BGP Control Plane. 
-func (l *loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ipHolderSuffix string) (string, error) { +func (l *Loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ipHolderSuffix string) (string, error) { ipHolder, err := l.ensureIPHolder(ctx, ipHolderSuffix) if err != nil { return "", err @@ -267,7 +267,7 @@ func (l *loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ip // deleteSharedIP cleans up the shared IP for a LoadBalancer Service if it was assigned // by Cilium LB IPAM, removing it from the ip-holder -func (l *loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) error { +func (l *Loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) error { err := l.retrieveKubeClient() if err != nil { return err @@ -323,7 +323,7 @@ func (l *loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) // To hold the IP in lieu of a proper IP reservation system, a special Nanode is // created but not booted and used to hold all shared IPs. -func (l *loadbalancers) ensureIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) { +func (l *Loadbalancers) ensureIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) { ipHolder, err := l.getIPHolder(ctx, suffix) if err != nil { return nil, err @@ -353,7 +353,7 @@ func (l *loadbalancers) ensureIPHolder(ctx context.Context, suffix string) (*lin return ipHolder, nil } -func (l *loadbalancers) getIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) { +func (l *Loadbalancers) getIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) { // even though we have updated the naming convention, leaving this in ensures we have backwards compatibility filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, l.zone)} rawFilter, err := json.Marshal(filter) @@ -407,7 +407,7 @@ func generateClusterScopedIPHolderLinodeName(zone, suffix string) (label string) return label } -func (l *loadbalancers) retrieveCiliumClientset() error { +func (l *Loadbalancers) retrieveCiliumClientset() error { if l.ciliumClient != nil { return nil } @@ -432,7 +432,7 @@ func (l *loadbalancers) retrieveCiliumClientset() error { // for LoadBalancer Services not backed by a NodeBalancer, a CiliumLoadBalancerIPPool resource // will be created specifically for the Service with the requested shared IP // NOTE: Cilium CRDs must be installed for this to work -func (l *loadbalancers) createCiliumLBIPPool(ctx context.Context, service *v1.Service, sharedIP string) (*v2alpha1.CiliumLoadBalancerIPPool, error) { +func (l *Loadbalancers) createCiliumLBIPPool(ctx context.Context, service *v1.Service, sharedIP string) (*v2alpha1.CiliumLoadBalancerIPPool, error) { if err := l.retrieveCiliumClientset(); err != nil { return nil, err } @@ -459,7 +459,7 @@ func (l *loadbalancers) createCiliumLBIPPool(ctx context.Context, service *v1.Se } // NOTE: Cilium CRDs must be installed for this to work -func (l *loadbalancers) deleteCiliumLBIPPool(ctx context.Context, service *v1.Service) error { +func (l *Loadbalancers) deleteCiliumLBIPPool(ctx context.Context, service *v1.Service) error { if err := l.retrieveCiliumClientset(); err != nil { return err } @@ -472,7 +472,7 @@ func (l *loadbalancers) deleteCiliumLBIPPool(ctx context.Context, service *v1.Se } // NOTE: Cilium CRDs must be installed for this to work -func (l *loadbalancers) getCiliumLBIPPool(ctx context.Context, service *v1.Service) (*v2alpha1.CiliumLoadBalancerIPPool, 
error) { +func (l *Loadbalancers) getCiliumLBIPPool(ctx context.Context, service *v1.Service) (*v2alpha1.CiliumLoadBalancerIPPool, error) { if err := l.retrieveCiliumClientset(); err != nil { return nil, err } @@ -485,7 +485,7 @@ func (l *loadbalancers) getCiliumLBIPPool(ctx context.Context, service *v1.Servi } // NOTE: Cilium CRDs must be installed for this to work -func (l *loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error { +func (l *Loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error { if raw, ok := os.LookupEnv("BGP_CUSTOM_ID_MAP"); ok && raw != "" { klog.Info("BGP_CUSTOM_ID_MAP env variable specified, using it instead of the default region map") if err := json.Unmarshal([]byte(raw), ®ionIDMap); err != nil { diff --git a/cloud/linode/cilium_loadbalancers_test.go b/cloud/linode/cilium_loadbalancers_test.go index 2d3aa17b..6ada547b 100644 --- a/cloud/linode/cilium_loadbalancers_test.go +++ b/cloud/linode/cilium_loadbalancers_test.go @@ -222,7 +222,7 @@ func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) { ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -277,7 +277,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { kubeClient, _ := k8sClient.NewFakeClientset() ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) - lb := &loadbalancers{mc, "us-foobar", kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, "us-foobar", kubeClient, ciliumClient, ciliumLBType} lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err == nil { @@ -289,7 +289,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { // Use BGP custom id map t.Setenv("BGP_CUSTOM_ID_MAP", "{'us-foobar': 2}") - lb = &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb = &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} lbStatus, err = lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err == nil { t.Fatal("expected not nil error") @@ -310,7 +310,7 @@ func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -355,7 +355,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T, ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -400,7 +400,7 @@ func 
testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -445,7 +445,7 @@ func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockC ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -497,7 +497,7 @@ func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.Mo ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -549,7 +549,7 @@ func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.Moc ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -599,7 +599,7 @@ func testEnsureCiliumLoadBalancerDeletedWithOldIpHolderNamingConvention(t *testi ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} dummySharedIP := dummyIP svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}} @@ -632,7 +632,7 @@ func testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention(t *testi ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} dummySharedIP := dummyIP svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}} @@ -669,7 +669,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention(t *testi ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter 
:= map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -732,7 +732,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testi ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) diff --git a/cloud/linode/cloud.go b/cloud/linode/cloud.go index 8b1d67a4..0635ab7b 100644 --- a/cloud/linode/cloud.go +++ b/cloud/linode/cloud.go @@ -225,7 +225,7 @@ func (c *linodeCloud) Initialize(clientBuilder cloudprovider.ControllerClientBui go c.linodeTokenHealthChecker.Run(stopCh) } - lb, assertion := c.loadbalancers.(*loadbalancers) + lb, assertion := c.loadbalancers.(*Loadbalancers) if !assertion { klog.Error("type assertion during Initialize() failed") return diff --git a/cloud/linode/loadbalancers.go b/cloud/linode/loadbalancers.go index 1b8e9276..96404a06 100644 --- a/cloud/linode/loadbalancers.go +++ b/cloud/linode/loadbalancers.go @@ -100,7 +100,7 @@ func (e lbNotFoundError) Error() string { return fmt.Sprintf("LoadBalancer not found for service (%s)", e.serviceNn) } -type loadbalancers struct { +type Loadbalancers struct { client client.Client zone string kubeClient kubernetes.Interface @@ -129,10 +129,10 @@ type portConfig struct { // newLoadbalancers returns a cloudprovider.LoadBalancer whose concrete type is a *loadbalancer. func newLoadbalancers(client client.Client, zone string) cloudprovider.LoadBalancer { - return &loadbalancers{client: client, zone: zone, loadBalancerType: Options.LoadBalancerType} + return &Loadbalancers{client: client, zone: zone, loadBalancerType: Options.LoadBalancerType} } -func (l *loadbalancers) getNodeBalancerForService(ctx context.Context, service *v1.Service) (*linodego.NodeBalancer, error) { +func (l *Loadbalancers) getNodeBalancerForService(ctx context.Context, service *v1.Service) (*linodego.NodeBalancer, error) { rawID := service.GetAnnotations()[annotations.AnnLinodeNodeBalancerID] id, idErr := strconv.Atoi(rawID) hasIDAnn := idErr == nil && id != 0 @@ -144,7 +144,7 @@ func (l *loadbalancers) getNodeBalancerForService(ctx context.Context, service * return l.getNodeBalancerByStatus(ctx, service) } -func (l *loadbalancers) getLatestServiceLoadBalancerStatus(ctx context.Context, service *v1.Service) (v1.LoadBalancerStatus, error) { +func (l *Loadbalancers) getLatestServiceLoadBalancerStatus(ctx context.Context, service *v1.Service) (v1.LoadBalancerStatus, error) { err := l.retrieveKubeClient() if err != nil { return v1.LoadBalancerStatus{}, err @@ -159,7 +159,7 @@ func (l *loadbalancers) getLatestServiceLoadBalancerStatus(ctx context.Context, // getNodeBalancerByStatus attempts to get the NodeBalancer from the IP or hostname specified in the // most recent LoadBalancer status. 
-func (l *loadbalancers) getNodeBalancerByStatus(ctx context.Context, service *v1.Service) (nb *linodego.NodeBalancer, err error) { +func (l *Loadbalancers) getNodeBalancerByStatus(ctx context.Context, service *v1.Service) (nb *linodego.NodeBalancer, err error) { lb := service.Status.LoadBalancer updatedLb, err := l.getLatestServiceLoadBalancerStatus(ctx, service) if err != nil { @@ -188,7 +188,7 @@ func (l *loadbalancers) getNodeBalancerByStatus(ctx context.Context, service *v1 // The current NodeBalancer from getNodeBalancerForService is compared to the most recent // LoadBalancer status; if they are different (because of an updated NodeBalancerID // annotation), the old one is deleted. -func (l *loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1.Service) error { +func (l *Loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1.Service) error { // unless there's an annotation, we can never get a past and current NB to differ, // because they're looked up the same way if _, ok := service.GetAnnotations()[annotations.AnnLinodeNodeBalancerID]; !ok { @@ -225,7 +225,7 @@ func (l *loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1. // GetLoadBalancerName returns the name of the load balancer. // // GetLoadBalancer will not modify service. -func (l *loadbalancers) GetLoadBalancerName(_ context.Context, _ string, _ *v1.Service) string { +func (l *Loadbalancers) GetLoadBalancerName(_ context.Context, _ string, _ *v1.Service) string { unixNano := strconv.FormatInt(time.Now().UnixNano(), 16) return fmt.Sprintf("%s-%s", Options.NodeBalancerPrefix, unixNano[len(unixNano)-12:]) } @@ -233,7 +233,7 @@ func (l *loadbalancers) GetLoadBalancerName(_ context.Context, _ string, _ *v1.S // GetLoadBalancer returns the *v1.LoadBalancerStatus of service. // // GetLoadBalancer will not modify service. -func (l *loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) { +func (l *Loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) { ctx = sentry.SetHubOnContext(ctx) sentry.SetTag(ctx, "cluster_name", clusterName) sentry.SetTag(ctx, "service", service.Name) @@ -263,7 +263,7 @@ func (l *loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string, // service. // // EnsureLoadBalancer will not modify service or nodes. 
-func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (lbStatus *v1.LoadBalancerStatus, err error) { +func (l *Loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (lbStatus *v1.LoadBalancerStatus, err error) { ctx = sentry.SetHubOnContext(ctx) sentry.SetTag(ctx, "cluster_name", clusterName) sentry.SetTag(ctx, "service", service.Name) @@ -361,7 +361,7 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri return lbStatus, nil } -func (l *loadbalancers) updateNodeBalancer( +func (l *Loadbalancers) updateNodeBalancer( ctx context.Context, clusterName string, service *v1.Service, @@ -520,7 +520,7 @@ func (l *loadbalancers) updateNodeBalancer( } // UpdateLoadBalancer updates the NodeBalancer to have configs that match the Service's ports -func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (err error) { +func (l *Loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (err error) { ctx = sentry.SetHubOnContext(ctx) sentry.SetTag(ctx, "cluster_name", clusterName) sentry.SetTag(ctx, "service", service.Name) @@ -570,7 +570,7 @@ func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri // Delete any NodeBalancer configs for ports that no longer exist on the Service // Note: Don't build a map or other lookup structure here, it is not worth the overhead -func (l *loadbalancers) deleteUnusedConfigs(ctx context.Context, nbConfigs []linodego.NodeBalancerConfig, servicePorts []v1.ServicePort) error { +func (l *Loadbalancers) deleteUnusedConfigs(ctx context.Context, nbConfigs []linodego.NodeBalancerConfig, servicePorts []v1.ServicePort) error { for _, nbc := range nbConfigs { found := false for _, sp := range servicePorts { @@ -589,7 +589,7 @@ func (l *loadbalancers) deleteUnusedConfigs(ctx context.Context, nbConfigs []lin // shouldPreserveNodeBalancer determines whether a NodeBalancer should be deleted based on the // service's preserve annotation. -func (l *loadbalancers) shouldPreserveNodeBalancer(service *v1.Service) bool { +func (l *Loadbalancers) shouldPreserveNodeBalancer(service *v1.Service) bool { return getServiceBoolAnnotation(service, annotations.AnnLinodeLoadBalancerPreserve) } @@ -598,7 +598,7 @@ func (l *loadbalancers) shouldPreserveNodeBalancer(service *v1.Service) bool { // successfully deleted. // // EnsureLoadBalancerDeleted will not modify service. 
-func (l *loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { +func (l *Loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { ctx = sentry.SetHubOnContext(ctx) sentry.SetTag(ctx, "cluster_name", clusterName) sentry.SetTag(ctx, "service", service.Name) @@ -665,7 +665,7 @@ func (l *loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa return nil } -func (l *loadbalancers) getNodeBalancerByHostname(ctx context.Context, service *v1.Service, hostname string) (*linodego.NodeBalancer, error) { +func (l *Loadbalancers) getNodeBalancerByHostname(ctx context.Context, service *v1.Service, hostname string) (*linodego.NodeBalancer, error) { lbs, err := l.client.ListNodeBalancers(ctx, nil) if err != nil { return nil, err @@ -679,7 +679,7 @@ func (l *loadbalancers) getNodeBalancerByHostname(ctx context.Context, service * return nil, lbNotFoundError{serviceNn: getServiceNn(service)} } -func (l *loadbalancers) getNodeBalancerByIP(ctx context.Context, service *v1.Service, ip netip.Addr) (*linodego.NodeBalancer, error) { +func (l *Loadbalancers) getNodeBalancerByIP(ctx context.Context, service *v1.Service, ip netip.Addr) (*linodego.NodeBalancer, error) { var filter string if ip.Is6() { filter = fmt.Sprintf(`{"ipv6": "%v"}`, ip.String()) @@ -698,7 +698,7 @@ func (l *loadbalancers) getNodeBalancerByIP(ctx context.Context, service *v1.Ser return &lbs[0], nil } -func (l *loadbalancers) getNodeBalancerByID(ctx context.Context, service *v1.Service, id int) (*linodego.NodeBalancer, error) { +func (l *Loadbalancers) getNodeBalancerByID(ctx context.Context, service *v1.Service, id int) (*linodego.NodeBalancer, error) { nb, err := l.client.GetNodeBalancer(ctx, id) if err != nil { var targetError *linodego.Error @@ -710,7 +710,7 @@ func (l *loadbalancers) getNodeBalancerByID(ctx context.Context, service *v1.Ser return nb, nil } -func (l *loadbalancers) GetLoadBalancerTags(_ context.Context, clusterName string, service *v1.Service) []string { +func (l *Loadbalancers) GetLoadBalancerTags(_ context.Context, clusterName string, service *v1.Service) []string { tags := []string{} if clusterName != "" { tags = append(tags, clusterName) @@ -727,7 +727,7 @@ func (l *loadbalancers) GetLoadBalancerTags(_ context.Context, clusterName strin } // GetLinodeNBType returns the NodeBalancer type for the service. -func (l *loadbalancers) GetLinodeNBType(service *v1.Service) linodego.NodeBalancerPlanType { +func (l *Loadbalancers) GetLinodeNBType(service *v1.Service) linodego.NodeBalancerPlanType { typeStr, ok := service.GetAnnotations()[annotations.AnnLinodeNodeBalancerType] if ok && linodego.NodeBalancerPlanType(typeStr) == linodego.NBTypePremium { return linodego.NBTypePremium @@ -743,7 +743,7 @@ func (l *loadbalancers) GetLinodeNBType(service *v1.Service) linodego.NodeBalanc // 3. NodeBalancerBackendIPv4SubnetID/NodeBalancerBackendIPv4SubnetName flag // 4. NodeBalancerBackendIPv4Subnet flag // 5. 
Default to using the subnet ID of the service's VPC -func (l *loadbalancers) getVPCCreateOptions(ctx context.Context, service *v1.Service) ([]linodego.NodeBalancerVPCOptions, error) { +func (l *Loadbalancers) getVPCCreateOptions(ctx context.Context, service *v1.Service) ([]linodego.NodeBalancerVPCOptions, error) { // Evaluate subnetID based on annotations or flags subnetID, err := l.getSubnetIDForSVC(ctx, service) if err != nil { @@ -816,7 +816,7 @@ func (l *loadbalancers) getVPCCreateOptions(ctx context.Context, service *v1.Ser return vpcCreateOpts, nil } -func (l *loadbalancers) createNodeBalancer(ctx context.Context, clusterName string, service *v1.Service, configs []*linodego.NodeBalancerConfigCreateOptions) (lb *linodego.NodeBalancer, err error) { +func (l *Loadbalancers) createNodeBalancer(ctx context.Context, clusterName string, service *v1.Service, configs []*linodego.NodeBalancerConfigCreateOptions) (lb *linodego.NodeBalancer, err error) { connThrottle := getConnectionThrottle(service) label := l.GetLoadBalancerName(ctx, clusterName, service) @@ -866,7 +866,7 @@ func (l *loadbalancers) createNodeBalancer(ctx context.Context, clusterName stri return l.client.CreateNodeBalancer(ctx, createOpts) } -func (l *loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1.Service, port v1.ServicePort) (linodego.NodeBalancerConfig, error) { +func (l *Loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1.Service, port v1.ServicePort) (linodego.NodeBalancerConfig, error) { portConfigResult, err := getPortConfig(service, port) if err != nil { return linodego.NodeBalancerConfig{}, err @@ -951,7 +951,7 @@ func (l *loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1 return config, nil } -func (l *loadbalancers) addTLSCert(ctx context.Context, service *v1.Service, nbConfig *linodego.NodeBalancerConfig, config portConfig) error { +func (l *Loadbalancers) addTLSCert(ctx context.Context, service *v1.Service, nbConfig *linodego.NodeBalancerConfig, config portConfig) error { err := l.retrieveKubeClient() if err != nil { return err @@ -970,7 +970,7 @@ func (l *loadbalancers) addTLSCert(ctx context.Context, service *v1.Service, nbC // 2. If the service has annotations specifying VPCName or SubnetName, use them. // 3. If CCM is configured with --nodebalancer-backend-ipv4-subnet-id, it will be used as the subnet ID. // 4. Else, use first VPCName and SubnetName to calculate subnet id for the service. -func (l *loadbalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Service) (int, error) { +func (l *Loadbalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Service) (int, error) { if len(Options.VPCNames) == 0 { return 0, fmt.Errorf("CCM not configured with VPC, cannot create NodeBalancer with specified annotation") } @@ -1012,7 +1012,7 @@ func (l *loadbalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Servi // buildLoadBalancerRequest returns a linodego.NodeBalancer // requests for service across nodes. 
-func (l *loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*linodego.NodeBalancer, error) { +func (l *Loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*linodego.NodeBalancer, error) { if len(nodes) == 0 { return nil, fmt.Errorf("%w: cluster %s, service %s", errNoNodesAvailable, clusterName, getServiceNn(service)) } @@ -1075,7 +1075,7 @@ func coerceString(str string, minLen, maxLen int, padding string) string { return str } -func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, nodePort int32, subnetID int, protocol linodego.ConfigProtocol) (*linodego.NodeBalancerConfigRebuildNodeOptions, error) { +func (l *Loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, nodePort int32, subnetID int, protocol linodego.ConfigProtocol) (*linodego.NodeBalancerConfigRebuildNodeOptions, error) { nodeIP, err := getNodePrivateIP(node, subnetID) if err != nil { return nil, fmt.Errorf("node %s does not have a private IP address: %w", node.Name, err) @@ -1099,7 +1099,7 @@ func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, return nodeOptions, nil } -func (l *loadbalancers) retrieveKubeClient() error { +func (l *Loadbalancers) retrieveKubeClient() error { if l.kubeClient != nil { return nil } diff --git a/cloud/linode/loadbalancers_test.go b/cloud/linode/loadbalancers_test.go index 848c4d12..9e4c4d10 100644 --- a/cloud/linode/loadbalancers_test.go +++ b/cloud/linode/loadbalancers_test.go @@ -370,7 +370,7 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a for key, value := range annMap { svc.Annotations[key] = value } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -484,7 +484,7 @@ func testCreateNodeBalancerWithNodeNoAddresses(t *testing.T, client *linodego.Cl }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -716,7 +716,7 @@ func testUpdateNodeBalancerWithVPCBackend(t *testing.T, client *linodego.Client, }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -803,7 +803,7 @@ func testCreateNodeBalancerWithVPCOnlySubnetFlag(t *testing.T, client *linodego. }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -896,7 +896,7 @@ func testCreateNodeBalancerWithVPCNoFlagOrAnnotation(t *testing.T, client *linod }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -985,7 +985,7 @@ func testCreateNodeBalancerWithVPCAnnotationOnly(t *testing.T, client *linodego. 
}, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1070,7 +1070,7 @@ func testCreateNodeBalancerWithVPCOnlySubnetIDFlag(t *testing.T, client *linodeg }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1216,7 +1216,7 @@ func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fak }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1381,7 +1381,7 @@ func testUpdateLoadBalancerAddAnnotation(t *testing.T, client *linodego.Client, }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1456,7 +1456,7 @@ func testUpdateLoadBalancerAddPortAnnotation(t *testing.T, client *linodego.Clie }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1569,7 +1569,7 @@ func testVeryLongServiceName(t *testing.T, client *linodego.Client, _ *fakeAPI) }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1637,7 +1637,7 @@ func testUpdateLoadBalancerAddTags(t *testing.T, client *linodego.Client, _ *fak }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1722,7 +1722,7 @@ func testUpdateLoadBalancerAddTLSPort(t *testing.T, client *linodego.Client, _ * NodePort: int32(30001), } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1803,7 +1803,7 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1931,7 +1931,7 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client, }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -2032,7 +2032,7 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -2164,7 +2164,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodeg }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ 
-2263,7 +2263,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -2403,7 +2403,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -2550,7 +2550,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -2786,7 +2786,7 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -2919,7 +2919,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveID(t *testing.T, client *linodego }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -3020,7 +3020,7 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -3990,7 +3990,7 @@ func testBuildLoadBalancerRequest(t *testing.T, client *linodego.Client, _ *fake }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4042,7 +4042,7 @@ func testEnsureLoadBalancerPreserveAnnotation(t *testing.T, client *linodego.Cli }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4159,7 +4159,7 @@ func testEnsureLoadBalancerDeleted(t *testing.T, client *linodego.Client, fake * }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4212,7 +4212,7 @@ func testEnsureExistingLoadBalancer(t *testing.T, client *linodego.Client, _ *fa }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4467,7 +4467,7 @@ func testMakeLoadBalancerStatusEnvVar(t *testing.T, client *linodego.Client, _ * os.Unsetenv("LINODE_HOSTNAME_ONLY_INGRESS") } -func getLatestNbNodesForService(t *testing.T, client *linodego.Client, svc *v1.Service, lb *loadbalancers) []linodego.NodeBalancerNode { +func getLatestNbNodesForService(t *testing.T, client *linodego.Client, svc *v1.Service, lb *Loadbalancers) []linodego.NodeBalancerNode { t.Helper() nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { @@ -4511,7 +4511,7 
@@ func testCleanupDoesntCall(t *testing.T, client *linodego.Client, fakeAPI *fakeA } svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nb1) svcAnn.Status.LoadBalancer = *makeLoadBalancerStatus(svcAnn, nb1) - lb, assertion := newLoadbalancers(client, region).(*loadbalancers) + lb, assertion := newLoadbalancers(client, region).(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4562,7 +4562,7 @@ func testUpdateLoadBalancerNodeExcludedByAnnotation(t *testing.T, client *linode }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4805,7 +4805,7 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4843,7 +4843,7 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak func testGetNodeBalancerByStatus(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4908,7 +4908,7 @@ func testGetNodeBalancerByStatus(t *testing.T, client *linodego.Client, _ *fakeA func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4952,7 +4952,7 @@ func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego. 
func testEnsureNewLoadBalancerWithNodeBalancerID(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -5051,7 +5051,7 @@ func testEnsureNewLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI }, }, } - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -5069,7 +5069,7 @@ func testEnsureNewLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI func testGetLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -5492,7 +5492,7 @@ func Test_loadbalancers_GetLinodeNBType(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - l := &loadbalancers{ + l := &Loadbalancers{ client: tt.fields.client, zone: tt.fields.zone, kubeClient: tt.fields.kubeClient, diff --git a/cloud/linode/service_controller.go b/cloud/linode/service_controller.go index ef909428..15aa1bbc 100644 --- a/cloud/linode/service_controller.go +++ b/cloud/linode/service_controller.go @@ -19,13 +19,13 @@ import ( var retryInterval = time.Minute * 1 type serviceController struct { - loadbalancers *loadbalancers + loadbalancers *Loadbalancers informer v1informers.ServiceInformer queue workqueue.TypedDelayingInterface[any] } -func newServiceController(loadbalancers *loadbalancers, informer v1informers.ServiceInformer) *serviceController { +func newServiceController(loadbalancers *Loadbalancers, informer v1informers.ServiceInformer) *serviceController { return &serviceController{ loadbalancers: loadbalancers, informer: informer, diff --git a/cloud/linode/service_controller_test.go b/cloud/linode/service_controller_test.go index bcb8c340..fe424f0d 100644 --- a/cloud/linode/service_controller_test.go +++ b/cloud/linode/service_controller_test.go @@ -24,7 +24,7 @@ func Test_serviceController_Run(t *testing.T) { informer := informers.NewSharedInformerFactory(kubeClient, 0).Core().V1().Services() mockQueue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "test"}) - loadbalancers, assertion := newLoadbalancers(client, "us-east").(*loadbalancers) + loadbalancers, assertion := newLoadbalancers(client, "us-east").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -53,7 +53,7 @@ func Test_serviceController_Run(t *testing.T) { func Test_serviceController_processNextDeletion(t *testing.T) { type fields struct { - loadbalancers *loadbalancers + loadbalancers *Loadbalancers queue workqueue.TypedDelayingInterface[any] Client *mocks.MockClient } @@ -70,7 +70,7 @@ func Test_serviceController_processNextDeletion(t *testing.T) { loadbalancers: nil, }, Setup: func(f *fields) { - f.loadbalancers = &loadbalancers{client: f.Client, zone: "test", loadBalancerType: Options.LoadBalancerType} + f.loadbalancers = &Loadbalancers{client: f.Client, zone: "test", loadBalancerType: Options.LoadBalancerType} f.queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) f.queue.Add("test") }, @@ -83,7 +83,7 @@ func 
Test_serviceController_processNextDeletion(t *testing.T) { loadbalancers: nil, }, Setup: func(f *fields) { - f.loadbalancers = &loadbalancers{client: f.Client, zone: "test", loadBalancerType: Options.LoadBalancerType} + f.loadbalancers = &Loadbalancers{client: f.Client, zone: "test", loadBalancerType: Options.LoadBalancerType} f.queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) svc := createTestService() f.queue.Add(svc) From d5eaf7792626f973e568f786fb758ec0e255299c Mon Sep 17 00:00:00 2001 From: Khaja Omer Date: Fri, 25 Jul 2025 16:37:17 -0500 Subject: [PATCH 2/9] Make NewLoadbalancers() public for export --- cloud/linode/cloud.go | 2 +- cloud/linode/cloud_test.go | 16 +++--- cloud/linode/loadbalancers.go | 4 +- cloud/linode/loadbalancers_test.go | 70 ++++++++++++------------- cloud/linode/service_controller_test.go | 2 +- 5 files changed, 47 insertions(+), 47 deletions(-) diff --git a/cloud/linode/cloud.go b/cloud/linode/cloud.go index 0635ab7b..4e032556 100644 --- a/cloud/linode/cloud.go +++ b/cloud/linode/cloud.go @@ -204,7 +204,7 @@ func newCloud() (cloudprovider.Interface, error) { lcloud := &linodeCloud{ client: linodeClient, instances: instanceCache, - loadbalancers: newLoadbalancers(linodeClient, region), + loadbalancers: NewLoadbalancers(linodeClient, region), routes: routes, linodeTokenHealthChecker: healthChecker, } diff --git a/cloud/linode/cloud_test.go b/cloud/linode/cloud_test.go index b335b939..a23df57a 100644 --- a/cloud/linode/cloud_test.go +++ b/cloud/linode/cloud_test.go @@ -182,10 +182,10 @@ func Test_linodeCloud_LoadBalancer(t *testing.T) { fields: fields{ client: client, instances: newInstances(client), - loadbalancers: newLoadbalancers(client, "us-east"), + loadbalancers: NewLoadbalancers(client, "us-east"), routes: nil, }, - want: newLoadbalancers(client, "us-east"), + want: NewLoadbalancers(client, "us-east"), want1: true, }, } @@ -229,7 +229,7 @@ func Test_linodeCloud_InstancesV2(t *testing.T) { fields: fields{ client: client, instances: newInstances(client), - loadbalancers: newLoadbalancers(client, "us-east"), + loadbalancers: NewLoadbalancers(client, "us-east"), routes: nil, }, want: newInstances(client), @@ -276,7 +276,7 @@ func Test_linodeCloud_Instances(t *testing.T) { fields: fields{ client: client, instances: newInstances(client), - loadbalancers: newLoadbalancers(client, "us-east"), + loadbalancers: NewLoadbalancers(client, "us-east"), routes: nil, }, want: nil, @@ -323,7 +323,7 @@ func Test_linodeCloud_Zones(t *testing.T) { fields: fields{ client: client, instances: newInstances(client), - loadbalancers: newLoadbalancers(client, "us-east"), + loadbalancers: NewLoadbalancers(client, "us-east"), routes: nil, }, want: nil, @@ -370,7 +370,7 @@ func Test_linodeCloud_Clusters(t *testing.T) { fields: fields{ client: client, instances: newInstances(client), - loadbalancers: newLoadbalancers(client, "us-east"), + loadbalancers: NewLoadbalancers(client, "us-east"), routes: nil, }, want: nil, @@ -419,7 +419,7 @@ func Test_linodeCloud_Routes(t *testing.T) { fields: fields{ client: client, instances: newInstances(client), - loadbalancers: newLoadbalancers(client, "us-east"), + loadbalancers: NewLoadbalancers(client, "us-east"), routes: r, EnableRouteController: false, }, @@ -431,7 +431,7 @@ func Test_linodeCloud_Routes(t *testing.T) { fields: fields{ client: client, instances: newInstances(client), - loadbalancers: newLoadbalancers(client, "us-east"), + loadbalancers: NewLoadbalancers(client, "us-east"), 
routes: r, EnableRouteController: true, }, diff --git a/cloud/linode/loadbalancers.go index 96404a06..b05b2a31 100644 --- a/cloud/linode/loadbalancers.go +++ b/cloud/linode/loadbalancers.go @@ -127,8 +127,8 @@ type portConfig struct { UDPCheckPort int } -// newLoadbalancers returns a cloudprovider.LoadBalancer whose concrete type is a *loadbalancer. -func newLoadbalancers(client client.Client, zone string) cloudprovider.LoadBalancer { +// NewLoadbalancers returns a cloudprovider.LoadBalancer whose concrete type is a *Loadbalancers. +func NewLoadbalancers(client client.Client, zone string) cloudprovider.LoadBalancer { return &Loadbalancers{client: client, zone: zone, loadBalancerType: Options.LoadBalancerType} } diff --git a/cloud/linode/loadbalancers_test.go index 9e4c4d10..2b76d683 100644 --- a/cloud/linode/loadbalancers_test.go +++ b/cloud/linode/loadbalancers_test.go @@ -370,7 +370,7 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a for key, value := range annMap { svc.Annotations[key] = value } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -484,7 +484,7 @@ func testCreateNodeBalancerWithNodeNoAddresses(t *testing.T, client *linodego.Cl }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -716,7 +716,7 @@ func testUpdateNodeBalancerWithVPCBackend(t *testing.T, client *linodego.Client, }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -803,7 +803,7 @@ func testCreateNodeBalancerWithVPCOnlySubnetFlag(t *testing.T, client *linodego. }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -896,7 +896,7 @@ func testCreateNodeBalancerWithVPCNoFlagOrAnnotation(t *testing.T, client *linod }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -985,7 +985,7 @@ func testCreateNodeBalancerWithVPCAnnotationOnly(t *testing.T, client *linodego.
}, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1070,7 +1070,7 @@ func testCreateNodeBalancerWithVPCOnlySubnetIDFlag(t *testing.T, client *linodeg }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1216,7 +1216,7 @@ func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fak }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1381,7 +1381,7 @@ func testUpdateLoadBalancerAddAnnotation(t *testing.T, client *linodego.Client, }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1456,7 +1456,7 @@ func testUpdateLoadBalancerAddPortAnnotation(t *testing.T, client *linodego.Clie }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1569,7 +1569,7 @@ func testVeryLongServiceName(t *testing.T, client *linodego.Client, _ *fakeAPI) }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1637,7 +1637,7 @@ func testUpdateLoadBalancerAddTags(t *testing.T, client *linodego.Client, _ *fak }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1722,7 +1722,7 @@ func testUpdateLoadBalancerAddTLSPort(t *testing.T, client *linodego.Client, _ * NodePort: int32(30001), } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1803,7 +1803,7 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -1931,7 +1931,7 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client, }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -2032,7 +2032,7 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -2164,7 +2164,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodeg }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ 
-2263,7 +2263,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -2403,7 +2403,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -2550,7 +2550,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -2786,7 +2786,7 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -2919,7 +2919,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveID(t *testing.T, client *linodego }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -3020,7 +3020,7 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -3990,7 +3990,7 @@ func testBuildLoadBalancerRequest(t *testing.T, client *linodego.Client, _ *fake }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4042,7 +4042,7 @@ func testEnsureLoadBalancerPreserveAnnotation(t *testing.T, client *linodego.Cli }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4159,7 +4159,7 @@ func testEnsureLoadBalancerDeleted(t *testing.T, client *linodego.Client, fake * }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4212,7 +4212,7 @@ func testEnsureExistingLoadBalancer(t *testing.T, client *linodego.Client, _ *fa }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4511,7 +4511,7 @@ func testCleanupDoesntCall(t *testing.T, client *linodego.Client, fakeAPI *fakeA } svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nb1) svcAnn.Status.LoadBalancer = *makeLoadBalancerStatus(svcAnn, nb1) - lb, assertion := newLoadbalancers(client, region).(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, region).(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4562,7 +4562,7 @@ func testUpdateLoadBalancerNodeExcludedByAnnotation(t *testing.T, client *linode }, } 
- lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4805,7 +4805,7 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4843,7 +4843,7 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak func testGetNodeBalancerByStatus(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4908,7 +4908,7 @@ func testGetNodeBalancerByStatus(t *testing.T, client *linodego.Client, _ *fakeA func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -4952,7 +4952,7 @@ func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego. func testEnsureNewLoadBalancerWithNodeBalancerID(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -5051,7 +5051,7 @@ func testEnsureNewLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI }, }, } - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } @@ -5069,7 +5069,7 @@ func testEnsureNewLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI func testGetLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := newLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } diff --git a/cloud/linode/service_controller_test.go b/cloud/linode/service_controller_test.go index fe424f0d..cc3308ed 100644 --- a/cloud/linode/service_controller_test.go +++ b/cloud/linode/service_controller_test.go @@ -24,7 +24,7 @@ func Test_serviceController_Run(t *testing.T) { informer := informers.NewSharedInformerFactory(kubeClient, 0).Core().V1().Services() mockQueue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "test"}) - loadbalancers, assertion := newLoadbalancers(client, "us-east").(*Loadbalancers) + loadbalancers, assertion := NewLoadbalancers(client, "us-east").(*Loadbalancers) if !assertion { t.Error("type assertion failed") } From c063d396be9cb9d76ad08adb2e7dea82bccf6d7f Mon Sep 17 00:00:00 2001 From: Khaja Omer Date: Sat, 26 Jul 2025 12:44:33 -0500 Subject: [PATCH 3/9] [feat] : refactor Loadbalancers to use OptionsConfig for better configuration management --- cloud/linode/cilium_loadbalancers.go | 22 ++--- cloud/linode/cilium_loadbalancers_test.go | 26 +++--- cloud/linode/cloud.go | 11 ++- cloud/linode/loadbalancers.go | 99 ++++++++++++++--------- 
cloud/linode/loadbalancers_test.go | 85 ++++++++++++------- main.go | 1 + 6 files changed, 147 insertions(+), 97 deletions(-) diff --git a/cloud/linode/cilium_loadbalancers.go b/cloud/linode/cilium_loadbalancers.go index cd5b0690..de3730fa 100644 --- a/cloud/linode/cilium_loadbalancers.go +++ b/cloud/linode/cilium_loadbalancers.go @@ -159,8 +159,8 @@ func (l *Loadbalancers) handleIPSharing(ctx context.Context, node *v1.Node, ipHo } // If performing Service load-balancing via IP sharing + BGP, check for a special annotation // added by the CCM gets set when load-balancer IPs have been successfully shared on the node - if Options.BGPNodeSelector != "" { - kv := strings.Split(Options.BGPNodeSelector, "=") + if l.options.BGPNodeSelector != "" { + kv := strings.Split(l.options.BGPNodeSelector, "=") // Check if node should be participating in IP sharing via the given selector if val, ok := node.Labels[kv[0]]; !ok || len(kv) != 2 || val != kv[1] { // not a selected Node @@ -243,7 +243,7 @@ func (l *Loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ip } // share the IPs with nodes participating in Cilium BGP peering - if Options.BGPNodeSelector == "" { + if l.options.BGPNodeSelector == "" { for _, node := range nodes { if _, ok := node.Labels[commonControlPlaneLabel]; !ok { if err = l.shareIPs(ctx, addrs, node); err != nil { @@ -252,7 +252,7 @@ func (l *Loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ip } } } else { - kv := strings.Split(Options.BGPNodeSelector, "=") + kv := strings.Split(l.options.BGPNodeSelector, "=") for _, node := range nodes { if val, ok := node.Labels[kv[0]]; ok && len(kv) == 2 && val == kv[1] { if err = l.shareIPs(ctx, addrs, node); err != nil { @@ -273,7 +273,7 @@ func (l *Loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) return err } nodeList, err := l.kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{ - LabelSelector: Options.BGPNodeSelector, + LabelSelector: l.options.BGPNodeSelector, }) if err != nil { return err @@ -282,8 +282,8 @@ func (l *Loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) serviceNn := getServiceNn(service) var ipHolderSuffix string - if Options.IpHolderSuffix != "" { - ipHolderSuffix = Options.IpHolderSuffix + if l.options.IpHolderSuffix != "" { + ipHolderSuffix = l.options.IpHolderSuffix klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn) } @@ -415,7 +415,7 @@ func (l *Loadbalancers) retrieveCiliumClientset() error { kubeConfig *rest.Config err error ) - kubeconfigFlag := Options.KubeconfigFlag + kubeconfigFlag := l.options.KubeconfigFlag if kubeconfigFlag == nil || kubeconfigFlag.Value.String() == "" { kubeConfig, err = rest.InClusterConfig() } else { @@ -513,7 +513,7 @@ func (l *Loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error // otherwise create it var nodeSelector slimv1.LabelSelector // If no BGPNodeSelector is specified, select all worker nodes. 
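// Condensed sketch of the selector contract these hunks thread through
// l.options: an empty BGPNodeSelector selects every worker node, while a
// non-empty one must be a single "key=value" pair matched against node
// labels. The helper name is hypothetical; commonControlPlaneLabel and the
// option field come from this file.
func nodeParticipatesInIPSharing(opts *OptionsConfig, node *v1.Node) bool {
	if opts.BGPNodeSelector == "" {
		_, isControlPlane := node.Labels[commonControlPlaneLabel]
		return !isControlPlane
	}
	kv := strings.Split(opts.BGPNodeSelector, "=")
	return len(kv) == 2 && node.Labels[kv[0]] == kv[1]
}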
- if Options.BGPNodeSelector == "" { + if l.options.BGPNodeSelector == "" { nodeSelector = slimv1.LabelSelector{ MatchExpressions: []slimv1.LabelSelectorRequirement{ { @@ -523,9 +523,9 @@ func (l *Loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error }, } } else { - kv := strings.Split(Options.BGPNodeSelector, "=") + kv := strings.Split(l.options.BGPNodeSelector, "=") if len(kv) != BGPNodeSelectorFlagInputLen { - return fmt.Errorf("invalid node selector %s", Options.BGPNodeSelector) + return fmt.Errorf("invalid node selector %s", l.options.BGPNodeSelector) } nodeSelector = slimv1.LabelSelector{MatchLabels: map[string]string{kv[0]: kv[1]}} diff --git a/cloud/linode/cilium_loadbalancers_test.go b/cloud/linode/cilium_loadbalancers_test.go index 6ada547b..f2b11ae0 100644 --- a/cloud/linode/cilium_loadbalancers_test.go +++ b/cloud/linode/cilium_loadbalancers_test.go @@ -222,7 +222,7 @@ func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) { ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -277,7 +277,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { kubeClient, _ := k8sClient.NewFakeClientset() ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) - lb := &Loadbalancers{mc, "us-foobar", kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, "us-foobar", kubeClient, ciliumClient, ciliumLBType, &Options} lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err == nil { @@ -289,7 +289,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { // Use BGP custom id map t.Setenv("BGP_CUSTOM_ID_MAP", "{'us-foobar': 2}") - lb = &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb = &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} lbStatus, err = lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err == nil { t.Fatal("expected not nil error") @@ -310,7 +310,7 @@ func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -355,7 +355,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T, ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -400,7 +400,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi ciliumClient 
:= &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -445,7 +445,7 @@ func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockC ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -497,7 +497,7 @@ func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.Mo ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -549,7 +549,7 @@ func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.Moc ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -599,7 +599,7 @@ func testEnsureCiliumLoadBalancerDeletedWithOldIpHolderNamingConvention(t *testi ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} dummySharedIP := dummyIP svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}} @@ -632,7 +632,7 @@ func testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention(t *testi ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} dummySharedIP := dummyIP svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}} @@ -669,7 +669,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention(t *testi ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := 
map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -732,7 +732,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testi ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) diff --git a/cloud/linode/cloud.go b/cloud/linode/cloud.go index 4e032556..ef9c2b62 100644 --- a/cloud/linode/cloud.go +++ b/cloud/linode/cloud.go @@ -31,10 +31,8 @@ const ( var supportedLoadBalancerTypes = []string{ciliumLBType, nodeBalancerLBType} -// Options is a configuration object for this cloudprovider implementation. -// We expect it to be initialized with flags external to this package, likely in -// main.go -var Options struct { +// OptionsConfig defines the configuration structure for cloud controller options +type OptionsConfig struct { KubeconfigFlag *pflag.Flag LinodeGoDebug bool EnableRouteController bool @@ -61,8 +59,13 @@ var Options struct { NodeCIDRMaskSizeIPv4 int NodeCIDRMaskSizeIPv6 int NodeBalancerPrefix string + AllowEmptyNodeBalancerBackends bool } +// Options is the global configuration instance used by the CCM. +// We expect it to be initialized with flags external to this package, likely in main.go +var Options OptionsConfig + type linodeCloud struct { client client.Client instances cloudprovider.InstancesV2 diff --git a/cloud/linode/loadbalancers.go b/cloud/linode/loadbalancers.go index b05b2a31..396e84bc 100644 --- a/cloud/linode/loadbalancers.go +++ b/cloud/linode/loadbalancers.go @@ -106,6 +106,7 @@ type Loadbalancers struct { kubeClient kubernetes.Interface ciliumClient ciliumclient.CiliumV2alpha1Interface loadBalancerType string + options *OptionsConfig } type portConfigAnnotation struct { @@ -127,9 +128,20 @@ type portConfig struct { UDPCheckPort int } +// NewLoadbalancersWithOptions returns a cloudprovider.LoadBalancer with custom options +func NewLoadbalancersWithOptions(client client.Client, zone string, options *OptionsConfig) cloudprovider.LoadBalancer { + return &Loadbalancers{ + client: client, + zone: zone, + loadBalancerType: options.LoadBalancerType, + options: options, + } +} + // NewLoadbalancers returns a cloudprovider.LoadBalancer whose concrete type is a *loadbalancer. +// This constructor uses the global Options for backward compatibility. func NewLoadbalancers(client client.Client, zone string) cloudprovider.LoadBalancer { - return &Loadbalancers{client: client, zone: zone, loadBalancerType: Options.LoadBalancerType} + return NewLoadbalancersWithOptions(client, zone, &Options) } func (l *Loadbalancers) getNodeBalancerForService(ctx context.Context, service *v1.Service) (*linodego.NodeBalancer, error) { @@ -227,7 +239,7 @@ func (l *Loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1. // GetLoadBalancer will not modify service. 
func (l *Loadbalancers) GetLoadBalancerName(_ context.Context, _ string, _ *v1.Service) string { unixNano := strconv.FormatInt(time.Now().UnixNano(), 16) - return fmt.Sprintf("%s-%s", Options.NodeBalancerPrefix, unixNano[len(unixNano)-12:]) + return fmt.Sprintf("%s-%s", l.options.NodeBalancerPrefix, unixNano[len(unixNano)-12:]) } // GetLoadBalancer returns the *v1.LoadBalancerStatus of service. @@ -256,7 +268,7 @@ func (l *Loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string, } } - return makeLoadBalancerStatus(service, nb), true, nil + return l.makeLoadBalancerStatus(service, nb), true, nil } // EnsureLoadBalancer ensures that the cluster is running a load balancer for @@ -296,8 +308,8 @@ func (l *Loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri } var ipHolderSuffix string - if Options.IpHolderSuffix != "" { - ipHolderSuffix = Options.IpHolderSuffix + if l.options.IpHolderSuffix != "" { + ipHolderSuffix = l.options.IpHolderSuffix klog.Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn) } @@ -349,7 +361,7 @@ func (l *Loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri } klog.Infof("NodeBalancer (%d) has been ensured for service (%s)", nb.ID, serviceNn) - lbStatus = makeLoadBalancerStatus(service, nb) + lbStatus = l.makeLoadBalancerStatus(service, nb) if !l.shouldPreserveNodeBalancer(service) { if err := l.cleanupOldNodeBalancer(ctx, service); err != nil { @@ -368,10 +380,15 @@ func (l *Loadbalancers) updateNodeBalancer( nodes []*v1.Node, nb *linodego.NodeBalancer, ) (err error) { - if len(nodes) == 0 { + if len(nodes) == 0 && !l.options.AllowEmptyNodeBalancerBackends { return fmt.Errorf("%w: service %s", errNoNodesAvailable, getServiceNn(service)) } + // Log warning when updating NodeBalancer without nodes + if len(nodes) == 0 && l.options.AllowEmptyNodeBalancerBackends { + klog.Warningf("Updating NodeBalancer for service (%s) without backend nodes - load balancer will be non-functional until nodes are added", getServiceNn(service)) + } + connThrottle := getConnectionThrottle(service) if connThrottle != nb.ClientConnThrottle { update := nb.GetUpdateOptions() @@ -451,16 +468,16 @@ func (l *Loadbalancers) updateNodeBalancer( // Add all of the Nodes to the config newNBNodes := make([]linodego.NodeBalancerConfigRebuildNodeOptions, 0, len(nodes)) subnetID := 0 - if Options.NodeBalancerBackendIPv4SubnetID != 0 { - subnetID = Options.NodeBalancerBackendIPv4SubnetID + if l.options.NodeBalancerBackendIPv4SubnetID != 0 { + subnetID = l.options.NodeBalancerBackendIPv4SubnetID } backendIPv4Range, ok := service.GetAnnotations()[annotations.NodeBalancerBackendIPv4Range] if ok { - if err = validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil { + if err = l.validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil { return err } } - if len(Options.VPCNames) > 0 && !Options.DisableNodeBalancerVPCBackends { + if len(l.options.VPCNames) > 0 && !l.options.DisableNodeBalancerVPCBackends { var id int id, err = l.getSubnetIDForSVC(ctx, service) if err != nil { @@ -530,8 +547,8 @@ func (l *Loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri klog.Infof("handling update for LoadBalancer Service %s/%s as %s", service.Namespace, service.Name, ciliumLBClass) serviceNn := getServiceNn(service) var ipHolderSuffix string - if Options.IpHolderSuffix != "" { - ipHolderSuffix = Options.IpHolderSuffix + if l.options.IpHolderSuffix != "" { + ipHolderSuffix = l.options.IpHolderSuffix 
klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn) } @@ -716,7 +733,7 @@ func (l *Loadbalancers) GetLoadBalancerTags(_ context.Context, clusterName strin tags = append(tags, clusterName) } - tags = append(tags, Options.NodeBalancerTags...) + tags = append(tags, l.options.NodeBalancerTags...) tagStr, ok := service.GetAnnotations()[annotations.AnnLinodeLoadBalancerTags] if ok { @@ -733,7 +750,7 @@ func (l *Loadbalancers) GetLinodeNBType(service *v1.Service) linodego.NodeBalanc return linodego.NBTypePremium } - return linodego.NodeBalancerPlanType(Options.DefaultNBType) + return linodego.NodeBalancerPlanType(l.options.DefaultNBType) } // getVPCCreateOptions returns the VPC options for the NodeBalancer creation. @@ -753,7 +770,7 @@ func (l *Loadbalancers) getVPCCreateOptions(ctx context.Context, service *v1.Ser // Precedence 1: If the user has specified a NodeBalancerBackendIPv4Range, use that backendIPv4Range, ok := service.GetAnnotations()[annotations.NodeBalancerBackendIPv4Range] if ok { - if err := validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil { + if err := l.validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil { return nil, err } // If the user has specified a NodeBalancerBackendIPv4Range, use that @@ -785,10 +802,10 @@ func (l *Loadbalancers) getVPCCreateOptions(ctx context.Context, service *v1.Ser // Precedence 3: If the user has specified a NodeBalancerBackendIPv4SubnetID, use that // and auto-allocate subnets from it for the NodeBalancer - if Options.NodeBalancerBackendIPv4SubnetID != 0 { + if l.options.NodeBalancerBackendIPv4SubnetID != 0 { vpcCreateOpts := []linodego.NodeBalancerVPCOptions{ { - SubnetID: Options.NodeBalancerBackendIPv4SubnetID, + SubnetID: l.options.NodeBalancerBackendIPv4SubnetID, }, } return vpcCreateOpts, nil @@ -796,11 +813,11 @@ func (l *Loadbalancers) getVPCCreateOptions(ctx context.Context, service *v1.Ser // Precedence 4: If the user has specified a NodeBalancerBackendIPv4Subnet, use that // and auto-allocate subnets from it for the NodeBalancer - if Options.NodeBalancerBackendIPv4Subnet != "" { + if l.options.NodeBalancerBackendIPv4Subnet != "" { vpcCreateOpts := []linodego.NodeBalancerVPCOptions{ { SubnetID: subnetID, - IPv4Range: Options.NodeBalancerBackendIPv4Subnet, + IPv4Range: l.options.NodeBalancerBackendIPv4Subnet, IPv4RangeAutoAssign: true, }, } @@ -831,7 +848,7 @@ func (l *Loadbalancers) createNodeBalancer(ctx context.Context, clusterName stri Type: nbType, } - if len(Options.VPCNames) > 0 && !Options.DisableNodeBalancerVPCBackends { + if len(l.options.VPCNames) > 0 && !l.options.DisableNodeBalancerVPCBackends { createOpts.VPCs, err = l.getVPCCreateOptions(ctx, service) if err != nil { return nil, err @@ -971,7 +988,7 @@ func (l *Loadbalancers) addTLSCert(ctx context.Context, service *v1.Service, nbC // 3. If CCM is configured with --nodebalancer-backend-ipv4-subnet-id, it will be used as the subnet ID. // 4. Else, use first VPCName and SubnetName to calculate subnet id for the service. 
func (l *Loadbalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Service) (int, error) { - if len(Options.VPCNames) == 0 { + if len(l.options.VPCNames) == 0 { return 0, fmt.Errorf("CCM not configured with VPC, cannot create NodeBalancer with specified annotation") } // Check if the service has an annotation for NodeBalancerBackendSubnetID @@ -988,11 +1005,11 @@ func (l *Loadbalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Servi // If no VPCName or SubnetName is specified in annotations, but NodeBalancerBackendIPv4SubnetID is set, // use the NodeBalancerBackendIPv4SubnetID as the subnet ID. - if !vpcOk && !subnetOk && Options.NodeBalancerBackendIPv4SubnetID != 0 { - return Options.NodeBalancerBackendIPv4SubnetID, nil + if !vpcOk && !subnetOk && l.options.NodeBalancerBackendIPv4SubnetID != 0 { + return l.options.NodeBalancerBackendIPv4SubnetID, nil } - vpcName := Options.VPCNames[0] + vpcName := l.options.VPCNames[0] if vpcOk { vpcName = specifiedVPCName } @@ -1001,7 +1018,7 @@ func (l *Loadbalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Servi return 0, err } - subnetName := Options.SubnetNames[0] + subnetName := l.options.SubnetNames[0] if subnetOk { subnetName = specifiedSubnetName } @@ -1013,24 +1030,30 @@ func (l *Loadbalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Servi // buildLoadBalancerRequest returns a linodego.NodeBalancer // requests for service across nodes. func (l *Loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*linodego.NodeBalancer, error) { - if len(nodes) == 0 { + if len(nodes) == 0 && !l.options.AllowEmptyNodeBalancerBackends { return nil, fmt.Errorf("%w: cluster %s, service %s", errNoNodesAvailable, clusterName, getServiceNn(service)) } + + // Log warning when creating NodeBalancer without nodes + if len(nodes) == 0 && l.options.AllowEmptyNodeBalancerBackends { + klog.Warningf("Creating NodeBalancer for service (%s) without backend nodes - load balancer will be non-functional until nodes are added", getServiceNn(service)) + } + ports := service.Spec.Ports configs := make([]*linodego.NodeBalancerConfigCreateOptions, 0, len(ports)) subnetID := 0 - if Options.NodeBalancerBackendIPv4SubnetID != 0 { - subnetID = Options.NodeBalancerBackendIPv4SubnetID + if l.options.NodeBalancerBackendIPv4SubnetID != 0 { + subnetID = l.options.NodeBalancerBackendIPv4SubnetID } // Check for the NodeBalancerBackendIPv4Range annotation backendIPv4Range, ok := service.GetAnnotations()[annotations.NodeBalancerBackendIPv4Range] if ok { - if err := validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil { + if err := l.validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil { return nil, err } } - if len(Options.VPCNames) > 0 && !Options.DisableNodeBalancerVPCBackends { + if len(l.options.VPCNames) > 0 && !l.options.DisableNodeBalancerVPCBackends { id, err := l.getSubnetIDForSVC(ctx, service) if err != nil { return nil, err @@ -1111,7 +1134,7 @@ func (l *Loadbalancers) retrieveKubeClient() error { // Check to see if --kubeconfig was set. If it was, build a kubeconfig from the given file. // Otherwise, use the in-cluster config. 
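// Sketch of the receiver-based range validation this patch threads through
// the calls above (the method itself is redefined below). The subnet mirrors
// the value used in the updated tests; the two ranges are placeholders.
l := &Loadbalancers{options: &OptionsConfig{
	NodeBalancerBackendIPv4Subnet: "10.100.0.0/24",
}}
err := l.validateNodeBalancerBackendIPv4Range("10.100.0.0/30") // nil: inside the subnet
fmt.Println(err)
err = l.validateNodeBalancerBackendIPv4Range("192.168.0.0/30") // non-nil: outside the subnet
fmt.Println(err)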
- kubeconfigFlag := Options.KubeconfigFlag + kubeconfigFlag := l.options.KubeconfigFlag if kubeconfigFlag == nil || kubeconfigFlag.Value.String() == "" { kubeConfig, err = rest.InClusterConfig() } else { @@ -1284,7 +1307,7 @@ func getConnectionThrottle(service *v1.Service) int { return connThrottle } -func makeLoadBalancerStatus(service *v1.Service, nb *linodego.NodeBalancer) *v1.LoadBalancerStatus { +func (l *Loadbalancers) makeLoadBalancerStatus(service *v1.Service, nb *linodego.NodeBalancer) *v1.LoadBalancerStatus { ingress := v1.LoadBalancerIngress{ Hostname: *nb.Hostname, } @@ -1304,7 +1327,7 @@ func makeLoadBalancerStatus(service *v1.Service, nb *linodego.NodeBalancer) *v1. } // Check for per-service IPv6 annotation first, then fall back to global setting - useIPv6 := getServiceBoolAnnotation(service, annotations.AnnLinodeEnableIPv6Ingress) || Options.EnableIPv6ForLoadBalancers + useIPv6 := getServiceBoolAnnotation(service, annotations.AnnLinodeEnableIPv6Ingress) || l.options.EnableIPv6ForLoadBalancers // When IPv6 is enabled (either per-service or globally), include both IPv4 and IPv6 if useIPv6 && nb.IPv6 != nil && *nb.IPv6 != "" { @@ -1356,16 +1379,16 @@ func getServiceBoolAnnotation(service *v1.Service, name string) bool { // validateNodeBalancerBackendIPv4Range validates the NodeBalancerBackendIPv4Range // annotation to be within the NodeBalancerBackendIPv4Subnet if it is set. -func validateNodeBalancerBackendIPv4Range(backendIPv4Range string) error { - if Options.NodeBalancerBackendIPv4Subnet == "" { +func (l *Loadbalancers) validateNodeBalancerBackendIPv4Range(backendIPv4Range string) error { + if l.options.NodeBalancerBackendIPv4Subnet == "" { return nil } - withinCIDR, err := isCIDRWithinCIDR(Options.NodeBalancerBackendIPv4Subnet, backendIPv4Range) + withinCIDR, err := isCIDRWithinCIDR(l.options.NodeBalancerBackendIPv4Subnet, backendIPv4Range) if err != nil { return fmt.Errorf("invalid IPv4 range: %w", err) } if !withinCIDR { - return fmt.Errorf("IPv4 range %s is not within the subnet %s", backendIPv4Range, Options.NodeBalancerBackendIPv4Subnet) + return fmt.Errorf("IPv4 range %s is not within the subnet %s", backendIPv4Range, l.options.NodeBalancerBackendIPv4Subnet) } return nil } diff --git a/cloud/linode/loadbalancers_test.go b/cloud/linode/loadbalancers_test.go index 2b76d683..751dd756 100644 --- a/cloud/linode/loadbalancers_test.go +++ b/cloud/linode/loadbalancers_test.go @@ -1862,7 +1862,7 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien t.Fatalf("failed to create NodeBalancer: %s", err) } - svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nodeBalancer) + svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nodeBalancer) svc.SetAnnotations(map[string]string{ annotations.AnnLinodeDefaultProxyProtocol: string(tc.proxyProtocolConfig), }) @@ -3038,7 +3038,7 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie t.Fatalf("failed to create NodeBalancer: %s", err) } - svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nodeBalancer) + svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nodeBalancer) newNodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ Region: lb.zone, @@ -3062,7 +3062,7 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie t.Errorf("GetLoadBalancer returned an error: %s", err) } - expectedLBStatus := makeLoadBalancerStatus(svc, newNodeBalancer) + expectedLBStatus := lb.makeLoadBalancerStatus(svc, 
newNodeBalancer) if !reflect.DeepEqual(expectedLBStatus, lbStatus) { t.Errorf("LoadBalancer status mismatch: expected %v, got %v", expectedLBStatus, lbStatus) } @@ -4082,7 +4082,7 @@ func testEnsureLoadBalancerPreserveAnnotation(t *testing.T, client *linodego.Cli t.Fatal(err) } - svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nb) + svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nb) err = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) didDelete := fake.didRequestOccur(http.MethodDelete, fmt.Sprintf("/nodebalancers/%d", nb.ID), "") @@ -4225,7 +4225,7 @@ func testEnsureExistingLoadBalancer(t *testing.T, client *linodego.Client, _ *fa t.Fatal(err) } - svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nb) + svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nb) defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() getLBStatus, exists, err := lb.GetLoadBalancer(t.Context(), "linodelb", svc) if err != nil { @@ -4336,14 +4336,20 @@ func testMakeLoadBalancerStatus(t *testing.T, client *linodego.Client, _ *fakeAP IP: ipv4, }}, } - status := makeLoadBalancerStatus(svc, nb) + + l := Loadbalancers{ + options: &OptionsConfig{ + EnableIPv6ForLoadBalancers: false, + }, + } + status := l.makeLoadBalancerStatus(svc, nb) if !reflect.DeepEqual(status, expectedStatus) { t.Errorf("expected status for basic service to be %#v; got %#v", expectedStatus, status) } svc.Annotations[annotations.AnnLinodeHostnameOnlyIngress] = "true" expectedStatus.Ingress[0] = v1.LoadBalancerIngress{Hostname: hostname} - status = makeLoadBalancerStatus(svc, nb) + status = l.makeLoadBalancerStatus(svc, nb) if !reflect.DeepEqual(status, expectedStatus) { t.Errorf("expected status for %q annotated service to be %#v; got %#v", annotations.AnnLinodeHostnameOnlyIngress, expectedStatus, status) } @@ -4369,20 +4375,24 @@ func testMakeLoadBalancerStatusWithIPv6(t *testing.T, client *linodego.Client, _ } // Test with EnableIPv6ForLoadBalancers = false (default) - Options.EnableIPv6ForLoadBalancers = false + l := Loadbalancers{ + options: &OptionsConfig{ + EnableIPv6ForLoadBalancers: false, + }, + } expectedStatus := &v1.LoadBalancerStatus{ Ingress: []v1.LoadBalancerIngress{{ Hostname: hostname, IP: ipv4, }}, } - status := makeLoadBalancerStatus(svc, nb) + status := l.makeLoadBalancerStatus(svc, nb) if !reflect.DeepEqual(status, expectedStatus) { t.Errorf("expected status with EnableIPv6ForLoadBalancers=false to be %#v; got %#v", expectedStatus, status) } // Test with EnableIPv6ForLoadBalancers = true - Options.EnableIPv6ForLoadBalancers = true + l.options.EnableIPv6ForLoadBalancers = true expectedStatus = &v1.LoadBalancerStatus{ Ingress: []v1.LoadBalancerIngress{ { @@ -4395,18 +4405,18 @@ func testMakeLoadBalancerStatusWithIPv6(t *testing.T, client *linodego.Client, _ }, }, } - status = makeLoadBalancerStatus(svc, nb) + status = l.makeLoadBalancerStatus(svc, nb) if !reflect.DeepEqual(status, expectedStatus) { t.Errorf("expected status with EnableIPv6ForLoadBalancers=true to be %#v; got %#v", expectedStatus, status) } // Test with per-service annotation // Reset the global flag to false and set the annotation - Options.EnableIPv6ForLoadBalancers = false + l.options.EnableIPv6ForLoadBalancers = false svc.Annotations[annotations.AnnLinodeEnableIPv6Ingress] = "true" // Expect the same result as when the global flag is enabled - status = makeLoadBalancerStatus(svc, nb) + status = l.makeLoadBalancerStatus(svc, nb) if !reflect.DeepEqual(status, expectedStatus) { t.Errorf("expected 
status with %s=true annotation to be %#v; got %#v", annotations.AnnLinodeEnableIPv6Ingress, expectedStatus, status) @@ -4439,28 +4449,34 @@ func testMakeLoadBalancerStatusEnvVar(t *testing.T, client *linodego.Client, _ * IP: ipv4, }}, } - status := makeLoadBalancerStatus(svc, nb) + + l := Loadbalancers{ + options: &OptionsConfig{ + EnableIPv6ForLoadBalancers: false, + }, + } + + status := l.makeLoadBalancerStatus(svc, nb) if !reflect.DeepEqual(status, expectedStatus) { t.Errorf("expected status for basic service to be %#v; got %#v", expectedStatus, status) } t.Setenv("LINODE_HOSTNAME_ONLY_INGRESS", "true") expectedStatus.Ingress[0] = v1.LoadBalancerIngress{Hostname: hostname} - status = makeLoadBalancerStatus(svc, nb) + status = l.makeLoadBalancerStatus(svc, nb) if !reflect.DeepEqual(status, expectedStatus) { t.Errorf("expected status for %q annotated service to be %#v; got %#v", annotations.AnnLinodeHostnameOnlyIngress, expectedStatus, status) } t.Setenv("LINODE_HOSTNAME_ONLY_INGRESS", "false") expectedStatus.Ingress[0] = v1.LoadBalancerIngress{Hostname: hostname} - status = makeLoadBalancerStatus(svc, nb) + status = l.makeLoadBalancerStatus(svc, nb) if reflect.DeepEqual(status, expectedStatus) { t.Errorf("expected status for %q annotated service to be %#v; got %#v", annotations.AnnLinodeHostnameOnlyIngress, expectedStatus, status) } t.Setenv("LINODE_HOSTNAME_ONLY_INGRESS", "banana") - expectedStatus.Ingress[0] = v1.LoadBalancerIngress{Hostname: hostname} - status = makeLoadBalancerStatus(svc, nb) + status = l.makeLoadBalancerStatus(svc, nb) if reflect.DeepEqual(status, expectedStatus) { t.Errorf("expected status for %q annotated service to be %#v; got %#v", annotations.AnnLinodeHostnameOnlyIngress, expectedStatus, status) } @@ -4509,12 +4525,12 @@ func testCleanupDoesntCall(t *testing.T, client *linodego.Client, fakeAPI *fakeA Annotations: map[string]string{annotations.AnnLinodeNodeBalancerID: strconv.Itoa(nb2.ID)}, }, } - svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nb1) - svcAnn.Status.LoadBalancer = *makeLoadBalancerStatus(svcAnn, nb1) lb, assertion := NewLoadbalancers(client, region).(*Loadbalancers) if !assertion { t.Error("type assertion failed") } + svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nb1) + svcAnn.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svcAnn, nb1) fakeAPI.ResetRequests() t.Run("non-annotated service shouldn't call the API during cleanup", func(t *testing.T) { @@ -4579,7 +4595,7 @@ func testUpdateLoadBalancerNodeExcludedByAnnotation(t *testing.T, client *linode if err != nil { t.Fatalf("failed to create NodeBalancer: %s", err) } - svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nodeBalancer) + svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nodeBalancer) stubService(fakeClientset, svc) svc.SetAnnotations(map[string]string{ annotations.AnnLinodeNodeBalancerID: strconv.Itoa(nodeBalancer.ID), @@ -4822,7 +4838,7 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak if err != nil { t.Fatalf("failed to create NodeBalancer: %s", err) } - svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nodeBalancer) + svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nodeBalancer) stubService(fakeClientset, svc) svc.SetAnnotations(map[string]string{ annotations.AnnLinodeNodeBalancerID: strconv.Itoa(nodeBalancer.ID), @@ -4886,7 +4902,7 @@ func testGetNodeBalancerByStatus(t *testing.T, client *linodego.Client, _ *fakeA if err != nil { t.Fatal(err) } - test.service.Status.LoadBalancer = 
*makeLoadBalancerStatus(test.service, expectedNB) + test.service.Status.LoadBalancer = *lb.makeLoadBalancerStatus(test.service, expectedNB) stubService(fakeClientset, test.service) actualNB, err := lb.getNodeBalancerByStatus(t.Context(), test.service) @@ -5141,7 +5157,7 @@ func testGetLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI) { defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() - lbStatus := makeLoadBalancerStatus(svc, nb) + lbStatus := lb.makeLoadBalancerStatus(svc, nb) svc.Status.LoadBalancer = *lbStatus stubService(fakeClientset, svc) stubService(fakeClientset, svc2) @@ -5498,8 +5514,10 @@ func Test_loadbalancers_GetLinodeNBType(t *testing.T) { kubeClient: tt.fields.kubeClient, ciliumClient: tt.fields.ciliumClient, loadBalancerType: tt.fields.loadBalancerType, + options: &OptionsConfig{ + DefaultNBType: string(tt.defaultNB), + }, } - Options.DefaultNBType = string(tt.defaultNB) if got := l.GetLinodeNBType(tt.args.service); !reflect.DeepEqual(got, tt.want) { t.Errorf("loadbalancers.GetLinodeNBType() = %v, want %v", got, tt.want) } @@ -5528,15 +5546,20 @@ func Test_validateNodeBalancerBackendIPv4Range(t *testing.T) { }, } - nbBackendSubnet := Options.NodeBalancerBackendIPv4Subnet - defer func() { - Options.NodeBalancerBackendIPv4Subnet = nbBackendSubnet - }() - Options.NodeBalancerBackendIPv4Subnet = "10.100.0.0/24" + l := &Loadbalancers{ + client: nil, + zone: "", + kubeClient: nil, + ciliumClient: nil, + loadBalancerType: "nodebalancer", + options: &OptionsConfig{ + NodeBalancerBackendIPv4Subnet: "10.100.0.0/24", + }, + } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := validateNodeBalancerBackendIPv4Range(tt.args.backendIPv4Range); (err != nil) != tt.wantErr { + if err := l.validateNodeBalancerBackendIPv4Range(tt.args.backendIPv4Range); (err != nil) != tt.wantErr { t.Errorf("validateNodeBalancerBackendIPv4Range() error = %v, wantErr %v", err, tt.wantErr) } }) diff --git a/main.go b/main.go index 81e403e8..36455a7a 100644 --- a/main.go +++ b/main.go @@ -101,6 +101,7 @@ func main() { command.Flags().BoolVar(&linode.Options.DisableNodeBalancerVPCBackends, "disable-nodebalancer-vpc-backends", false, "disables nodebalancer backends in VPCs (when enabled, nodebalancers will only have private IPs as backends for backward compatibility)") command.Flags().StringVar(&linode.Options.NodeBalancerPrefix, "nodebalancer-prefix", "ccm", fmt.Sprintf("Name prefix for NoadBalancers. (max. 
%v char.)", linode.NodeBalancerPrefixCharLimit)) command.Flags().BoolVar(&linode.Options.DisableIPv6NodeCIDRAllocation, "disable-ipv6-node-cidr-allocation", false, "disables IPv6 node cidr allocation by ipam controller (when enabled, IPv6 cidr ranges will be allocated to nodes)") + command.Flags().BoolVar(&linode.Options.AllowEmptyNodeBalancerBackends, "allow-empty-nodebalancer-backends", false, "allows creating NodeBalancers without backend nodes (useful for external management of backends)") // Set static flags command.Flags().VisitAll(func(fl *pflag.Flag) { From 58c9bd9b3d2bbac5940fb29175ee152f1d04f3e4 Mon Sep 17 00:00:00 2001 From: Khaja Omer Date: Mon, 28 Jul 2025 23:37:55 -0400 Subject: [PATCH 4/9] potential fix --- cloud/linode/loadbalancers.go | 79 ++++++++++++++++-------------- cloud/linode/loadbalancers_test.go | 2 +- 2 files changed, 44 insertions(+), 37 deletions(-) diff --git a/cloud/linode/loadbalancers.go b/cloud/linode/loadbalancers.go index 396e84bc..47049595 100644 --- a/cloud/linode/loadbalancers.go +++ b/cloud/linode/loadbalancers.go @@ -144,7 +144,7 @@ func NewLoadbalancers(client client.Client, zone string) cloudprovider.LoadBalan return NewLoadbalancersWithOptions(client, zone, &Options) } -func (l *Loadbalancers) getNodeBalancerForService(ctx context.Context, service *v1.Service) (*linodego.NodeBalancer, error) { +func (l *Loadbalancers) GetNodeBalancerForService(ctx context.Context, service *v1.Service) (*linodego.NodeBalancer, error) { rawID := service.GetAnnotations()[annotations.AnnLinodeNodeBalancerID] id, idErr := strconv.Atoi(rawID) hasIDAnn := idErr == nil && id != 0 @@ -217,7 +217,7 @@ func (l *Loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1. } } - nb, err := l.getNodeBalancerForService(ctx, service) + nb, err := l.GetNodeBalancerForService(ctx, service) if err != nil { return err } @@ -257,7 +257,7 @@ func (l *Loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string, }, true, nil } - nb, err := l.getNodeBalancerForService(ctx, service) + nb, err := l.GetNodeBalancerForService(ctx, service) if err != nil { var targetError lbNotFoundError if errors.As(err, &targetError) { @@ -333,7 +333,7 @@ func (l *Loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri // Handle LoadBalancers backed by NodeBalancers var nb *linodego.NodeBalancer - nb, err = l.getNodeBalancerForService(ctx, service) + nb, err = l.GetNodeBalancerForService(ctx, service) if err == nil { if err = l.updateNodeBalancer(ctx, clusterName, service, nodes, nb); err != nil { sentry.CaptureError(ctx, err) @@ -486,24 +486,27 @@ func (l *Loadbalancers) updateNodeBalancer( } subnetID = id } - for _, node := range nodes { - if _, ok := node.Annotations[annotations.AnnExcludeNodeFromNb]; ok { - klog.Infof("Node %s is excluded from NodeBalancer by annotation, skipping", node.Name) - continue - } - var newNodeOpts *linodego.NodeBalancerConfigRebuildNodeOptions - newNodeOpts, err = l.buildNodeBalancerNodeConfigRebuildOptions(node, port.NodePort, subnetID, newNBCfg.Protocol) - if err != nil { - sentry.CaptureError(ctx, err) - return fmt.Errorf("failed to build NodeBalancer node config options for node %s: %w", node.Name, err) - } - oldNodeID, ok := oldNBNodeIDs[newNodeOpts.Address] - if ok { - newNodeOpts.ID = oldNodeID - } else { - klog.Infof("No preexisting node id for %v found.", newNodeOpts.Address) + + if len(nodes) > 0 { + for _, node := range nodes { + if _, ok := node.Annotations[annotations.AnnExcludeNodeFromNb]; ok { + klog.Infof("Node %s is 
excluded from NodeBalancer by annotation, skipping", node.Name) + continue + } + var newNodeOpts *linodego.NodeBalancerConfigRebuildNodeOptions + newNodeOpts, err = l.buildNodeBalancerNodeConfigRebuildOptions(node, port.NodePort, subnetID, newNBCfg.Protocol) + if err != nil { + sentry.CaptureError(ctx, err) + return fmt.Errorf("failed to build NodeBalancer node config options for node %s: %w", node.Name, err) + } + oldNodeID, ok := oldNBNodeIDs[newNodeOpts.Address] + if ok { + newNodeOpts.ID = oldNodeID + } else { + klog.Infof("No preexisting node id for %v found.", newNodeOpts.Address) + } + newNBNodes = append(newNBNodes, *newNodeOpts) } - newNBNodes = append(newNBNodes, *newNodeOpts) } // If there's no existing config, create it var rebuildOpts linodego.NodeBalancerConfigRebuildOptions @@ -553,9 +556,11 @@ func (l *Loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri } // make sure that IPs are shared properly on the Node if using load-balancers not backed by NodeBalancers - for _, node := range nodes { - if err = l.handleIPSharing(ctx, node, ipHolderSuffix); err != nil { - return err + if len(nodes) > 0 { + for _, node := range nodes { + if err = l.handleIPSharing(ctx, node, ipHolderSuffix); err != nil { + return err + } } } return nil @@ -569,7 +574,7 @@ func (l *Loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri return fmt.Errorf("failed to get latest LoadBalancer status for service (%s): %w", getServiceNn(service), err) } - nb, err := l.getNodeBalancerForService(ctx, serviceWithStatus) + nb, err := l.GetNodeBalancerForService(ctx, serviceWithStatus) if err != nil { sentry.CaptureError(ctx, err) return err @@ -644,7 +649,7 @@ func (l *Loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa return nil } - nb, err := l.getNodeBalancerForService(ctx, service) + nb, err := l.GetNodeBalancerForService(ctx, service) if err != nil { var targetError lbNotFoundError if errors.As(err, &targetError) { @@ -1068,17 +1073,19 @@ func (l *Loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterNam } createOpt := config.GetCreateOptions() - for _, node := range nodes { - if _, ok := node.Annotations[annotations.AnnExcludeNodeFromNb]; ok { - klog.Infof("Node %s is excluded from NodeBalancer by annotation, skipping", node.Name) - continue - } - newNodeOpts, err := l.buildNodeBalancerNodeConfigRebuildOptions(node, port.NodePort, subnetID, config.Protocol) - if err != nil { - sentry.CaptureError(ctx, err) - return nil, fmt.Errorf("failed to build NodeBalancer node config options for node %s: %w", node.Name, err) + if len(nodes) > 0 { + for _, node := range nodes { + if _, ok := node.Annotations[annotations.AnnExcludeNodeFromNb]; ok { + klog.Infof("Node %s is excluded from NodeBalancer by annotation, skipping", node.Name) + continue + } + newNodeOpts, err := l.buildNodeBalancerNodeConfigRebuildOptions(node, port.NodePort, subnetID, config.Protocol) + if err != nil { + sentry.CaptureError(ctx, err) + return nil, fmt.Errorf("failed to build NodeBalancer node config options for node %s: %w", node.Name, err) + } + createOpt.Nodes = append(createOpt.Nodes, newNodeOpts.NodeBalancerNodeCreateOptions) } - createOpt.Nodes = append(createOpt.Nodes, newNodeOpts.NodeBalancerNodeCreateOptions) } configs = append(configs, &createOpt) diff --git a/cloud/linode/loadbalancers_test.go b/cloud/linode/loadbalancers_test.go index 751dd756..5a683cd9 100644 --- a/cloud/linode/loadbalancers_test.go +++ b/cloud/linode/loadbalancers_test.go @@ -4950,7 +4950,7 @@ 
func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego. }, } - _, err := lb.getNodeBalancerForService(t.Context(), svc) + _, err := lb.GetNodeBalancerForService(t.Context(), svc) if err == nil { t.Fatal("expected getNodeBalancerForService to return an error") } From a04f57cbdeb8c9fdea14b01f46306f3bea98e50f Mon Sep 17 00:00:00 2001 From: Khaja Omer Date: Tue, 29 Jul 2025 15:40:08 -0400 Subject: [PATCH 5/9] Refactor Loadbalancers to use public fields for improved accessibility and consistency in naming conventions. Update all references to the new public field names across the implementation and tests. --- cloud/linode/cilium_loadbalancers.go | 72 ++++++------ cloud/linode/loadbalancers.go | 138 +++++++++++----------- cloud/linode/loadbalancers_test.go | 146 ++++++++++++------------ cloud/linode/service_controller_test.go | 6 +- 4 files changed, 181 insertions(+), 181 deletions(-) diff --git a/cloud/linode/cilium_loadbalancers.go b/cloud/linode/cilium_loadbalancers.go index de3730fa..4059ca75 100644 --- a/cloud/linode/cilium_loadbalancers.go +++ b/cloud/linode/cilium_loadbalancers.go @@ -80,7 +80,7 @@ func (l *Loadbalancers) getExistingSharedIPsInCluster(ctx context.Context) ([]st if err := l.retrieveCiliumClientset(); err != nil { return addrs, err } - pools, err := l.ciliumClient.CiliumLoadBalancerIPPools().List(ctx, metav1.ListOptions{ + pools, err := l.CiliumClient.CiliumLoadBalancerIPPools().List(ctx, metav1.ListOptions{ LabelSelector: "app.kubernetes.io/managed-by=linode-ccm", }) if err != nil { @@ -98,7 +98,7 @@ func (l *Loadbalancers) getExistingSharedIPs(ctx context.Context, ipHolder *lino if ipHolder == nil { return nil, nil } - ipHolderAddrs, err := l.client.GetInstanceIPAddresses(ctx, ipHolder.ID) + ipHolderAddrs, err := l.Client.GetInstanceIPAddresses(ctx, ipHolder.ID) if err != nil { return nil, err } @@ -118,14 +118,14 @@ func (l *Loadbalancers) shareIPs(ctx context.Context, addrs []string, node *v1.N if err = l.retrieveKubeClient(); err != nil { return err } - if err = l.client.ShareIPAddresses(ctx, linodego.IPAddressesShareOptions{ + if err = l.Client.ShareIPAddresses(ctx, linodego.IPAddressesShareOptions{ IPs: addrs, LinodeID: nodeLinodeID, }); err != nil { return err } // need to make sure node is up-to-date - node, err = l.kubeClient.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + node, err = l.KubeClient.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) if err != nil { return err } @@ -134,7 +134,7 @@ func (l *Loadbalancers) shareIPs(ctx context.Context, addrs []string, node *v1.N } node.Labels[annotations.AnnLinodeNodeIPSharingUpdated] = "true" retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - _, err := l.kubeClient.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{}) + _, err := l.KubeClient.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{}) return err }) if retryErr != nil { @@ -159,8 +159,8 @@ func (l *Loadbalancers) handleIPSharing(ctx context.Context, node *v1.Node, ipHo } // If performing Service load-balancing via IP sharing + BGP, check for a special annotation // added by the CCM gets set when load-balancer IPs have been successfully shared on the node - if l.options.BGPNodeSelector != "" { - kv := strings.Split(l.options.BGPNodeSelector, "=") + if l.Options.BGPNodeSelector != "" { + kv := strings.Split(l.Options.BGPNodeSelector, "=") // Check if node should be participating in IP sharing via the given selector if val, ok := node.Labels[kv[0]]; !ok || len(kv) != 2 || val != kv[1] { 
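// With the fields exported by this patch (see the struct definition further
// down), the positional literals used in the tests can become named fields.
// A minimal sketch reusing the same placeholder values as the tests:
lb := &Loadbalancers{
	Client:           mc,
	Zone:             zone,
	KubeClient:       kubeClient,
	CiliumClient:     ciliumClient,
	LoadBalancerType: ciliumLBType,
	Options:          &Options,
}
_ = lb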
// not a selected Node @@ -216,7 +216,7 @@ func (l *Loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ip return "", err } - newSharedIP, err := l.client.AddInstanceIPAddress(ctx, ipHolder.ID, true) + newSharedIP, err := l.Client.AddInstanceIPAddress(ctx, ipHolder.ID, true) if err != nil { return "", err } @@ -243,7 +243,7 @@ func (l *Loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ip } // share the IPs with nodes participating in Cilium BGP peering - if l.options.BGPNodeSelector == "" { + if l.Options.BGPNodeSelector == "" { for _, node := range nodes { if _, ok := node.Labels[commonControlPlaneLabel]; !ok { if err = l.shareIPs(ctx, addrs, node); err != nil { @@ -252,7 +252,7 @@ func (l *Loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ip } } } else { - kv := strings.Split(l.options.BGPNodeSelector, "=") + kv := strings.Split(l.Options.BGPNodeSelector, "=") for _, node := range nodes { if val, ok := node.Labels[kv[0]]; ok && len(kv) == 2 && val == kv[1] { if err = l.shareIPs(ctx, addrs, node); err != nil { @@ -272,8 +272,8 @@ func (l *Loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) if err != nil { return err } - nodeList, err := l.kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{ - LabelSelector: l.options.BGPNodeSelector, + nodeList, err := l.KubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{ + LabelSelector: l.Options.BGPNodeSelector, }) if err != nil { return err @@ -282,8 +282,8 @@ func (l *Loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) serviceNn := getServiceNn(service) var ipHolderSuffix string - if l.options.IpHolderSuffix != "" { - ipHolderSuffix = l.options.IpHolderSuffix + if l.Options.IpHolderSuffix != "" { + ipHolderSuffix = l.Options.IpHolderSuffix klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn) } @@ -304,14 +304,14 @@ func (l *Loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) if err != nil { return err } - err = l.client.DeleteInstanceIPAddress(ctx, nodeLinodeID, ingress.IP) + err = l.Client.DeleteInstanceIPAddress(ctx, nodeLinodeID, ingress.IP) if IgnoreLinodeAPIError(err, http.StatusNotFound) != nil { return err } } // finally delete the shared IP on the ip-holder - err = l.client.DeleteInstanceIPAddress(ctx, ipHolder.ID, ingress.IP) + err = l.Client.DeleteInstanceIPAddress(ctx, ipHolder.ID, ingress.IP) if IgnoreLinodeAPIError(err, http.StatusNotFound) != nil { return err } @@ -331,9 +331,9 @@ func (l *Loadbalancers) ensureIPHolder(ctx context.Context, suffix string) (*lin if ipHolder != nil { return ipHolder, nil } - label := generateClusterScopedIPHolderLinodeName(l.zone, suffix) - ipHolder, err = l.client.CreateInstance(ctx, linodego.InstanceCreateOptions{ - Region: l.zone, + label := generateClusterScopedIPHolderLinodeName(l.Zone, suffix) + ipHolder, err = l.Client.CreateInstance(ctx, linodego.InstanceCreateOptions{ + Region: l.Zone, Type: "g6-nanode-1", Label: label, RootPass: uuid.NewString(), @@ -355,14 +355,14 @@ func (l *Loadbalancers) ensureIPHolder(ctx context.Context, suffix string) (*lin func (l *Loadbalancers) getIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) { // even though we have updated the naming convention, leaving this in ensures we have backwards compatibility - filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, l.zone)} + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, 
l.Zone)} rawFilter, err := json.Marshal(filter) if err != nil { panic("this should not have failed") } var ipHolder *linodego.Instance // TODO (rk): should we switch to using GET instead of LIST? we would be able to wrap logic around errors - linodes, err := l.client.ListInstances(ctx, linodego.NewListOptions(1, string(rawFilter))) + linodes, err := l.Client.ListInstances(ctx, linodego.NewListOptions(1, string(rawFilter))) if err != nil { return nil, err } @@ -373,12 +373,12 @@ func (l *Loadbalancers) getIPHolder(ctx context.Context, suffix string) (*linode // a) an ip holder instance does not exist yet // or // b) another cluster already holds the linode grant to an ip holder using the old naming convention - filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(l.zone, suffix)} + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(l.Zone, suffix)} rawFilter, err = json.Marshal(filter) if err != nil { panic("this should not have failed") } - linodes, err = l.client.ListInstances(ctx, linodego.NewListOptions(1, string(rawFilter))) + linodes, err = l.Client.ListInstances(ctx, linodego.NewListOptions(1, string(rawFilter))) if err != nil { return nil, err } @@ -408,14 +408,14 @@ func generateClusterScopedIPHolderLinodeName(zone, suffix string) (label string) } func (l *Loadbalancers) retrieveCiliumClientset() error { - if l.ciliumClient != nil { + if l.CiliumClient != nil { return nil } var ( kubeConfig *rest.Config err error ) - kubeconfigFlag := l.options.KubeconfigFlag + kubeconfigFlag := l.Options.KubeconfigFlag if kubeconfigFlag == nil || kubeconfigFlag.Value.String() == "" { kubeConfig, err = rest.InClusterConfig() } else { @@ -424,7 +424,7 @@ func (l *Loadbalancers) retrieveCiliumClientset() error { if err != nil { return err } - l.ciliumClient, err = ciliumclient.NewForConfig(kubeConfig) + l.CiliumClient, err = ciliumclient.NewForConfig(kubeConfig) return err } @@ -455,7 +455,7 @@ func (l *Loadbalancers) createCiliumLBIPPool(ctx context.Context, service *v1.Se }, } - return l.ciliumClient.CiliumLoadBalancerIPPools().Create(ctx, ciliumLBIPPool, metav1.CreateOptions{}) + return l.CiliumClient.CiliumLoadBalancerIPPools().Create(ctx, ciliumLBIPPool, metav1.CreateOptions{}) } // NOTE: Cilium CRDs must be installed for this to work @@ -464,7 +464,7 @@ func (l *Loadbalancers) deleteCiliumLBIPPool(ctx context.Context, service *v1.Se return err } - return l.ciliumClient.CiliumLoadBalancerIPPools().Delete( + return l.CiliumClient.CiliumLoadBalancerIPPools().Delete( ctx, fmt.Sprintf("%s-%s-pool", service.Namespace, service.Name), metav1.DeleteOptions{}, @@ -477,7 +477,7 @@ func (l *Loadbalancers) getCiliumLBIPPool(ctx context.Context, service *v1.Servi return nil, err } - return l.ciliumClient.CiliumLoadBalancerIPPools().Get( + return l.CiliumClient.CiliumLoadBalancerIPPools().Get( ctx, fmt.Sprintf("%s-%s-pool", service.Namespace, service.Name), metav1.GetOptions{}, @@ -492,15 +492,15 @@ func (l *Loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error return err } } - regionID, ok := regionIDMap[l.zone] + regionID, ok := regionIDMap[l.Zone] if !ok { - return fmt.Errorf("unsupported region for BGP: %s", l.zone) + return fmt.Errorf("unsupported region for BGP: %s", l.Zone) } if err := l.retrieveCiliumClientset(); err != nil { return err } // check if policy already exists - policy, err := l.ciliumClient.CiliumBGPPeeringPolicies().Get(ctx, ciliumBGPPeeringPolicyName, metav1.GetOptions{}) + policy, err := 
l.CiliumClient.CiliumBGPPeeringPolicies().Get(ctx, ciliumBGPPeeringPolicyName, metav1.GetOptions{}) if err != nil && !k8serrors.IsNotFound(err) { klog.Infof("Failed to get CiliumBGPPeeringPolicy: %s", err.Error()) return err @@ -513,7 +513,7 @@ func (l *Loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error // otherwise create it var nodeSelector slimv1.LabelSelector // If no BGPNodeSelector is specified, select all worker nodes. - if l.options.BGPNodeSelector == "" { + if l.Options.BGPNodeSelector == "" { nodeSelector = slimv1.LabelSelector{ MatchExpressions: []slimv1.LabelSelectorRequirement{ { @@ -523,9 +523,9 @@ func (l *Loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error }, } } else { - kv := strings.Split(l.options.BGPNodeSelector, "=") + kv := strings.Split(l.Options.BGPNodeSelector, "=") if len(kv) != BGPNodeSelectorFlagInputLen { - return fmt.Errorf("invalid node selector %s", l.options.BGPNodeSelector) + return fmt.Errorf("invalid node selector %s", l.Options.BGPNodeSelector) } nodeSelector = slimv1.LabelSelector{MatchLabels: map[string]string{kv[0]: kv[1]}} @@ -581,7 +581,7 @@ func (l *Loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error } klog.Info("Creating CiliumBGPPeeringPolicy") - _, err = l.ciliumClient.CiliumBGPPeeringPolicies().Create(ctx, ciliumBGPPeeringPolicy, metav1.CreateOptions{}) + _, err = l.CiliumClient.CiliumBGPPeeringPolicies().Create(ctx, ciliumBGPPeeringPolicy, metav1.CreateOptions{}) return err } diff --git a/cloud/linode/loadbalancers.go b/cloud/linode/loadbalancers.go index 47049595..482669da 100644 --- a/cloud/linode/loadbalancers.go +++ b/cloud/linode/loadbalancers.go @@ -101,12 +101,12 @@ func (e lbNotFoundError) Error() string { } type Loadbalancers struct { - client client.Client - zone string - kubeClient kubernetes.Interface - ciliumClient ciliumclient.CiliumV2alpha1Interface - loadBalancerType string - options *OptionsConfig + Client client.Client + Zone string + KubeClient kubernetes.Interface + CiliumClient ciliumclient.CiliumV2alpha1Interface + LoadBalancerType string + Options *OptionsConfig } type portConfigAnnotation struct { @@ -131,10 +131,10 @@ type portConfig struct { // NewLoadbalancersWithOptions returns a cloudprovider.LoadBalancer with custom options func NewLoadbalancersWithOptions(client client.Client, zone string, options *OptionsConfig) cloudprovider.LoadBalancer { return &Loadbalancers{ - client: client, - zone: zone, - loadBalancerType: options.LoadBalancerType, - options: options, + Client: client, + Zone: zone, + LoadBalancerType: options.LoadBalancerType, + Options: options, } } @@ -162,7 +162,7 @@ func (l *Loadbalancers) getLatestServiceLoadBalancerStatus(ctx context.Context, return v1.LoadBalancerStatus{}, err } - service, err = l.kubeClient.CoreV1().Services(service.Namespace).Get(ctx, service.Name, metav1.GetOptions{}) + service, err = l.KubeClient.CoreV1().Services(service.Namespace).Get(ctx, service.Name, metav1.GetOptions{}) if err != nil { return v1.LoadBalancerStatus{}, err } @@ -226,7 +226,7 @@ func (l *Loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1. return nil } - if err := l.client.DeleteNodeBalancer(ctx, previousNB.ID); err != nil { + if err := l.Client.DeleteNodeBalancer(ctx, previousNB.ID); err != nil { return err } @@ -239,7 +239,7 @@ func (l *Loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1. // GetLoadBalancer will not modify service. 
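With the struct and all of its collaborator fields exported above, the package-level constructor is no longer the only entry point; an importing project can assemble the value directly, which is the stated goal of this series. A minimal sketch of that usage, assuming the module's import path (illustrative here) and an already-configured Linode API client satisfying this package's client.Client interface; note the struct is renamed to LoadBalancers later in this series:

	import ccm "github.com/linode/linode-cloud-controller-manager/cloud/linode" // illustrative import path

	// newLB builds a Loadbalancers value field by field. KubeClient and
	// CiliumClient can stay nil: the methods in this file resolve them
	// lazily through retrieveKubeClient and retrieveCiliumClientset.
	func newLB(apiClient client.Client) *ccm.Loadbalancers {
		return &ccm.Loadbalancers{
			Client:           apiClient,
			Zone:             "us-east", // illustrative region
			LoadBalancerType: "nodebalancer",
			Options:          &ccm.OptionsConfig{},
		}
	}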
func (l *Loadbalancers) GetLoadBalancerName(_ context.Context, _ string, _ *v1.Service) string { unixNano := strconv.FormatInt(time.Now().UnixNano(), 16) - return fmt.Sprintf("%s-%s", l.options.NodeBalancerPrefix, unixNano[len(unixNano)-12:]) + return fmt.Sprintf("%s-%s", l.Options.NodeBalancerPrefix, unixNano[len(unixNano)-12:]) } // GetLoadBalancer returns the *v1.LoadBalancerStatus of service. @@ -251,7 +251,7 @@ func (l *Loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string, sentry.SetTag(ctx, "service", service.Name) // Handle LoadBalancers backed by Cilium - if l.loadBalancerType == ciliumLBType { + if l.LoadBalancerType == ciliumLBType { return &v1.LoadBalancerStatus{ Ingress: service.Status.LoadBalancer.Ingress, }, true, nil @@ -282,7 +282,7 @@ func (l *Loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri serviceNn := getServiceNn(service) // Handle LoadBalancers backed by Cilium - if l.loadBalancerType == ciliumLBType { + if l.LoadBalancerType == ciliumLBType { klog.Infof("handling LoadBalancer Service %s as %s", serviceNn, ciliumLBClass) if err = l.ensureCiliumBGPPeeringPolicy(ctx); err != nil { @@ -308,8 +308,8 @@ func (l *Loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri } var ipHolderSuffix string - if l.options.IpHolderSuffix != "" { - ipHolderSuffix = l.options.IpHolderSuffix + if l.Options.IpHolderSuffix != "" { + ipHolderSuffix = l.Options.IpHolderSuffix klog.Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn) } @@ -380,12 +380,12 @@ func (l *Loadbalancers) updateNodeBalancer( nodes []*v1.Node, nb *linodego.NodeBalancer, ) (err error) { - if len(nodes) == 0 && !l.options.AllowEmptyNodeBalancerBackends { + if len(nodes) == 0 && !l.Options.AllowEmptyNodeBalancerBackends { return fmt.Errorf("%w: service %s", errNoNodesAvailable, getServiceNn(service)) } // Log warning when updating NodeBalancer without nodes - if len(nodes) == 0 && l.options.AllowEmptyNodeBalancerBackends { + if len(nodes) == 0 && l.Options.AllowEmptyNodeBalancerBackends { klog.Warningf("Updating NodeBalancer for service (%s) without backend nodes - load balancer will be non-functional until nodes are added", getServiceNn(service)) } @@ -393,7 +393,7 @@ func (l *Loadbalancers) updateNodeBalancer( if connThrottle != nb.ClientConnThrottle { update := nb.GetUpdateOptions() update.ClientConnThrottle = &connThrottle - nb, err = l.client.UpdateNodeBalancer(ctx, nb.ID, update) + nb, err = l.Client.UpdateNodeBalancer(ctx, nb.ID, update) if err != nil { sentry.CaptureError(ctx, err) return err @@ -404,21 +404,21 @@ func (l *Loadbalancers) updateNodeBalancer( if !reflect.DeepEqual(nb.Tags, tags) { update := nb.GetUpdateOptions() update.Tags = &tags - nb, err = l.client.UpdateNodeBalancer(ctx, nb.ID, update) + nb, err = l.Client.UpdateNodeBalancer(ctx, nb.ID, update) if err != nil { sentry.CaptureError(ctx, err) return err } } - fwClient := firewall.LinodeClient{Client: l.client} + fwClient := firewall.LinodeClient{Client: l.Client} err = fwClient.UpdateNodeBalancerFirewall(ctx, l.GetLoadBalancerName(ctx, clusterName, service), tags, service, nb) if err != nil { return err } // Get all of the NodeBalancer's configs - nbCfgs, err := l.client.ListNodeBalancerConfigs(ctx, nb.ID, nil) + nbCfgs, err := l.Client.ListNodeBalancerConfigs(ctx, nb.ID, nil) if err != nil { sentry.CaptureError(ctx, err) return err @@ -452,7 +452,7 @@ func (l *Loadbalancers) updateNodeBalancer( if currentNBCfg != nil { // Obtain list of current NB 
nodes and convert it to map of node IDs var currentNBNodes []linodego.NodeBalancerNode - currentNBNodes, err = l.client.ListNodeBalancerNodes(ctx, nb.ID, currentNBCfg.ID, nil) + currentNBNodes, err = l.Client.ListNodeBalancerNodes(ctx, nb.ID, currentNBCfg.ID, nil) if err != nil { // This error can be ignored, because if we fail to get nodes we can anyway rebuild the config from scratch, // it would just cause the NB to reload config even if the node list did not change, so we prefer to send IDs when it is possible. @@ -468,8 +468,8 @@ func (l *Loadbalancers) updateNodeBalancer( // Add all of the Nodes to the config newNBNodes := make([]linodego.NodeBalancerConfigRebuildNodeOptions, 0, len(nodes)) subnetID := 0 - if l.options.NodeBalancerBackendIPv4SubnetID != 0 { - subnetID = l.options.NodeBalancerBackendIPv4SubnetID + if l.Options.NodeBalancerBackendIPv4SubnetID != 0 { + subnetID = l.Options.NodeBalancerBackendIPv4SubnetID } backendIPv4Range, ok := service.GetAnnotations()[annotations.NodeBalancerBackendIPv4Range] if ok { @@ -477,7 +477,7 @@ func (l *Loadbalancers) updateNodeBalancer( return err } } - if len(l.options.VPCNames) > 0 && !l.options.DisableNodeBalancerVPCBackends { + if len(l.Options.VPCNames) > 0 && !l.Options.DisableNodeBalancerVPCBackends { var id int id, err = l.getSubnetIDForSVC(ctx, service) if err != nil { @@ -513,7 +513,7 @@ func (l *Loadbalancers) updateNodeBalancer( if currentNBCfg == nil { createOpts := newNBCfg.GetCreateOptions() - currentNBCfg, err = l.client.CreateNodeBalancerConfig(ctx, nb.ID, createOpts) + currentNBCfg, err = l.Client.CreateNodeBalancerConfig(ctx, nb.ID, createOpts) if err != nil { sentry.CaptureError(ctx, err) return fmt.Errorf("[port %d] error creating NodeBalancer config: %w", int(port.Port), err) @@ -530,7 +530,7 @@ func (l *Loadbalancers) updateNodeBalancer( rebuildOpts.Nodes = newNBNodes - if _, err = l.client.RebuildNodeBalancerConfig(ctx, nb.ID, currentNBCfg.ID, rebuildOpts); err != nil { + if _, err = l.Client.RebuildNodeBalancerConfig(ctx, nb.ID, currentNBCfg.ID, rebuildOpts); err != nil { sentry.CaptureError(ctx, err) return fmt.Errorf("[port %d] error rebuilding NodeBalancer config: %w", int(port.Port), err) } @@ -546,12 +546,12 @@ func (l *Loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri sentry.SetTag(ctx, "service", service.Name) // handle LoadBalancers backed by Cilium - if l.loadBalancerType == ciliumLBType { + if l.LoadBalancerType == ciliumLBType { klog.Infof("handling update for LoadBalancer Service %s/%s as %s", service.Namespace, service.Name, ciliumLBClass) serviceNn := getServiceNn(service) var ipHolderSuffix string - if l.options.IpHolderSuffix != "" { - ipHolderSuffix = l.options.IpHolderSuffix + if l.Options.IpHolderSuffix != "" { + ipHolderSuffix = l.Options.IpHolderSuffix klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn) } @@ -601,7 +601,7 @@ func (l *Loadbalancers) deleteUnusedConfigs(ctx context.Context, nbConfigs []lin } } if !found { - if err := l.client.DeleteNodeBalancerConfig(ctx, nbc.NodeBalancerID, nbc.ID); err != nil { + if err := l.Client.DeleteNodeBalancerConfig(ctx, nbc.NodeBalancerID, nbc.ID); err != nil { return err } } @@ -626,7 +626,7 @@ func (l *Loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa sentry.SetTag(ctx, "service", service.Name) // Handle LoadBalancers backed by Cilium - if l.loadBalancerType == ciliumLBType { + if l.LoadBalancerType == ciliumLBType { klog.Infof("handling LoadBalancer Service 
%s/%s as %s", service.Namespace, service.Name, ciliumLBClass) if err := l.deleteSharedIP(ctx, service); err != nil { return err @@ -672,12 +672,12 @@ func (l *Loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa return nil } - fwClient := firewall.LinodeClient{Client: l.client} + fwClient := firewall.LinodeClient{Client: l.Client} if err = fwClient.DeleteNodeBalancerFirewall(ctx, service, nb); err != nil { return err } - if err = l.client.DeleteNodeBalancer(ctx, nb.ID); err != nil { + if err = l.Client.DeleteNodeBalancer(ctx, nb.ID); err != nil { klog.Errorf("failed to delete NodeBalancer (%d) for service (%s): %s", nb.ID, serviceNn, err) sentry.CaptureError(ctx, err) return err @@ -688,7 +688,7 @@ func (l *Loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa } func (l *Loadbalancers) getNodeBalancerByHostname(ctx context.Context, service *v1.Service, hostname string) (*linodego.NodeBalancer, error) { - lbs, err := l.client.ListNodeBalancers(ctx, nil) + lbs, err := l.Client.ListNodeBalancers(ctx, nil) if err != nil { return nil, err } @@ -709,7 +709,7 @@ func (l *Loadbalancers) getNodeBalancerByIP(ctx context.Context, service *v1.Ser filter = fmt.Sprintf(`{"ipv4": "%v"}`, ip.String()) } - lbs, err := l.client.ListNodeBalancers(ctx, &linodego.ListOptions{Filter: filter}) + lbs, err := l.Client.ListNodeBalancers(ctx, &linodego.ListOptions{Filter: filter}) if err != nil { return nil, err } @@ -721,7 +721,7 @@ func (l *Loadbalancers) getNodeBalancerByIP(ctx context.Context, service *v1.Ser } func (l *Loadbalancers) getNodeBalancerByID(ctx context.Context, service *v1.Service, id int) (*linodego.NodeBalancer, error) { - nb, err := l.client.GetNodeBalancer(ctx, id) + nb, err := l.Client.GetNodeBalancer(ctx, id) if err != nil { var targetError *linodego.Error if errors.As(err, &targetError) && targetError.Code == http.StatusNotFound { @@ -738,7 +738,7 @@ func (l *Loadbalancers) GetLoadBalancerTags(_ context.Context, clusterName strin tags = append(tags, clusterName) } - tags = append(tags, l.options.NodeBalancerTags...) + tags = append(tags, l.Options.NodeBalancerTags...) tagStr, ok := service.GetAnnotations()[annotations.AnnLinodeLoadBalancerTags] if ok { @@ -755,7 +755,7 @@ func (l *Loadbalancers) GetLinodeNBType(service *v1.Service) linodego.NodeBalanc return linodego.NBTypePremium } - return linodego.NodeBalancerPlanType(l.options.DefaultNBType) + return linodego.NodeBalancerPlanType(l.Options.DefaultNBType) } // getVPCCreateOptions returns the VPC options for the NodeBalancer creation. 
@@ -807,10 +807,10 @@ func (l *Loadbalancers) getVPCCreateOptions(ctx context.Context, service *v1.Ser // Precedence 3: If the user has specified a NodeBalancerBackendIPv4SubnetID, use that // and auto-allocate subnets from it for the NodeBalancer - if l.options.NodeBalancerBackendIPv4SubnetID != 0 { + if l.Options.NodeBalancerBackendIPv4SubnetID != 0 { vpcCreateOpts := []linodego.NodeBalancerVPCOptions{ { - SubnetID: l.options.NodeBalancerBackendIPv4SubnetID, + SubnetID: l.Options.NodeBalancerBackendIPv4SubnetID, }, } return vpcCreateOpts, nil @@ -818,11 +818,11 @@ func (l *Loadbalancers) getVPCCreateOptions(ctx context.Context, service *v1.Ser // Precedence 4: If the user has specified a NodeBalancerBackendIPv4Subnet, use that // and auto-allocate subnets from it for the NodeBalancer - if l.options.NodeBalancerBackendIPv4Subnet != "" { + if l.Options.NodeBalancerBackendIPv4Subnet != "" { vpcCreateOpts := []linodego.NodeBalancerVPCOptions{ { SubnetID: subnetID, - IPv4Range: l.options.NodeBalancerBackendIPv4Subnet, + IPv4Range: l.Options.NodeBalancerBackendIPv4Subnet, IPv4RangeAutoAssign: true, }, } @@ -846,14 +846,14 @@ func (l *Loadbalancers) createNodeBalancer(ctx context.Context, clusterName stri nbType := l.GetLinodeNBType(service) createOpts := linodego.NodeBalancerCreateOptions{ Label: &label, - Region: l.zone, + Region: l.Zone, ClientConnThrottle: &connThrottle, Configs: configs, Tags: tags, Type: nbType, } - if len(l.options.VPCNames) > 0 && !l.options.DisableNodeBalancerVPCBackends { + if len(l.Options.VPCNames) > 0 && !l.Options.DisableNodeBalancerVPCBackends { createOpts.VPCs, err = l.getVPCCreateOptions(ctx, service) if err != nil { return nil, err @@ -876,7 +876,7 @@ func (l *Loadbalancers) createNodeBalancer(ctx context.Context, clusterName stri return nil, err } - fw, err := l.client.CreateFirewall(ctx, *fwcreateOpts) + fw, err := l.Client.CreateFirewall(ctx, *fwcreateOpts) if err != nil { return nil, err } @@ -885,7 +885,7 @@ func (l *Loadbalancers) createNodeBalancer(ctx context.Context, clusterName stri // no need to deal with firewalls, continue creating nb's } - return l.client.CreateNodeBalancer(ctx, createOpts) + return l.Client.CreateNodeBalancer(ctx, createOpts) } func (l *Loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1.Service, port v1.ServicePort) (linodego.NodeBalancerConfig, error) { @@ -979,7 +979,7 @@ func (l *Loadbalancers) addTLSCert(ctx context.Context, service *v1.Service, nbC return err } - nbConfig.SSLCert, nbConfig.SSLKey, err = getTLSCertInfo(ctx, l.kubeClient, service.Namespace, config) + nbConfig.SSLCert, nbConfig.SSLKey, err = getTLSCertInfo(ctx, l.KubeClient, service.Namespace, config) if err != nil { return err } @@ -993,7 +993,7 @@ func (l *Loadbalancers) addTLSCert(ctx context.Context, service *v1.Service, nbC // 3. If CCM is configured with --nodebalancer-backend-ipv4-subnet-id, it will be used as the subnet ID. // 4. Else, use first VPCName and SubnetName to calculate subnet id for the service. 
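// For example (illustrative names only): with the CCM configured with
// VPCNames=["vpc-prod"] and SubnetNames=["subnet-a"], no VPC or subnet
// annotations on the Service, and --nodebalancer-backend-ipv4-subnet-id
// unset, step 4 applies: "vpc-prod" is resolved through GetVPCID and the
// subnet ID is then looked up with GetSubnetID.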
func (l *Loadbalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Service) (int, error) { - if len(l.options.VPCNames) == 0 { + if len(l.Options.VPCNames) == 0 { return 0, fmt.Errorf("CCM not configured with VPC, cannot create NodeBalancer with specified annotation") } // Check if the service has an annotation for NodeBalancerBackendSubnetID @@ -1010,37 +1010,37 @@ func (l *Loadbalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Servi // If no VPCName or SubnetName is specified in annotations, but NodeBalancerBackendIPv4SubnetID is set, // use the NodeBalancerBackendIPv4SubnetID as the subnet ID. - if !vpcOk && !subnetOk && l.options.NodeBalancerBackendIPv4SubnetID != 0 { - return l.options.NodeBalancerBackendIPv4SubnetID, nil + if !vpcOk && !subnetOk && l.Options.NodeBalancerBackendIPv4SubnetID != 0 { + return l.Options.NodeBalancerBackendIPv4SubnetID, nil } - vpcName := l.options.VPCNames[0] + vpcName := l.Options.VPCNames[0] if vpcOk { vpcName = specifiedVPCName } - vpcID, err := GetVPCID(ctx, l.client, vpcName) + vpcID, err := GetVPCID(ctx, l.Client, vpcName) if err != nil { return 0, err } - subnetName := l.options.SubnetNames[0] + subnetName := l.Options.SubnetNames[0] if subnetOk { subnetName = specifiedSubnetName } // Use the VPC ID and Subnet Name to get the subnet ID - return GetSubnetID(ctx, l.client, vpcID, subnetName) + return GetSubnetID(ctx, l.Client, vpcID, subnetName) } // buildLoadBalancerRequest returns a linodego.NodeBalancer // requests for service across nodes. func (l *Loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*linodego.NodeBalancer, error) { - if len(nodes) == 0 && !l.options.AllowEmptyNodeBalancerBackends { + if len(nodes) == 0 && !l.Options.AllowEmptyNodeBalancerBackends { return nil, fmt.Errorf("%w: cluster %s, service %s", errNoNodesAvailable, clusterName, getServiceNn(service)) } // Log warning when creating NodeBalancer without nodes - if len(nodes) == 0 && l.options.AllowEmptyNodeBalancerBackends { + if len(nodes) == 0 && l.Options.AllowEmptyNodeBalancerBackends { klog.Warningf("Creating NodeBalancer for service (%s) without backend nodes - load balancer will be non-functional until nodes are added", getServiceNn(service)) } @@ -1048,8 +1048,8 @@ func (l *Loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterNam configs := make([]*linodego.NodeBalancerConfigCreateOptions, 0, len(ports)) subnetID := 0 - if l.options.NodeBalancerBackendIPv4SubnetID != 0 { - subnetID = l.options.NodeBalancerBackendIPv4SubnetID + if l.Options.NodeBalancerBackendIPv4SubnetID != 0 { + subnetID = l.Options.NodeBalancerBackendIPv4SubnetID } // Check for the NodeBalancerBackendIPv4Range annotation backendIPv4Range, ok := service.GetAnnotations()[annotations.NodeBalancerBackendIPv4Range] @@ -1058,7 +1058,7 @@ func (l *Loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterNam return nil, err } } - if len(l.options.VPCNames) > 0 && !l.options.DisableNodeBalancerVPCBackends { + if len(l.Options.VPCNames) > 0 && !l.Options.DisableNodeBalancerVPCBackends { id, err := l.getSubnetIDForSVC(ctx, service) if err != nil { return nil, err @@ -1130,7 +1130,7 @@ func (l *Loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, } func (l *Loadbalancers) retrieveKubeClient() error { - if l.kubeClient != nil { + if l.KubeClient != nil { return nil } @@ -1141,7 +1141,7 @@ func (l *Loadbalancers) retrieveKubeClient() error { // Check to see if --kubeconfig was set. 
If it was, build a kubeconfig from the given file. // Otherwise, use the in-cluster config. - kubeconfigFlag := l.options.KubeconfigFlag + kubeconfigFlag := l.Options.KubeconfigFlag if kubeconfigFlag == nil || kubeconfigFlag.Value.String() == "" { kubeConfig, err = rest.InClusterConfig() } else { @@ -1152,7 +1152,7 @@ func (l *Loadbalancers) retrieveKubeClient() error { return err } - l.kubeClient, err = kubernetes.NewForConfig(kubeConfig) + l.KubeClient, err = kubernetes.NewForConfig(kubeConfig) if err != nil { return err } @@ -1334,7 +1334,7 @@ func (l *Loadbalancers) makeLoadBalancerStatus(service *v1.Service, nb *linodego } // Check for per-service IPv6 annotation first, then fall back to global setting - useIPv6 := getServiceBoolAnnotation(service, annotations.AnnLinodeEnableIPv6Ingress) || l.options.EnableIPv6ForLoadBalancers + useIPv6 := getServiceBoolAnnotation(service, annotations.AnnLinodeEnableIPv6Ingress) || l.Options.EnableIPv6ForLoadBalancers // When IPv6 is enabled (either per-service or globally), include both IPv4 and IPv6 if useIPv6 && nb.IPv6 != nil && *nb.IPv6 != "" { @@ -1387,15 +1387,15 @@ func getServiceBoolAnnotation(service *v1.Service, name string) bool { // validateNodeBalancerBackendIPv4Range validates the NodeBalancerBackendIPv4Range // annotation to be within the NodeBalancerBackendIPv4Subnet if it is set. func (l *Loadbalancers) validateNodeBalancerBackendIPv4Range(backendIPv4Range string) error { - if l.options.NodeBalancerBackendIPv4Subnet == "" { + if l.Options.NodeBalancerBackendIPv4Subnet == "" { return nil } - withinCIDR, err := isCIDRWithinCIDR(l.options.NodeBalancerBackendIPv4Subnet, backendIPv4Range) + withinCIDR, err := isCIDRWithinCIDR(l.Options.NodeBalancerBackendIPv4Subnet, backendIPv4Range) if err != nil { return fmt.Errorf("invalid IPv4 range: %w", err) } if !withinCIDR { - return fmt.Errorf("IPv4 range %s is not within the subnet %s", backendIPv4Range, l.options.NodeBalancerBackendIPv4Subnet) + return fmt.Errorf("IPv4 range %s is not within the subnet %s", backendIPv4Range, l.Options.NodeBalancerBackendIPv4Subnet) } return nil } diff --git a/cloud/linode/loadbalancers_test.go b/cloud/linode/loadbalancers_test.go index 5a683cd9..e8474c17 100644 --- a/cloud/linode/loadbalancers_test.go +++ b/cloud/linode/loadbalancers_test.go @@ -392,9 +392,9 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a return err } - if nb.Region != lb.zone { + if nb.Region != lb.Zone { t.Error("unexpected nodebalancer region") - t.Logf("expected: %s", lb.zone) + t.Logf("expected: %s", lb.Zone) t.Logf("actual: %s", nb.Region) } @@ -489,7 +489,7 @@ func testCreateNodeBalancerWithNodeNoAddresses(t *testing.T, client *linodego.Cl t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -721,7 +721,7 @@ func testUpdateNodeBalancerWithVPCBackend(t *testing.T, client *linodego.Client, t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -808,7 +808,7 @@ func testCreateNodeBalancerWithVPCOnlySubnetFlag(t *testing.T, client *linodego. 
t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -901,7 +901,7 @@ func testCreateNodeBalancerWithVPCNoFlagOrAnnotation(t *testing.T, client *linod t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -990,7 +990,7 @@ func testCreateNodeBalancerWithVPCAnnotationOnly(t *testing.T, client *linodego. t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -1075,7 +1075,7 @@ func testCreateNodeBalancerWithVPCOnlySubnetIDFlag(t *testing.T, client *linodeg t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -1221,7 +1221,7 @@ func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fak t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -1386,7 +1386,7 @@ func testUpdateLoadBalancerAddAnnotation(t *testing.T, client *linodego.Client, t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -1461,7 +1461,7 @@ func testUpdateLoadBalancerAddPortAnnotation(t *testing.T, client *linodego.Clie t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -1574,7 +1574,7 @@ func testVeryLongServiceName(t *testing.T, client *linodego.Client, _ *fakeAPI) t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -1642,7 +1642,7 @@ func testUpdateLoadBalancerAddTags(t *testing.T, client *linodego.Client, _ *fak t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset clusterName := "linodelb" defer func() { @@ -1732,8 +1732,8 @@ func testUpdateLoadBalancerAddTLSPort(t *testing.T, client *linodego.Client, _ * }() fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset - addTLSSecret(t, lb.kubeClient) + lb.KubeClient = fakeClientset + addTLSSecret(t, lb.KubeClient) stubService(fakeClientset, svc) lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) @@ -1808,7 +1808,7 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset for _, tc := range []struct { name string @@ -1856,7 +1856,7 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien _ = 
lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() nodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ - Region: lb.zone, + Region: lb.Zone, }) if err != nil { t.Fatalf("failed to create NodeBalancer: %s", err) @@ -1936,7 +1936,7 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client, t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -1984,7 +1984,7 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client, t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -2037,7 +2037,7 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -2055,7 +2055,7 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("Failed to list nodeBalancer firewalls %s", err) } @@ -2113,7 +2113,7 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) + firewallsNew, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -2169,7 +2169,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodeg t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset svc.SetAnnotations(map[string]string{ annotations.AnnLinodeCloudFirewallACL: `{ @@ -2195,7 +2195,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodeg t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("Failed to list nodeBalancer firewalls %s", err) } @@ -2220,7 +2220,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodeg t.Errorf("UpdateLoadBalancer returned an error: %s", err) } - firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) + firewallsNew, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -2268,7 +2268,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li t.Error("type 
assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset svc.SetAnnotations(map[string]string{ annotations.AnnLinodeCloudFirewallACL: `{ @@ -2294,7 +2294,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("Failed to list nodeBalancer firewalls %s", err) } @@ -2347,7 +2347,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) + firewallsNew, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -2408,7 +2408,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset fwClient := firewall.LinodeClient{Client: client} fw, err := fwClient.CreateFirewall(t.Context(), linodego.FirewallCreateOptions{ @@ -2452,7 +2452,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("Failed to list nodeBalancer firewalls %s", err) } @@ -2487,7 +2487,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) + firewallsNew, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -2555,7 +2555,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -2573,7 +2573,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("Failed to list nodeBalancer firewalls %s", err) } @@ -2611,7 +2611,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) + firewallsNew, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to 
List Firewalls %s", err) } @@ -2657,7 +2657,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewallsNew, err = lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) + firewallsNew, err = lb.Client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -2703,7 +2703,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewallsNew, err = lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) + firewallsNew, err = lb.Client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -2791,7 +2791,7 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -2822,7 +2822,7 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("Failed to list nodeBalancer firewalls %s", err) } @@ -2858,7 +2858,7 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) + firewallsNew, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -2924,7 +2924,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveID(t *testing.T, client *linodego t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) @@ -2956,7 +2956,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveID(t *testing.T, client *linodego t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Errorf("Error in listing firewalls %s", err) } @@ -2976,7 +2976,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveID(t *testing.T, client *linodego t.Errorf("UpdateLoadBalancer returned an error: %s", err) } - firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) + firewallsNew, err := lb.Client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -3029,10 +3029,10 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie }() fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset nodeBalancer, 
err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ - Region: lb.zone, + Region: lb.Zone, }) if err != nil { t.Fatalf("failed to create NodeBalancer: %s", err) @@ -3041,7 +3041,7 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie svc.Status.LoadBalancer = *lb.makeLoadBalancerStatus(svc, nodeBalancer) newNodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ - Region: lb.zone, + Region: lb.Zone, }) if err != nil { t.Fatalf("failed to create new NodeBalancer: %s", err) @@ -4216,8 +4216,8 @@ func testEnsureExistingLoadBalancer(t *testing.T, client *linodego.Client, _ *fa if !assertion { t.Error("type assertion failed") } - lb.kubeClient = fake.NewSimpleClientset() - addTLSSecret(t, lb.kubeClient) + lb.KubeClient = fake.NewSimpleClientset() + addTLSSecret(t, lb.KubeClient) configs := []*linodego.NodeBalancerConfigCreateOptions{} nb, err := lb.createNodeBalancer(t.Context(), "linodelb", svc, configs) @@ -4338,7 +4338,7 @@ func testMakeLoadBalancerStatus(t *testing.T, client *linodego.Client, _ *fakeAP } l := Loadbalancers{ - options: &OptionsConfig{ + Options: &OptionsConfig{ EnableIPv6ForLoadBalancers: false, }, } @@ -4376,7 +4376,7 @@ func testMakeLoadBalancerStatusWithIPv6(t *testing.T, client *linodego.Client, _ // Test with EnableIPv6ForLoadBalancers = false (default) l := Loadbalancers{ - options: &OptionsConfig{ + Options: &OptionsConfig{ EnableIPv6ForLoadBalancers: false, }, } @@ -4392,7 +4392,7 @@ func testMakeLoadBalancerStatusWithIPv6(t *testing.T, client *linodego.Client, _ } // Test with EnableIPv6ForLoadBalancers = true - l.options.EnableIPv6ForLoadBalancers = true + l.Options.EnableIPv6ForLoadBalancers = true expectedStatus = &v1.LoadBalancerStatus{ Ingress: []v1.LoadBalancerIngress{ { @@ -4412,7 +4412,7 @@ func testMakeLoadBalancerStatusWithIPv6(t *testing.T, client *linodego.Client, _ // Test with per-service annotation // Reset the global flag to false and set the annotation - l.options.EnableIPv6ForLoadBalancers = false + l.Options.EnableIPv6ForLoadBalancers = false svc.Annotations[annotations.AnnLinodeEnableIPv6Ingress] = "true" // Expect the same result as when the global flag is enabled @@ -4451,7 +4451,7 @@ func testMakeLoadBalancerStatusEnvVar(t *testing.T, client *linodego.Client, _ * } l := Loadbalancers{ - options: &OptionsConfig{ + Options: &OptionsConfig{ EnableIPv6ForLoadBalancers: false, }, } @@ -4587,10 +4587,10 @@ func testUpdateLoadBalancerNodeExcludedByAnnotation(t *testing.T, client *linode }() fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset nodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ - Region: lb.zone, + Region: lb.Zone, }) if err != nil { t.Fatalf("failed to create NodeBalancer: %s", err) @@ -4830,10 +4830,10 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak }() fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset nodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ - Region: lb.zone, + Region: lb.Zone, }) if err != nil { t.Fatalf("failed to create NodeBalancer: %s", err) @@ -4864,7 +4864,7 @@ func testGetNodeBalancerByStatus(t *testing.T, client *linodego.Client, _ *fakeA t.Error("type assertion failed") } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset for 
_, test := range []struct { name string @@ -4973,7 +4973,7 @@ func testEnsureNewLoadBalancerWithNodeBalancerID(t *testing.T, client *linodego. t.Error("type assertion failed") } nodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ - Region: lb.zone, + Region: lb.Zone, }) if err != nil { t.Fatalf("failed to create NodeBalancer: %s", err) @@ -5071,8 +5071,8 @@ func testEnsureNewLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI if !assertion { t.Error("type assertion failed") } - lb.kubeClient = fake.NewSimpleClientset() - addTLSSecret(t, lb.kubeClient) + lb.KubeClient = fake.NewSimpleClientset() + addTLSSecret(t, lb.KubeClient) defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() @@ -5153,7 +5153,7 @@ func testGetLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Fatal(err) } fakeClientset := fake.NewSimpleClientset() - lb.kubeClient = fakeClientset + lb.KubeClient = fakeClientset defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() @@ -5509,12 +5509,12 @@ func Test_loadbalancers_GetLinodeNBType(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { l := &Loadbalancers{ - client: tt.fields.client, - zone: tt.fields.zone, - kubeClient: tt.fields.kubeClient, - ciliumClient: tt.fields.ciliumClient, - loadBalancerType: tt.fields.loadBalancerType, - options: &OptionsConfig{ + Client: tt.fields.client, + Zone: tt.fields.zone, + KubeClient: tt.fields.kubeClient, + CiliumClient: tt.fields.ciliumClient, + LoadBalancerType: tt.fields.loadBalancerType, + Options: &OptionsConfig{ DefaultNBType: string(tt.defaultNB), }, } @@ -5547,12 +5547,12 @@ func Test_validateNodeBalancerBackendIPv4Range(t *testing.T) { } l := &Loadbalancers{ - client: nil, - zone: "", - kubeClient: nil, - ciliumClient: nil, - loadBalancerType: "nodebalancer", - options: &OptionsConfig{ + Client: nil, + Zone: "", + KubeClient: nil, + CiliumClient: nil, + LoadBalancerType: "nodebalancer", + Options: &OptionsConfig{ NodeBalancerBackendIPv4Subnet: "10.100.0.0/24", }, } diff --git a/cloud/linode/service_controller_test.go b/cloud/linode/service_controller_test.go index cc3308ed..ed7bb4a7 100644 --- a/cloud/linode/service_controller_test.go +++ b/cloud/linode/service_controller_test.go @@ -70,7 +70,7 @@ func Test_serviceController_processNextDeletion(t *testing.T) { loadbalancers: nil, }, Setup: func(f *fields) { - f.loadbalancers = &Loadbalancers{client: f.Client, zone: "test", loadBalancerType: Options.LoadBalancerType} + f.loadbalancers = &Loadbalancers{Client: f.Client, Zone: "test", LoadBalancerType: Options.LoadBalancerType} f.queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) f.queue.Add("test") }, @@ -83,7 +83,7 @@ func Test_serviceController_processNextDeletion(t *testing.T) { loadbalancers: nil, }, Setup: func(f *fields) { - f.loadbalancers = &Loadbalancers{client: f.Client, zone: "test", loadBalancerType: Options.LoadBalancerType} + f.loadbalancers = &Loadbalancers{Client: f.Client, Zone: "test", LoadBalancerType: Options.LoadBalancerType} f.queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) svc := createTestService() f.queue.Add(svc) @@ -105,7 +105,7 @@ func Test_serviceController_processNextDeletion(t *testing.T) { tt.Setup(&tt.fields) s.loadbalancers = tt.fields.loadbalancers s.queue = tt.fields.queue - s.loadbalancers.client = tt.fields.Client + 
s.loadbalancers.Client = tt.fields.Client if got := s.processNextDeletion(); got != tt.want { t.Errorf("serviceController.processNextDeletion() = %v, want %v", got, tt.want) } From d23d79c10e010393152df2ee8ce245ca1859bfee Mon Sep 17 00:00:00 2001 From: Khaja Omer Date: Tue, 29 Jul 2025 15:47:02 -0400 Subject: [PATCH 6/9] revert a redundant if statement --- cloud/linode/loadbalancers.go | 67 ++++++++++++++++------------------- 1 file changed, 31 insertions(+), 36 deletions(-) diff --git a/cloud/linode/loadbalancers.go b/cloud/linode/loadbalancers.go index 482669da..72b0a6f2 100644 --- a/cloud/linode/loadbalancers.go +++ b/cloud/linode/loadbalancers.go @@ -487,26 +487,24 @@ func (l *Loadbalancers) updateNodeBalancer( subnetID = id } - if len(nodes) > 0 { - for _, node := range nodes { - if _, ok := node.Annotations[annotations.AnnExcludeNodeFromNb]; ok { - klog.Infof("Node %s is excluded from NodeBalancer by annotation, skipping", node.Name) - continue - } - var newNodeOpts *linodego.NodeBalancerConfigRebuildNodeOptions - newNodeOpts, err = l.buildNodeBalancerNodeConfigRebuildOptions(node, port.NodePort, subnetID, newNBCfg.Protocol) - if err != nil { - sentry.CaptureError(ctx, err) - return fmt.Errorf("failed to build NodeBalancer node config options for node %s: %w", node.Name, err) - } - oldNodeID, ok := oldNBNodeIDs[newNodeOpts.Address] - if ok { - newNodeOpts.ID = oldNodeID - } else { - klog.Infof("No preexisting node id for %v found.", newNodeOpts.Address) - } - newNBNodes = append(newNBNodes, *newNodeOpts) + for _, node := range nodes { + if _, ok := node.Annotations[annotations.AnnExcludeNodeFromNb]; ok { + klog.Infof("Node %s is excluded from NodeBalancer by annotation, skipping", node.Name) + continue } + var newNodeOpts *linodego.NodeBalancerConfigRebuildNodeOptions + newNodeOpts, err = l.buildNodeBalancerNodeConfigRebuildOptions(node, port.NodePort, subnetID, newNBCfg.Protocol) + if err != nil { + sentry.CaptureError(ctx, err) + return fmt.Errorf("failed to build NodeBalancer node config options for node %s: %w", node.Name, err) + } + oldNodeID, ok := oldNBNodeIDs[newNodeOpts.Address] + if ok { + newNodeOpts.ID = oldNodeID + } else { + klog.Infof("No preexisting node id for %v found.", newNodeOpts.Address) + } + newNBNodes = append(newNBNodes, *newNodeOpts) } // If there's no existing config, create it var rebuildOpts linodego.NodeBalancerConfigRebuildOptions @@ -556,11 +554,10 @@ func (l *Loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri } // make sure that IPs are shared properly on the Node if using load-balancers not backed by NodeBalancers - if len(nodes) > 0 { - for _, node := range nodes { - if err = l.handleIPSharing(ctx, node, ipHolderSuffix); err != nil { - return err - } + + for _, node := range nodes { + if err = l.handleIPSharing(ctx, node, ipHolderSuffix); err != nil { + return err } } return nil @@ -1073,19 +1070,17 @@ func (l *Loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterNam } createOpt := config.GetCreateOptions() - if len(nodes) > 0 { - for _, node := range nodes { - if _, ok := node.Annotations[annotations.AnnExcludeNodeFromNb]; ok { - klog.Infof("Node %s is excluded from NodeBalancer by annotation, skipping", node.Name) - continue - } - newNodeOpts, err := l.buildNodeBalancerNodeConfigRebuildOptions(node, port.NodePort, subnetID, config.Protocol) - if err != nil { - sentry.CaptureError(ctx, err) - return nil, fmt.Errorf("failed to build NodeBalancer node config options for node %s: %w", node.Name, err) - } - 
createOpt.Nodes = append(createOpt.Nodes, newNodeOpts.NodeBalancerNodeCreateOptions) + for _, node := range nodes { + if _, ok := node.Annotations[annotations.AnnExcludeNodeFromNb]; ok { + klog.Infof("Node %s is excluded from NodeBalancer by annotation, skipping", node.Name) + continue + } + newNodeOpts, err := l.buildNodeBalancerNodeConfigRebuildOptions(node, port.NodePort, subnetID, config.Protocol) + if err != nil { + sentry.CaptureError(ctx, err) + return nil, fmt.Errorf("failed to build NodeBalancer node config options for node %s: %w", node.Name, err) } + createOpt.Nodes = append(createOpt.Nodes, newNodeOpts.NodeBalancerNodeCreateOptions) } configs = append(configs, &createOpt) From 50a939c001a58505ff16394017cbe7a5223638f2 Mon Sep 17 00:00:00 2001 From: Khaja Omer Date: Wed, 6 Aug 2025 10:00:29 -0500 Subject: [PATCH 7/9] Rename Loadbalancers struct to LoadBalancers for consistency across the codebase --- cloud/linode/cilium_loadbalancers.go | 26 +++---- cloud/linode/cilium_loadbalancers_test.go | 26 +++---- cloud/linode/cloud.go | 2 +- cloud/linode/loadbalancers.go | 58 ++++++++-------- cloud/linode/loadbalancers_test.go | 82 +++++++++++------------ cloud/linode/service_controller.go | 4 +- cloud/linode/service_controller_test.go | 8 +-- 7 files changed, 103 insertions(+), 103 deletions(-) diff --git a/cloud/linode/cilium_loadbalancers.go b/cloud/linode/cilium_loadbalancers.go index 4059ca75..f377082a 100644 --- a/cloud/linode/cilium_loadbalancers.go +++ b/cloud/linode/cilium_loadbalancers.go @@ -75,7 +75,7 @@ var ( // getExistingSharedIPsInCluster determines the list of addresses to share on nodes by checking the // CiliumLoadBalancerIPPools created by the CCM in createCiliumLBIPPool // NOTE: Cilium CRDs must be installed for this to work -func (l *Loadbalancers) getExistingSharedIPsInCluster(ctx context.Context) ([]string, error) { +func (l *LoadBalancers) getExistingSharedIPsInCluster(ctx context.Context) ([]string, error) { addrs := []string{} if err := l.retrieveCiliumClientset(); err != nil { return addrs, err @@ -94,7 +94,7 @@ func (l *Loadbalancers) getExistingSharedIPsInCluster(ctx context.Context) ([]st return addrs, nil } -func (l *Loadbalancers) getExistingSharedIPs(ctx context.Context, ipHolder *linodego.Instance) ([]string, error) { +func (l *LoadBalancers) getExistingSharedIPs(ctx context.Context, ipHolder *linodego.Instance) ([]string, error) { if ipHolder == nil { return nil, nil } @@ -110,7 +110,7 @@ func (l *Loadbalancers) getExistingSharedIPs(ctx context.Context, ipHolder *lino } // shareIPs shares the given list of IP addresses on the given Node -func (l *Loadbalancers) shareIPs(ctx context.Context, addrs []string, node *v1.Node) error { +func (l *LoadBalancers) shareIPs(ctx context.Context, addrs []string, node *v1.Node) error { nodeLinodeID, err := parseProviderID(node.Spec.ProviderID) if err != nil { return err @@ -151,7 +151,7 @@ func (l *Loadbalancers) shareIPs(ctx context.Context, addrs []string, node *v1.N // perform IP sharing (via a specified node selector) have the expected IPs shared // in the event that a Node joins the cluster after the LoadBalancer Service already // exists -func (l *Loadbalancers) handleIPSharing(ctx context.Context, node *v1.Node, ipHolderSuffix string) error { +func (l *LoadBalancers) handleIPSharing(ctx context.Context, node *v1.Node, ipHolderSuffix string) error { // ignore cases where the provider ID has been set if node.Spec.ProviderID == "" { klog.Info("skipping IP while providerID is unset") @@ -210,7 +210,7 @@ func (l 
*Loadbalancers) handleIPSharing(ctx context.Context, node *v1.Node, ipHo // createSharedIP requests an additional IP that can be shared on Nodes to support // loadbalancing via Cilium LB IPAM + BGP Control Plane. -func (l *Loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ipHolderSuffix string) (string, error) { +func (l *LoadBalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ipHolderSuffix string) (string, error) { ipHolder, err := l.ensureIPHolder(ctx, ipHolderSuffix) if err != nil { return "", err @@ -267,7 +267,7 @@ func (l *Loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ip // deleteSharedIP cleans up the shared IP for a LoadBalancer Service if it was assigned // by Cilium LB IPAM, removing it from the ip-holder -func (l *Loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) error { +func (l *LoadBalancers) deleteSharedIP(ctx context.Context, service *v1.Service) error { err := l.retrieveKubeClient() if err != nil { return err @@ -323,7 +323,7 @@ func (l *Loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) // To hold the IP in lieu of a proper IP reservation system, a special Nanode is // created but not booted and used to hold all shared IPs. -func (l *Loadbalancers) ensureIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) { +func (l *LoadBalancers) ensureIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) { ipHolder, err := l.getIPHolder(ctx, suffix) if err != nil { return nil, err @@ -353,7 +353,7 @@ func (l *Loadbalancers) ensureIPHolder(ctx context.Context, suffix string) (*lin return ipHolder, nil } -func (l *Loadbalancers) getIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) { +func (l *LoadBalancers) getIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) { // even though we have updated the naming convention, leaving this in ensures we have backwards compatibility filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, l.Zone)} rawFilter, err := json.Marshal(filter) @@ -407,7 +407,7 @@ func generateClusterScopedIPHolderLinodeName(zone, suffix string) (label string) return label } -func (l *Loadbalancers) retrieveCiliumClientset() error { +func (l *LoadBalancers) retrieveCiliumClientset() error { if l.CiliumClient != nil { return nil } @@ -432,7 +432,7 @@ func (l *Loadbalancers) retrieveCiliumClientset() error { // for LoadBalancer Services not backed by a NodeBalancer, a CiliumLoadBalancerIPPool resource // will be created specifically for the Service with the requested shared IP // NOTE: Cilium CRDs must be installed for this to work -func (l *Loadbalancers) createCiliumLBIPPool(ctx context.Context, service *v1.Service, sharedIP string) (*v2alpha1.CiliumLoadBalancerIPPool, error) { +func (l *LoadBalancers) createCiliumLBIPPool(ctx context.Context, service *v1.Service, sharedIP string) (*v2alpha1.CiliumLoadBalancerIPPool, error) { if err := l.retrieveCiliumClientset(); err != nil { return nil, err } @@ -459,7 +459,7 @@ func (l *Loadbalancers) createCiliumLBIPPool(ctx context.Context, service *v1.Se } // NOTE: Cilium CRDs must be installed for this to work -func (l *Loadbalancers) deleteCiliumLBIPPool(ctx context.Context, service *v1.Service) error { +func (l *LoadBalancers) deleteCiliumLBIPPool(ctx context.Context, service *v1.Service) error { if err := l.retrieveCiliumClientset(); err != nil { return err } @@ -472,7 +472,7 @@ func (l *Loadbalancers) deleteCiliumLBIPPool(ctx 
context.Context, service *v1.Se } // NOTE: Cilium CRDs must be installed for this to work -func (l *Loadbalancers) getCiliumLBIPPool(ctx context.Context, service *v1.Service) (*v2alpha1.CiliumLoadBalancerIPPool, error) { +func (l *LoadBalancers) getCiliumLBIPPool(ctx context.Context, service *v1.Service) (*v2alpha1.CiliumLoadBalancerIPPool, error) { if err := l.retrieveCiliumClientset(); err != nil { return nil, err } @@ -485,7 +485,7 @@ func (l *Loadbalancers) getCiliumLBIPPool(ctx context.Context, service *v1.Servi } // NOTE: Cilium CRDs must be installed for this to work -func (l *Loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error { +func (l *LoadBalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error { if raw, ok := os.LookupEnv("BGP_CUSTOM_ID_MAP"); ok && raw != "" { klog.Info("BGP_CUSTOM_ID_MAP env variable specified, using it instead of the default region map") if err := json.Unmarshal([]byte(raw), ®ionIDMap); err != nil { diff --git a/cloud/linode/cilium_loadbalancers_test.go b/cloud/linode/cilium_loadbalancers_test.go index f2b11ae0..bb91e187 100644 --- a/cloud/linode/cilium_loadbalancers_test.go +++ b/cloud/linode/cilium_loadbalancers_test.go @@ -222,7 +222,7 @@ func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) { ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} + lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -277,7 +277,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { kubeClient, _ := k8sClient.NewFakeClientset() ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) - lb := &Loadbalancers{mc, "us-foobar", kubeClient, ciliumClient, ciliumLBType, &Options} + lb := &LoadBalancers{mc, "us-foobar", kubeClient, ciliumClient, ciliumLBType, &Options} lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err == nil { @@ -289,7 +289,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { // Use BGP custom id map t.Setenv("BGP_CUSTOM_ID_MAP", "{'us-foobar': 2}") - lb = &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} + lb = &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} lbStatus, err = lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err == nil { t.Fatal("expected not nil error") @@ -310,7 +310,7 @@ func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} + lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -355,7 +355,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T, ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, 
kubeClient, ciliumClient, ciliumLBType, &Options} + lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -400,7 +400,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} + lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -445,7 +445,7 @@ func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockC ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} + lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -497,7 +497,7 @@ func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.Mo ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} + lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -549,7 +549,7 @@ func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.Moc ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} + lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -599,7 +599,7 @@ func testEnsureCiliumLoadBalancerDeletedWithOldIpHolderNamingConvention(t *testi ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} + lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} dummySharedIP := dummyIP svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}} @@ -632,7 +632,7 @@ func testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention(t *testi ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} + lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} dummySharedIP := dummyIP svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}} @@ 
-669,7 +669,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention(t *testi ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} + lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) @@ -732,7 +732,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testi ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} addService(t, kubeClient, svc) addNodes(t, kubeClient, nodes) - lb := &Loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} + lb := &LoadBalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType, &Options} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} rawFilter, err := json.Marshal(filter) diff --git a/cloud/linode/cloud.go b/cloud/linode/cloud.go index ef9c2b62..5e183761 100644 --- a/cloud/linode/cloud.go +++ b/cloud/linode/cloud.go @@ -228,7 +228,7 @@ func (c *linodeCloud) Initialize(clientBuilder cloudprovider.ControllerClientBui go c.linodeTokenHealthChecker.Run(stopCh) } - lb, assertion := c.loadbalancers.(*Loadbalancers) + lb, assertion := c.loadbalancers.(*LoadBalancers) if !assertion { klog.Error("type assertion during Initialize() failed") return diff --git a/cloud/linode/loadbalancers.go b/cloud/linode/loadbalancers.go index 72b0a6f2..32070f2d 100644 --- a/cloud/linode/loadbalancers.go +++ b/cloud/linode/loadbalancers.go @@ -100,7 +100,7 @@ func (e lbNotFoundError) Error() string { return fmt.Sprintf("LoadBalancer not found for service (%s)", e.serviceNn) } -type Loadbalancers struct { +type LoadBalancers struct { Client client.Client Zone string KubeClient kubernetes.Interface @@ -130,7 +130,7 @@ type portConfig struct { // NewLoadbalancersWithOptions returns a cloudprovider.LoadBalancer with custom options func NewLoadbalancersWithOptions(client client.Client, zone string, options *OptionsConfig) cloudprovider.LoadBalancer { - return &Loadbalancers{ + return &LoadBalancers{ Client: client, Zone: zone, LoadBalancerType: options.LoadBalancerType, @@ -144,7 +144,7 @@ func NewLoadbalancers(client client.Client, zone string) cloudprovider.LoadBalan return NewLoadbalancersWithOptions(client, zone, &Options) } -func (l *Loadbalancers) GetNodeBalancerForService(ctx context.Context, service *v1.Service) (*linodego.NodeBalancer, error) { +func (l *LoadBalancers) GetNodeBalancerForService(ctx context.Context, service *v1.Service) (*linodego.NodeBalancer, error) { rawID := service.GetAnnotations()[annotations.AnnLinodeNodeBalancerID] id, idErr := strconv.Atoi(rawID) hasIDAnn := idErr == nil && id != 0 @@ -156,7 +156,7 @@ func (l *Loadbalancers) GetNodeBalancerForService(ctx context.Context, service * return l.getNodeBalancerByStatus(ctx, service) } -func (l *Loadbalancers) getLatestServiceLoadBalancerStatus(ctx context.Context, service *v1.Service) (v1.LoadBalancerStatus, error) { +func (l *LoadBalancers) getLatestServiceLoadBalancerStatus(ctx context.Context, service *v1.Service) (v1.LoadBalancerStatus, error) { err := l.retrieveKubeClient() if err != nil { return v1.LoadBalancerStatus{}, err @@ -171,7 +171,7 @@ func (l *Loadbalancers) getLatestServiceLoadBalancerStatus(ctx context.Context, // 
getNodeBalancerByStatus attempts to get the NodeBalancer from the IP or hostname specified in the // most recent LoadBalancer status. -func (l *Loadbalancers) getNodeBalancerByStatus(ctx context.Context, service *v1.Service) (nb *linodego.NodeBalancer, err error) { +func (l *LoadBalancers) getNodeBalancerByStatus(ctx context.Context, service *v1.Service) (nb *linodego.NodeBalancer, err error) { lb := service.Status.LoadBalancer updatedLb, err := l.getLatestServiceLoadBalancerStatus(ctx, service) if err != nil { @@ -200,7 +200,7 @@ func (l *Loadbalancers) getNodeBalancerByStatus(ctx context.Context, service *v1 // The current NodeBalancer from getNodeBalancerForService is compared to the most recent // LoadBalancer status; if they are different (because of an updated NodeBalancerID // annotation), the old one is deleted. -func (l *Loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1.Service) error { +func (l *LoadBalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1.Service) error { // unless there's an annotation, we can never get a past and current NB to differ, // because they're looked up the same way if _, ok := service.GetAnnotations()[annotations.AnnLinodeNodeBalancerID]; !ok { @@ -237,7 +237,7 @@ func (l *Loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1. // GetLoadBalancerName returns the name of the load balancer. // // GetLoadBalancer will not modify service. -func (l *Loadbalancers) GetLoadBalancerName(_ context.Context, _ string, _ *v1.Service) string { +func (l *LoadBalancers) GetLoadBalancerName(_ context.Context, _ string, _ *v1.Service) string { unixNano := strconv.FormatInt(time.Now().UnixNano(), 16) return fmt.Sprintf("%s-%s", l.Options.NodeBalancerPrefix, unixNano[len(unixNano)-12:]) } @@ -245,7 +245,7 @@ func (l *Loadbalancers) GetLoadBalancerName(_ context.Context, _ string, _ *v1.S // GetLoadBalancer returns the *v1.LoadBalancerStatus of service. // // GetLoadBalancer will not modify service. -func (l *Loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) { +func (l *LoadBalancers) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) { ctx = sentry.SetHubOnContext(ctx) sentry.SetTag(ctx, "cluster_name", clusterName) sentry.SetTag(ctx, "service", service.Name) @@ -275,7 +275,7 @@ func (l *Loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string, // service. // // EnsureLoadBalancer will not modify service or nodes. 
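An aside on the naming scheme: GetLoadBalancerName above builds the NodeBalancer label from the configured prefix plus the low-order hex digits of the current timestamp. A minimal, self-contained sketch of that scheme, assuming a hypothetical prefix "ccm" in place of l.Options.NodeBalancerPrefix:

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	// Hex-encode the current UnixNano timestamp, as GetLoadBalancerName does.
	unixNano := strconv.FormatInt(time.Now().UnixNano(), 16)
	// Keep the last 12 hex digits and prepend the prefix ("ccm" stands in
	// for the real Options.NodeBalancerPrefix).
	fmt.Printf("ccm-%s\n", unixNano[len(unixNano)-12:]) // e.g. ccm-8f2a3b4c5d6e
}

Note that the label is time-derived rather than service-derived, which is why the method ignores its context, cluster name, and service arguments.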
-func (l *Loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (lbStatus *v1.LoadBalancerStatus, err error) { +func (l *LoadBalancers) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (lbStatus *v1.LoadBalancerStatus, err error) { ctx = sentry.SetHubOnContext(ctx) sentry.SetTag(ctx, "cluster_name", clusterName) sentry.SetTag(ctx, "service", service.Name) @@ -373,7 +373,7 @@ func (l *Loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri return lbStatus, nil } -func (l *Loadbalancers) updateNodeBalancer( +func (l *LoadBalancers) updateNodeBalancer( ctx context.Context, clusterName string, service *v1.Service, @@ -538,7 +538,7 @@ func (l *Loadbalancers) updateNodeBalancer( } // UpdateLoadBalancer updates the NodeBalancer to have configs that match the Service's ports -func (l *Loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (err error) { +func (l *LoadBalancers) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (err error) { ctx = sentry.SetHubOnContext(ctx) sentry.SetTag(ctx, "cluster_name", clusterName) sentry.SetTag(ctx, "service", service.Name) @@ -589,7 +589,7 @@ func (l *Loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri // Delete any NodeBalancer configs for ports that no longer exist on the Service // Note: Don't build a map or other lookup structure here, it is not worth the overhead -func (l *Loadbalancers) deleteUnusedConfigs(ctx context.Context, nbConfigs []linodego.NodeBalancerConfig, servicePorts []v1.ServicePort) error { +func (l *LoadBalancers) deleteUnusedConfigs(ctx context.Context, nbConfigs []linodego.NodeBalancerConfig, servicePorts []v1.ServicePort) error { for _, nbc := range nbConfigs { found := false for _, sp := range servicePorts { @@ -608,7 +608,7 @@ func (l *Loadbalancers) deleteUnusedConfigs(ctx context.Context, nbConfigs []lin // shouldPreserveNodeBalancer determines whether a NodeBalancer should be deleted based on the // service's preserve annotation. -func (l *Loadbalancers) shouldPreserveNodeBalancer(service *v1.Service) bool { +func (l *LoadBalancers) shouldPreserveNodeBalancer(service *v1.Service) bool { return getServiceBoolAnnotation(service, annotations.AnnLinodeLoadBalancerPreserve) } @@ -617,7 +617,7 @@ func (l *Loadbalancers) shouldPreserveNodeBalancer(service *v1.Service) bool { // successfully deleted. // // EnsureLoadBalancerDeleted will not modify service. 
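The deleteUnusedConfigs pass above deliberately uses a nested linear scan instead of a lookup map, as its comment notes. A self-contained sketch of that pruning logic, with bare port numbers standing in for linodego.NodeBalancerConfig and v1.ServicePort:

package main

import "fmt"

func main() {
	nbPorts := []int{80, 443, 8080} // ports that have NodeBalancer configs
	svcPorts := []int{80, 443}      // ports still declared on the Service
	for _, nbPort := range nbPorts {
		found := false
		for _, svcPort := range svcPorts {
			if nbPort == svcPort {
				found = true
				break
			}
		}
		if !found {
			// The real method deletes the orphaned config via the Linode client.
			fmt.Printf("config for port %d is unused and would be deleted\n", nbPort)
		}
	}
}

With the handful of ports a typical Service carries, the nested scan is cheaper in practice than building a set, which is the trade-off the code comment calls out.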
-func (l *Loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { +func (l *LoadBalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { ctx = sentry.SetHubOnContext(ctx) sentry.SetTag(ctx, "cluster_name", clusterName) sentry.SetTag(ctx, "service", service.Name) @@ -684,7 +684,7 @@ func (l *Loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa return nil } -func (l *Loadbalancers) getNodeBalancerByHostname(ctx context.Context, service *v1.Service, hostname string) (*linodego.NodeBalancer, error) { +func (l *LoadBalancers) getNodeBalancerByHostname(ctx context.Context, service *v1.Service, hostname string) (*linodego.NodeBalancer, error) { lbs, err := l.Client.ListNodeBalancers(ctx, nil) if err != nil { return nil, err @@ -698,7 +698,7 @@ func (l *Loadbalancers) getNodeBalancerByHostname(ctx context.Context, service * return nil, lbNotFoundError{serviceNn: getServiceNn(service)} } -func (l *Loadbalancers) getNodeBalancerByIP(ctx context.Context, service *v1.Service, ip netip.Addr) (*linodego.NodeBalancer, error) { +func (l *LoadBalancers) getNodeBalancerByIP(ctx context.Context, service *v1.Service, ip netip.Addr) (*linodego.NodeBalancer, error) { var filter string if ip.Is6() { filter = fmt.Sprintf(`{"ipv6": "%v"}`, ip.String()) @@ -717,7 +717,7 @@ func (l *Loadbalancers) getNodeBalancerByIP(ctx context.Context, service *v1.Ser return &lbs[0], nil } -func (l *Loadbalancers) getNodeBalancerByID(ctx context.Context, service *v1.Service, id int) (*linodego.NodeBalancer, error) { +func (l *LoadBalancers) getNodeBalancerByID(ctx context.Context, service *v1.Service, id int) (*linodego.NodeBalancer, error) { nb, err := l.Client.GetNodeBalancer(ctx, id) if err != nil { var targetError *linodego.Error @@ -729,7 +729,7 @@ func (l *Loadbalancers) getNodeBalancerByID(ctx context.Context, service *v1.Ser return nb, nil } -func (l *Loadbalancers) GetLoadBalancerTags(_ context.Context, clusterName string, service *v1.Service) []string { +func (l *LoadBalancers) GetLoadBalancerTags(_ context.Context, clusterName string, service *v1.Service) []string { tags := []string{} if clusterName != "" { tags = append(tags, clusterName) @@ -746,7 +746,7 @@ func (l *Loadbalancers) GetLoadBalancerTags(_ context.Context, clusterName strin } // GetLinodeNBType returns the NodeBalancer type for the service. -func (l *Loadbalancers) GetLinodeNBType(service *v1.Service) linodego.NodeBalancerPlanType { +func (l *LoadBalancers) GetLinodeNBType(service *v1.Service) linodego.NodeBalancerPlanType { typeStr, ok := service.GetAnnotations()[annotations.AnnLinodeNodeBalancerType] if ok && linodego.NodeBalancerPlanType(typeStr) == linodego.NBTypePremium { return linodego.NBTypePremium @@ -762,7 +762,7 @@ func (l *Loadbalancers) GetLinodeNBType(service *v1.Service) linodego.NodeBalanc // 3. NodeBalancerBackendIPv4SubnetID/NodeBalancerBackendIPv4SubnetName flag // 4. NodeBalancerBackendIPv4Subnet flag // 5. 
Default to using the subnet ID of the service's VPC -func (l *Loadbalancers) getVPCCreateOptions(ctx context.Context, service *v1.Service) ([]linodego.NodeBalancerVPCOptions, error) { +func (l *LoadBalancers) getVPCCreateOptions(ctx context.Context, service *v1.Service) ([]linodego.NodeBalancerVPCOptions, error) { // Evaluate subnetID based on annotations or flags subnetID, err := l.getSubnetIDForSVC(ctx, service) if err != nil { @@ -835,7 +835,7 @@ func (l *Loadbalancers) getVPCCreateOptions(ctx context.Context, service *v1.Ser return vpcCreateOpts, nil } -func (l *Loadbalancers) createNodeBalancer(ctx context.Context, clusterName string, service *v1.Service, configs []*linodego.NodeBalancerConfigCreateOptions) (lb *linodego.NodeBalancer, err error) { +func (l *LoadBalancers) createNodeBalancer(ctx context.Context, clusterName string, service *v1.Service, configs []*linodego.NodeBalancerConfigCreateOptions) (lb *linodego.NodeBalancer, err error) { connThrottle := getConnectionThrottle(service) label := l.GetLoadBalancerName(ctx, clusterName, service) @@ -885,7 +885,7 @@ func (l *Loadbalancers) createNodeBalancer(ctx context.Context, clusterName stri return l.Client.CreateNodeBalancer(ctx, createOpts) } -func (l *Loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1.Service, port v1.ServicePort) (linodego.NodeBalancerConfig, error) { +func (l *LoadBalancers) buildNodeBalancerConfig(ctx context.Context, service *v1.Service, port v1.ServicePort) (linodego.NodeBalancerConfig, error) { portConfigResult, err := getPortConfig(service, port) if err != nil { return linodego.NodeBalancerConfig{}, err @@ -970,7 +970,7 @@ func (l *Loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1 return config, nil } -func (l *Loadbalancers) addTLSCert(ctx context.Context, service *v1.Service, nbConfig *linodego.NodeBalancerConfig, config portConfig) error { +func (l *LoadBalancers) addTLSCert(ctx context.Context, service *v1.Service, nbConfig *linodego.NodeBalancerConfig, config portConfig) error { err := l.retrieveKubeClient() if err != nil { return err @@ -989,7 +989,7 @@ func (l *Loadbalancers) addTLSCert(ctx context.Context, service *v1.Service, nbC // 2. If the service has annotations specifying VPCName or SubnetName, use them. // 3. If CCM is configured with --nodebalancer-backend-ipv4-subnet-id, it will be used as the subnet ID. // 4. Else, use first VPCName and SubnetName to calculate subnet id for the service. -func (l *Loadbalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Service) (int, error) { +func (l *LoadBalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Service) (int, error) { if len(l.Options.VPCNames) == 0 { return 0, fmt.Errorf("CCM not configured with VPC, cannot create NodeBalancer with specified annotation") } @@ -1031,7 +1031,7 @@ func (l *Loadbalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Servi // buildLoadBalancerRequest returns a linodego.NodeBalancer // requests for service across nodes. 
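The subnet resolution that getSubnetIDForSVC documents above collapses to a simple precedence chain: an annotation-supplied subnet wins, then the --nodebalancer-backend-ipv4-subnet-id flag, then the subnet computed from the configured VPC and subnet names. A hedged sketch of that ordering; the helper and its integer inputs are illustrative stand-ins, not the actual implementation:

package main

import "fmt"

// resolveSubnetID distills the documented precedence; 0 means "not set".
func resolveSubnetID(fromAnnotation, fromFlag, fromVPCNames int) int {
	if fromAnnotation != 0 {
		return fromAnnotation
	}
	if fromFlag != 0 {
		return fromFlag
	}
	return fromVPCNames
}

func main() {
	// No annotation, flag set to 123, VPC-derived subnet 456: the flag wins.
	fmt.Println(resolveSubnetID(0, 123, 456)) // 123
}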
-func (l *Loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*linodego.NodeBalancer, error) { +func (l *LoadBalancers) buildLoadBalancerRequest(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*linodego.NodeBalancer, error) { if len(nodes) == 0 && !l.Options.AllowEmptyNodeBalancerBackends { return nil, fmt.Errorf("%w: cluster %s, service %s", errNoNodesAvailable, clusterName, getServiceNn(service)) } @@ -1100,7 +1100,7 @@ func coerceString(str string, minLen, maxLen int, padding string) string { return str } -func (l *Loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, nodePort int32, subnetID int, protocol linodego.ConfigProtocol) (*linodego.NodeBalancerConfigRebuildNodeOptions, error) { +func (l *LoadBalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, nodePort int32, subnetID int, protocol linodego.ConfigProtocol) (*linodego.NodeBalancerConfigRebuildNodeOptions, error) { nodeIP, err := getNodePrivateIP(node, subnetID) if err != nil { return nil, fmt.Errorf("node %s does not have a private IP address: %w", node.Name, err) @@ -1124,7 +1124,7 @@ func (l *Loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, return nodeOptions, nil } -func (l *Loadbalancers) retrieveKubeClient() error { +func (l *LoadBalancers) retrieveKubeClient() error { if l.KubeClient != nil { return nil } @@ -1309,7 +1309,7 @@ func getConnectionThrottle(service *v1.Service) int { return connThrottle } -func (l *Loadbalancers) makeLoadBalancerStatus(service *v1.Service, nb *linodego.NodeBalancer) *v1.LoadBalancerStatus { +func (l *LoadBalancers) makeLoadBalancerStatus(service *v1.Service, nb *linodego.NodeBalancer) *v1.LoadBalancerStatus { ingress := v1.LoadBalancerIngress{ Hostname: *nb.Hostname, } @@ -1381,7 +1381,7 @@ func getServiceBoolAnnotation(service *v1.Service, name string) bool { // validateNodeBalancerBackendIPv4Range validates the NodeBalancerBackendIPv4Range // annotation to be within the NodeBalancerBackendIPv4Subnet if it is set. 
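validateNodeBalancerBackendIPv4Range, shown next, checks that an annotation-supplied backend range sits inside the configured NodeBalancerBackendIPv4Subnet. A sketch of that containment test using net/netip, which this package already relies on for getNodeBalancerByIP; the helper and the CIDRs are made-up illustrations, not the real validation code:

package main

import (
	"fmt"
	"net/netip"
)

// rangeWithinSubnet reports whether backendRange lies inside, and is no wider
// than, subnet.
func rangeWithinSubnet(backendRange, subnet string) (bool, error) {
	r, err := netip.ParsePrefix(backendRange)
	if err != nil {
		return false, err
	}
	s, err := netip.ParsePrefix(subnet)
	if err != nil {
		return false, err
	}
	// Contained when the range's network address is in the subnet and the
	// range is at least as specific as the subnet.
	return s.Contains(r.Addr()) && r.Bits() >= s.Bits(), nil
}

func main() {
	ok, err := rangeWithinSubnet("10.100.0.0/30", "10.100.0.0/24")
	fmt.Println(ok, err) // true <nil>
}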
-func (l *Loadbalancers) validateNodeBalancerBackendIPv4Range(backendIPv4Range string) error { +func (l *LoadBalancers) validateNodeBalancerBackendIPv4Range(backendIPv4Range string) error { if l.Options.NodeBalancerBackendIPv4Subnet == "" { return nil } diff --git a/cloud/linode/loadbalancers_test.go b/cloud/linode/loadbalancers_test.go index e8474c17..f5b4cfd7 100644 --- a/cloud/linode/loadbalancers_test.go +++ b/cloud/linode/loadbalancers_test.go @@ -370,7 +370,7 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a for key, value := range annMap { svc.Annotations[key] = value } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -484,7 +484,7 @@ func testCreateNodeBalancerWithNodeNoAddresses(t *testing.T, client *linodego.Cl }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -716,7 +716,7 @@ func testUpdateNodeBalancerWithVPCBackend(t *testing.T, client *linodego.Client, }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -803,7 +803,7 @@ func testCreateNodeBalancerWithVPCOnlySubnetFlag(t *testing.T, client *linodego. }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -896,7 +896,7 @@ func testCreateNodeBalancerWithVPCNoFlagOrAnnotation(t *testing.T, client *linod }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -985,7 +985,7 @@ func testCreateNodeBalancerWithVPCAnnotationOnly(t *testing.T, client *linodego. 
}, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -1070,7 +1070,7 @@ func testCreateNodeBalancerWithVPCOnlySubnetIDFlag(t *testing.T, client *linodeg }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -1216,7 +1216,7 @@ func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fak }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -1381,7 +1381,7 @@ func testUpdateLoadBalancerAddAnnotation(t *testing.T, client *linodego.Client, }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -1456,7 +1456,7 @@ func testUpdateLoadBalancerAddPortAnnotation(t *testing.T, client *linodego.Clie }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -1569,7 +1569,7 @@ func testVeryLongServiceName(t *testing.T, client *linodego.Client, _ *fakeAPI) }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -1637,7 +1637,7 @@ func testUpdateLoadBalancerAddTags(t *testing.T, client *linodego.Client, _ *fak }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -1722,7 +1722,7 @@ func testUpdateLoadBalancerAddTLSPort(t *testing.T, client *linodego.Client, _ * NodePort: int32(30001), } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -1803,7 +1803,7 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -1931,7 +1931,7 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client, }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -2032,7 +2032,7 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -2164,7 +2164,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodeg }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ 
-2263,7 +2263,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -2403,7 +2403,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -2550,7 +2550,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -2786,7 +2786,7 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -2919,7 +2919,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveID(t *testing.T, client *linodego }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -3020,7 +3020,7 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -3990,7 +3990,7 @@ func testBuildLoadBalancerRequest(t *testing.T, client *linodego.Client, _ *fake }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -4042,7 +4042,7 @@ func testEnsureLoadBalancerPreserveAnnotation(t *testing.T, client *linodego.Cli }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -4159,7 +4159,7 @@ func testEnsureLoadBalancerDeleted(t *testing.T, client *linodego.Client, fake * }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -4212,7 +4212,7 @@ func testEnsureExistingLoadBalancer(t *testing.T, client *linodego.Client, _ *fa }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -4337,7 +4337,7 @@ func testMakeLoadBalancerStatus(t *testing.T, client *linodego.Client, _ *fakeAP }}, } - l := Loadbalancers{ + l := LoadBalancers{ Options: &OptionsConfig{ EnableIPv6ForLoadBalancers: false, }, @@ -4375,7 +4375,7 @@ func testMakeLoadBalancerStatusWithIPv6(t *testing.T, client *linodego.Client, _ } // Test with EnableIPv6ForLoadBalancers = false (default) - l := Loadbalancers{ + l := LoadBalancers{ Options: &OptionsConfig{ EnableIPv6ForLoadBalancers: false, }, @@ -4450,7 +4450,7 @@ func 
testMakeLoadBalancerStatusEnvVar(t *testing.T, client *linodego.Client, _ * }}, } - l := Loadbalancers{ + l := LoadBalancers{ Options: &OptionsConfig{ EnableIPv6ForLoadBalancers: false, }, @@ -4483,7 +4483,7 @@ func testMakeLoadBalancerStatusEnvVar(t *testing.T, client *linodego.Client, _ * os.Unsetenv("LINODE_HOSTNAME_ONLY_INGRESS") } -func getLatestNbNodesForService(t *testing.T, client *linodego.Client, svc *v1.Service, lb *Loadbalancers) []linodego.NodeBalancerNode { +func getLatestNbNodesForService(t *testing.T, client *linodego.Client, svc *v1.Service, lb *LoadBalancers) []linodego.NodeBalancerNode { t.Helper() nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { @@ -4525,7 +4525,7 @@ func testCleanupDoesntCall(t *testing.T, client *linodego.Client, fakeAPI *fakeA Annotations: map[string]string{annotations.AnnLinodeNodeBalancerID: strconv.Itoa(nb2.ID)}, }, } - lb, assertion := NewLoadbalancers(client, region).(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, region).(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -4578,7 +4578,7 @@ func testUpdateLoadBalancerNodeExcludedByAnnotation(t *testing.T, client *linode }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -4821,7 +4821,7 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -4859,7 +4859,7 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak func testGetNodeBalancerByStatus(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -4924,7 +4924,7 @@ func testGetNodeBalancerByStatus(t *testing.T, client *linodego.Client, _ *fakeA func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -4968,7 +4968,7 @@ func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego. 
func testEnsureNewLoadBalancerWithNodeBalancerID(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -5067,7 +5067,7 @@ func testEnsureNewLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI }, }, } - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -5085,7 +5085,7 @@ func testEnsureNewLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI func testGetLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() - lb, assertion := NewLoadbalancers(client, "us-west").(*Loadbalancers) + lb, assertion := NewLoadbalancers(client, "us-west").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -5508,7 +5508,7 @@ func Test_loadbalancers_GetLinodeNBType(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - l := &Loadbalancers{ + l := &LoadBalancers{ Client: tt.fields.client, Zone: tt.fields.zone, KubeClient: tt.fields.kubeClient, @@ -5546,7 +5546,7 @@ func Test_validateNodeBalancerBackendIPv4Range(t *testing.T) { }, } - l := &Loadbalancers{ + l := &LoadBalancers{ Client: nil, Zone: "", KubeClient: nil, diff --git a/cloud/linode/service_controller.go b/cloud/linode/service_controller.go index 15aa1bbc..1eff5fc8 100644 --- a/cloud/linode/service_controller.go +++ b/cloud/linode/service_controller.go @@ -19,13 +19,13 @@ import ( var retryInterval = time.Minute * 1 type serviceController struct { - loadbalancers *Loadbalancers + loadbalancers *LoadBalancers informer v1informers.ServiceInformer queue workqueue.TypedDelayingInterface[any] } -func newServiceController(loadbalancers *Loadbalancers, informer v1informers.ServiceInformer) *serviceController { +func newServiceController(loadbalancers *LoadBalancers, informer v1informers.ServiceInformer) *serviceController { return &serviceController{ loadbalancers: loadbalancers, informer: informer, diff --git a/cloud/linode/service_controller_test.go b/cloud/linode/service_controller_test.go index ed7bb4a7..e844ed82 100644 --- a/cloud/linode/service_controller_test.go +++ b/cloud/linode/service_controller_test.go @@ -24,7 +24,7 @@ func Test_serviceController_Run(t *testing.T) { informer := informers.NewSharedInformerFactory(kubeClient, 0).Core().V1().Services() mockQueue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "test"}) - loadbalancers, assertion := NewLoadbalancers(client, "us-east").(*Loadbalancers) + loadbalancers, assertion := NewLoadbalancers(client, "us-east").(*LoadBalancers) if !assertion { t.Error("type assertion failed") } @@ -53,7 +53,7 @@ func Test_serviceController_Run(t *testing.T) { func Test_serviceController_processNextDeletion(t *testing.T) { type fields struct { - loadbalancers *Loadbalancers + loadbalancers *LoadBalancers queue workqueue.TypedDelayingInterface[any] Client *mocks.MockClient } @@ -70,7 +70,7 @@ func Test_serviceController_processNextDeletion(t *testing.T) { loadbalancers: nil, }, Setup: func(f *fields) { - f.loadbalancers = &Loadbalancers{Client: f.Client, Zone: "test", LoadBalancerType: Options.LoadBalancerType} + f.loadbalancers = &LoadBalancers{Client: f.Client, Zone: "test", LoadBalancerType: Options.LoadBalancerType} f.queue = 
workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) f.queue.Add("test") }, @@ -83,7 +83,7 @@ func Test_serviceController_processNextDeletion(t *testing.T) { loadbalancers: nil, }, Setup: func(f *fields) { - f.loadbalancers = &Loadbalancers{Client: f.Client, Zone: "test", LoadBalancerType: Options.LoadBalancerType} + f.loadbalancers = &LoadBalancers{Client: f.Client, Zone: "test", LoadBalancerType: Options.LoadBalancerType} f.queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) svc := createTestService() f.queue.Add(svc) From 6c8b8df3011809a2add1102c991b8e2ab297fef0 Mon Sep 17 00:00:00 2001 From: Khaja Omer Date: Fri, 15 Aug 2025 17:43:22 -0500 Subject: [PATCH 8/9] update go mod. Merge linodego change for vpc dual stack and nlb --- go.mod | 17 +++++++++-------- go.sum | 36 ++++++++++++++++++------------------ 2 files changed, 27 insertions(+), 26 deletions(-) diff --git a/go.mod b/go.mod index f519d76f..b364512f 100644 --- a/go.mod +++ b/go.mod @@ -141,16 +141,16 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect - golang.org/x/crypto v0.39.0 // indirect - golang.org/x/mod v0.25.0 // indirect - golang.org/x/net v0.41.0 // indirect + golang.org/x/crypto v0.41.0 // indirect + golang.org/x/mod v0.26.0 // indirect + golang.org/x/net v0.43.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.15.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/term v0.32.0 // indirect - golang.org/x/text v0.26.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/term v0.34.0 // indirect + golang.org/x/text v0.28.0 // indirect golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.33.0 // indirect + golang.org/x/tools v0.35.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect google.golang.org/grpc v1.72.1 // indirect @@ -178,6 +178,7 @@ require ( // Fixes for `unknown revision v0.0.0` reported by `go list -modfile=go.mod -m -json -mod=mod all` replace ( + github.com/linode/linodego => github.com/komer3/linodego v0.0.0-20250815220120-72ddfca407ca k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.33.0 k8s.io/cri-api => k8s.io/cri-api v0.33.0 k8s.io/cri-client => k8s.io/cri-client v0.33.0 diff --git a/go.sum b/go.sum index 3f232c98..c64afe7a 100644 --- a/go.sum +++ b/go.sum @@ -194,6 +194,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/komer3/linodego v0.0.0-20250815220120-72ddfca407ca h1:HRkQv+nfxNUiCs9vQjA4MNRdDLwt4AaR+p4HsWFTGng= +github.com/komer3/linodego v0.0.0-20250815220120-72ddfca407ca/go.mod h1:LoQZ8hW8ZcXh/DJdYADwPCtVP8duyckLRGXwwwh55SU= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -202,8 +204,6 @@ github.com/kylelemons/godebug v1.1.0 
h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/linode/linodego v1.53.1-0.20250709175023-9b152d30578c h1:WlZm+YNHBuphycMZG2s2+F04hx2wx1ShuOwPAIInjP8= -github.com/linode/linodego v1.53.1-0.20250709175023-9b152d30578c/go.mod h1:bI949fZaVchjWyKIA08hNyvAcV6BAS+PM2op3p7PAWA= github.com/mackerelio/go-osstat v0.2.5 h1:+MqTbZUhoIt4m8qzkVoXUJg1EuifwlAJSk4Yl2GXh+o= github.com/mackerelio/go-osstat v0.2.5/go.mod h1:atxwWF+POUZcdtR1wnsUcQxTytoHG4uhl2AKKzrOajY= github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM= @@ -391,31 +391,31 @@ golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4= golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg= +golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -427,15 +427,15 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -444,8 +444,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= -golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod 
h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From c5d4e69037fdcd52b372bb20d9cdd0921415cca3 Mon Sep 17 00:00:00 2001 From: Khaja Omer Date: Fri, 15 Aug 2025 17:54:09 -0500 Subject: [PATCH 9/9] update go.mod to the right version of linodego --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b364512f..02752b1b 100644 --- a/go.mod +++ b/go.mod @@ -178,7 +178,7 @@ require ( // Fixes for `unknown revision v0.0.0` reported by `go list -modfile=go.mod -m -json -mod=mod all` replace ( - github.com/linode/linodego => github.com/komer3/linodego v0.0.0-20250815220120-72ddfca407ca + github.com/linode/linodego => github.com/komer3/linodego v0.0.0-20250815223828-0e5518dcb256 k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.33.0 k8s.io/cri-api => k8s.io/cri-api v0.33.0 k8s.io/cri-client => k8s.io/cri-client v0.33.0 diff --git a/go.sum b/go.sum index c64afe7a..6f567474 100644 --- a/go.sum +++ b/go.sum @@ -194,8 +194,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/komer3/linodego v0.0.0-20250815220120-72ddfca407ca h1:HRkQv+nfxNUiCs9vQjA4MNRdDLwt4AaR+p4HsWFTGng= -github.com/komer3/linodego v0.0.0-20250815220120-72ddfca407ca/go.mod h1:LoQZ8hW8ZcXh/DJdYADwPCtVP8duyckLRGXwwwh55SU= +github.com/komer3/linodego v0.0.0-20250815223828-0e5518dcb256 h1:iSmjmSvz/0FfuOMtqaPSQtzB0a9uK1/LIZ+h5Basopg= +github.com/komer3/linodego v0.0.0-20250815223828-0e5518dcb256/go.mod h1:LoQZ8hW8ZcXh/DJdYADwPCtVP8duyckLRGXwwwh55SU= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
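With the struct exported and the fork pinned, an out-of-tree project can consume this load balancer implementation directly. A hedged sketch of such a consumer, assuming the module path github.com/linode/linode-cloud-controller-manager (the patch does not show the module name) and mirroring the type assertion the tests use; the nil argument is a placeholder for a real, configured client.Client:

package main

import (
	"fmt"

	"github.com/linode/linode-cloud-controller-manager/cloud/linode"
)

func main() {
	// NewLoadbalancers returns a cloudprovider.LoadBalancer; the concrete,
	// now-exported *linode.LoadBalancers is recovered by type assertion.
	lb, ok := linode.NewLoadbalancers(nil, "us-east").(*linode.LoadBalancers)
	if !ok {
		fmt.Println("type assertion failed")
		return
	}
	// Exported fields such as Zone are now directly accessible.
	fmt.Println("zone:", lb.Zone)
}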