diff --git a/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go b/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go index e9f17e19a28b..1b8636909f27 100644 --- a/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go +++ b/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go @@ -174,16 +174,7 @@ type ClusterConfiguration struct { CertificatesDir string `json:"certificatesDir,omitempty"` // imageRepository sets the container registry to pull images from. - // * If not set, the default registry of kubeadm will be used, i.e. - // * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 - // * k8s.gcr.io (old registry): all older versions - // Please note that when imageRepository is not set we don't allow upgrades to - // versions >= v1.22.0 which use the old registry (k8s.gcr.io). Please use - // a newer patch version with the new registry instead (i.e. >= v1.22.17, - // >= v1.23.15, >= v1.24.9, >= v1.25.0). - // * If the version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) - // `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components - // and for kube-proxy, while `registry.k8s.io` will be used for all the other images. + // If not set, the default registry of kubeadm will be used (registry.k8s.io). // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=512 diff --git a/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go b/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go index 6f5644bdbf20..e3622270b490 100644 --- a/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go +++ b/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go @@ -420,11 +420,6 @@ type KubeadmControlPlaneSpec struct { Replicas *int32 `json:"replicas,omitempty"` // version defines the desired Kubernetes version. - // Please note that if kubeadmConfigSpec.ClusterConfiguration.imageRepository is not set - // we don't allow upgrades to versions >= v1.22.0 for which kubeadm uses the old registry (k8s.gcr.io). - // Please use a newer patch version with the new registry instead. The default registries of kubeadm are: - // * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 - // * k8s.gcr.io (old registry): all older versions // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 diff --git a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml index 4d7156ab28ca..5c0dbd17e0e7 100644 --- a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml +++ b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml @@ -5158,16 +5158,7 @@ spec: imageRepository: description: |- imageRepository sets the container registry to pull images from. - * If not set, the default registry of kubeadm will be used, i.e. - * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 - * k8s.gcr.io (old registry): all older versions - Please note that when imageRepository is not set we don't allow upgrades to - versions >= v1.22.0 which use the old registry (k8s.gcr.io). Please use - a newer patch version with the new registry instead (i.e. >= v1.22.17, - >= v1.23.15, >= v1.24.9, >= v1.25.0). 
- * If the version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) - `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components - and for kube-proxy, while `registry.k8s.io` will be used for all the other images. + If not set, the default registry of kubeadm will be used (registry.k8s.io). maxLength: 512 minLength: 1 type: string diff --git a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml index d9c04191981f..cd0a899bf5fb 100644 --- a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml +++ b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml @@ -5047,16 +5047,7 @@ spec: imageRepository: description: |- imageRepository sets the container registry to pull images from. - * If not set, the default registry of kubeadm will be used, i.e. - * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 - * k8s.gcr.io (old registry): all older versions - Please note that when imageRepository is not set we don't allow upgrades to - versions >= v1.22.0 which use the old registry (k8s.gcr.io). Please use - a newer patch version with the new registry instead (i.e. >= v1.22.17, - >= v1.23.15, >= v1.24.9, >= v1.25.0). - * If the version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) - `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components - and for kube-proxy, while `registry.k8s.io` will be used for all the other images. + If not set, the default registry of kubeadm will be used (registry.k8s.io). maxLength: 512 minLength: 1 type: string diff --git a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml index fec6112d42d4..cd683831541d 100644 --- a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml +++ b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml @@ -6080,16 +6080,7 @@ spec: imageRepository: description: |- imageRepository sets the container registry to pull images from. - * If not set, the default registry of kubeadm will be used, i.e. - * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 - * k8s.gcr.io (old registry): all older versions - Please note that when imageRepository is not set we don't allow upgrades to - versions >= v1.22.0 which use the old registry (k8s.gcr.io). Please use - a newer patch version with the new registry instead (i.e. >= v1.22.17, - >= v1.23.15, >= v1.24.9, >= v1.25.0). - * If the version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) - `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components - and for kube-proxy, while `registry.k8s.io` will be used for all the other images. + If not set, the default registry of kubeadm will be used (registry.k8s.io). maxLength: 512 minLength: 1 type: string @@ -7807,13 +7798,7 @@ spec: type: object type: object version: - description: |- - version defines the desired Kubernetes version. - Please note that if kubeadmConfigSpec.ClusterConfiguration.imageRepository is not set - we don't allow upgrades to versions >= v1.22.0 for which kubeadm uses the old registry (k8s.gcr.io). 
- Please use a newer patch version with the new registry instead. The default registries of kubeadm are: - * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 - * k8s.gcr.io (old registry): all older versions + description: version defines the desired Kubernetes version. maxLength: 256 minLength: 1 type: string diff --git a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml index f1adad63fb64..e82ff9bcda9c 100644 --- a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml +++ b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml @@ -4477,16 +4477,7 @@ spec: imageRepository: description: |- imageRepository sets the container registry to pull images from. - * If not set, the default registry of kubeadm will be used, i.e. - * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 - * k8s.gcr.io (old registry): all older versions - Please note that when imageRepository is not set we don't allow upgrades to - versions >= v1.22.0 which use the old registry (k8s.gcr.io). Please use - a newer patch version with the new registry instead (i.e. >= v1.22.17, - >= v1.23.15, >= v1.24.9, >= v1.25.0). - * If the version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) - `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components - and for kube-proxy, while `registry.k8s.io` will be used for all the other images. + If not set, the default registry of kubeadm will be used (registry.k8s.io). maxLength: 512 minLength: 1 type: string diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go index cd3e862c886d..e599a8622931 100644 --- a/controlplane/kubeadm/internal/control_plane.go +++ b/controlplane/kubeadm/internal/control_plane.go @@ -50,9 +50,8 @@ type ControlPlane struct { Machines collections.Machines machinesPatchHelpers map[string]*patch.Helper - machinesNotUptoDate collections.Machines - machinesNotUptoDateLogMessages map[string][]string - machinesNotUptoDateConditionMessages map[string][]string + machinesNotUptoDate collections.Machines + machinesNotUpToDateResults map[string]NotUpToDateResult // reconciliationTime is the time of the current reconciliation, and should be used for all "now" calculations reconciliationTime metav1.Time @@ -98,7 +97,7 @@ type PreflightCheckResults struct { // NewControlPlane returns an instantiated ControlPlane. func NewControlPlane(ctx context.Context, managementCluster ManagementCluster, client client.Client, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, ownedMachines collections.Machines) (*ControlPlane, error) { - infraObjects, err := getInfraResources(ctx, client, ownedMachines) + infraMachines, err := getInfraMachines(ctx, client, ownedMachines) if err != nil { return nil, err } @@ -118,32 +117,29 @@ func NewControlPlane(ctx context.Context, managementCluster ManagementCluster, c // Select machines that should be rolled out because of an outdated configuration or because rolloutAfter/Before expired. 
reconciliationTime := metav1.Now() machinesNotUptoDate := make(collections.Machines, len(ownedMachines)) - machinesNotUptoDateLogMessages := map[string][]string{} - machinesNotUptoDateConditionMessages := map[string][]string{} + machinesNotUpToDateResults := map[string]NotUpToDateResult{} for _, m := range ownedMachines { - upToDate, logMessages, conditionMessages, err := UpToDate(m, kcp, &reconciliationTime, infraObjects, kubeadmConfigs) + upToDate, notUpToDateResult, err := UpToDate(m, kcp, &reconciliationTime, infraMachines, kubeadmConfigs) if err != nil { return nil, err } if !upToDate { machinesNotUptoDate.Insert(m) - machinesNotUptoDateLogMessages[m.Name] = logMessages - machinesNotUptoDateConditionMessages[m.Name] = conditionMessages + machinesNotUpToDateResults[m.Name] = *notUpToDateResult } } return &ControlPlane{ - KCP: kcp, - Cluster: cluster, - Machines: ownedMachines, - machinesPatchHelpers: patchHelpers, - machinesNotUptoDate: machinesNotUptoDate, - machinesNotUptoDateLogMessages: machinesNotUptoDateLogMessages, - machinesNotUptoDateConditionMessages: machinesNotUptoDateConditionMessages, - KubeadmConfigs: kubeadmConfigs, - InfraResources: infraObjects, - reconciliationTime: reconciliationTime, - managementCluster: managementCluster, + KCP: kcp, + Cluster: cluster, + Machines: ownedMachines, + machinesPatchHelpers: patchHelpers, + machinesNotUptoDate: machinesNotUptoDate, + machinesNotUpToDateResults: machinesNotUpToDateResults, + KubeadmConfigs: kubeadmConfigs, + InfraResources: infraMachines, + reconciliationTime: reconciliationTime, + managementCluster: managementCluster, }, nil } @@ -256,15 +252,15 @@ func (c *ControlPlane) GetKubeadmConfig(machineName string) (*bootstrapv1.Kubead } // MachinesNeedingRollout return a list of machines that need to be rolled out. -func (c *ControlPlane) MachinesNeedingRollout() (collections.Machines, map[string][]string) { +func (c *ControlPlane) MachinesNeedingRollout() (collections.Machines, map[string]NotUpToDateResult) { // Note: Machines already deleted are dropped because they will be replaced by new machines after deletion completes. - return c.machinesNotUptoDate.Filter(collections.Not(collections.HasDeletionTimestamp)), c.machinesNotUptoDateLogMessages + return c.machinesNotUptoDate.Filter(collections.Not(collections.HasDeletionTimestamp)), c.machinesNotUpToDateResults } // NotUpToDateMachines return a list of machines that are not up to date with the control // plane's configuration. -func (c *ControlPlane) NotUpToDateMachines() (collections.Machines, map[string][]string) { - return c.machinesNotUptoDate, c.machinesNotUptoDateConditionMessages +func (c *ControlPlane) NotUpToDateMachines() (collections.Machines, map[string]NotUpToDateResult) { + return c.machinesNotUptoDate, c.machinesNotUpToDateResults } // UpToDateMachines returns the machines that are up to date with the control @@ -273,18 +269,18 @@ func (c *ControlPlane) UpToDateMachines() collections.Machines { return c.Machines.Difference(c.machinesNotUptoDate) } -// getInfraResources fetches the external infrastructure resource for each machine in the collection and returns a map of machine.Name -> infraResource. -func getInfraResources(ctx context.Context, cl client.Client, machines collections.Machines) (map[string]*unstructured.Unstructured, error) { +// getInfraMachines fetches the InfraMachine for each machine in the collection and returns a map of machine.Name -> InfraMachine. 
+func getInfraMachines(ctx context.Context, cl client.Client, machines collections.Machines) (map[string]*unstructured.Unstructured, error) { result := map[string]*unstructured.Unstructured{} for _, m := range machines { - infraObj, err := external.GetObjectFromContractVersionedRef(ctx, cl, m.Spec.InfrastructureRef, m.Namespace) + infraMachine, err := external.GetObjectFromContractVersionedRef(ctx, cl, m.Spec.InfrastructureRef, m.Namespace) if err != nil { if apierrors.IsNotFound(errors.Cause(err)) { continue } - return nil, errors.Wrapf(err, "failed to retrieve infra obj for machine %q", m.Name) + return nil, errors.Wrapf(err, "failed to retrieve InfraMachine for Machine %s", m.Name) } - result[m.Name] = infraObj + result[m.Name] = infraMachine } return result, nil } @@ -297,14 +293,14 @@ func getKubeadmConfigs(ctx context.Context, cl client.Client, machines collectio if !bootstrapRef.IsDefined() { continue } - machineConfig := &bootstrapv1.KubeadmConfig{} - if err := cl.Get(ctx, client.ObjectKey{Name: bootstrapRef.Name, Namespace: m.Namespace}, machineConfig); err != nil { + kubeadmConfig := &bootstrapv1.KubeadmConfig{} + if err := cl.Get(ctx, client.ObjectKey{Name: bootstrapRef.Name, Namespace: m.Namespace}, kubeadmConfig); err != nil { if apierrors.IsNotFound(errors.Cause(err)) { continue } - return nil, errors.Wrapf(err, "failed to retrieve bootstrap config for machine %q", m.Name) + return nil, errors.Wrapf(err, "failed to retrieve KubeadmConfig for Machine %s", m.Name) } - result[m.Name] = machineConfig + result[m.Name] = kubeadmConfig } return result, nil } diff --git a/controlplane/kubeadm/internal/control_plane_test.go b/controlplane/kubeadm/internal/control_plane_test.go index 121c459fb5b9..f3f059e6ba01 100644 --- a/controlplane/kubeadm/internal/control_plane_test.go +++ b/controlplane/kubeadm/internal/control_plane_test.go @@ -122,17 +122,17 @@ func TestControlPlane(t *testing.T) { g.Expect(controlPlane.Machines).To(HaveLen(5)) - machinesNotUptoDate, machinesNotUptoDateConditionMessages := controlPlane.NotUpToDateMachines() + machinesNotUptoDate, machinesNotUpToDateResults := controlPlane.NotUpToDateMachines() g.Expect(machinesNotUptoDate.Names()).To(ConsistOf("m2", "m3")) - g.Expect(machinesNotUptoDateConditionMessages).To(HaveLen(2)) - g.Expect(machinesNotUptoDateConditionMessages).To(HaveKeyWithValue("m2", []string{"Version v1.29.0, v1.31.0 required"})) - g.Expect(machinesNotUptoDateConditionMessages).To(HaveKeyWithValue("m3", []string{"Version v1.29.3, v1.31.0 required"})) + g.Expect(machinesNotUpToDateResults).To(HaveLen(2)) + g.Expect(machinesNotUpToDateResults["m2"].ConditionMessages).To(Equal([]string{"Version v1.29.0, v1.31.0 required"})) + g.Expect(machinesNotUpToDateResults["m3"].ConditionMessages).To(Equal([]string{"Version v1.29.3, v1.31.0 required"})) - machinesNeedingRollout, machinesNotUptoDateLogMessages := controlPlane.MachinesNeedingRollout() + machinesNeedingRollout, machinesNotUpToDateResults := controlPlane.MachinesNeedingRollout() g.Expect(machinesNeedingRollout.Names()).To(ConsistOf("m2")) - g.Expect(machinesNotUptoDateLogMessages).To(HaveLen(2)) - g.Expect(machinesNotUptoDateLogMessages).To(HaveKeyWithValue("m2", []string{"Machine version \"v1.29.0\" is not equal to KCP version \"v1.31.0\""})) - g.Expect(machinesNotUptoDateLogMessages).To(HaveKeyWithValue("m3", []string{"Machine version \"v1.29.3\" is not equal to KCP version \"v1.31.0\""})) + g.Expect(machinesNotUpToDateResults).To(HaveLen(2)) + 
g.Expect(machinesNotUpToDateResults["m2"].LogMessages).To(Equal([]string{"Machine version \"v1.29.0\" is not equal to KCP version \"v1.31.0\""})) + g.Expect(machinesNotUpToDateResults["m3"].LogMessages).To(Equal([]string{"Machine version \"v1.29.3\" is not equal to KCP version \"v1.31.0\""})) upToDateMachines := controlPlane.UpToDateMachines() g.Expect(upToDateMachines).To(HaveLen(3)) diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index fb66f1b1453d..ab6b9c6dfa7b 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -467,12 +467,12 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl } // Control plane machines rollout due to configuration changes (e.g. upgrades) takes precedence over other operations. - machinesNeedingRollout, machinesNeedingRolloutLogMessages := controlPlane.MachinesNeedingRollout() + machinesNeedingRollout, machinesNeedingRolloutResults := controlPlane.MachinesNeedingRollout() switch { case len(machinesNeedingRollout) > 0: var allMessages []string - for machine, messages := range machinesNeedingRolloutLogMessages { - allMessages = append(allMessages, fmt.Sprintf("Machine %s needs rollout: %s", machine, strings.Join(messages, ","))) + for machine, machinesNeedingRolloutResult := range machinesNeedingRolloutResults { + allMessages = append(allMessages, fmt.Sprintf("Machine %s needs rollout: %s", machine, strings.Join(machinesNeedingRolloutResult.LogMessages, ","))) } log.Info(fmt.Sprintf("Rolling out Control Plane machines: %s", strings.Join(allMessages, ",")), "machinesNeedingRollout", machinesNeedingRollout.Names()) v1beta1conditions.MarkFalse(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateV1Beta1Condition, controlplanev1.RollingUpdateInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(machinesNeedingRollout), len(controlPlane.Machines)-len(machinesNeedingRollout)) @@ -840,7 +840,7 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro return errors.Wrapf(err, "failed to clean up managedFields of InfrastructureMachine %s", klog.KObj(infraMachine)) } // Update in-place mutating fields on InfrastructureMachine. - if err := r.updateExternalObject(ctx, infraMachine, controlPlane.KCP, controlPlane.Cluster); err != nil { + if err := r.updateExternalObject(ctx, infraMachine, infraMachine.GroupVersionKind(), controlPlane.KCP, controlPlane.Cluster); err != nil { return errors.Wrapf(err, "failed to update InfrastructureMachine %s", klog.KObj(infraMachine)) } } @@ -849,8 +849,6 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro // Only update the KubeadmConfig if it is already found, otherwise just skip it. // This could happen e.g. if the cache is not up-to-date yet. if kubeadmConfigFound { - // Note: Set the GroupVersionKind because updateExternalObject depends on it. - kubeadmConfig.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) // Cleanup managed fields of all KubeadmConfigs to drop ownership of labels and annotations // from "manager". We do this so that KubeadmConfigs that are created using the Create method // can also work with SSA. 
Otherwise, labels and annotations would be co-owned by our "old" "manager" @@ -859,7 +857,7 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro return errors.Wrapf(err, "failed to clean up managedFields of KubeadmConfig %s", klog.KObj(kubeadmConfig)) } // Update in-place mutating fields on BootstrapConfig. - if err := r.updateExternalObject(ctx, kubeadmConfig, controlPlane.KCP, controlPlane.Cluster); err != nil { + if err := r.updateExternalObject(ctx, kubeadmConfig, bootstrapv1.GroupVersion.WithKind("KubeadmConfig"), controlPlane.KCP, controlPlane.Cluster); err != nil { return errors.Wrapf(err, "failed to update KubeadmConfig %s", klog.KObj(kubeadmConfig)) } } @@ -980,16 +978,17 @@ func (r *KubeadmControlPlaneReconciler) reconcileControlPlaneAndMachinesConditio } func reconcileMachineUpToDateCondition(_ context.Context, controlPlane *internal.ControlPlane) { - machinesNotUptoDate, machinesNotUptoDateConditionMessages := controlPlane.NotUpToDateMachines() + machinesNotUptoDate, machinesNotUpToDateResults := controlPlane.NotUpToDateMachines() machinesNotUptoDateNames := sets.New(machinesNotUptoDate.Names()...) for _, machine := range controlPlane.Machines { if machinesNotUptoDateNames.Has(machine.Name) { // Note: the code computing the message for KCP's RolloutOut condition is making assumptions on the format/content of this message. message := "" - if reasons, ok := machinesNotUptoDateConditionMessages[machine.Name]; ok { - for i := range reasons { - reasons[i] = fmt.Sprintf("* %s", reasons[i]) + if machinesNotUpToDateResult, ok := machinesNotUpToDateResults[machine.Name]; ok && len(machinesNotUpToDateResult.ConditionMessages) > 0 { + var reasons []string + for _, conditionMessage := range machinesNotUpToDateResult.ConditionMessages { + reasons = append(reasons, fmt.Sprintf("* %s", conditionMessage)) } message = strings.Join(reasons, "\n") } diff --git a/controlplane/kubeadm/internal/controllers/helpers.go b/controlplane/kubeadm/internal/controllers/helpers.go index e9c6ad4cb992..a802d899a5f0 100644 --- a/controlplane/kubeadm/internal/controllers/helpers.go +++ b/controlplane/kubeadm/internal/controllers/helpers.go @@ -25,6 +25,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" @@ -304,9 +305,9 @@ func (r *KubeadmControlPlaneReconciler) generateKubeadmConfig(ctx context.Contex } // updateExternalObject updates the external object with the labels and annotations from KCP. 
-func (r *KubeadmControlPlaneReconciler) updateExternalObject(ctx context.Context, obj client.Object, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) error { +func (r *KubeadmControlPlaneReconciler) updateExternalObject(ctx context.Context, obj client.Object, objGVK schema.GroupVersionKind, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) error { updatedObject := &unstructured.Unstructured{} - updatedObject.SetGroupVersionKind(obj.GetObjectKind().GroupVersionKind()) + updatedObject.SetGroupVersionKind(objGVK) updatedObject.SetNamespace(obj.GetNamespace()) updatedObject.SetName(obj.GetName()) // Set the UID to ensure that Server-Side-Apply only performs an update diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go index a16ad847bef1..f290d05ad800 100644 --- a/controlplane/kubeadm/internal/controllers/scale.go +++ b/controlplane/kubeadm/internal/controllers/scale.go @@ -39,7 +39,7 @@ import ( ) func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { - logger := ctrl.LoggerFrom(ctx) + log := ctrl.LoggerFrom(ctx) bootstrapSpec := controlPlane.InitialControlPlaneConfig() @@ -56,12 +56,12 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte newMachine, err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd) if err != nil { - logger.Error(err, "Failed to create initial control plane Machine") + log.Error(err, "Failed to create initial control plane Machine") r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedInitialization", "Failed to create initial control plane Machine for cluster %s control plane: %v", klog.KObj(controlPlane.Cluster), err) return ctrl.Result{}, err } - logger.WithValues(controlPlane.StatusToLogKeyAndValues(newMachine, nil)...). + log.WithValues(controlPlane.StatusToLogKeyAndValues(newMachine, nil)...). Info("Machine created (scale up)", "Machine", klog.KObj(newMachine), newMachine.Spec.InfrastructureRef.Kind, klog.KRef(newMachine.Namespace, newMachine.Spec.InfrastructureRef.Name), @@ -72,7 +72,7 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte } func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { - logger := ctrl.LoggerFrom(ctx) + log := ctrl.LoggerFrom(ctx) // Run preflight checks to ensure that the control plane is stable before proceeding with a scale up/scale down operation; if not, wait. if result, err := r.preflightChecks(ctx, controlPlane); err != nil || !result.IsZero() { @@ -95,12 +95,12 @@ func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, newMachine, err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd) if err != nil { - logger.Error(err, "Failed to create additional control plane Machine") + log.Error(err, "Failed to create additional control plane Machine") r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedScaleUp", "Failed to create additional control plane Machine for cluster % control plane: %v", klog.KObj(controlPlane.Cluster), err) return ctrl.Result{}, err } - logger.WithValues(controlPlane.StatusToLogKeyAndValues(newMachine, nil)...). + log.WithValues(controlPlane.StatusToLogKeyAndValues(newMachine, nil)...). 
Info("Machine created (scale up)", "Machine", klog.KObj(newMachine), newMachine.Spec.InfrastructureRef.Kind, klog.KRef(newMachine.Namespace, newMachine.Spec.InfrastructureRef.Name), @@ -115,7 +115,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( controlPlane *internal.ControlPlane, outdatedMachines collections.Machines, ) (ctrl.Result, error) { - logger := ctrl.LoggerFrom(ctx) + log := ctrl.LoggerFrom(ctx) // Pick the Machine that we should scale down. machineToDelete, err := selectMachineForScaleDown(ctx, controlPlane, outdatedMachines) @@ -131,12 +131,12 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) if err != nil { - logger.Error(err, "Failed to create client to workload cluster") + log.Error(err, "Failed to create client to workload cluster") return ctrl.Result{}, errors.Wrapf(err, "failed to create client to workload cluster") } if machineToDelete == nil { - logger.Info("Failed to pick control plane Machine to delete") + log.Info("Failed to pick control plane Machine to delete") return ctrl.Result{}, errors.New("failed to pick control plane Machine to delete") } @@ -144,7 +144,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( if controlPlane.IsEtcdManaged() { etcdLeaderCandidate := controlPlane.Machines.Newest() if err := workloadCluster.ForwardEtcdLeadership(ctx, machineToDelete, etcdLeaderCandidate); err != nil { - logger.Error(err, "Failed to move leadership to candidate machine", "candidate", etcdLeaderCandidate.Name) + log.Error(err, "Failed to move leadership to candidate machine", "candidate", etcdLeaderCandidate.Name) return ctrl.Result{}, err } @@ -152,14 +152,14 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( } if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) { - logger.Error(err, "Failed to delete control plane machine") + log.Error(err, "Failed to delete control plane machine") r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedScaleDown", "Failed to delete control plane Machine %s for cluster %s control plane: %v", machineToDelete.Name, klog.KObj(controlPlane.Cluster), err) return ctrl.Result{}, err } // Note: We intentionally log after Delete because we want this log line to show up only after DeletionTimestamp has been set. // Also, setting DeletionTimestamp doesn't mean the Machine is actually deleted (deletion takes some time). - logger.WithValues(controlPlane.StatusToLogKeyAndValues(nil, machineToDelete)...). + log.WithValues(controlPlane.StatusToLogKeyAndValues(nil, machineToDelete)...). Info("Deleting Machine (scale down)", "Machine", klog.KObj(machineToDelete)) // Requeue the control plane, in case there are additional operations to perform @@ -175,7 +175,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( // // NOTE: this func uses KCP conditions, it is required to call reconcileControlPlaneAndMachinesConditions before this. func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) (ctrl.Result, error) { //nolint:unparam - logger := ctrl.LoggerFrom(ctx) + log := ctrl.LoggerFrom(ctx) // If there is no KCP-owned control-plane machines, then control-plane has not been initialized yet, // so it is considered ok to proceed. 
@@ -196,7 +196,7 @@ func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, con if version, ok := controlPlane.Cluster.GetAnnotations()[clusterv1.ClusterTopologyUpgradeStepAnnotation]; ok { v = version } - logger.Info(fmt.Sprintf("Waiting for a version upgrade to %s to be propagated", v)) + log.Info(fmt.Sprintf("Waiting for a version upgrade to %s to be propagated", v)) controlPlane.PreflightCheckResults.TopologyVersionMismatch = true return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter}, nil } @@ -205,7 +205,7 @@ func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, con // If there are deleting machines, wait for the operation to complete. if controlPlane.HasDeletingMachine() { controlPlane.PreflightCheckResults.HasDeletingMachine = true - logger.Info("Waiting for machines to be deleted", "machines", strings.Join(controlPlane.Machines.Filter(collections.HasDeletionTimestamp).Names(), ", ")) + log.Info("Waiting for machines to be deleted", "machines", strings.Join(controlPlane.Machines.Filter(collections.HasDeletionTimestamp).Names(), ", ")) return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil } @@ -261,7 +261,7 @@ loopmachines: aggregatedError := kerrors.NewAggregate(machineErrors) r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "ControlPlaneUnhealthy", "Waiting for control plane to pass preflight checks to continue reconciliation: %v", aggregatedError) - logger.Info("Waiting for control plane to pass preflight checks", "failures", aggregatedError.Error()) + log.Info("Waiting for control plane to pass preflight checks", "failures", aggregatedError.Error()) return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter}, nil } diff --git a/controlplane/kubeadm/internal/controllers/upgrade.go b/controlplane/kubeadm/internal/controllers/upgrade.go index d267a4205e0e..7768c03877b8 100644 --- a/controlplane/kubeadm/internal/controllers/upgrade.go +++ b/controlplane/kubeadm/internal/controllers/upgrade.go @@ -35,13 +35,13 @@ func (r *KubeadmControlPlaneReconciler) upgradeControlPlane( controlPlane *internal.ControlPlane, machinesRequireUpgrade collections.Machines, ) (ctrl.Result, error) { - logger := ctrl.LoggerFrom(ctx) + log := ctrl.LoggerFrom(ctx) // TODO: handle reconciliation of etcd members and kubeadm config in case they get out of sync with cluster workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) if err != nil { - logger.Error(err, "failed to get remote client for workload cluster", "Cluster", klog.KObj(controlPlane.Cluster)) + log.Error(err, "failed to get remote client for workload cluster", "Cluster", klog.KObj(controlPlane.Cluster)) return ctrl.Result{}, err } @@ -95,7 +95,7 @@ func (r *KubeadmControlPlaneReconciler) upgradeControlPlane( } return r.scaleDownControlPlane(ctx, controlPlane, machinesRequireUpgrade) default: - logger.Info("RolloutStrategy type is not set to RollingUpdate, unable to determine the strategy for rolling out machines") + log.Info("RolloutStrategy type is not set to RollingUpdate, unable to determine the strategy for rolling out machines") return ctrl.Result{}, nil } } diff --git a/controlplane/kubeadm/internal/filters.go b/controlplane/kubeadm/internal/filters.go index 812ddb05a4a1..510a8290e4f0 100644 --- a/controlplane/kubeadm/internal/filters.go +++ b/controlplane/kubeadm/internal/filters.go @@ -33,6 +33,47 @@ import ( "sigs.k8s.io/cluster-api/util/collections" ) +// NotUpToDateResult is the result of calling the UpToDate func for a Machine. 
+type NotUpToDateResult struct { + LogMessages []string + ConditionMessages []string +} + +// UpToDate checks if a Machine is up to date with the control plane's configuration. +// If not, messages explaining why are provided with different level of detail for logs and conditions. +func UpToDate(machine *clusterv1.Machine, kcp *controlplanev1.KubeadmControlPlane, reconciliationTime *metav1.Time, infraMachines map[string]*unstructured.Unstructured, kubeadmConfigs map[string]*bootstrapv1.KubeadmConfig) (bool, *NotUpToDateResult, error) { + res := &NotUpToDateResult{} + + // Machines whose certificates are about to expire. + if collections.ShouldRolloutBefore(reconciliationTime, kcp.Spec.Rollout.Before)(machine) { + res.LogMessages = append(res.LogMessages, "certificates will expire soon, rolloutBefore expired") + res.ConditionMessages = append(res.ConditionMessages, "Certificates will expire soon") + } + + // Machines that are scheduled for rollout (KCP.Spec.RolloutAfter set, + // the RolloutAfter deadline is expired, and the machine was created before the deadline). + if collections.ShouldRolloutAfter(reconciliationTime, kcp.Spec.Rollout.After)(machine) { + res.LogMessages = append(res.LogMessages, "rolloutAfter expired") + res.ConditionMessages = append(res.ConditionMessages, "KubeadmControlPlane spec.rolloutAfter expired") + } + + // Machines that do not match with KCP config. + matches, specLogMessages, specConditionMessages, err := matchesMachineSpec(infraMachines, kubeadmConfigs, kcp, machine) + if err != nil { + return false, nil, errors.Wrapf(err, "failed to determine if Machine %s is up-to-date", machine.Name) + } + if !matches { + res.LogMessages = append(res.LogMessages, specLogMessages...) + res.ConditionMessages = append(res.ConditionMessages, specConditionMessages...) + } + + if len(res.LogMessages) > 0 || len(res.ConditionMessages) > 0 { + return false, res, nil + } + + return true, nil, nil +} + // matchesMachineSpec checks if a Machine matches any of a set of KubeadmConfigs and a set of infra machine configs. // If it doesn't, it returns the reasons why. // Kubernetes version, infrastructure template, and KubeadmConfig field need to be equivalent. @@ -41,21 +82,18 @@ import ( // - mutated in-place (ex: NodeDrainTimeoutSeconds) // - are not dictated by KCP (ex: ProviderID) // - are not relevant for the rollout decision (ex: failureDomain). -func matchesMachineSpec(infraConfigs map[string]*unstructured.Unstructured, machineConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (bool, []string, []string, error) { +func matchesMachineSpec(infraMachines map[string]*unstructured.Unstructured, kubeadmConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (bool, []string, []string, error) { logMessages := []string{} conditionMessages := []string{} if !collections.MatchesKubernetesVersion(kcp.Spec.Version)(machine) { - machineVersion := "" - if machine != nil && machine.Spec.Version != "" { - machineVersion = machine.Spec.Version - } + machineVersion := machine.Spec.Version logMessages = append(logMessages, fmt.Sprintf("Machine version %q is not equal to KCP version %q", machineVersion, kcp.Spec.Version)) // Note: the code computing the message for KCP's RolloutOut condition is making assumptions on the format/content of this message. 
conditionMessages = append(conditionMessages, fmt.Sprintf("Version %s, %s required", machineVersion, kcp.Spec.Version)) } - reason, matches, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, machine) + reason, matches, err := matchesKubeadmConfig(kubeadmConfigs, kcp, machine) if err != nil { return false, nil, nil, errors.Wrapf(err, "failed to match Machine spec") } @@ -64,7 +102,7 @@ func matchesMachineSpec(infraConfigs map[string]*unstructured.Unstructured, mach conditionMessages = append(conditionMessages, "KubeadmConfig is not up-to-date") } - if reason, matches := matchesTemplateClonedFrom(infraConfigs, kcp, machine); !matches { + if reason, matches := matchesInfraMachine(infraMachines, kcp, machine); !matches { logMessages = append(logMessages, reason) conditionMessages = append(conditionMessages, fmt.Sprintf("%s is not up-to-date", machine.Spec.InfrastructureRef.Kind)) } @@ -76,59 +114,19 @@ func matchesMachineSpec(infraConfigs map[string]*unstructured.Unstructured, mach return true, nil, nil, nil } -// UpToDate checks if a Machine is up to date with the control plane's configuration. -// If not, messages explaining why are provided with different level of detail for logs and conditions. -func UpToDate(machine *clusterv1.Machine, kcp *controlplanev1.KubeadmControlPlane, reconciliationTime *metav1.Time, infraConfigs map[string]*unstructured.Unstructured, machineConfigs map[string]*bootstrapv1.KubeadmConfig) (bool, []string, []string, error) { - logMessages := []string{} - conditionMessages := []string{} - - // Machines whose certificates are about to expire. - if collections.ShouldRolloutBefore(reconciliationTime, kcp.Spec.Rollout.Before)(machine) { - logMessages = append(logMessages, "certificates will expire soon, rolloutBefore expired") - conditionMessages = append(conditionMessages, "Certificates will expire soon") - } - - // Machines that are scheduled for rollout (KCP.Spec.RolloutAfter set, - // the RolloutAfter deadline is expired, and the machine was created before the deadline). - if collections.ShouldRolloutAfter(reconciliationTime, kcp.Spec.Rollout.After)(machine) { - logMessages = append(logMessages, "rolloutAfter expired") - conditionMessages = append(conditionMessages, "KubeadmControlPlane spec.rolloutAfter expired") - } - - // Machines that do not match with KCP config. - matches, specLogMessages, specConditionMessages, err := matchesMachineSpec(infraConfigs, machineConfigs, kcp, machine) - if err != nil { - return false, nil, nil, errors.Wrapf(err, "failed to determine if Machine %s is up-to-date", machine.Name) - } - if !matches { - logMessages = append(logMessages, specLogMessages...) - conditionMessages = append(conditionMessages, specConditionMessages...) - } - - if len(logMessages) > 0 || len(conditionMessages) > 0 { - return false, logMessages, conditionMessages, nil - } - - return true, nil, nil, nil -} - -// matchesTemplateClonedFrom checks if a Machine has a corresponding infrastructure machine that +// matchesInfraMachine checks if a Machine has a corresponding infrastructure machine that // matches a given KCP infra template and if it doesn't match returns the reason why. // Note: Differences to the labels and annotations on the infrastructure machine are not considered for matching // criteria, because changes to labels and annotations are propagated in-place to the infrastructure machines. -// TODO: This function will be renamed in a follow-up PR to something better. (ex: MatchesInfraMachine). 
-func matchesTemplateClonedFrom(infraConfigs map[string]*unstructured.Unstructured, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (string, bool) { - if machine == nil { - return "Machine cannot be compared with KCP.spec.machineTemplate.spec.infrastructureRef: Machine is nil", false - } - infraObj, found := infraConfigs[machine.Name] +func matchesInfraMachine(infraMachines map[string]*unstructured.Unstructured, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (string, bool) { + currentInfraMachine, found := infraMachines[machine.Name] if !found { // Return true here because failing to get infrastructure machine should not be considered as unmatching. return "", true } - clonedFromName, ok1 := infraObj.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation] - clonedFromGroupKind, ok2 := infraObj.GetAnnotations()[clusterv1.TemplateClonedFromGroupKindAnnotation] + clonedFromName, ok1 := currentInfraMachine.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation] + clonedFromGroupKind, ok2 := currentInfraMachine.GetAnnotations()[clusterv1.TemplateClonedFromGroupKindAnnotation] if !ok1 || !ok2 { // All kcp cloned infra machines should have this annotation. // Missing the annotation may be due to older version machines or adopted machines. @@ -147,14 +145,10 @@ func matchesTemplateClonedFrom(infraConfigs map[string]*unstructured.Unstructure return "", true } -// matchesKubeadmBootstrapConfig checks if machine's KubeadmConfigSpec is equivalent with KCP's KubeadmConfigSpec. +// matchesKubeadmConfig checks if machine's KubeadmConfigSpec is equivalent with KCP's KubeadmConfigSpec. // Note: Differences to the labels and annotations on the KubeadmConfig are not considered for matching // criteria, because changes to labels and annotations are propagated in-place to KubeadmConfig. -func matchesKubeadmBootstrapConfig(machineConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (string, bool, error) { - if machine == nil { - return "Machine KubeadmConfig cannot be compared: Machine is nil", false, nil - } - +func matchesKubeadmConfig(kubeadmConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (string, bool, error) { bootstrapRef := machine.Spec.Bootstrap.ConfigRef if !bootstrapRef.IsDefined() { // Missing bootstrap reference should not be considered as unmatching. @@ -162,7 +156,7 @@ func matchesKubeadmBootstrapConfig(machineConfigs map[string]*bootstrapv1.Kubead return "", true, nil } - machineConfig, found := machineConfigs[machine.Name] + currentKubeadmConfig, found := kubeadmConfigs[machine.Name] if !found { // Return true here because failing to get KubeadmConfig should not be considered as unmatching. // This is a safety precaution to avoid rolling out machines if the client or the api-server is misbehaving. 
@@ -170,7 +164,7 @@ func matchesKubeadmBootstrapConfig(machineConfigs map[string]*bootstrapv1.Kubead } // Check if KCP and machine ClusterConfiguration matches, if not return - match, diff, err := matchClusterConfiguration(machineConfig, kcp) + match, diff, err := matchClusterConfiguration(currentKubeadmConfig, kcp) if err != nil { return "", false, errors.Wrapf(err, "failed to match KubeadmConfig") } @@ -181,7 +175,7 @@ func matchesKubeadmBootstrapConfig(machineConfigs map[string]*bootstrapv1.Kubead // Check if KCP and machine InitConfiguration or JoinConfiguration matches // NOTE: only one between init configuration and join configuration is set on a machine, depending // on the fact that the machine was the initial control plane node or a joining control plane node. - match, diff, err = matchInitOrJoinConfiguration(machineConfig, kcp) + match, diff, err = matchInitOrJoinConfiguration(currentKubeadmConfig, kcp) if err != nil { return "", false, errors.Wrapf(err, "failed to match KubeadmConfig") } diff --git a/controlplane/kubeadm/internal/filters_test.go b/controlplane/kubeadm/internal/filters_test.go index 243908a7dd70..4456e5d3f079 100644 --- a/controlplane/kubeadm/internal/filters_test.go +++ b/controlplane/kubeadm/internal/filters_test.go @@ -710,7 +710,7 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { }) } -func TestMatchesKubeadmBootstrapConfig(t *testing.T) { +func TestMatchesKubeadmConfig(t *testing.T) { t.Run("returns true if ClusterConfiguration is equal", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ @@ -751,7 +751,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: machineConfig, } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -796,7 +796,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: machineConfig, } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeFalse()) g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig ClusterConfiguration is outdated: diff: v1beta2.ClusterConfiguration{ @@ -848,7 +848,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { }, }, } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -899,7 +899,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { }, }, } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeFalse()) g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig InitConfiguration or JoinConfiguration are outdated: diff: &v1beta2.KubeadmConfigSpec{ @@ -960,7 +960,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { }, }, } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) 
g.Expect(reason).To(BeEmpty()) @@ -1011,7 +1011,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { }, }, } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeFalse()) g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig InitConfiguration or JoinConfiguration are outdated: diff: &v1beta2.KubeadmConfigSpec{ @@ -1073,7 +1073,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { }, }, } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -1117,7 +1117,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { }, }, } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeFalse()) g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig InitConfiguration or JoinConfiguration are outdated: diff: &v1beta2.KubeadmConfigSpec{ @@ -1183,7 +1183,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Annotations = nil machineConfigs[m.Name].Labels = nil - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -1193,7 +1193,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Annotations = kcp.Spec.MachineTemplate.ObjectMeta.Annotations machineConfigs[m.Name].Labels = nil - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -1203,7 +1203,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Annotations = nil machineConfigs[m.Name].Labels = kcp.Spec.MachineTemplate.ObjectMeta.Labels - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -1213,7 +1213,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Labels = kcp.Spec.MachineTemplate.ObjectMeta.Labels machineConfigs[m.Name].Annotations = kcp.Spec.MachineTemplate.ObjectMeta.Annotations - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, match, err := matchesKubeadmConfig(machineConfigs, kcp, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -1221,14 +1221,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { }) } -func TestMatchesTemplateClonedFrom(t *testing.T) { - t.Run("nil machine returns false", func(t *testing.T) { - g := NewWithT(t) - reason, match := matchesTemplateClonedFrom(nil, nil, nil) - g.Expect(match).To(BeFalse()) - g.Expect(reason).To(Equal("Machine cannot be compared with KCP.spec.machineTemplate.spec.infrastructureRef: Machine is nil")) - }) - +func TestMatchesInfraMachine(t *testing.T) { 
t.Run("returns true if machine not found", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{} @@ -1241,7 +1234,7 @@ func TestMatchesTemplateClonedFrom(t *testing.T) { }, }, } - reason, match := matchesTemplateClonedFrom(map[string]*unstructured.Unstructured{}, kcp, machine) + reason, match := matchesInfraMachine(map[string]*unstructured.Unstructured{}, kcp, machine) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ -1301,7 +1294,7 @@ func TestMatchesTemplateClonedFrom(t *testing.T) { clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericMachineTemplate.generic.io", }) infraConfigs[m.Name].SetLabels(nil) - reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, m) + reason, match := matchesInfraMachine(infraConfigs, kcp, m) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ -1314,7 +1307,7 @@ func TestMatchesTemplateClonedFrom(t *testing.T) { "test": "annotation", }) infraConfigs[m.Name].SetLabels(nil) - reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, m) + reason, match := matchesInfraMachine(infraConfigs, kcp, m) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ -1326,7 +1319,7 @@ func TestMatchesTemplateClonedFrom(t *testing.T) { clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericMachineTemplate.generic.io", }) infraConfigs[m.Name].SetLabels(kcp.Spec.MachineTemplate.ObjectMeta.Labels) - reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, m) + reason, match := matchesInfraMachine(infraConfigs, kcp, m) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ -1339,14 +1332,14 @@ func TestMatchesTemplateClonedFrom(t *testing.T) { "test": "annotation", }) infraConfigs[m.Name].SetLabels(kcp.Spec.MachineTemplate.ObjectMeta.Labels) - reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, m) + reason, match := matchesInfraMachine(infraConfigs, kcp, m) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) }) } -func TestMatchesTemplateClonedFrom_WithClonedFromAnnotations(t *testing.T) { +func TestMatchesInfraMachine_WithClonedFromAnnotations(t *testing.T) { kcp := &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", @@ -1427,7 +1420,7 @@ func TestMatchesTemplateClonedFrom_WithClonedFromAnnotations(t *testing.T) { }, }, } - reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, machine) + reason, match := matchesInfraMachine(infraConfigs, kcp, machine) g.Expect(match).To(Equal(tt.expectMatch)) g.Expect(reason).To(Equal(tt.expectReason)) }) @@ -1618,11 +1611,15 @@ func TestUpToDate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - upToDate, logMessages, conditionMessages, err := UpToDate(tt.machine, tt.kcp, &reconciliationTime, tt.infraConfigs, tt.machineConfigs) + upToDate, res, err := UpToDate(tt.machine, tt.kcp, &reconciliationTime, tt.infraConfigs, tt.machineConfigs) g.Expect(err).ToNot(HaveOccurred()) g.Expect(upToDate).To(Equal(tt.expectUptoDate)) - g.Expect(logMessages).To(BeComparableTo(tt.expectLogMessages)) - g.Expect(conditionMessages).To(Equal(tt.expectConditionMessages)) + if upToDate { + g.Expect(res).To(BeNil()) + } else { + g.Expect(res.LogMessages).To(BeComparableTo(tt.expectLogMessages)) + g.Expect(res.ConditionMessages).To(Equal(tt.expectConditionMessages)) + } }) } }