Skip to content

Commit 51ce7b1

Browse files
committed
e2e: adds test for nodeadm
1 parent 0e920d2 commit 51ce7b1

File tree

2 files changed

+230
-0
lines changed

2 files changed

+230
-0
lines changed
Lines changed: 174 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,174 @@
1+
//go:build e2e
2+
// +build e2e
3+
4+
/*
5+
Copyright 2020 The Kubernetes Authors.
6+
7+
Licensed under the Apache License, Version 2.0 (the "License");
8+
you may not use this file except in compliance with the License.
9+
You may obtain a copy of the License at
10+
11+
http://www.apache.org/licenses/LICENSE-2.0
12+
13+
Unless required by applicable law or agreed to in writing, software
14+
distributed under the License is distributed on an "AS IS" BASIS,
15+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16+
See the License for the specific language governing permissions and
17+
limitations under the License.
18+
*/
19+
20+
package managed
21+
22+
import (
23+
"context"
24+
"fmt"
25+
26+
"github.com/blang/semver"
27+
"github.com/onsi/ginkgo/v2"
28+
. "github.com/onsi/gomega"
29+
corev1 "k8s.io/api/core/v1"
30+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
31+
ref "k8s.io/client-go/tools/reference"
32+
33+
eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
34+
ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
35+
"sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared"
36+
"sigs.k8s.io/cluster-api/test/framework"
37+
"sigs.k8s.io/cluster-api/util"
38+
)
39+
40+
// EKS cluster upgrade tests.
41+
var _ = ginkgo.Describe("EKS Cluster upgrade test", func() {
42+
var (
43+
namespace *corev1.Namespace
44+
ctx context.Context
45+
specName = "eks-upgrade"
46+
clusterName string
47+
initialVersion string
48+
upgradeToVersion string
49+
)
50+
51+
shared.ConditionalIt(runUpgradeTests, "[managed] [upgrade] [nodeadm] should create a cluster and upgrade the kubernetes version", func() {
52+
ginkgo.By("should have a valid test configuration")
53+
Expect(e2eCtx.Environment.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. BootstrapClusterProxy can't be nil")
54+
Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
55+
Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.EksUpgradeFromVersion))
56+
Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.EksUpgradeToVersion))
57+
ctx = context.TODO()
58+
namespace = shared.SetupSpecNamespace(ctx, specName, e2eCtx)
59+
clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
60+
61+
initialVersion = e2eCtx.E2EConfig.MustGetVariable(shared.EksUpgradeFromVersion)
62+
upgradeToVersion = e2eCtx.E2EConfig.MustGetVariable(shared.EksUpgradeToVersion)
63+
64+
ginkgo.By("default iam role should exist")
65+
VerifyRoleExistsAndOwned(ctx, ekscontrolplanev1.DefaultEKSControlPlaneRole, clusterName, false, e2eCtx.AWSSession)
66+
67+
ginkgo.By("should create an EKS control plane")
68+
ManagedClusterSpec(ctx, func() ManagedClusterSpecInput {
69+
return ManagedClusterSpecInput{
70+
E2EConfig: e2eCtx.E2EConfig,
71+
ConfigClusterFn: defaultConfigCluster,
72+
BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy,
73+
AWSSession: e2eCtx.BootstrapUserAWSSession,
74+
Namespace: namespace,
75+
ClusterName: clusterName,
76+
Flavour: EKSControlPlaneOnlyFlavor, // TODO (richardcase) - change in the future when upgrades to machinepools work
77+
ControlPlaneMachineCount: 1, // NOTE: this cannot be zero as clusterctl returns an error
78+
WorkerMachineCount: 0,
79+
KubernetesVersion: initialVersion,
80+
}
81+
})
82+
83+
ginkgo.By(fmt.Sprintf("getting cluster with name %s", clusterName))
84+
cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{
85+
Getter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
86+
Namespace: namespace.Name,
87+
Name: clusterName,
88+
})
89+
Expect(cluster).NotTo(BeNil(), "couldn't find cluster")
90+
91+
ginkgo.By("should create a MachineDeployment")
92+
MachineDeploymentSpec(ctx, func() MachineDeploymentSpecInput {
93+
return MachineDeploymentSpecInput{
94+
E2EConfig: e2eCtx.E2EConfig,
95+
ConfigClusterFn: defaultConfigCluster,
96+
BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy,
97+
AWSSession: e2eCtx.BootstrapUserAWSSession,
98+
Namespace: namespace,
99+
ClusterName: clusterName,
100+
Replicas: 1,
101+
Cleanup: false,
102+
}
103+
})
104+
105+
ginkgo.By(fmt.Sprintf("should upgrade control plane to version %s", upgradeToVersion))
106+
UpgradeControlPlaneVersionSpec(ctx, func() UpgradeControlPlaneVersionSpecInput {
107+
return UpgradeControlPlaneVersionSpecInput{
108+
E2EConfig: e2eCtx.E2EConfig,
109+
AWSSession: e2eCtx.BootstrapUserAWSSession,
110+
BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy,
111+
ClusterName: clusterName,
112+
Namespace: namespace,
113+
UpgradeVersion: upgradeToVersion,
114+
}
115+
})
116+
117+
ginkgo.By(fmt.Sprintf("should upgrade mahchine deployments to version %s", upgradeToVersion))
118+
kube133, err := semver.ParseTolerant("1.33.0")
119+
Expect(err).To(BeNil(), "semver should pass")
120+
upgradeToVersionParse, err := semver.ParseTolerant(upgradeToVersion)
121+
Expect(err).To(BeNil(), "semver should pass")
122+
123+
md := framework.DiscoveryAndWaitForMachineDeployments(ctx, framework.DiscoveryAndWaitForMachineDeploymentsInput{
124+
Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
125+
Cluster: cluster,
126+
}, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...)
127+
var nodeadmConfigTemplate *eksbootstrapv1.NodeadmConfigTemplate
128+
if upgradeToVersionParse.GTE(kube133) {
129+
ginkgo.By("creating a nodeadmconfigtemplate object")
130+
nodeadmConfigTemplate = &eksbootstrapv1.NodeadmConfigTemplate{
131+
ObjectMeta: metav1.ObjectMeta{
132+
Name: fmt.Sprintf("%s-nodeadm-config", clusterName),
133+
Namespace: namespace.Name,
134+
},
135+
Spec: eksbootstrapv1.NodeadmConfigTemplateSpec{
136+
Template: eksbootstrapv1.NodeadmConfigTemplateResource{
137+
Spec: eksbootstrapv1.NodeadmConfigSpec{
138+
PreBootstrapCommands: []string{
139+
"echo \"hello world\"",
140+
},
141+
},
142+
},
143+
},
144+
}
145+
ginkgo.By("creating the nodeadm config template in the cluster")
146+
Expect(e2eCtx.Environment.BootstrapClusterProxy.GetClient().Create(ctx, nodeadmConfigTemplate)).To(Succeed())
147+
}
148+
ginkgo.By("upgrading machine deployments")
149+
input := UpgradeMachineDeploymentsAndWaitInput{
150+
BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy,
151+
Cluster: cluster,
152+
UpgradeVersion: upgradeToVersion,
153+
MachineDeployments: md,
154+
WaitForMachinesToBeUpgraded: e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes"),
155+
}
156+
if nodeadmConfigTemplate != nil {
157+
nodeadmRef, err := ref.GetReference(initScheme(), nodeadmConfigTemplate)
158+
Expect(err).To(BeNil(), "object should have ref")
159+
input.UpgradeBootstrapTemplate = nodeadmRef
160+
}
161+
UpgradeMachineDeploymentsAndWait(ctx, input)
162+
163+
framework.DeleteCluster(ctx, framework.DeleteClusterInput{
164+
Deleter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
165+
Cluster: cluster,
166+
})
167+
framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{
168+
ClusterProxy: e2eCtx.Environment.BootstrapClusterProxy,
169+
Cluster: cluster,
170+
ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath,
171+
ArtifactFolder: e2eCtx.Settings.ArtifactFolder,
172+
}, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...)
173+
})
174+
})

test/e2e/suites/managed/machine_deployment.go

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,17 +22,21 @@ package managed
2222
import (
2323
"context"
2424
"fmt"
25+
"time"
2526

2627
"github.com/aws/aws-sdk-go-v2/aws"
2728
. "github.com/onsi/ginkgo/v2"
2829
. "github.com/onsi/gomega"
2930
corev1 "k8s.io/api/core/v1"
31+
"k8s.io/klog/v2"
3032
"k8s.io/utils/ptr"
3133

34+
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
3235
"sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared"
3336
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
3437
"sigs.k8s.io/cluster-api/test/framework"
3538
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
39+
"sigs.k8s.io/cluster-api/util/patch"
3640
)
3741

3842
// MachineDeploymentSpecInput is the input for MachineDeploymentSpec.
@@ -112,3 +116,55 @@ func MachineDeploymentSpec(ctx context.Context, inputGetter func() MachineDeploy
112116
}, input.E2EConfig.GetIntervals("", "wait-delete-machine")...)
113117
}
114118
}
119+
120+
// UpgradeMachineDeploymentsAndWaitInput is the input type for UpgradeMachineDeploymentsAndWait.
// This function is copied from capi-core, but also allows the user to change
// the bootstrap reference as well.
type UpgradeMachineDeploymentsAndWaitInput struct {
	// BootstrapClusterProxy is the proxy used to reach the management cluster. Required.
	BootstrapClusterProxy framework.ClusterProxy
	// Cluster is the workload cluster whose MachineDeployments are upgraded. Required.
	Cluster *clusterv1.Cluster
	// UpgradeVersion is the Kubernetes version to set on each MachineDeployment. Required.
	UpgradeVersion string
	// UpgradeMachineTemplate, if set, replaces the name of the infrastructure
	// machine template referenced by each MachineDeployment.
	UpgradeMachineTemplate *string
	// UpgradeBootstrapTemplate, if set, replaces the bootstrap config reference
	// on each MachineDeployment (this is the addition over the capi-core copy).
	UpgradeBootstrapTemplate *corev1.ObjectReference
	// MachineDeployments are the deployments to upgrade. Must be non-empty.
	MachineDeployments []*clusterv1.MachineDeployment
	// WaitForMachinesToBeUpgraded are the interval/timeout arguments passed to
	// the wait helper (e.g. from E2EConfig.GetIntervals).
	WaitForMachinesToBeUpgraded []interface{}
}
132+
133+
// UpgradeMachineDeploymentsAndWait upgrades a machine deployment and waits for its machines to be upgraded.
134+
func UpgradeMachineDeploymentsAndWait(ctx context.Context, input UpgradeMachineDeploymentsAndWaitInput) {
135+
Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeMachineDeploymentsAndWait")
136+
Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling UpgradeMachineDeploymentsAndWait")
137+
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling UpgradeMachineDeploymentsAndWait")
138+
Expect(input.UpgradeVersion).ToNot(BeNil(), "Invalid argument. input.UpgradeVersion can't be nil when calling UpgradeMachineDeploymentsAndWait")
139+
Expect(input.MachineDeployments).ToNot(BeEmpty(), "Invalid argument. input.MachineDeployments can't be empty when calling UpgradeMachineDeploymentsAndWait")
140+
141+
mgmtClient := input.BootstrapClusterProxy.GetClient()
142+
143+
for _, deployment := range input.MachineDeployments {
144+
log := logger.FromContext(ctx)
145+
patchHelper, err := patch.NewHelper(deployment, mgmtClient)
146+
Expect(err).ToNot(HaveOccurred())
147+
148+
oldVersion := deployment.Spec.Template.Spec.Version
149+
deployment.Spec.Template.Spec.Version = &input.UpgradeVersion
150+
if input.UpgradeMachineTemplate != nil {
151+
deployment.Spec.Template.Spec.InfrastructureRef.Name = *input.UpgradeMachineTemplate
152+
}
153+
if input.UpgradeBootstrapTemplate != nil {
154+
deployment.Spec.Template.Spec.Bootstrap.ConfigRef = input.UpgradeBootstrapTemplate
155+
}
156+
Eventually(func() error {
157+
return patchHelper.Patch(ctx, deployment)
158+
}, time.Minute*3, time.Second*3).Should(Succeed(), "Failed to patch Kubernetes version on MachineDeployment %s", klog.KObj(deployment))
159+
160+
log.Logf("Waiting for Kubernetes versions of machines in MachineDeployment %s to be upgraded from %s to %s",
161+
deployment.Name, *oldVersion, input.UpgradeVersion)
162+
framework.WaitForMachineDeploymentMachinesToBeUpgraded(ctx, framework.WaitForMachineDeploymentMachinesToBeUpgradedInput{
163+
Lister: mgmtClient,
164+
Cluster: input.Cluster,
165+
MachineCount: int(*deployment.Spec.Replicas),
166+
KubernetesUpgradeVersion: input.UpgradeVersion,
167+
MachineDeployment: *deployment,
168+
}, input.WaitForMachinesToBeUpgraded...)
169+
}
170+
}

0 commit comments

Comments
 (0)