Skip to content

Commit 3135b45

Browse files
Merge pull request #2213 from metal3-io-bot/cherry-pick-2201-to-release-1.9
[release-1.9] 🌱 Remove CP scaling from e2e-feature-test
2 parents fa47499 + 0203918 commit 3135b45

File tree

9 files changed

+199
-100
lines changed

9 files changed

+199
-100
lines changed

scripts/environment.sh

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,13 @@ if [[ ${GINKGO_FOCUS:-} == "integration" || ${GINKGO_FOCUS:-} == "basic" ]]; the
7878
export WORKER_MACHINE_COUNT=${WORKER_MACHINE_COUNT:-"1"}
7979
fi
8080

81+
# IPReuse feature test environment vars and config
82+
if [[ ${GINKGO_FOCUS:-} == "features" && ${GINKGO_SKIP:-} == "pivoting remediation" ]]; then
83+
export NUM_NODES="5"
84+
export CONTROL_PLANE_MACHINE_COUNT=${CONTROL_PLANE_MACHINE_COUNT:-"3"}
85+
export WORKER_MACHINE_COUNT=${WORKER_MACHINE_COUNT:-"2"}
86+
fi
87+
8188
# Exported to the cluster templates
8289
# Generate user ssh key
8390
if [ ! -f "${HOME}/.ssh/id_rsa" ]; then

test/e2e/basic_integration_test.go

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,21 @@ var _ = Describe("When testing basic cluster creation [basic]", Label("basic"),
2323
By("Fetching cluster configuration")
2424
k8sVersion := e2eConfig.GetVariable("KUBERNETES_VERSION")
2525
By("Provision Workload cluster")
26-
targetCluster, _ = createTargetCluster(k8sVersion)
26+
targetCluster, _ = CreateTargetCluster(ctx, func() CreateTargetClusterInput {
27+
return CreateTargetClusterInput{
28+
E2EConfig: e2eConfig,
29+
BootstrapClusterProxy: bootstrapClusterProxy,
30+
SpecName: specName,
31+
ClusterName: clusterName,
32+
K8sVersion: k8sVersion,
33+
KCPMachineCount: int64(numberOfControlplane),
34+
WorkerMachineCount: int64(numberOfWorkers),
35+
ClusterctlLogFolder: clusterctlLogFolder,
36+
ClusterctlConfigPath: clusterctlConfigPath,
37+
OSType: osType,
38+
Namespace: namespace,
39+
}
40+
})
2741
})
2842

2943
AfterEach(func() {

test/e2e/common.go

Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ import (
3232
apierrors "k8s.io/apimachinery/pkg/api/errors"
3333
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
3434
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
35+
"k8s.io/apimachinery/pkg/labels"
3536
"k8s.io/apimachinery/pkg/types"
3637
kerrors "k8s.io/apimachinery/pkg/util/errors"
3738
"k8s.io/client-go/kubernetes"
@@ -997,3 +998,53 @@ func CreateOrUpdateWithNamespace(ctx context.Context, p framework.ClusterProxy,
997998
}
998999
return kerrors.NewAggregate(retErrs)
9991000
}
1001+
1002+
// CreateTargetClusterInput carries the configuration consumed by
// CreateTargetCluster to provision a workload cluster.
type CreateTargetClusterInput struct {
	// E2EConfig supplies the wait intervals looked up under SpecName.
	E2EConfig *clusterctl.E2EConfig
	// BootstrapClusterProxy is the proxy to the bootstrap (management) cluster
	// the cluster template is applied through.
	BootstrapClusterProxy framework.ClusterProxy
	// SpecName is the key used when fetching intervals from E2EConfig.
	SpecName string
	// ClusterName is the name of the workload cluster to create.
	ClusterName string
	// K8sVersion is the Kubernetes version to deploy (also used to pick the node image).
	K8sVersion string
	// KCPMachineCount is the number of control-plane machines to request.
	KCPMachineCount int64
	// WorkerMachineCount is the number of worker machines to request.
	WorkerMachineCount int64
	// ClusterctlLogFolder is the directory clusterctl logs are written to.
	ClusterctlLogFolder string
	// ClusterctlConfigPath points at the clusterctl configuration file.
	ClusterctlConfigPath string
	// OSType selects the cluster template flavor.
	OSType string
	// Namespace is where the cluster objects are created.
	Namespace string
}
1016+
func CreateTargetCluster(ctx context.Context, inputGetter func() CreateTargetClusterInput) (framework.ClusterProxy, *clusterctl.ApplyClusterTemplateAndWaitResult) {
1017+
By("Creating a high available cluster")
1018+
input := inputGetter()
1019+
imageURL, imageChecksum := EnsureImage(input.K8sVersion)
1020+
os.Setenv("IMAGE_RAW_CHECKSUM", imageChecksum)
1021+
os.Setenv("IMAGE_RAW_URL", imageURL)
1022+
controlPlaneMachineCount := input.KCPMachineCount
1023+
workerMachineCount := input.WorkerMachineCount
1024+
result := clusterctl.ApplyClusterTemplateAndWaitResult{}
1025+
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
1026+
ClusterProxy: input.BootstrapClusterProxy,
1027+
ConfigCluster: clusterctl.ConfigClusterInput{
1028+
LogFolder: input.ClusterctlLogFolder,
1029+
ClusterctlConfigPath: input.ClusterctlConfigPath,
1030+
KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(),
1031+
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
1032+
Flavor: input.OSType,
1033+
Namespace: input.Namespace,
1034+
ClusterName: input.ClusterName,
1035+
KubernetesVersion: input.K8sVersion,
1036+
ControlPlaneMachineCount: &controlPlaneMachineCount,
1037+
WorkerMachineCount: &workerMachineCount,
1038+
},
1039+
WaitForClusterIntervals: input.E2EConfig.GetIntervals(input.SpecName, "wait-cluster"),
1040+
WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(input.SpecName, "wait-control-plane"),
1041+
WaitForMachineDeployments: input.E2EConfig.GetIntervals(input.SpecName, "wait-worker-nodes"),
1042+
}, &result)
1043+
targetCluster := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, input.Namespace, result.Cluster.Name)
1044+
framework.WaitForPodListCondition(ctx, framework.WaitForPodListConditionInput{
1045+
Lister: targetCluster.GetClient(),
1046+
ListOptions: &client.ListOptions{LabelSelector: labels.Everything(), Namespace: "kube-system"},
1047+
Condition: framework.PhasePodCondition(corev1.PodRunning),
1048+
}, input.E2EConfig.GetIntervals(input.SpecName, "wait-all-pod-to-be-running-on-target-cluster")...)
1049+
return targetCluster, &result
1050+
}

test/e2e/integration_test.go

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,21 @@ var _ = Describe("When testing integration [integration]", Label("integration"),
2525
numberOfControlplane = int(*e2eConfig.GetInt32PtrVariable("CONTROL_PLANE_MACHINE_COUNT"))
2626
k8sVersion := e2eConfig.GetVariable("KUBERNETES_VERSION")
2727
By("Provision Workload cluster")
28-
targetCluster, _ = createTargetCluster(k8sVersion)
28+
targetCluster, _ = CreateTargetCluster(ctx, func() CreateTargetClusterInput {
29+
return CreateTargetClusterInput{
30+
E2EConfig: e2eConfig,
31+
BootstrapClusterProxy: bootstrapClusterProxy,
32+
SpecName: specName,
33+
ClusterName: clusterName,
34+
K8sVersion: k8sVersion,
35+
KCPMachineCount: int64(numberOfControlplane),
36+
WorkerMachineCount: int64(numberOfWorkers),
37+
ClusterctlLogFolder: clusterctlLogFolder,
38+
ClusterctlConfigPath: clusterctlConfigPath,
39+
OSType: osType,
40+
Namespace: namespace,
41+
}
42+
})
2943
By("Pivot objects to target cluster")
3044
pivoting(ctx, func() PivotingInput {
3145
return PivotingInput{

test/e2e/ip_reuse.go

Lines changed: 45 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -30,51 +30,53 @@ func IPReuse(ctx context.Context, inputGetter func() IPReuseInput) {
3030
targetClusterClient := input.TargetCluster.GetClient()
3131
managementClusterClient := input.BootstrapClusterProxy.GetClient()
3232
fromK8sVersion := input.E2EConfig.GetVariable("FROM_K8S_VERSION")
33-
kubernetesVersion := input.E2EConfig.GetVariable("KUBERNETES_VERSION")
33+
toK8sVersion := input.E2EConfig.GetVariable("KUBERNETES_VERSION")
34+
numberOfControlplane := *input.E2EConfig.GetInt32PtrVariable("CONTROL_PLANE_MACHINE_COUNT")
35+
numberOfWorkers := *input.E2EConfig.GetInt32PtrVariable("WORKER_MACHINE_COUNT")
36+
numberOfAllBmh := numberOfControlplane + numberOfWorkers
3437

35-
// scale down KCP to 1
36-
By("Scale the controlplane down to 1")
37-
ScaleKubeadmControlPlane(ctx, managementClusterClient, client.ObjectKey{Namespace: input.Namespace, Name: input.ClusterName}, 1)
38-
Byf("Wait until controlplane is scaled down and %d BMHs are Available", 2)
39-
WaitForNumBmhInState(ctx, bmov1alpha1.StateAvailable, WaitForNumInput{
38+
// Download node image
39+
Byf("Download image %s", toK8sVersion)
40+
imageURL, imageChecksum := EnsureImage(toK8sVersion)
41+
42+
// Upgrade KCP
43+
By("Create new KCP Metal3MachineTemplate with upgraded image to boot")
44+
KCPm3MachineTemplateName := fmt.Sprintf("%s-controlplane", input.ClusterName)
45+
KCPNewM3MachineTemplateName := fmt.Sprintf("%s-new-controlplane", input.ClusterName)
46+
CreateNewM3MachineTemplate(ctx, input.Namespace, KCPNewM3MachineTemplateName, KCPm3MachineTemplateName, managementClusterClient, imageURL, imageChecksum)
47+
48+
Byf("Update KCP to upgrade k8s version and binaries from %s to %s", fromK8sVersion, toK8sVersion)
49+
kcpObj := framework.GetKubeadmControlPlaneByCluster(ctx, framework.GetKubeadmControlPlaneByClusterInput{
50+
Lister: managementClusterClient,
51+
ClusterName: input.ClusterName,
52+
Namespace: input.Namespace,
53+
})
54+
helper, err := patch.NewHelper(kcpObj, managementClusterClient)
55+
Expect(err).NotTo(HaveOccurred())
56+
kcpObj.Spec.MachineTemplate.InfrastructureRef.Name = KCPNewM3MachineTemplateName
57+
kcpObj.Spec.Version = toK8sVersion
58+
kcpObj.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntVal = 0
59+
60+
Expect(helper.Patch(ctx, kcpObj)).To(Succeed())
61+
62+
Byf("Wait until %d Control Plane machines become running and updated with the new %s k8s version", numberOfControlplane, toK8sVersion)
63+
runningAndUpgraded := func(machine clusterv1.Machine) bool {
64+
running := machine.Status.GetTypedPhase() == clusterv1.MachinePhaseRunning
65+
upgraded := *machine.Spec.Version == toK8sVersion
66+
return (running && upgraded)
67+
}
68+
WaitForNumMachines(ctx, runningAndUpgraded, WaitForNumInput{
4069
Client: managementClusterClient,
4170
Options: []client.ListOption{client.InNamespace(input.Namespace)},
42-
Replicas: 2,
43-
Intervals: input.E2EConfig.GetIntervals(input.SpecName, "wait-cp-available"),
71+
Replicas: int(numberOfControlplane),
72+
Intervals: input.E2EConfig.GetIntervals(input.Namespace, "wait-machine-running"),
4473
})
4574

4675
ListBareMetalHosts(ctx, managementClusterClient, client.InNamespace(input.Namespace))
4776
ListMetal3Machines(ctx, managementClusterClient, client.InNamespace(input.Namespace))
4877
ListMachines(ctx, managementClusterClient, client.InNamespace(input.Namespace))
4978
ListNodes(ctx, targetClusterClient)
5079

51-
// scale up MD to 3
52-
By("Scale the worker up to 3")
53-
ScaleMachineDeployment(ctx, managementClusterClient, input.ClusterName, input.Namespace, 3)
54-
By("Waiting for one BMH to become provisioning")
55-
WaitForNumBmhInState(ctx, bmov1alpha1.StateProvisioning, WaitForNumInput{
56-
Client: managementClusterClient,
57-
Options: []client.ListOption{client.InNamespace(input.Namespace)},
58-
Replicas: 2,
59-
Intervals: input.E2EConfig.GetIntervals(input.SpecName, "wait-machine-remediation"),
60-
})
61-
62-
By("Waiting for all BMHs to become provisioned")
63-
WaitForNumBmhInState(ctx, bmov1alpha1.StateProvisioned, WaitForNumInput{
64-
Client: managementClusterClient,
65-
Options: []client.ListOption{client.InNamespace(input.Namespace)},
66-
Replicas: 4,
67-
Intervals: input.E2EConfig.GetIntervals(input.SpecName, "wait-machine-remediation"),
68-
})
69-
70-
By("Waiting for all Machines to be Running")
71-
WaitForNumMachinesInState(ctx, clusterv1.MachinePhaseRunning, WaitForNumInput{
72-
Client: managementClusterClient,
73-
Options: []client.ListOption{client.InNamespace(input.Namespace)},
74-
Replicas: 4,
75-
Intervals: input.E2EConfig.GetIntervals(input.SpecName, "wait-machine-remediation"),
76-
})
77-
7880
By("Get the IPPools in the cluster")
7981
baremetalv4Pool, provisioningPool := GetIPPools(ctx, managementClusterClient, input.ClusterName, input.Namespace)
8082
Expect(baremetalv4Pool).To(HaveLen(1))
@@ -115,29 +117,25 @@ func IPReuse(ctx context.Context, inputGetter func() IPReuseInput) {
115117
Expect(machineDeployments).To(HaveLen(1), "Expected exactly 1 MachineDeployment")
116118
md := machineDeployments[0]
117119

118-
// Download node image
119-
Byf("Download image %s", kubernetesVersion)
120-
imageURL, imageChecksum := EnsureImage(kubernetesVersion)
121-
122120
By("Create new worker Metal3MachineTemplate with upgraded image to boot")
123121
m3MachineTemplateName := md.Spec.Template.Spec.InfrastructureRef.Name
124122
newM3MachineTemplateName := fmt.Sprintf("%s-new", m3MachineTemplateName)
125123
CreateNewM3MachineTemplate(ctx, input.Namespace, newM3MachineTemplateName, m3MachineTemplateName, managementClusterClient, imageURL, imageChecksum)
126124

127-
Byf("Update MachineDeployment maxUnavailable to number of workers and k8s version from %s to %s", fromK8sVersion, kubernetesVersion)
128-
helper, err := patch.NewHelper(md, managementClusterClient)
125+
Byf("Update MachineDeployment maxUnavailable to number of workers and k8s version from %s to %s", fromK8sVersion, toK8sVersion)
126+
helper, err = patch.NewHelper(md, managementClusterClient)
129127
Expect(err).NotTo(HaveOccurred())
130128
md.Spec.Template.Spec.InfrastructureRef.Name = newM3MachineTemplateName
131-
md.Spec.Template.Spec.Version = &kubernetesVersion
129+
md.Spec.Template.Spec.Version = &toK8sVersion
132130
md.Spec.Strategy.RollingUpdate.MaxSurge.IntVal = 0
133-
md.Spec.Strategy.RollingUpdate.MaxUnavailable.IntVal = 3
131+
md.Spec.Strategy.RollingUpdate.MaxUnavailable.IntVal = numberOfWorkers
134132
Expect(helper.Patch(ctx, md)).To(Succeed())
135133

136-
Byf("Wait until %d BMH(s) in deprovisioning state", 3)
134+
Byf("Wait until %d BMH(s) in deprovisioning state", numberOfWorkers)
137135
WaitForNumBmhInState(ctx, bmov1alpha1.StateDeprovisioning, WaitForNumInput{
138136
Client: managementClusterClient,
139137
Options: []client.ListOption{client.InNamespace(input.Namespace)},
140-
Replicas: 3,
138+
Replicas: int(numberOfWorkers),
141139
Intervals: input.E2EConfig.GetIntervals(input.SpecName, "wait-bmh-deprovisioning"),
142140
})
143141

@@ -146,11 +144,11 @@ func IPReuse(ctx context.Context, inputGetter func() IPReuseInput) {
146144
ListMachines(ctx, managementClusterClient, client.InNamespace(input.Namespace))
147145
ListNodes(ctx, targetClusterClient)
148146

149-
Byf("Wait until all %d machine(s) become(s) running", 4)
147+
Byf("Wait until all %d machine(s) become(s) running", numberOfAllBmh)
150148
WaitForNumMachinesInState(ctx, clusterv1.MachinePhaseRunning, WaitForNumInput{
151149
Client: managementClusterClient,
152150
Options: []client.ListOption{client.InNamespace(input.Namespace)},
153-
Replicas: 4,
151+
Replicas: int(numberOfAllBmh),
154152
Intervals: input.E2EConfig.GetIntervals(input.SpecName, "wait-machine-running"),
155153
})
156154

test/e2e/ip_reuse_test.go

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,8 +21,22 @@ var _ = Describe("When testing ip reuse [ip-reuse] [features]", Label("ip-reuse"
2121
clusterctlLogFolder = filepath.Join(os.TempDir(), "target_cluster_logs", bootstrapClusterProxy.GetName())
2222
})
2323
It("Should create a workload cluster then verify ip allocation reuse while upgrading k8s", func() {
24+
targetCluster, _ = CreateTargetCluster(ctx, func() CreateTargetClusterInput {
25+
return CreateTargetClusterInput{
26+
E2EConfig: e2eConfig,
27+
BootstrapClusterProxy: bootstrapClusterProxy,
28+
SpecName: specName,
29+
ClusterName: clusterName,
30+
K8sVersion: e2eConfig.GetVariable("FROM_K8S_VERSION"),
31+
KCPMachineCount: int64(numberOfControlplane),
32+
WorkerMachineCount: int64(numberOfWorkers),
33+
ClusterctlLogFolder: clusterctlLogFolder,
34+
ClusterctlConfigPath: clusterctlConfigPath,
35+
OSType: osType,
36+
Namespace: namespace,
37+
}
38+
})
2439
IPReuse(ctx, func() IPReuseInput {
25-
targetCluster, _ = createTargetCluster(e2eConfig.GetVariable("FROM_K8S_VERSION"))
2640
return IPReuseInput{
2741
E2EConfig: e2eConfig,
2842
BootstrapClusterProxy: bootstrapClusterProxy,

test/e2e/pivoting_based_feature_test.go

Lines changed: 21 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -8,22 +8,17 @@ import (
88

99
. "github.com/onsi/ginkgo/v2"
1010
. "github.com/onsi/gomega"
11-
corev1 "k8s.io/api/core/v1"
12-
"k8s.io/apimachinery/pkg/labels"
1311
"sigs.k8s.io/cluster-api/test/framework"
14-
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
1512
"sigs.k8s.io/controller-runtime/pkg/client"
1613
)
1714

1815
var (
19-
ctx = context.TODO()
20-
specName = "metal3"
21-
namespace = "metal3"
22-
clusterName = "test1"
23-
clusterctlLogFolder string
24-
targetCluster framework.ClusterProxy
25-
controlPlaneMachineCount int64
26-
workerMachineCount int64
16+
ctx = context.TODO()
17+
specName = "metal3"
18+
namespace = "metal3"
19+
clusterName = "test1"
20+
clusterctlLogFolder string
21+
targetCluster framework.ClusterProxy
2722
)
2823

2924
/*
@@ -91,7 +86,21 @@ var _ = Describe("Testing features in ephemeral or target cluster [pivoting] [fe
9186
})
9287

9388
It("Should get a management cluster then test cert rotation and node reuse", func() {
94-
targetCluster, _ = createTargetCluster(e2eConfig.GetVariable("FROM_K8S_VERSION"))
89+
targetCluster, _ = CreateTargetCluster(ctx, func() CreateTargetClusterInput {
90+
return CreateTargetClusterInput{
91+
E2EConfig: e2eConfig,
92+
BootstrapClusterProxy: bootstrapClusterProxy,
93+
SpecName: specName,
94+
ClusterName: clusterName,
95+
K8sVersion: e2eConfig.GetVariable("FROM_K8S_VERSION"),
96+
KCPMachineCount: int64(numberOfControlplane),
97+
WorkerMachineCount: int64(numberOfWorkers),
98+
ClusterctlLogFolder: clusterctlLogFolder,
99+
ClusterctlConfigPath: clusterctlConfigPath,
100+
OSType: osType,
101+
Namespace: namespace,
102+
}
103+
})
95104
managementCluster := bootstrapClusterProxy
96105
// If not running ephemeral test, use the target cluster for management
97106
if !ephemeralTest {
@@ -169,38 +178,3 @@ var _ = Describe("Testing features in ephemeral or target cluster [pivoting] [fe
169178
})
170179

171180
})
172-
173-
func createTargetCluster(k8sVersion string) (framework.ClusterProxy, *clusterctl.ApplyClusterTemplateAndWaitResult) {
174-
By("Creating a high available cluster")
175-
imageURL, imageChecksum := EnsureImage(k8sVersion)
176-
os.Setenv("IMAGE_RAW_CHECKSUM", imageChecksum)
177-
os.Setenv("IMAGE_RAW_URL", imageURL)
178-
controlPlaneMachineCount = int64(numberOfControlplane)
179-
workerMachineCount = int64(numberOfWorkers)
180-
result := clusterctl.ApplyClusterTemplateAndWaitResult{}
181-
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
182-
ClusterProxy: bootstrapClusterProxy,
183-
ConfigCluster: clusterctl.ConfigClusterInput{
184-
LogFolder: clusterctlLogFolder,
185-
ClusterctlConfigPath: clusterctlConfigPath,
186-
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
187-
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
188-
Flavor: osType,
189-
Namespace: namespace,
190-
ClusterName: clusterName,
191-
KubernetesVersion: k8sVersion,
192-
ControlPlaneMachineCount: &controlPlaneMachineCount,
193-
WorkerMachineCount: &workerMachineCount,
194-
},
195-
WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),
196-
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
197-
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
198-
}, &result)
199-
targetCluster := bootstrapClusterProxy.GetWorkloadCluster(ctx, namespace, clusterName)
200-
framework.WaitForPodListCondition(ctx, framework.WaitForPodListConditionInput{
201-
Lister: targetCluster.GetClient(),
202-
ListOptions: &client.ListOptions{LabelSelector: labels.Everything(), Namespace: "kube-system"},
203-
Condition: framework.PhasePodCondition(corev1.PodRunning),
204-
}, e2eConfig.GetIntervals(specName, "wait-all-pod-to-be-running-on-target-cluster")...)
205-
return targetCluster, &result
206-
}

test/e2e/remediation_based_feature_test.go

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -72,8 +72,21 @@ var _ = Describe("Testing nodes remediation [remediation] [features]", Label("re
7272

7373
It("Should create a cluster and run remediation based tests", func() {
7474
By("Creating target cluster")
75-
targetCluster, _ = createTargetCluster(e2eConfig.GetVariable("KUBERNETES_VERSION"))
76-
75+
targetCluster, _ = CreateTargetCluster(ctx, func() CreateTargetClusterInput {
76+
return CreateTargetClusterInput{
77+
E2EConfig: e2eConfig,
78+
BootstrapClusterProxy: bootstrapClusterProxy,
79+
SpecName: specName,
80+
ClusterName: clusterName,
81+
K8sVersion: e2eConfig.GetVariable("FROM_K8S_VERSION"),
82+
KCPMachineCount: int64(numberOfControlplane),
83+
WorkerMachineCount: int64(numberOfWorkers),
84+
ClusterctlLogFolder: clusterctlLogFolder,
85+
ClusterctlConfigPath: clusterctlConfigPath,
86+
OSType: osType,
87+
Namespace: namespace,
88+
}
89+
})
7790
// Run Metal3Remediation test first, doesn't work after remediation...
7891
By("Running node remediation tests")
7992
nodeRemediation(ctx, func() NodeRemediation {

0 commit comments

Comments
 (0)