
Commit e95ebe5

Adopt k8s conformance test from CAPI
Signed-off-by: erjavaskivuori <[email protected]>
1 parent 0d5dc96 commit e95ebe5

7 files changed (+131, −8 lines)

docs/e2e-test.md

Lines changed: 14 additions & 4 deletions
```diff
@@ -54,6 +54,7 @@ Below are the tests that you can use with `GINKGO_FOCUS` and `GINKGO_SKIP`
 - remediation
 - pivoting
 - k8s-upgrade
+- k8s-conformance
 - clusterctl-upgrade
 - scalability
 - integration
@@ -81,12 +82,13 @@ sudo rm -rf /opt/metal3-dev-env/
 
 ## Included tests
 
-The e2e tests currently include three different sets:
+The e2e tests currently include five different sets:
 
 1. Pivoting based feature tests
-1. Remediation based feature tests
-1. clusterctl upgrade tests
-1. K8s upgrade tests
+2. Remediation based feature tests
+3. clusterctl upgrade tests
+4. K8s upgrade tests
+5. K8s conformance tests
 
 ### Pivoting based feature tests
 
@@ -164,6 +166,14 @@ Release 1.8 branch k8s-upgrade test:
 When Kubernetes 1.34 is released, k8s-upgrade `v1.33` => `v1.34` will be
 supported in v1.10.x (but not in v1.9.x)
 
+### K8s conformance tests
+
+The conformance tests are a subset of Kubernetes' E2E test set. The standard set
+of conformance tests includes those defined by the [Conformance] tag in the
+[kubernetes e2e suite](https://github.com/kubernetes/kubernetes/blob/master/test/conformance/testdata/conformance.yaml).
+Refer to [Conformance Tests per Release](https://github.com/cncf/k8s-conformance/blob/master/docs/README.md)
+for more information on which tests are required for each Kubernetes release.
+
 ## Guidelines to follow when adding new E2E tests
 
 - Tests should be defined in a new file and separate test spec, unless the new
```
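The focus keywords documented above map directly onto Ginkgo spec descriptions and labels. As a rough illustration (mirroring the spec this commit adds in `test/e2e/k8s_conformance_test.go` below), a focusable spec is declared like this, which is why `GINKGO_FOCUS="k8s-conformance"` selects exactly this set:

```go
package e2e

import (
	. "github.com/onsi/ginkgo/v2"
)

// The "k8s-conformance" keyword appears both in the Describe text and as a
// Ginkgo label, so either --focus or --label-filter can select the spec.
// The It body here is a placeholder for illustration only; the real body
// lives in test/e2e/k8s_conformance_test.go.
var _ = Describe("When testing K8S conformance [k8s-conformance]", Label("k8s-conformance"), func() {
	It("runs the upstream conformance suite", func() {
		// placeholder
	})
})
```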

scripts/environment.sh

Lines changed: 7 additions & 0 deletions
```diff
@@ -85,6 +85,13 @@ if [[ ${GINKGO_FOCUS:-} == "features" && ${GINKGO_SKIP:-} == "pivoting remediati
     export WORKER_MACHINE_COUNT=${WORKER_MACHINE_COUNT:-"2"}
 fi
 
+# K8s conformance test environment vars and config
+if [[ ${GINKGO_FOCUS:-} == "k8s-conformance" ]]; then
+    export NUM_NODES="6"
+    export CONTROL_PLANE_MACHINE_COUNT=${CONTROL_PLANE_MACHINE_COUNT:-"1"}
+    export WORKER_MACHINE_COUNT=${WORKER_MACHINE_COUNT:-"5"}
+fi
+
 # Exported to the cluster templates
 # Generate user ssh key
 if [ ! -f "${HOME}/.ssh/id_rsa" ]; then
```

test/e2e/config/e2e_conf.yaml

Lines changed: 1 addition & 0 deletions
```diff
@@ -177,6 +177,7 @@ providers:
 
 variables:
   CNI: "/tmp/calico.yaml"
+  KUBETEST_CONFIGURATION: "./data/kubetest/conformance.yaml"
   KUBERNETES_VERSION: "v1.33.0"
   # KUBERNETES_PATCH_FROM_VERSION and KUBERNETES_PATCH_TO_VERSION are used to
   # test upgrade scenarios where we only want to upgrade the patch version of
```
test/e2e/data/kubetest/conformance.yaml

Lines changed: 15 additions & 0 deletions

```diff
@@ -0,0 +1,15 @@
+ginkgo.focus: \[Conformance\]
+ginkgo.skip: \[Serial\]
+disable-log-dump: true
+# The ginkgo.progress flag is deprecated, but it's still used in
+# k8s versions <= v1.26; we have to keep it if we want to
+# test these versions.
+ginkgo.progress: true
+ginkgo.slow-spec-threshold: 120s
+ginkgo.flake-attempts: 3
+ginkgo.trace: true
+ginkgo.v: true
+# Use 5m instead of the default 10m to fail faster
+# if kube-system Pods are not coming up.
+system-pods-startup-timeout: 5m
+ginkgo.noColor: true
```
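For context, `K8SConformanceSpec` hands the `KUBETEST_CONFIGURATION` file to CAPI's kubetest wrapper, which runs the upstream e2e suite against the workload cluster. A minimal sketch of that call, assuming the `RunInput` field names from CAPI's `test/framework/kubetest` package (verify against the CAPI version in use):

```go
package e2e

import (
	"context"

	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/kubetest"
)

// runConformance sketches how the conformance.yaml above reaches kubetest.
// Field names follow CAPI's kubetest.RunInput as we understand it; treat
// them as an assumption rather than a pinned API.
func runConformance(ctx context.Context, workloadProxy framework.ClusterProxy) error {
	return kubetest.Run(ctx, kubetest.RunInput{
		ClusterProxy:       workloadProxy,
		NumberOfNodes:      5, // assumption: mirrors WORKER_MACHINE_COUNT from environment.sh
		ConfigFilePath:     "./data/kubetest/conformance.yaml", // the KUBETEST_CONFIGURATION value
		ArtifactsDirectory: "_artifacts",                       // hypothetical output location
	})
}
```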

test/e2e/e2e_suite_test.go

Lines changed: 24 additions & 0 deletions
```diff
@@ -25,6 +25,7 @@ import (
 	"sigs.k8s.io/cluster-api/test/framework/bootstrap"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 const (
@@ -300,3 +301,26 @@ func updateCalico(config *clusterctl.E2EConfig, calicoYaml, calicoInterface stri
 	err = os.WriteFile(calicoYaml, yamlOut, 0600)
 	Expect(err).ToNot(HaveOccurred(), "Cannot print out the update to the file")
 }
+
+// postNamespaceCreated is a hook function that can be called after creating
+// a namespace; it creates the needed BMHs in the namespace hosting the cluster.
+func postNamespaceCreated(clusterProxy framework.ClusterProxy, clusterNamespace string) {
+	// Apply secrets and BMHs for all nodes in the cluster to host the target cluster.
+	nodes := int(*e2eConfig.MustGetInt32PtrVariable("NUM_NODES"))
+	for i := range nodes {
+		resource, err := os.ReadFile(filepath.Join(workDir, fmt.Sprintf("bmhs/node_%d.yaml", i)))
+		Expect(err).ShouldNot(HaveOccurred())
+		Expect(CreateOrUpdateWithNamespace(ctx, clusterProxy, resource, clusterNamespace)).ShouldNot(HaveOccurred())
+	}
+	clusterClient := clusterProxy.GetClient()
+	ListBareMetalHosts(ctx, clusterClient, client.InNamespace(clusterNamespace))
+	WaitForNumBmhInState(ctx, bmov1alpha1.StateAvailable, WaitForNumInput{
+		Client:    clusterClient,
+		Options:   []client.ListOption{client.InNamespace(clusterNamespace)},
+		Replicas:  nodes,
+		Intervals: e2eConfig.GetIntervals(specName, "wait-bmh-available"),
+	})
+	ListBareMetalHosts(ctx, clusterClient, client.InNamespace(clusterNamespace))
+
+	ListBareMetalHosts(ctx, bootstrapClusterProxy.GetClient(), client.InNamespace(clusterNamespace))
+}
```
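`CreateOrUpdateWithNamespace` is a repo helper; for readers unfamiliar with it, here is a minimal sketch of what such a helper plausibly does (this is not the repo's implementation): decode the possibly multi-document manifest, force each object into the target namespace, and create it, falling back to an update when the object already exists.

```go
package e2e

import (
	"bytes"
	"context"
	"errors"
	"io"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createOrUpdateInNamespace is an illustrative stand-in for the repo's
// CreateOrUpdateWithNamespace helper.
func createOrUpdateInNamespace(ctx context.Context, proxy framework.ClusterProxy, manifest []byte, namespace string) error {
	c := proxy.GetClient()
	// NewYAMLOrJSONDecoder handles multi-document YAML manifests.
	decoder := utilyaml.NewYAMLOrJSONDecoder(bytes.NewReader(manifest), 4096)
	for {
		obj := &unstructured.Unstructured{}
		if err := decoder.Decode(obj); err != nil {
			if errors.Is(err, io.EOF) {
				return nil // all documents processed
			}
			return err
		}
		if len(obj.Object) == 0 {
			continue // tolerate empty YAML documents
		}
		obj.SetNamespace(namespace)
		if err := c.Create(ctx, obj); err != nil {
			if !apierrors.IsAlreadyExists(err) {
				return err
			}
			// On conflict, carry over the live resourceVersion and update.
			existing := obj.DeepCopy()
			if err := c.Get(ctx, client.ObjectKeyFromObject(obj), existing); err != nil {
				return err
			}
			obj.SetResourceVersion(existing.GetResourceVersion())
			if err := c.Update(ctx, obj); err != nil {
				return err
			}
		}
	}
}
```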

test/e2e/k8s_conformance_test.go

Lines changed: 66 additions & 0 deletions
```diff
@@ -0,0 +1,66 @@
+package e2e
+
+import (
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var _ = Describe("When testing K8S conformance [k8s-conformance]", Label("k8s-conformance"), func() {
+	BeforeEach(func() {
+		osType := strings.ToLower(os.Getenv("OS"))
+		Expect(osType).ToNot(Equal(""))
+		validateGlobals(specName)
+		// We need to override the clusterctl apply log folder to avoid getting our credentials exposed.
+		clusterctlLogFolder = filepath.Join(os.TempDir(), "target_cluster_logs", bootstrapClusterProxy.GetName())
+
+		Logf("Removing existing BMHs from source")
+		bmhData, err := os.ReadFile(filepath.Join(workDir, bmhCrsFile))
+		Expect(err).ToNot(HaveOccurred(), "BMH CRs file not found")
+		kubeConfigPath := bootstrapClusterProxy.GetKubeconfigPath()
+		err = KubectlDelete(ctx, kubeConfigPath, bmhData, "-n", "metal3")
+		Expect(err).ToNot(HaveOccurred(), "Failed to delete existing BMHs")
+		Logf("BMHs are removed")
+	})
+	// Note: This installs a cluster based on KUBERNETES_VERSION and runs conformance tests.
+	capi_e2e.K8SConformanceSpec(ctx, func() capi_e2e.K8SConformanceSpecInput {
+		return capi_e2e.K8SConformanceSpecInput{
+			E2EConfig:             e2eConfig,
+			ClusterctlConfigPath:  clusterctlConfigPath,
+			BootstrapClusterProxy: bootstrapClusterProxy,
+			ArtifactFolder:        artifactFolder,
+			SkipCleanup:           skipCleanup,
+			PostNamespaceCreated:  postNamespaceCreated,
+			Flavor:                osType,
+		}
+	})
+	AfterEach(func() {
+		ListBareMetalHosts(ctx, bootstrapClusterProxy.GetClient(), client.InNamespace(namespace))
+		ListMetal3Machines(ctx, bootstrapClusterProxy.GetClient(), client.InNamespace(namespace))
+		ListMachines(ctx, bootstrapClusterProxy.GetClient(), client.InNamespace(namespace))
+		// Recreate the BMHs that were used in the capi namespace in metal3.
+		//#nosec G204 -- We need to pass in the file name here.
+		cmd := exec.Command("bash", "-c", "kubectl apply -f bmhosts_crs.yaml -n metal3")
+		cmd.Dir = workDir
+		output, err := cmd.CombinedOutput()
+		Logf("Applying bmh to metal3 namespace : \n %v", string(output))
+		Expect(err).ToNot(HaveOccurred())
+		// Wait for all BMHs to become available.
+		bootstrapClient := bootstrapClusterProxy.GetClient()
+		ListBareMetalHosts(ctx, bootstrapClient, client.InNamespace(namespace))
+		WaitForNumBmhInState(ctx, bmov1alpha1.StateAvailable, WaitForNumInput{
+			Client:    bootstrapClient,
+			Options:   []client.ListOption{client.InNamespace(namespace)},
+			Replicas:  4,
+			Intervals: e2eConfig.GetIntervals(specName, "wait-bmh-available"),
+		})
+		ListBareMetalHosts(ctx, bootstrapClient, client.InNamespace(namespace))
+	})
+})
```
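`WaitForNumBmhInState` drives the "wait for all BMHs to become available" step above. As an illustration of the polling it implies (again, not the repo's actual helper), a Gomega `Eventually` loop over the BareMetalHost list would look roughly like this:

```go
package e2e

import (
	"context"

	bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
	. "github.com/onsi/gomega"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitForBmhCountInState is an illustrative stand-in for the repo's
// WaitForNumBmhInState helper: poll the BareMetalHost list until `want`
// hosts in namespace `ns` report the desired provisioning state.
func waitForBmhCountInState(ctx context.Context, c client.Client, ns string, state bmov1alpha1.ProvisioningState, want int, intervals ...interface{}) {
	Eventually(func(g Gomega) {
		hosts := bmov1alpha1.BareMetalHostList{}
		g.Expect(c.List(ctx, &hosts, client.InNamespace(ns))).To(Succeed())
		matching := 0
		for _, host := range hosts.Items {
			if host.Status.Provisioning.State == state {
				matching++
			}
		}
		g.Expect(matching).To(Equal(want))
	}, intervals...).Should(Succeed())
}
```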

test/e2e/upgrade_clusterctl_test.go

Lines changed: 4 additions & 4 deletions
```diff
@@ -82,7 +82,7 @@ var _ = Describe("When testing cluster upgrade from releases (v1.10=>current) [c
 			os.Setenv("CAPM3_VERSION", contract)
 			os.Setenv("KUBECONFIG_BOOTSTRAP", bootstrapClusterProxy.GetKubeconfigPath())
 		},
-		PostNamespaceCreated: postNamespaceCreated,
+		PostNamespaceCreated: postClusterctlUpgradeNamespaceCreated,
 		PreUpgrade: func(clusterProxy framework.ClusterProxy) {
 			preUpgrade(clusterProxy, bmoToRelease, ironicToRelease)
 		},
@@ -137,7 +137,7 @@ var _ = Describe("When testing cluster upgrade from releases (v1.9=>current) [cl
 			os.Setenv("CAPM3_VERSION", contract)
 			os.Setenv("KUBECONFIG_BOOTSTRAP", bootstrapClusterProxy.GetKubeconfigPath())
 		},
-		PostNamespaceCreated: postNamespaceCreated,
+		PostNamespaceCreated: postClusterctlUpgradeNamespaceCreated,
 		PreUpgrade: func(clusterProxy framework.ClusterProxy) {
 			preUpgrade(clusterProxy, bmoToRelease, ironicToRelease)
 		},
@@ -153,9 +153,9 @@ var _ = Describe("When testing cluster upgrade from releases (v1.9=>current) [cl
 	})
 })
 
-// postNamespaceCreated is a hook function that should be called from ClusterctlUpgradeSpec after creating
+// postClusterctlUpgradeNamespaceCreated is a hook function that should be called from ClusterctlUpgradeSpec after creating
 // the namespace; it creates the needed BMHs in the namespace hosting the cluster.
-func postNamespaceCreated(clusterProxy framework.ClusterProxy, clusterNamespace string) {
+func postClusterctlUpgradeNamespaceCreated(clusterProxy framework.ClusterProxy, clusterNamespace string) {
 	// Check from which cluster creation this call is coming:
 	// if isBootstrapProxy==true we are creating the management cluster, else we are creating the workload cluster.
 	isBootstrapProxy := !strings.HasPrefix(clusterProxy.GetName(), "clusterctl-upgrade")
```
