Skip to content

Commit fd1aaee

Browse files
committed
E2E tests
1 parent e0d6a79 commit fd1aaee

File tree

6 files changed

+208
-30
lines changed

6 files changed

+208
-30
lines changed

Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -374,6 +374,7 @@ generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the mai
374374
cp "$(RELEASE_DIR)/main/cluster-template-ignition.yaml" "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/base/cluster-template-ignition.yaml"
375375
"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/base" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template.yaml"
376376
"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/hw-upgrade" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-hw-upgrade.yaml"
377+
"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/multi-disk" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-multi-disk.yaml"
377378
"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/storage-policy" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-storage-policy.yaml"
378379
"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/conformance" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-conformance.yaml"
379380
# Since CAPI uses different flavor names for KCP and MD remediation using MHC

pkg/services/govmomi/vcenter/clone_test.go

Lines changed: 75 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,72 @@ func TestGetDiskSpec(t *testing.T) {
113113
additionalCloneDiskSizes: []int32{defaultSizeGiB},
114114
err: "Error getting disk config spec for additional disk: can't resize template disk down, initial capacity is larger: 23068672KiB > 20971520KiB",
115115
},
116+
}
117+
118+
for _, test := range testCases {
119+
tc := test
120+
t.Run(tc.name, func(t *testing.T) {
121+
cloneSpec := infrav1.VirtualMachineCloneSpec{
122+
DiskGiB: tc.cloneDiskSize,
123+
AdditionalDisksGiB: tc.additionalCloneDiskSizes,
124+
DataDisks: tc.dataDisks,
125+
}
126+
vsphereVM := &infrav1.VSphereVM{
127+
Spec: infrav1.VSphereVMSpec{
128+
VirtualMachineCloneSpec: cloneSpec,
129+
},
130+
}
131+
vmContext := &capvcontext.VMContext{VSphereVM: vsphereVM}
132+
deviceResults, err := getDiskSpec(vmContext, tc.disks)
133+
if (tc.err != "" && err == nil) || (tc.err == "" && err != nil) || (err != nil && tc.err != err.Error()) {
134+
t.Fatalf("Expected to get '%v' error from getDiskSpec, got: '%v'", tc.err, err)
135+
}
136+
if deviceFound := len(deviceResults) != 0; tc.expectDevice != deviceFound {
137+
t.Fatalf("Expected to get a device: %v, but got: '%#v'", tc.expectDevice, deviceResults)
138+
}
139+
if tc.expectDevice {
140+
primaryDevice := deviceResults[0]
141+
validateDiskSpec(t, primaryDevice, tc.cloneDiskSize)
142+
if len(tc.additionalCloneDiskSizes) != 0 {
143+
secondaryDevice := deviceResults[1]
144+
validateDiskSpec(t, secondaryDevice, tc.additionalCloneDiskSizes[0])
145+
}
146+
147+
// Check number of disks present
148+
if len(deviceResults) != tc.expectedDiskCount {
149+
t.Fatalf("Expected device count to be %v, but found %v", tc.expectedDiskCount, len(deviceResults))
150+
}
151+
}
152+
})
153+
}
154+
}
155+
156+
func TestCreateDiskSpec(t *testing.T) {
157+
model, session, server := initSimulator(t)
158+
t.Cleanup(model.Remove)
159+
t.Cleanup(server.Close)
160+
vm := simulator.Map.Any("VirtualMachine").(*simulator.VirtualMachine)
161+
machine := object.NewVirtualMachine(session.Client.Client, vm.Reference())
162+
163+
devices, err := machine.Device(ctx.TODO())
164+
if err != nil {
165+
t.Fatalf("Failed to obtain vm devices: %v", err)
166+
}
167+
defaultDisks := devices.SelectByType((*types.VirtualDisk)(nil))
168+
if len(defaultDisks) < 1 {
169+
t.Fatal("Unable to find attached disk for data disk testing")
170+
}
171+
172+
testCases := []struct {
173+
expectDevice bool
174+
cloneDiskSize int32
175+
additionalCloneDiskSizes []int32
176+
name string
177+
disks object.VirtualDeviceList
178+
dataDisks []infrav1.VSphereDisk
179+
expectedDiskCount int
180+
err string
181+
}{
116182
{
117183
name: "Successfully add data disk",
118184
disks: devices,
@@ -121,8 +187,7 @@ func TestGetDiskSpec(t *testing.T) {
121187
SizeGiB: 10,
122188
},
123189
},
124-
expectDevice: true,
125-
expectedDiskCount: 2,
190+
expectDevice: true,
126191
},
127192
{
128193
name: "Successfully add multiple data disks",
@@ -135,8 +200,7 @@ func TestGetDiskSpec(t *testing.T) {
135200
SizeGiB: 20,
136201
},
137202
},
138-
expectDevice: true,
139-
expectedDiskCount: 3,
203+
expectDevice: true,
140204
},
141205
{
142206
name: "Successfully add multiple data disks when template has multiple disks",
@@ -149,42 +213,23 @@ func TestGetDiskSpec(t *testing.T) {
149213
SizeGiB: 20,
150214
},
151215
},
152-
expectDevice: true,
153-
expectedDiskCount: 4,
216+
expectDevice: true,
154217
},
155218
}
156219

157220
for _, test := range testCases {
158221
tc := test
159222
t.Run(tc.name, func(t *testing.T) {
160-
cloneSpec := infrav1.VirtualMachineCloneSpec{
161-
DiskGiB: tc.cloneDiskSize,
162-
AdditionalDisksGiB: tc.additionalCloneDiskSizes,
163-
DataDisks: tc.dataDisks,
164-
}
165-
vsphereVM := &infrav1.VSphereVM{
166-
Spec: infrav1.VSphereVMSpec{
167-
VirtualMachineCloneSpec: cloneSpec,
168-
},
169-
}
170-
vmContext := &capvcontext.VMContext{VSphereVM: vsphereVM}
171-
deviceResults, err := getDiskSpec(ctx.TODO(), vmContext, tc.disks)
223+
deviceResults, err := createDataDisks(ctx.TODO(), tc.dataDisks, tc.disks)
172224
if (tc.err != "" && err == nil) || (tc.err == "" && err != nil) || (err != nil && tc.err != err.Error()) {
173-
t.Fatalf("Expected to get '%v' error from getDiskSpec, got: '%v'", tc.err, err)
225+
t.Fatalf("Expected to get '%v' error from createDataDisks, got: '%v'", tc.err, err)
174226
}
175227
if deviceFound := len(deviceResults) != 0; tc.expectDevice != deviceFound {
176228
t.Fatalf("Expected to get a device: %v, but got: '%#v'", tc.expectDevice, deviceResults)
177229
}
178230
if tc.expectDevice {
179-
primaryDevice := deviceResults[0]
180-
validateDiskSpec(t, primaryDevice, tc.cloneDiskSize)
181-
if len(tc.additionalCloneDiskSizes) != 0 {
182-
secondaryDevice := deviceResults[1]
183-
validateDiskSpec(t, secondaryDevice, tc.additionalCloneDiskSizes[0])
184-
}
185-
186231
// Check number of disks present
187-
if len(deviceResults) != tc.expectedDiskCount {
232+
if len(deviceResults) != len(tc.dataDisks) {
188233
t.Fatalf("Expected device count to be %v, but found %v", tc.expectedDiskCount, len(deviceResults))
189234
}
190235
}
@@ -248,21 +293,21 @@ func TestAssignUnitNumber(t *testing.T) {
248293
controller: controller,
249294
dataDisks: createDataDiskDefinitions(1),
250295
startingOffset: 50,
251-
err: "unable to assign unit number due to offset 50 exceeding max allowed of 30",
296+
err: "50 exceeds maximum number of units 30",
252297
},
253298
{
254299
name: "Add data disk with no ova disk",
255300
devices: nil,
256301
controller: nil,
257302
dataDisks: createDataDiskDefinitions(1),
258-
err: "unable to assign unit number due to controller parameter being nil",
303+
err: "controller parameter cannot be nil",
259304
},
260305
{
261306
name: "Add too many data disks with 1 ova disk",
262307
devices: deviceList,
263308
controller: controller,
264309
dataDisks: createDataDiskDefinitions(40),
265-
err: "unable to find available unit number",
310+
err: "all unit numbers are already in-use",
266311
},
267312
}
268313

test/e2e/config/vsphere.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -173,6 +173,7 @@ providers:
173173
- sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-ipam.yaml"
174174
- sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-kcp-remediation.yaml"
175175
- sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-md-remediation.yaml"
176+
- sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-multi-disk.yaml"
176177
- sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-node-drain.yaml"
177178
- sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-ownerrefs-finalizers.yaml"
178179
- sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-pci.yaml"
Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
# Strategic-merge patch for the multi-disk e2e flavor: adds DataDisks to the
# machine templates generated from ../base.
#
# Control-plane template (name `${CLUSTER_NAME}` by CAPV convention — confirm
# against the base template): two extra disks, one for etcd and one for
# container images, giving 3 disks total including the OS disk.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: VSphereMachineTemplate
metadata:
  name: '${CLUSTER_NAME}'
  namespace: '${NAMESPACE}'
spec:
  template:
    spec:
      dataDisks:
        - name: "etcd"
          sizeGiB: 10
        - name: "container-images"
          sizeGiB: 20
---
# Worker template: one extra disk for container images (2 disks total
# including the OS disk).
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: VSphereMachineTemplate
metadata:
  name: '${CLUSTER_NAME}-worker'
  namespace: '${NAMESPACE}'
spec:
  template:
    spec:
      dataDisks:
        - name: "container-images"
          sizeGiB: 20
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
# Kustomization for the multi-disk e2e flavor: layers the data-disk patch
# on top of the shared base cluster template.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ../base
# NOTE: `patchesStrategicMerge` is deprecated since kustomize v5; `patches`
# (available since v4) is the supported replacement and handles
# strategic-merge patch files identically.
patches:
  - path: data-disks-patch.yaml

test/e2e/multi-disk_test.go

Lines changed: 100 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,100 @@
1+
/*
2+
Copyright 2022 The Kubernetes Authors.
3+
4+
Licensed under the Apache License, Version 2.0 (the "License");
5+
you may not use this file except in compliance with the License.
6+
You may obtain a copy of the License at
7+
8+
http://www.apache.org/licenses/LICENSE-2.0
9+
10+
Unless required by applicable law or agreed to in writing, software
11+
distributed under the License is distributed on an "AS IS" BASIS,
12+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
See the License for the specific language governing permissions and
14+
limitations under the License.
15+
*/
16+
17+
package e2e
18+
19+
import (
20+
"context"
21+
"fmt"
22+
23+
. "github.com/onsi/ginkgo/v2"
24+
. "github.com/onsi/gomega"
25+
"github.com/vmware/govmomi/vim25/types"
26+
"k8s.io/utils/ptr"
27+
capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
28+
"sigs.k8s.io/cluster-api/test/framework"
29+
. "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions"
30+
)
31+
32+
// DiskSpecInput carries everything verifyDisks needs to locate and inspect
// the cluster's VMs: the vSphere API clients (embedded InfraClients), the
// shared e2e suite configuration, and the identity of the cluster under test.
type DiskSpecInput struct {
	InfraClients
	// Global holds the suite-wide e2e inputs (cluster proxy, clusterctl
	// config, e2e config, artifact folder).
	Global GlobalInput
	// SpecName is the name of the running spec ("multi-disk").
	SpecName string
	// Namespace is the workload cluster's namespace on the bootstrap cluster.
	Namespace string
	// ClusterName is the name of the workload cluster whose VMs are checked.
	ClusterName string
}
39+
40+
// This spec runs the upstream CAPI QuickStart suite against the "multi-disk"
// flavor in govmomi mode (1 control-plane + 1 worker machine) and, once all
// machines are provisioned, verifies that each VSphereVM actually has its
// additional data disks attached in vSphere.
var _ = Describe("Ensure govmomi mode is able to add additional disks to VMs", func() {
	const specName = "multi-disk"
	Setup(specName, func(testSpecificSettingsGetter func() testSettings) {
		capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput {
			return capi_e2e.QuickStartSpecInput{
				E2EConfig:             e2eConfig,
				ClusterctlConfigPath:  testSpecificSettingsGetter().ClusterctlConfigPath,
				BootstrapClusterProxy: bootstrapClusterProxy,
				ArtifactFolder:        artifactFolder,
				SkipCleanup:           skipCleanup,
				// The "multi-disk" flavor renders the cluster template that
				// defines dataDisks on both machine templates.
				Flavor:               ptr.To(testSpecificSettingsGetter().FlavorForMode("multi-disk")),
				PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc,
				// Runs after CAPI reports all machines provisioned; this is
				// where the VMs' disk layout is checked against the spec.
				PostMachinesProvisioned: func(_ framework.ClusterProxy, namespace, clusterName string) {
					dsi := DiskSpecInput{
						SpecName:    specName,
						Namespace:   namespace,
						ClusterName: clusterName,
						InfraClients: InfraClients{
							Client:     vsphereClient,
							RestClient: restClient,
							Finder:     vsphereFinder,
						},
						Global: GlobalInput{
							BootstrapClusterProxy: bootstrapClusterProxy,
							ClusterctlConfigPath:  testSpecificSettingsGetter().ClusterctlConfigPath,
							E2EConfig:             e2eConfig,
							ArtifactFolder:        artifactFolder,
						},
					}
					verifyDisks(ctx, dsi)
				},
				ControlPlaneMachineCount: ptr.To[int64](1),
				WorkerMachineCount:       ptr.To[int64](1),
			}
		})
	})
})
77+
78+
// verifyDisks checks, for every VSphereVM belonging to the cluster named in
// input, that the number of virtual disks attached to the VM in vSphere
// equals the number of DataDisks configured in its spec plus one (the OS
// disk cloned from the template). Fails the spec via Gomega on any mismatch.
func verifyDisks(ctx context.Context, input DiskSpecInput) {
	Byf("Fetching the VSphereVM objects for the cluster %s", input.ClusterName)
	vms := getVSphereVMsForCluster(input.ClusterName, input.Namespace)

	By("Verifying the disks attached to the VMs")
	for _, vm := range vms.Items {
		// The VSphereVM spec should have the data disks configured. We add 1
		// to the count since the OS image disk must be included in the
		// comparison against what is actually attached.
		Byf("VM %s Spec has %d DataDisks defined", vm.Name, len(vm.Spec.DataDisks))
		diskCount := 1 + len(vm.Spec.DataDisks)
		// Guard: this spec only makes sense when data disks were rendered
		// into the template, so a bare OS-disk-only VM is a test-setup bug.
		Expect(diskCount).ToNot(Equal(1), "Total disk count should be larger than 1 for this test")

		// Look the VM up directly in vSphere (not via the CAPI objects) so
		// the check reflects what was actually cloned.
		vmObj, err := input.Finder.VirtualMachine(ctx, vm.Name)
		Expect(err).NotTo(HaveOccurred())

		devices, err := vmObj.Device(ctx)
		Expect(err).NotTo(HaveOccurred())

		// With the multi-disk template, control-plane VMs should end up with
		// 3 disks and worker VMs with 2 (per the flavor's patch — confirm
		// against the template if counts change).
		disks := devices.SelectByType((*types.VirtualDisk)(nil))
		Expect(disks).To(HaveLen(diskCount), fmt.Sprintf("Disk count of VM should be %d", diskCount))
	}
}

0 commit comments

Comments
 (0)