
Commit 626ec21

Support for Failure Domain for VSphereMachine
- Introduces support for failure domain during VSphereVM generation.

Signed-off-by: Sagar Muchhal <[email protected]>
1 parent 2a6b6a0 commit 626ec21
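
Editor's sketch, for orientation (not part of the commit): the new code path matches Machine.Spec.FailureDomain against VSphereDeploymentZone.Spec.FailureDomain and reads placement details from the VSphereFailureDomain of the same name. A minimal pair of such objects might look as follows; all names ("zone-a", "fd-a", "dc-east", ...) are illustrative, and the struct shapes are assumptions inferred from the fields this diff reads off the v1alpha3 API.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1alpha3"
)

func main() {
	// Deployment zone: binds the failure domain name to a vCenter server,
	// plus optional folder/resource-pool placement constraints.
	zone := infrav1.VSphereDeploymentZone{
		ObjectMeta: metav1.ObjectMeta{Name: "zone-a"},
		Spec: infrav1.VSphereDeploymentZoneSpec{
			Server:        "vcenter.example.com", // -> vm.Spec.Server
			FailureDomain: "fd-a",                // matched against Machine.Spec.FailureDomain
			PlacementConstraint: infrav1.PlacementConstraint{
				Folder:       "zone-a-folder", // -> vm.Spec.Folder (if non-empty)
				ResourcePool: "zone-a-rp",     // -> vm.Spec.ResourcePool (if non-empty)
			},
		},
	}

	// Failure domain: supplies the datacenter, datastore, and networks.
	fd := infrav1.VSphereFailureDomain{
		ObjectMeta: metav1.ObjectMeta{Name: "fd-a"},
		Spec: infrav1.VSphereFailureDomainSpec{
			Topology: infrav1.Topology{
				Datacenter: "dc-east",               // -> vm.Spec.Datacenter
				Datastore:  "zone-a-datastore",      // -> vm.Spec.Datastore (if non-empty)
				Networks:   []string{"zone-a-net"},  // -> device NetworkNames
			},
		},
	}

	fmt.Println(zone.Name, fd.Name)
}

A Machine whose spec.failureDomain is "fd-a" would then have its generated VSphereVM's server, datacenter, folder, resource pool, datastore, and device network names taken from these two objects, in the precedence order documented in the diff below.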

8 files changed: +607 -23 lines changed


controllers/vspheremachine_controller.go

Lines changed: 87 additions & 5 deletions
@@ -31,6 +31,8 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	apitypes "k8s.io/apimachinery/pkg/types"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/utils/integer"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
 	clusterutilv1 "sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/conditions"
@@ -46,7 +48,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
-	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1alpha3"
 	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
 	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/record"
@@ -334,6 +335,11 @@ func (r machineReconciler) reconcileNormal(ctx *context.MachineContext) (reconci
 		return reconcile.Result{}, nil
 	}
 
+	// Propagating the failure domain name to the VSphereMachine object
+	if failureDomain := ctx.Machine.Spec.FailureDomain; failureDomain != nil {
+		ctx.VSphereMachine.Spec.FailureDomain = failureDomain
+	}
+
 	// TODO(akutz) Determine the version of vSphere.
 	vm, err := r.reconcileNormalPre7(ctx, vsphereVM)
 	if err != nil {
@@ -441,12 +447,18 @@ func (r machineReconciler) reconcileNormalPre7(ctx *context.MachineContext, vsph
 	// clone spec.
 	ctx.VSphereMachine.Spec.VirtualMachineCloneSpec.DeepCopyInto(&vm.Spec.VirtualMachineCloneSpec)
 
+	// If Failure Domain is present on CAPI machine, use that to override the vm clone spec.
+	if overrideFunc, ok := r.generateOverrideFunc(ctx); ok {
+		overrideFunc(vm)
+	}
+
 	// Several of the VSphereVM's clone spec properties can be derived
 	// from multiple places. The order is:
 	//
-	// 1. From the VSphereMachine.Spec (the DeepCopyInto above)
-	// 2. From the VSphereCluster.Spec.CloudProviderConfiguration.Workspace
-	// 3. From the VSphereCluster.Spec
+	// 1. From the Machine.Spec.FailureDomain
+	// 2. From the VSphereMachine.Spec (the DeepCopyInto above)
+	// 3. From the VSphereCluster.Spec.CloudProviderConfiguration.Workspace
+	// 4. From the VSphereCluster.Spec
 	vsphereCloudConfig := ctx.VSphereCluster.Spec.CloudProviderConfiguration.Workspace
 	if vm.Spec.Server == "" {
 		if vm.Spec.Server = vsphereCloudConfig.Server; vm.Spec.Server == "" {
@@ -486,10 +498,80 @@ func (r machineReconciler) reconcileNormalPre7(ctx *context.MachineContext, vsph
 	return vm, nil
 }
 
+// generateOverrideFunc returns a function which can override the values in the VSphereVM Spec
+// with the values from the FailureDomain (if any) set on the owner CAPI machine.
+func (r machineReconciler) generateOverrideFunc(ctx *context.MachineContext) (func(vm *infrav1.VSphereVM), bool) {
+	var overrideWithFailureDomainFunc func(vm *infrav1.VSphereVM)
+	if failureDomainName := ctx.Machine.Spec.FailureDomain; failureDomainName != nil {
+		var vsphereDeploymentZoneList infrav1.VSphereDeploymentZoneList
+		if err := r.Client.List(ctx, &vsphereDeploymentZoneList); err != nil {
+			r.Logger.Error(err, "unable to fetch list of deployment zones")
+			return overrideWithFailureDomainFunc, false
+		}
+
+		var vsphereFailureDomain infrav1.VSphereFailureDomain
+		if err := r.Client.Get(ctx, client.ObjectKey{Name: *failureDomainName}, &vsphereFailureDomain); err != nil {
+			r.Logger.Error(err, "unable to fetch failure domain", "name", *failureDomainName)
+			return overrideWithFailureDomainFunc, false
+		}
+
+		for index := range vsphereDeploymentZoneList.Items {
+			zone := vsphereDeploymentZoneList.Items[index]
+			if zone.Spec.FailureDomain == *ctx.Machine.Spec.FailureDomain {
+				overrideWithFailureDomainFunc = func(vm *infrav1.VSphereVM) {
+					vm.Spec.Server = zone.Spec.Server
+					vm.Spec.Datacenter = vsphereFailureDomain.Spec.Topology.Datacenter
+					if zone.Spec.PlacementConstraint.Folder != "" {
+						vm.Spec.Folder = zone.Spec.PlacementConstraint.Folder
+					}
+					if zone.Spec.PlacementConstraint.ResourcePool != "" {
+						vm.Spec.ResourcePool = zone.Spec.PlacementConstraint.ResourcePool
+					}
+					if vsphereFailureDomain.Spec.Topology.Datastore != "" {
+						vm.Spec.Datastore = vsphereFailureDomain.Spec.Topology.Datastore
+					}
+					if len(vsphereFailureDomain.Spec.Topology.Networks) > 0 {
+						vm.Spec.Network.Devices = overrideNetworkDeviceSpecs(vm.Spec.Network.Devices, vsphereFailureDomain.Spec.Topology.Networks)
+					}
+				}
+				return overrideWithFailureDomainFunc, true
+			}
+		}
+	}
+	return overrideWithFailureDomainFunc, false
+}
+
+// overrideNetworkDeviceSpecs updates the network devices with the network definitions from the failure domain's topology.
+// The substitution is done based on the order in which the network devices have been defined.
+//
+// In case there are more network definitions than the number of network devices specified, the definitions are appended to the list.
+func overrideNetworkDeviceSpecs(deviceSpecs []infrav1.NetworkDeviceSpec, networks []string) []infrav1.NetworkDeviceSpec {
+	index, length := 0, len(networks)
+
+	devices := make([]infrav1.NetworkDeviceSpec, 0, integer.IntMax(length, len(deviceSpecs)))
+	// override the networks on the VM spec with the failure domain's network definitions
+	for i := range deviceSpecs {
+		vmNetworkDeviceSpec := deviceSpecs[i]
+		if i < length {
+			index++
+			vmNetworkDeviceSpec.NetworkName = networks[i]
+		}
+		devices = append(devices, vmNetworkDeviceSpec)
+	}
+	// append the remaining network definitions to the VM spec
+	for ; index < length; index++ {
+		devices = append(devices, infrav1.NetworkDeviceSpec{
+			NetworkName: networks[index],
+		})
+	}
+
+	return devices
+}
+
 func (r machineReconciler) reconcileNetwork(ctx *context.MachineContext, vm *unstructured.Unstructured) (bool, error) {
 	var errs []error
 	if networkStatusListOfIfaces, ok, _ := unstructured.NestedSlice(vm.Object, "status", "network"); ok {
-		networkStatusList := []infrav1.NetworkStatus{}
+		var networkStatusList []infrav1.NetworkStatus
 		for i, networkStatusListMemberIface := range networkStatusListOfIfaces {
 			if buf, err := json.Marshal(networkStatusListMemberIface); err != nil {
 				ctx.Logger.Error(err,
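
To make the substitution order in overrideNetworkDeviceSpecs concrete, here is a hypothetical unit test (not part of this commit) for the function above; it would live in package controllers next to the function, and the device and network names are made up.

package controllers

import (
	"testing"

	"github.com/onsi/gomega"

	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1alpha3"
)

// TestOverrideNetworkDeviceSpecs sketches the documented behavior: existing
// devices are renamed in order, surplus network definitions are appended.
func TestOverrideNetworkDeviceSpecs(t *testing.T) {
	g := gomega.NewWithT(t)

	// Two devices already on the clone spec; three networks from the failure domain.
	devices := []infrav1.NetworkDeviceSpec{
		{NetworkName: "mgmt", DHCP4: true},
		{NetworkName: "workload", DHCP4: true},
	}
	networks := []string{"zone-net-0", "zone-net-1", "zone-net-2"}

	result := overrideNetworkDeviceSpecs(devices, networks)

	// Existing devices keep their other settings but take the new names, in order.
	g.Expect(result).To(gomega.HaveLen(3))
	g.Expect(result[0].NetworkName).To(gomega.Equal("zone-net-0"))
	g.Expect(result[0].DHCP4).To(gomega.BeTrue())
	g.Expect(result[1].NetworkName).To(gomega.Equal("zone-net-1"))
	// The surplus network definition becomes a brand-new device entry.
	g.Expect(result[2].NetworkName).To(gomega.Equal("zone-net-2"))
	g.Expect(result[2].DHCP4).To(gomega.BeFalse())
}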
Lines changed: 209 additions & 0 deletions
@@ -0,0 +1,209 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/pointer"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
+	"sigs.k8s.io/cluster-api/util/conditions"
+	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1alpha3"
+)
+
+var _ = Describe("VsphereMachineReconciler", func() {
+
+	var (
+		capiCluster *clusterv1.Cluster
+		capiMachine *clusterv1.Machine
+
+		infraCluster *infrav1.VSphereCluster
+		infraMachine *infrav1.VSphereMachine
+
+		testNs *corev1.Namespace
+		key    client.ObjectKey
+	)
+
+	isPresentAndFalseWithReason := func(getter conditions.Getter, condition clusterv1.ConditionType, reason string) bool {
+		ExpectWithOffset(1, testEnv.Get(ctx, key, getter)).To(Succeed())
+		if !conditions.Has(getter, condition) {
+			return false
+		}
+		objectCondition := conditions.Get(getter, condition)
+		return objectCondition.Status == corev1.ConditionFalse &&
+			objectCondition.Reason == reason
+	}
+
+	BeforeEach(func() {
+		var err error
+		testNs, err = testEnv.CreateNamespace(ctx, "vsphere-machine-reconciler")
+		Expect(err).NotTo(HaveOccurred())
+
+		capiCluster = &clusterv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				GenerateName: "test1-",
+				Namespace:    testNs.Name,
+			},
+			Spec: clusterv1.ClusterSpec{
+				InfrastructureRef: &corev1.ObjectReference{
+					APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha3",
+					Kind:       "VSphereCluster",
+					Name:       "vsphere-test1",
+				},
+			},
+		}
+		Expect(testEnv.Create(ctx, capiCluster)).To(Succeed())
+
+		infraCluster = &infrav1.VSphereCluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "vsphere-test1",
+				Namespace: testNs.Name,
+				OwnerReferences: []metav1.OwnerReference{
+					{
+						APIVersion: "cluster.x-k8s.io/v1alpha3",
+						Kind:       "Cluster",
+						Name:       capiCluster.Name,
+						UID:        "blah",
+					},
+				},
+			},
+			Spec: infrav1.VSphereClusterSpec{},
+		}
+		Expect(testEnv.Create(ctx, infraCluster)).To(Succeed())
+
+		capiMachine = &clusterv1.Machine{
+			ObjectMeta: metav1.ObjectMeta{
+				GenerateName: "machine-created-",
+				Namespace:    testNs.Name,
+				Finalizers:   []string{clusterv1.MachineFinalizer},
+				Labels: map[string]string{
+					clusterv1.ClusterLabelName: capiCluster.Name,
+				},
+			},
+			Spec: clusterv1.MachineSpec{
+				ClusterName: capiCluster.Name,
+				InfrastructureRef: corev1.ObjectReference{
+					APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha3",
+					Kind:       "VSphereMachine",
+					Name:       "vsphere-machine-1",
+				},
+			},
+		}
+		Expect(testEnv.Create(ctx, capiMachine)).To(Succeed())
+
+		infraMachine = &infrav1.VSphereMachine{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "vsphere-machine-1",
+				Namespace: testNs.Name,
+				Labels: map[string]string{
+					clusterv1.ClusterLabelName:             capiCluster.Name,
+					clusterv1.MachineControlPlaneLabelName: "",
+				},
+				OwnerReferences: []metav1.OwnerReference{
+					{
+						APIVersion: clusterv1.GroupVersion.String(),
+						Kind:       "Machine",
+						Name:       capiMachine.Name,
+						UID:        "blah",
+					},
+				},
+			},
+			Spec: infrav1.VSphereMachineSpec{
+				VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{
+					Template: "ubuntu-k9s-1.19",
+					Network: infrav1.NetworkSpec{
+						Devices: []infrav1.NetworkDeviceSpec{
+							{NetworkName: "network-1", DHCP4: true},
+						},
+					},
+				},
+			},
+		}
+		Expect(testEnv.Create(ctx, infraMachine)).To(Succeed())
+
+		key = client.ObjectKey{Namespace: testNs.Name, Name: infraMachine.Name}
+	})
+
+	AfterEach(func() {
+		Expect(testEnv.Cleanup(ctx, testNs, capiCluster, infraCluster, capiMachine, infraMachine)).To(Succeed())
+	})
+
+	It("waits for cluster status to be ready", func() {
+		Eventually(func() bool {
+			// this is to make sure that the VSphereMachine is created before the next check for the
+			// presence of conditions on the VSphereMachine proceeds.
+			if err := testEnv.Get(ctx, key, infraMachine); err != nil {
+				return false
+			}
+			return isPresentAndFalseWithReason(infraMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason)
+		}, timeout).Should(BeTrue())
+
+		By("setting the cluster infrastructure to be ready")
+		Eventually(func() error {
+			ph, err := patch.NewHelper(capiCluster, testEnv)
+			Expect(err).ShouldNot(HaveOccurred())
+			capiCluster.Status.InfrastructureReady = true
+			return ph.Patch(ctx, capiCluster, patch.WithStatusObservedGeneration{})
+		}, timeout).Should(BeNil())
+
+		Eventually(func() bool {
+			return isPresentAndFalseWithReason(infraMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason)
+		}, timeout).Should(BeFalse())
+	})
+
+	Context("With Cluster Infrastructure status ready", func() {
+		BeforeEach(func() {
+			ph, err := patch.NewHelper(capiCluster, testEnv)
+			Expect(err).ShouldNot(HaveOccurred())
+			capiCluster.Status.InfrastructureReady = true
+			Expect(ph.Patch(ctx, capiCluster, patch.WithStatusObservedGeneration{})).To(Succeed())
+		})
+
+		It("moves to VSphere VM creation", func() {
+			Eventually(func() bool {
+				vms := infrav1.VSphereVMList{}
+				Expect(testEnv.List(ctx, &vms, client.InNamespace(testNs.Name), client.MatchingLabels{
+					clusterv1.ClusterLabelName: capiCluster.Name,
+				})).To(Succeed())
+				return isPresentAndFalseWithReason(infraMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReason) &&
+					len(vms.Items) == 0
+			}, timeout).Should(BeTrue())
+
+			By("setting the bootstrap data")
+			Eventually(func() error {
+				ph, err := patch.NewHelper(capiMachine, testEnv)
+				Expect(err).ShouldNot(HaveOccurred())
+				capiMachine.Spec.Bootstrap = clusterv1.Bootstrap{
+					DataSecretName: pointer.StringPtr("some-secret"),
+				}
+				return ph.Patch(ctx, capiMachine, patch.WithStatusObservedGeneration{})
+			}, timeout).Should(BeNil())
+
+			Eventually(func() int {
+				vms := infrav1.VSphereVMList{}
+				Expect(testEnv.List(ctx, &vms)).To(Succeed())
+				return len(vms.Items)
+			}, timeout).Should(BeNumerically(">", 0))
+		})
+	})
+})
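
The specs above never set a failure domain, so the new override path stays dormant in these tests. A Machine that exercises it would carry spec.failureDomain, as in this hypothetical helper; the helper itself and all names are illustrative, not part of the commit.

package controllers

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/pointer"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
)

// newMachineWithFailureDomain returns a CAPI Machine pinned to the named
// failure domain, suitable for driving the override path in a test.
func newMachineWithFailureDomain(ns, clusterName, fd string) *clusterv1.Machine {
	return &clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "machine-fd-",
			Namespace:    ns,
			Labels:       map[string]string{clusterv1.ClusterLabelName: clusterName},
		},
		Spec: clusterv1.MachineSpec{
			ClusterName: clusterName,
			// reconcileNormal copies this pointer onto VSphereMachine.Spec.FailureDomain,
			// and generateOverrideFunc matches it against VSphereDeploymentZone.Spec.FailureDomain.
			FailureDomain: pointer.StringPtr(fd),
			InfrastructureRef: corev1.ObjectReference{
				APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha3",
				Kind:       "VSphereMachine",
				Name:       "vsphere-machine-fd",
			},
		},
	}
}

Creating such a Machine, together with a matching VSphereDeploymentZone and VSphereFailureDomain like the pair sketched near the top of this page, is what would drive the override of the generated VSphereVM's placement fields.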
