Commit 6218924

Refine godoc
Signed-off-by: Gong Zhang <[email protected]>
1 parent 9cc5762 commit 6218924

File tree: 2 files changed (+39, -37 lines)
controllers/vmware/virtualmachinegroup_reconciler_test.go (6 additions, 17 deletions)

@@ -121,11 +121,9 @@ func TestIsMemberUpdateAllowed(t *testing.T) {
 		wantErr         bool
 	}{
 		{
-			name:         "Allow member update if VirtualMachineGroup not existed",
-			targetMember: []vmoprv1.GroupMember{member(memberName1)},
-			vmgInput: &vmoprv1.VirtualMachineGroup{
-				ObjectMeta: metav1.ObjectMeta{Name: vmgName, Namespace: vmgNamespace},
-			},
+			name:         "Allow member update if VirtualMachineGroup not existed",
+			targetMember: []vmoprv1.GroupMember{member(memberName1)},
+			vmgInput:     baseVMG.DeepCopy(),
 			mdNames:         []string{mdName1},
 			existingObjects: nil,
 			wantAllowed:     true,
@@ -134,17 +132,8 @@ func TestIsMemberUpdateAllowed(t *testing.T) {
 		{
 			name:         "Allow member update if it is removing",
 			targetMember: []vmoprv1.GroupMember{},
-			vmgInput: func() *vmoprv1.VirtualMachineGroup {
-				v := baseVMG.DeepCopy()
-				v.Spec.BootOrder = []vmoprv1.VirtualMachineGroupBootOrderGroup{
-					{Members: []vmoprv1.GroupMember{
-						{
-							Name: memberName1,
-							Kind: memberKind,
-						}}}}
-				return v
-			}(),
-			mdNames: []string{mdName1},
+			vmgInput: baseVMG.DeepCopy(),
+			mdNames:  []string{mdName1},
 			existingObjects: func() []runtime.Object {
 				v := baseVMG.DeepCopy()
 				v.Spec.BootOrder = []vmoprv1.VirtualMachineGroupBootOrderGroup{
@@ -328,7 +317,7 @@ func TestIsMemberUpdateAllowed(t *testing.T) {
 				g.Expect(err).To(HaveOccurred())
 			} else {
 				g.Expect(err).NotTo(HaveOccurred())
-				g.Expect(gotAllowed).To(Equal(true))
+				g.Expect(gotAllowed).To(Equal(tt.wantAllowed))
 			}
 		})
 	}
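Note on the last hunk: asserting against a literal `true` silently ignores the per-case expectation, so any case with `wantAllowed: false` could never fail. Comparing against `tt.wantAllowed` restores the table-driven contract. A minimal standalone sketch of the corrected pattern, using plain `testing` and a hypothetical `isMemberUpdateAllowed` stub in place of the real reconciler logic:

```go
package example

import "testing"

// isMemberUpdateAllowed is a hypothetical stand-in for the reconciler
// logic under test; the real function lives in the CAPV controllers.
func isMemberUpdateAllowed(groupExists bool) (bool, error) {
	return groupExists, nil
}

func TestIsMemberUpdateAllowed(t *testing.T) {
	tests := []struct {
		name        string
		groupExists bool
		wantAllowed bool
	}{
		{name: "allowed when group exists", groupExists: true, wantAllowed: true},
		{name: "not allowed when group missing", groupExists: false, wantAllowed: false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotAllowed, err := isMemberUpdateAllowed(tt.groupExists)
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			// Compare against the per-case expectation, not a literal true;
			// otherwise cases with wantAllowed == false can never fail.
			if gotAllowed != tt.wantAllowed {
				t.Errorf("gotAllowed = %v, want %v", gotAllowed, tt.wantAllowed)
			}
		})
	}
}
```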

pkg/services/vmoperator/vmopmachine.go (33 additions, 20 deletions)

@@ -199,15 +199,31 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 	// Set the VM state. Will get reset throughout the reconcile
 	supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStatePending
 
+	// Get the VirtualMachine object key.
+	vmOperatorVM := &vmoprv1.VirtualMachine{}
+	key, err := virtualMachineObjectKey(supervisorMachineCtx.Machine.Name, supervisorMachineCtx.Machine.Namespace, supervisorMachineCtx.VSphereMachine.Spec.NamingStrategy)
+	if err != nil {
+		return false, err
+	}
+
+	// When creating a new cluster and the user doesn't provide info about placement of VMs in a specific failure domain,
+	// CAPV will define affinity rules to ensure proper placement of the machine.
+	//
+	// - All the machines belonging to the same MachineDeployment should be placed in the same failure domain - required.
+	// - All the machines belonging to the same MachineDeployment should be spread across ESXi hosts in the same failure domain - best-effort.
+	// - Different MachineDeployments and corresponding VMs should be spread across failure domains - best-effort.
+	//
+	// Note: Control plane VM placement doesn't follow the above rules, and the assumption
+	// is that failureDomain is always set for control plane VMs.
 	var affInfo *affinityInfo
 	if feature.Gates.Enabled(feature.NodeAutoPlacement) &&
 		!infrautilv1.IsControlPlaneMachine(machineCtx.GetVSphereMachine()) {
-		vmOperatorVMGroup := &vmoprv1.VirtualMachineGroup{}
+		vmGroup := &vmoprv1.VirtualMachineGroup{}
 		key := client.ObjectKey{
 			Namespace: supervisorMachineCtx.Cluster.Namespace,
 			Name:      supervisorMachineCtx.Cluster.Name,
 		}
-		err := v.Client.Get(ctx, key, vmOperatorVMGroup)
+		err := v.Client.Get(ctx, key, vmGroup)
 		if err != nil {
 			if !apierrors.IsNotFound(err) {
 				return false, err
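The group lookup above follows the usual controller-runtime pattern: `Get` by `ObjectKey`, and treat `IsNotFound` as "not there yet, requeue" rather than a hard failure. A minimal sketch of that shape under those assumptions (generic names, not the exact CAPV code; the vm-operator import path is assumed and should match the version CAPV vendors):

```go
package example

import (
	"context"

	vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha4" // import path assumed
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getVMGroup fetches the cluster's VirtualMachineGroup and reports
// (group, requeue, error): absence is a normal, retriable state.
func getVMGroup(ctx context.Context, c client.Client, namespace, name string) (*vmoprv1.VirtualMachineGroup, bool, error) {
	vmGroup := &vmoprv1.VirtualMachineGroup{}
	key := client.ObjectKey{Namespace: namespace, Name: name}
	if err := c.Get(ctx, key, vmGroup); err != nil {
		if !apierrors.IsNotFound(err) {
			return nil, false, err // unexpected API error: surface it
		}
		return nil, true, nil // not found yet: requeue and wait
	}
	return vmGroup, false, nil
}
```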
@@ -222,10 +238,10 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 			return true, nil
 		}
 
-		// Proceed only if the machine is a member of the VirtualMachineGroup.
-		isMember, err := v.checkVirtualMachineGroupMembership(vmOperatorVMGroup, supervisorMachineCtx)
+		// Proceed only if the VSphereMachine is a member of the VirtualMachineGroup.
+		isMember, err := v.checkVirtualMachineGroupMembership(vmGroup, key.Name)
 		if err != nil {
-			return true, errors.Wrapf(err, "%s", fmt.Sprintf("failed to check if VirtualMachine %s is a member of VirtualMachineGroup %s", supervisorMachineCtx.VSphereMachine.Name, klog.KObj(vmOperatorVMGroup)))
+			return true, errors.Wrapf(err, "%s", fmt.Sprintf("failed to check if VirtualMachine %s is a member of VirtualMachineGroup %s", supervisorMachineCtx.VSphereMachine.Name, klog.KObj(vmGroup)))
 		}
 		if !isMember {
 			v1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{
@@ -238,18 +254,19 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 		}
 
 		affInfo = &affinityInfo{
-			vmGroupName: vmOperatorVMGroup.Name,
+			vmGroupName: vmGroup.Name,
 		}
 
 		// Set the zone label using the annotation of the per-md zone mapping from VirtualMachineGroup.
 		// This is for new VMs created during day-2 operations when Node Auto Placement is enabled.
 		mdName := supervisorMachineCtx.Machine.Labels[clusterv1.MachineDeploymentNameLabel]
-		if fd, ok := vmOperatorVMGroup.Annotations[fmt.Sprintf("%s/%s", ZoneAnnotationPrefix, mdName)]; ok && fd != "" {
+		if fd, ok := vmGroup.Annotations[fmt.Sprintf("%s/%s", ZoneAnnotationPrefix, mdName)]; ok && fd != "" {
 			affInfo.failureDomain = fd
 		}
 
-		// Fetch machine deployments without explicit failureDomain specified
-		// to use when setting the anti-affinity rules.
+		// A VM in a MachineDeployment ideally should be placed in a different failure domain than VMs
+		// in other MachineDeployments.
+		// In order to do so, collect names of all the MachineDeployments except the one the VM belongs to.
 		machineDeployments := &clusterv1.MachineDeploymentList{}
 		if err := v.Client.List(ctx, machineDeployments,
 			client.InNamespace(supervisorMachineCtx.Cluster.Namespace),
@@ -266,6 +283,7 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 
 		affInfo.affinitySpec = vmoprv1.AffinitySpec{
 			VMAffinity: &vmoprv1.VMAffinitySpec{
+				// All the machines belonging to the same MachineDeployment should be placed in the same failure domain - required.
 				RequiredDuringSchedulingPreferredDuringExecution: []vmoprv1.VMAffinityTerm{
 					{
 						LabelSelector: &metav1.LabelSelector{
@@ -278,6 +296,7 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 				},
 			},
 			VMAntiAffinity: &vmoprv1.VMAntiAffinitySpec{
+				// All the machines belonging to the same MachineDeployment should be spread across ESXi hosts in the same failure domain - best-effort.
 				PreferredDuringSchedulingPreferredDuringExecution: []vmoprv1.VMAffinityTerm{
 					{
 						LabelSelector: &metav1.LabelSelector{
@@ -291,6 +310,7 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 			},
 		}
 		if len(othermMDNames) > 0 {
+			// Different MachineDeployments and corresponding VMs should be spread across failure domains - best-effort.
 			affInfo.affinitySpec.VMAntiAffinity.PreferredDuringSchedulingPreferredDuringExecution = append(
 				affInfo.affinitySpec.VMAntiAffinity.PreferredDuringSchedulingPreferredDuringExecution,
 				vmoprv1.VMAffinityTerm{
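Putting the three commented rules together, the resulting spec carries one required affinity term plus up to two preferred anti-affinity terms. A condensed sketch of that shape, using only the field names visible in this diff; the label key, selector details, and helper name are assumptions, not the exact CAPV implementation:

```go
package example

import (
	vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha4" // import path assumed
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildAffinitySpec sketches the three placement rules for a VM in
// MachineDeployment mdName; otherMDNames lists the remaining
// MachineDeployments in the cluster.
func buildAffinitySpec(mdName string, otherMDNames []string) vmoprv1.AffinitySpec {
	// "cluster.x-k8s.io/deployment-name" is the CAPI MachineDeployment name label.
	mdSelector := &metav1.LabelSelector{
		MatchLabels: map[string]string{"cluster.x-k8s.io/deployment-name": mdName},
	}
	spec := vmoprv1.AffinitySpec{
		VMAffinity: &vmoprv1.VMAffinitySpec{
			// Required: same MachineDeployment => same failure domain.
			RequiredDuringSchedulingPreferredDuringExecution: []vmoprv1.VMAffinityTerm{
				{LabelSelector: mdSelector},
			},
		},
		VMAntiAffinity: &vmoprv1.VMAntiAffinitySpec{
			// Best-effort: same MachineDeployment => spread across ESXi hosts.
			PreferredDuringSchedulingPreferredDuringExecution: []vmoprv1.VMAffinityTerm{
				{LabelSelector: mdSelector},
			},
		},
	}
	if len(otherMDNames) > 0 {
		// Best-effort: different MachineDeployments => spread across failure domains.
		spec.VMAntiAffinity.PreferredDuringSchedulingPreferredDuringExecution = append(
			spec.VMAntiAffinity.PreferredDuringSchedulingPreferredDuringExecution,
			vmoprv1.VMAffinityTerm{
				LabelSelector: &metav1.LabelSelector{
					MatchExpressions: []metav1.LabelSelectorRequirement{{
						Key:      "cluster.x-k8s.io/deployment-name",
						Operator: metav1.LabelSelectorOpIn,
						Values:   otherMDNames,
					}},
				},
			},
		)
	}
	return spec
}
```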
@@ -309,16 +329,13 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 		}
 	}
 
+	// If the failureDomain is explicitly defined for a machine, forward this info to the VM.
+	// Note: for consistency, affinity rules will be set on all the VMs, no matter if they are explicitly assigned to a failureDomain or not.
 	if supervisorMachineCtx.Machine.Spec.FailureDomain != "" {
 		supervisorMachineCtx.VSphereMachine.Spec.FailureDomain = ptr.To(supervisorMachineCtx.Machine.Spec.FailureDomain)
 	}
 
 	// Check for the presence of an existing object
-	vmOperatorVM := &vmoprv1.VirtualMachine{}
-	key, err := virtualMachineObjectKey(supervisorMachineCtx.Machine.Name, supervisorMachineCtx.Machine.Namespace, supervisorMachineCtx.VSphereMachine.Spec.NamingStrategy)
-	if err != nil {
-		return false, err
-	}
 	if err := v.Client.Get(ctx, *key, vmOperatorVM); err != nil {
 		if !apierrors.IsNotFound(err) {
 			return false, err
@@ -988,14 +1005,10 @@ func getMachineDeploymentNameForCluster(cluster *clusterv1.Cluster) string {
 }
 
 // checkVirtualMachineGroupMembership checks if the machine is in the first boot order group
-// and performs logic if a match is found.
-func (v *VmopMachineService) checkVirtualMachineGroupMembership(vmOperatorVMGroup *vmoprv1.VirtualMachineGroup, supervisorMachineCtx *vmware.SupervisorMachineContext) (bool, error) {
+// and performs logic if a match is found, as the first boot order group contains all the worker VMs.
+func (v *VmopMachineService) checkVirtualMachineGroupMembership(vmOperatorVMGroup *vmoprv1.VirtualMachineGroup, virtualMachineName string) (bool, error) {
 	if len(vmOperatorVMGroup.Spec.BootOrder) > 0 {
 		for _, member := range vmOperatorVMGroup.Spec.BootOrder[0].Members {
-			virtualMachineName, err := GenerateVirtualMachineName(supervisorMachineCtx.Machine.Name, supervisorMachineCtx.VSphereMachine.Spec.NamingStrategy)
-			if err != nil {
-				return false, err
-			}
 			if member.Name == virtualMachineName {
 				return true, nil
 			}
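The signature change in the last hunk is the core of the refactor: the caller now resolves the VirtualMachine name once (via `virtualMachineObjectKey`), so the helper no longer regenerates it on every loop iteration and becomes a pure lookup over the first boot order group, which per the updated godoc contains all the worker VMs. A standalone sketch of the resulting logic, simplified to return only a bool since no error path remains inside the loop:

```go
package example

import vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha4" // import path assumed

// isInFirstBootOrderGroup reports whether virtualMachineName appears in
// the first boot order group of the VirtualMachineGroup. Moving name
// generation to the caller makes this a side-effect-free lookup.
func isInFirstBootOrderGroup(vmGroup *vmoprv1.VirtualMachineGroup, virtualMachineName string) bool {
	if len(vmGroup.Spec.BootOrder) == 0 {
		return false
	}
	for _, member := range vmGroup.Spec.BootOrder[0].Members {
		if member.Name == virtualMachineName {
			return true
		}
	}
	return false
}
```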
