From ead95ae8aff432fabb40daa8297bb6683aefc758 Mon Sep 17 00:00:00 2001 From: Pujol Date: Fri, 17 Oct 2025 09:55:42 +0200 Subject: [PATCH 1/3] feat: Cisco NXOS VPCs Multi-Chassis Link Aggregation (MC-LAG) is quite vendor and platform specific. We don't see much intersection in their respective configuration to justify a common API type. Instead, we move forward with a platform specific API exclusive to Cisco NXOS devices. This commit adds new types, controller, and provider to configure virtual Port Channels (vPCs) via the operator. Implementation note: Consider the following information about the YANG model for configuring a vPC: * each vPC configured in the domain appears in the tree in this location: `vpc-items/inst-items/dom-items/if-items/If-list[id=30]` (where `30` is the vPC ID) * the peer-link interface is configured here: `vpc-items/inst-items/dom-items/keepalive-items/peerlink-items[id=po10]` The interfaces will be added to the vPC config by the LAG provider and not by this controller. Hence, if we apply a gNMI Replace operation on the xpath returned by VPC.XPath() we would remove any existing vPC interfaces. A gNMI Update operation will not modify the configuration introduced by the LAG provider. 
--- PROJECT | 8 + Tiltfile | 65 ++- api/cisco/nx/v1alpha1/groupversion_info.go | 11 + api/cisco/nx/v1alpha1/vpc_types.go | 267 ++++++++++ .../nx/v1alpha1/zz_generated.deepcopy.go | 172 +++++++ cmd/main.go | 19 + ...co.networking.metal.ironcore.dev_vpcs.yaml | 399 ++++++++++++++ config/crd/kustomization.yaml | 2 + config/rbac/cisco/nx/vpc_admin_role.yaml | 21 + config/rbac/cisco/nx/vpc_editor_role.yaml | 27 + config/rbac/cisco/nx/vpc_viewer_role.yaml | 32 ++ config/rbac/kustomization.yaml | 3 + config/rbac/role.yaml | 18 +- config/samples/cisco/nx/v1alpha1_vpc.yaml | 34 ++ config/samples/kustomization.yaml | 1 + config/samples/v1alpha1_interface.yaml | 98 +++- config/samples/v1alpha1_vrf.yaml | 23 +- internal/controller/cisco/nx/provider.go | 5 + internal/controller/cisco/nx/suite_test.go | 34 ++ .../controller/cisco/nx/vpc_controller.go | 486 ++++++++++++++++++ .../cisco/nx/vpc_controller_test.go | 217 ++++++++ .../controller/core/interface_controller.go | 3 + internal/controller/core/suite_test.go | 2 +- internal/provider/cisco/gnmiext/v2/client.go | 8 +- internal/provider/cisco/nxos/intf.go | 5 +- internal/provider/cisco/nxos/provider.go | 157 +++++- .../provider/cisco/nxos/testdata/vpc.json | 25 + .../provider/cisco/nxos/testdata/vpc.json.txt | 11 + internal/provider/cisco/nxos/vpc.go | 76 ++- internal/provider/cisco/nxos/vpc_test.go | 19 + internal/provider/provider.go | 2 + 31 files changed, 2170 insertions(+), 80 deletions(-) create mode 100644 api/cisco/nx/v1alpha1/vpc_types.go create mode 100644 config/crd/bases/nx.cisco.networking.metal.ironcore.dev_vpcs.yaml create mode 100644 config/rbac/cisco/nx/vpc_admin_role.yaml create mode 100644 config/rbac/cisco/nx/vpc_editor_role.yaml create mode 100644 config/rbac/cisco/nx/vpc_viewer_role.yaml create mode 100644 config/samples/cisco/nx/v1alpha1_vpc.yaml create mode 100644 internal/controller/cisco/nx/vpc_controller.go create mode 100644 internal/controller/cisco/nx/vpc_controller_test.go create mode 100644 
internal/provider/cisco/nxos/testdata/vpc.json create mode 100644 internal/provider/cisco/nxos/testdata/vpc.json.txt diff --git a/PROJECT b/PROJECT index 9ca8813c..de1aab0e 100644 --- a/PROJECT +++ b/PROJECT @@ -187,4 +187,12 @@ resources: kind: EVPNInstance path: github.com/ironcore-dev/network-operator/api/core/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cisco.networking.metal.ironcore.dev + group: nx + kind: VPC + path: github.com/ironcore-dev/network-operator/api/cisco/nx/v1alpha1 version: "3" diff --git a/Tiltfile b/Tiltfile index 185b4c80..f887cb95 100644 --- a/Tiltfile +++ b/Tiltfile @@ -22,7 +22,7 @@ local_resource('controller-gen', 'make generate', ignore=['**/*/zz_generated.dee docker_build('ghcr.io/ironcore-dev/gnmi-test-server:latest', './test/gnmi') -provider = os.getenv('PROVIDER', 'openconfig') +provider = os.getenv('PROVIDER', 'cisco-nxos-gnmi') manager = kustomize('config/develop') manager = str(manager).replace('--provider=openconfig', '--provider={}'.format(provider)) @@ -39,68 +39,77 @@ def device_yaml(): return encode_yaml_stream(decoded) k8s_yaml(device_yaml()) -k8s_resource(new_name='leaf1', objects=['leaf1:device', 'secret-basic-auth:secret'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='leaf1', objects=['leaf1:device', 'secret-basic-auth:secret'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=True) k8s_yaml('./config/samples/v1alpha1_interface.yaml') -k8s_resource(new_name='lo0', objects=['lo0:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) -k8s_resource(new_name='lo1', objects=['lo1:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) -k8s_resource(new_name='eth1-1', objects=['eth1-1:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) -k8s_resource(new_name='eth1-2', objects=['eth1-2:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='lo0', objects=['lo0:interface'], 
trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='lo1', objects=['lo1:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='eth1-1', objects=['eth1-1:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='eth1-2', objects=['eth1-2:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_resource(new_name='eth1-10', objects=['eth1-10:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) -k8s_resource(new_name='po10', objects=['po-10:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) -k8s_resource(new_name='svi-10', objects=['svi-10:interface'], resource_deps=['vlan-10'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) -k8s_resource(new_name='eth1-30', objects=['eth1-30:interface'], resource_deps=['vrf-vpc-keepalive'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='eth1-30', objects=['eth1-30:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='eth1-31', objects=['eth1-31:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='eth1-32', objects=['eth1-32:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +#k8s_resource(new_name='svi-10', objects=['svi-10:interface'], resource_deps=['vlan-10'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='po1', objects=['po1:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='po2', objects=['po2:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) + + k8s_yaml('./config/samples/v1alpha1_banner.yaml') -k8s_resource(new_name='banner', objects=['banner:banner'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='banner', objects=['banner:banner'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_user.yaml') -k8s_resource(new_name='user', objects=['user:user', 
'user-password:secret', 'user-ssh-key:secret'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='user', objects=['user:user', 'user-password:secret', 'user-ssh-key:secret'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_dns.yaml') -k8s_resource(new_name='dns', objects=['dns:dns'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='dns', objects=['dns:dns'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_ntp.yaml') -k8s_resource(new_name='ntp', objects=['ntp:ntp'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='ntp', objects=['ntp:ntp'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_acl.yaml') -k8s_resource(new_name='acl', objects=['acl:accesscontrollist'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='acl', objects=['acl:accesscontrollist'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_certificate.yaml') -k8s_resource(new_name='trustpoint', objects=['network-operator:issuer', 'network-operator-ca:certificate', 'trustpoint:certificate'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='trustpoint', objects=['network-operator:issuer', 'network-operator-ca:certificate', 'trustpoint:certificate'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_snmp.yaml') -k8s_resource(new_name='snmp', objects=['snmp:snmp'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='snmp', objects=['snmp:snmp'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_syslog.yaml') -k8s_resource(new_name='syslog', objects=['syslog:syslog'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='syslog', objects=['syslog:syslog'], trigger_mode=TRIGGER_MODE_MANUAL, 
auto_init=False) k8s_yaml('./config/samples/v1alpha1_managementaccess.yaml') -k8s_resource(new_name='managementaccess', objects=['managementaccess:managementaccess'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='managementaccess', objects=['managementaccess:managementaccess'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_isis.yaml') -k8s_resource(new_name='isis-underlay', objects=['underlay:isis'], resource_deps=['lo0', 'lo1', 'eth1-1', 'eth1-2'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='underlay', objects=['underlay:isis'], resource_deps=['lo0', 'lo1', 'eth1-1', 'eth1-2'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_vrf.yaml') -k8s_resource(new_name='vrf-admin', objects=['vrf-cc-admin:vrf'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) -k8s_resource(new_name='vrf-001', objects=['vrf-cc-prod-001:vrf'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) -k8s_resource(new_name='vrf-vpc-keepalive', objects=['vpc-keepalive:vrf'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='vrf-admin', objects=['vrf-cc-admin:vrf'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='vrf-vpckeepalive', objects=['vrf-vpckeepalive:vrf'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_pim.yaml') -k8s_resource(new_name='pim', objects=['pim:pim'], resource_deps=['lo0', 'lo1', 'eth1-1', 'eth1-2'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='pim', objects=['pim:pim'], resource_deps=['lo0', 'lo1', 'eth1-1', 'eth1-2'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_bgp.yaml') -k8s_resource(new_name='bgp', objects=['bgp:bgp'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='bgp', objects=['bgp:bgp'], trigger_mode=TRIGGER_MODE_MANUAL, 
auto_init=False) k8s_yaml('./config/samples/v1alpha1_bgppeer.yaml') -k8s_resource(new_name='peer-spine1', objects=['leaf1-spine1:bgppeer'], resource_deps=['bgp', 'lo0'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) -k8s_resource(new_name='peer-spine2', objects=['leaf1-spine2:bgppeer'], resource_deps=['bgp', 'lo0'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='peer-spine1', objects=['leaf1-spine1:bgppeer'], resource_deps=['bgp', 'lo0'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='peer-spine2', objects=['leaf1-spine2:bgppeer'], resource_deps=['bgp', 'lo0'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_ospf.yaml') -k8s_resource(new_name='ospf-underlay', objects=['underlay:ospf'], resource_deps=['lo0', 'lo1', 'eth1-1', 'eth1-2'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +#k8s_resource(new_name='ospf-underlay', objects=['underlay:ospf'], resource_deps=['lo0', 'lo1', 'eth1-1', 'eth1-2'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_vlan.yaml') -k8s_resource(new_name='vlan-10', objects=['vlan-10:vlan'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +# k8s_resource(new_name='vlan-10', objects=['vlan-10:vlan'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) + +k8s_yaml('./config/samples/cisco/nx/v1alpha1_vpc.yaml') +# don't add a resource dependency to the interfaces here, otherwise we create a deadlock as the multichassis ID depends +# on the vPC being created first. 
+k8s_resource(new_name='vpc', objects=['leaf1-vpc:vpc'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_evi.yaml') k8s_resource(new_name='vxlan-100010', objects=['vxlan-100010:evpninstance'], resource_deps=['vlan-10'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) diff --git a/api/cisco/nx/v1alpha1/groupversion_info.go b/api/cisco/nx/v1alpha1/groupversion_info.go index 8190e2c5..9dbfde40 100644 --- a/api/cisco/nx/v1alpha1/groupversion_info.go +++ b/api/cisco/nx/v1alpha1/groupversion_info.go @@ -22,3 +22,14 @@ var ( // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme ) + +// WatchLabel is a label that can be applied to any Network API object. +// +// Controllers which allow for selective reconciliation may check this label and proceed +// with reconciliation of the object only if this label and a configured value is present. +const WatchLabel = "nx.cisco.networking.metal.ironcore.dev/watch-filter" + +// FinalizerName is the identifier used by the controllers to perform cleanup before a resource is deleted. +// It is added when the resource is created and ensures that the controller can handle teardown logic +// (e.g., deleting external dependencies) before Kubernetes finalizes the deletion. 
+const FinalizerName = "nx.cisco.networking.metal.ironcore.dev/finalizer" diff --git a/api/cisco/nx/v1alpha1/vpc_types.go b/api/cisco/nx/v1alpha1/vpc_types.go new file mode 100644 index 00000000..86f20051 --- /dev/null +++ b/api/cisco/nx/v1alpha1/vpc_types.go @@ -0,0 +1,267 @@ +// SPDX-FileCopyrightText: 2025 SAP SE or an SAP affiliate company and IronCore contributors +// SPDX-License-Identifier: Apache-2.0 + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + corev1 "github.com/ironcore-dev/network-operator/api/core/v1alpha1" +) + +// VPCSpec defines the desired state of VPC (Cisco's NXOS Virtual Port Channel) +type VPCSpec struct { + // DeviceRef is a reference to the Device this object belongs to. The Device object must exist in the same namespace. + // Immutable. + // +required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="DeviceRef is immutable" + DeviceRef corev1.LocalObjectReference `json:"deviceRef"` + + // DomainID is the vPC domain ID (1-1000). + // This uniquely identifies the vPC domain and must match on both peer switches. + // Maps to: "vpc domain <domain-id>" + // Changing this value will recreate the vPC domain and flap the peer-link and vPCs. + // +required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=1000 + DomainID uint16 `json:"domainId"` + + // AdminState is the administrative state of the vPC domain (enabled/disabled). + // When disabled, the vPC domain is administratively shut down. + // Maps to: "vpc domain <domain-id>" being present (enabled) or "no vpc domain <domain-id>" (disabled) + // +required + // +kubebuilder:default="enabled" + // +kubebuilder:validation:Enum=enabled;disabled + AdminState string `json:"adminState"` + + // RolePriority is the role priority for this vPC domain (1-65535). + // The switch with the lower role priority becomes the operational primary. 
+ // Maps to: "role priority <priority>" + // +required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + RolePriority uint16 `json:"rolePriority"` + + // SystemPriority is the system priority for this vPC domain (1-65535). + // Used to ensure that the vPC devices are primary devices on LACP. Must match on both peers. + // Maps to: "system-priority <priority>" + // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + SystemPriority uint16 `json:"systemPriority"` + + // DelayRestoreSVI is the delay in seconds (1-3600) before bringing up interface-vlan (SVI) after peer-link comes up. + // This prevents traffic blackholing during convergence. + // Maps to: "delay restore interface-vlan <seconds>" + // +required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=3600 + DelayRestoreSVI uint16 `json:"delayRestoreSVI"` + + // DelayRestoreVPC is the delay in seconds (1-3600) before bringing up vPC member ports after the peer-link is restored. + // This ensures the vPC peer-link is stable before enabling member ports. + // Maps to: "delay restore <seconds>" + // +required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=3600 + DelayRestoreVPC uint16 `json:"delayRestoreVPC"` + + // FastConvergence enables faster convergence for vPC by reducing timers. + // Maps to: "fast-convergence" when enabled + // +required + FastConvergence AdminSt `json:"fastConvergence"` + + // Peer contains the vPC peer configuration including peer-link, keepalive. + // +required + Peer Peer `json:"peer"` +} + +// AdminSt represents administrative state (enabled/disabled). +// Used for various vPC features. +type AdminSt struct { + // Enabled indicates whether the feature is administratively enabled (true) or disabled (false). + // +required + Enabled bool `json:"enabled"` +} + +// Peer defines the vPC peer configuration. +// Encompasses all settings related to the relationship between the two vPC peer switches. 
+type Peer struct { + // InterfaceAggregateRef is a reference to an Interface resource with type `Aggregate`. + // This is a dedicated port-channel between the two switches that will be configured as the vPC peer-link + // and which carries control and data traffic between the two vPC peers. + // Maps to: "vpc peer-link" configured on the referenced port-channel interface + // +required + InterfaceAggregateRef corev1.LocalObjectReference `json:"interfaceAggregateRef,omitempty"` + + // KeepAlive defines the out-of-band keepalive configuration. + // +required + KeepAlive KeepAlive `json:"keepalive"` + + // AutoRecovery defines auto-recovery settings for restoring vPC after peer failure. + // +required + AutoRecovery AutoRecovery `json:"autoRecovery"` + + // Switch enables peer-switch functionality on this peer. + // When enabled, both vPC peers use the same spanning-tree bridge ID, allowing both + // to forward traffic for all VLANs without blocking any ports. + // Maps to: "peer-switch" when enabled + // +required + Switch AdminSt `json:"switch"` + + // Gateway enables peer-gateway functionality on this peer. + // When enabled, each vPC peer can act as the active gateway for packets destined to the + // peer's MAC address, improving convergence. + // Maps to: "peer-gateway" when enabled + // +required + Gateway AdminSt `json:"gateway"` + + // Router enables Layer 3 peer-router functionality on this peer. + // Maps to: "layer3 peer-router" when enabled + // +required + Router AdminSt `json:"router"` +} + +// KeepAlive defines the vPC keepalive link configuration. +// The keepalive is typically a separate out-of-band link (often over mgmt0) used to monitor +// peer health. It does not carry data traffic. +type KeepAlive struct { + // Destination is the destination IP address of the vPC peer's keepalive interface. + // This is the IP address the local switch will send keepalive messages to. + // Maps to: "peer-keepalive destination ..." 
+ // +kubebuilder:validation:Format=ipv4 + // +required + Destination string `json:"destination"` + + // Source is the source IP address for keepalive messages. + // This is the local IP address used to send keepalive packets to the peer. + // Maps to: "peer-keepalive destination source ..." + // +kubebuilder:validation:Format=ipv4 + // +required + Source string `json:"source"` + + // VRFRef is an optional reference to a VRF resource. + // If specified, the keepalive will use this VRF for routing keepalive packets. + // Typically used when keepalive is over a management VRF. + // Maps to: "peer-keepalive destination source vrf <vrf-name>" + // The VRF must exist on the Device referenced by the parent VPC resource. + // If omitted, the default VRF is used. + // +optional + VRFRef *corev1.LocalObjectReference `json:"vrf,omitempty"` +} + +// AutoRecovery holds auto-recovery settings. +// It allows a vPC peer to automatically restore vPC operation after detecting +// that the peer is no longer reachable via keepalive link. +// +kubebuilder:validation:XValidation:rule="self.enabled ? has(self.reloadDelay) : !has(self.reloadDelay)",message="reloadDelay must be set when enabled and absent when disabled" +type AutoRecovery struct { + // Enabled indicates whether auto-recovery is enabled. + // When enabled, the switch will wait for ReloadDelay seconds after peer failure + // before assuming the peer is dead and restoring vPC functionality. + // Maps to: "auto-recovery" being present (enabled) or absent (disabled) + Enabled bool `json:"enabled,omitempty"` + + // ReloadDelay is the time in seconds (60-3600) to wait before assuming the peer is dead + // and automatically recovering vPC operation. + // Must be set when Enabled is true. + // Maps to: "auto-recovery reload-delay <seconds>" + // +optional + // +kubebuilder:validation:Minimum=60 + // +kubebuilder:validation:Maximum=3600 + ReloadDelay uint32 `json:"reloadDelay,omitempty"` +} + +// VPCStatus defines the observed state of VPC. 
+type VPCStatus struct { + // Conditions represent the latest available observations of the VPC's state. + // Standard conditions include: + // - Ready: overall readiness of the vPC domain + // - Configured: whether the vPC configuration was successfully applied to the device + // - Operational: whether the vPC domain is operationally up (peer-link and keepalive status) + //+listType=map + //+listMapKey=type + //+patchStrategy=merge + //+patchMergeKey=type + //+optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // DomainID is the vPC domain ID as reported by the device. + // +optional + DomainID uint16 `json:"domainId,omitempty"` + + // Role indicates the current operational role of this vPC peer. + // Possible values: + // - Primary: This switch is the primary vPC peer (lower role priority or elected) + // - Secondary: This switch is the secondary vPC peer + // - Unknown: Role has not been established (e.g., peer-link down, domain not formed) + // +optional + Role VPCRole `json:"role,omitempty"` + + // PeerUptime indicates how long the vPC peer has been up and reachable via keepalive. + // +optional + PeerUptime metav1.Duration `json:"peerUptime,omitempty"` +} + +// The VPCRole type represents the operational role of a vPC peer as returned by the device. 
+type VPCRole string + +const ( + VPCRolePrimary VPCRole = "Pri" + VPCRolePrimaryOperationalSecondary VPCRole = "Pri/Sec" + VPCRoleSecondary VPCRole = "Sec" + VPCRoleSecondaryOperationalPrimary VPCRole = "Sec/Pri" + VPCRoleUnknown VPCRole = "Unknown" +) + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=vpcs +// +kubebuilder:resource:singular=vpc +// +kubebuilder:resource:shortName=vpc +// +kubebuilder:printcolumn:name="Device",type=string,JSONPath=`.spec.deviceRef.name` +// +kubebuilder:printcolumn:name="Domain",type=string,JSONPath=`.spec.domainId` +// +kubebuilder:printcolumn:name="Enabled",type=string,JSONPath=`.spec.adminState` +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status` +// +kubebuilder:printcolumn:name="Configured",type=string,JSONPath=`.status.conditions[?(@.type=="Configured")].status`,priority=1 +// +kubebuilder:printcolumn:name="Operational",type=string,JSONPath=`.status.conditions[?(@.type=="Operational")].status`,priority=1 +// +kubebuilder:printcolumn:name="DomainID",type=string,JSONPath=`.status.domainId`,priority=1 +// +kubebuilder:printcolumn:name="Role",type=string,JSONPath=`.status.role`,priority=1 +// +kubebuilder:printcolumn:name="PeerUptime",type="date",JSONPath=`.status.peerUptime`,priority=1 +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// VPC is the Schema for the VPCs API +type VPC struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec defines the desired state of VPC + // +required + Spec VPCSpec `json:"spec,omitempty"` + + // status defines the observed state of VPC + // +optional + Status VPCStatus `json:"status,omitempty,omitzero"` +} + +// GetConditions implements conditions.Getter. +func (in *VPC) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions implements conditions.Setter. 
+func (in *VPC) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// +kubebuilder:object:root=true + +// VPCList contains a list of VPC +type VPCList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VPC `json:"items"` +} + +func init() { + SchemeBuilder.Register(&VPC{}, &VPCList{}) +} diff --git a/api/cisco/nx/v1alpha1/zz_generated.deepcopy.go b/api/cisco/nx/v1alpha1/zz_generated.deepcopy.go index 4feadef3..1346b82c 100644 --- a/api/cisco/nx/v1alpha1/zz_generated.deepcopy.go +++ b/api/cisco/nx/v1alpha1/zz_generated.deepcopy.go @@ -8,10 +8,41 @@ package v1alpha1 import ( + corev1alpha1 "github.com/ironcore-dev/network-operator/api/core/v1alpha1" "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminSt) DeepCopyInto(out *AdminSt) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminSt. +func (in *AdminSt) DeepCopy() *AdminSt { + if in == nil { + return nil + } + out := new(AdminSt) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoRecovery) DeepCopyInto(out *AutoRecovery) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoRecovery. +func (in *AutoRecovery) DeepCopy() *AutoRecovery { + if in == nil { + return nil + } + out := new(AutoRecovery) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Console) DeepCopyInto(out *Console) { *out = *in @@ -28,6 +59,26 @@ func (in *Console) DeepCopy() *Console { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeepAlive) DeepCopyInto(out *KeepAlive) { + *out = *in + if in.VRFRef != nil { + in, out := &in.VRFRef, &out.VRFRef + *out = new(corev1alpha1.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeepAlive. +func (in *KeepAlive) DeepCopy() *KeepAlive { + if in == nil { + return nil + } + out := new(KeepAlive) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManagementAccessConfig) DeepCopyInto(out *ManagementAccessConfig) { *out = *in @@ -103,6 +154,27 @@ func (in *ManagementAccessConfigSpec) DeepCopy() *ManagementAccessConfigSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Peer) DeepCopyInto(out *Peer) { + *out = *in + out.InterfaceAggregateRef = in.InterfaceAggregateRef + in.KeepAlive.DeepCopyInto(&out.KeepAlive) + out.AutoRecovery = in.AutoRecovery + out.Switch = in.Switch + out.Gateway = in.Gateway + out.Router = in.Router +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Peer. +func (in *Peer) DeepCopy() *Peer { + if in == nil { + return nil + } + out := new(Peer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SSH) DeepCopyInto(out *SSH) { *out = *in @@ -214,3 +286,103 @@ func (in *SystemStatus) DeepCopy() *SystemStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *VPC) DeepCopyInto(out *VPC) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPC. +func (in *VPC) DeepCopy() *VPC { + if in == nil { + return nil + } + out := new(VPC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPC) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCList) DeepCopyInto(out *VPCList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VPC, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCList. +func (in *VPCList) DeepCopy() *VPCList { + if in == nil { + return nil + } + out := new(VPCList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPCList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCSpec) DeepCopyInto(out *VPCSpec) { + *out = *in + out.DeviceRef = in.DeviceRef + out.FastConvergence = in.FastConvergence + in.Peer.DeepCopyInto(&out.Peer) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCSpec. 
+func (in *VPCSpec) DeepCopy() *VPCSpec { + if in == nil { + return nil + } + out := new(VPCSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCStatus) DeepCopyInto(out *VPCStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.PeerUptime = in.PeerUptime +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCStatus. +func (in *VPCStatus) DeepCopy() *VPCStatus { + if in == nil { + return nil + } + out := new(VPCStatus) + in.DeepCopyInto(out) + return out +} diff --git a/cmd/main.go b/cmd/main.go index 6691b281..dc067f56 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -427,6 +427,25 @@ func main() { os.Exit(1) } + if err := (&nxcontroller.VPCReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("vpc-controller"), + WatchFilterValue: watchFilterValue, + Provider: prov, + RequeueInterval: requeueInterval, + }).SetupWithManager(ctx, mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "VPC") + os.Exit(1) + } + + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err := webhookv1alpha1.SetupVRFWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "VRF") + os.Exit(1) + } + } + if err := (&nxcontroller.SystemReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), diff --git a/config/crd/bases/nx.cisco.networking.metal.ironcore.dev_vpcs.yaml b/config/crd/bases/nx.cisco.networking.metal.ironcore.dev_vpcs.yaml new file mode 100644 index 00000000..5a3d30b5 --- /dev/null +++ b/config/crd/bases/nx.cisco.networking.metal.ironcore.dev_vpcs.yaml @@ -0,0 +1,399 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition 
+metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: vpcs.nx.cisco.networking.metal.ironcore.dev +spec: + group: nx.cisco.networking.metal.ironcore.dev + names: + kind: VPC + listKind: VPCList + plural: vpcs + shortNames: + - vpc + singular: vpc + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.deviceRef.name + name: Device + type: string + - jsonPath: .spec.domainId + name: Domain + type: string + - jsonPath: .spec.adminState + name: Enabled + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Configured")].status + name: Configured + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Operational")].status + name: Operational + priority: 1 + type: string + - jsonPath: .status.domainId + name: DomainID + priority: 1 + type: string + - jsonPath: .status.role + name: Role + priority: 1 + type: string + - jsonPath: .status.peerUptime + name: PeerUptime + priority: 1 + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: VPC is the Schema for the VPCs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec defines the desired state of VPC + properties: + adminState: + default: enabled + description: |- + AdminState is the administrative state of the vPC domain (enabled/disabled). + When disabled, the vPC domain is administratively shut down. + Maps to: "vpc domain " being present (enabled) or "no vpc domain " (disabled) + enum: + - enabled + - disabled + type: string + delayRestoreSVI: + description: |- + DelayRestoreSVI is the delay in seconds (1-3600) before bringing up interface-vlan (SVI) after peer-link comes up. + This prevents traffic blackholing during convergence. + Maps to: "delay restore interface-vlan " + maximum: 3600 + minimum: 1 + type: integer + delayRestoreVPC: + description: |- + DelayRestoreVPC is the delay in seconds (1-3600) before bringing up vPC member ports after the peer-link is restored. + This ensures the vPC peer-link is stable before enabling member ports. + Maps to: "delay restore " + maximum: 3600 + minimum: 1 + type: integer + deviceRef: + description: |- + DeviceName is the name of the Device this object belongs to. The Device object must exist in the same namespace. + Immutable. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + maxLength: 63 + minLength: 1 + type: string + required: + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: DeviceRef is immutable + rule: self == oldSelf + domainId: + description: |- + DomainID is the vPC domain ID (1-1000). + This uniquely identifies the vPC domain and must match on both peer switches. + Maps to: "vpc domain " + Changing this value will recreate the vPC domain and flap the peer-link and vPCs. 
+ maximum: 1000 + minimum: 1 + type: integer + fastConvergence: + description: |- + FastConvergence enables faster convergence for vPC by reducing timers. + Maps to: "fast-convergence" when enabled + properties: + enabled: + description: Enabled indicates whether the feature is administratively + enabled (true) or disabled (false). + type: boolean + required: + - enabled + type: object + peer: + description: Peer contains the vPC peer configuration including peer-link, + keepalive. + properties: + autoRecovery: + description: AutoRecovery defines auto-recovery settings for restoring + vPC after peer failure. + properties: + enabled: + description: |- + Enabled indicates whether auto-recovery is enabled. + When enabled, the switch will wait for ReloadDelay seconds after peer failure + before assuming the peer is dead and restoring vPC functionality. + Maps to: "auto-recovery" being present (enabled) or absent (disabled) + type: boolean + reloadDelay: + description: |- + ReloadDelay is the time in seconds (60-3600) to wait before assuming the peer is dead + and automatically recovering vPC operation. + Must be set when Enabled is true. + Maps to: "auto-recovery reload-delay " + format: int32 + maximum: 3600 + minimum: 60 + type: integer + type: object + x-kubernetes-validations: + - message: reloadDelay must be set when enabled and absent when + disabled + rule: 'self.enabled ? has(self.reloadDelay) : !has(self.reloadDelay)' + gateway: + description: |- + Gateway enables peer-gateway functionality on this peer. + When enabled, each vPC peer can act as the active gateway for packets destined to the + peer's MAC address, improving convergence. + Maps to: "peer-gateway" when enabled + properties: + enabled: + description: Enabled indicates whether the feature is administratively + enabled (true) or disabled (false). 
+ type: boolean + required: + - enabled + type: object + interfaceAggregateRef: + description: |- + InterfaceAggregateRef is a reference to an Interface resource with type `Aggregate`. + This is a dedicated port-channel between the two switches, that will be configured as the vPC peer-link. + and which carries control and data traffic between the two vPC peers. + Maps to: "vpc peer-link" configured on the referenced port-channel interface + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + maxLength: 63 + minLength: 1 + type: string + required: + - name + type: object + x-kubernetes-map-type: atomic + keepalive: + description: KeepAlive defines the out-of-band keepalive configuration. + properties: + destination: + description: |- + Destination is the destination IP address of the vPC peer's keepalive interface. + This is the IP address the local switch will send keepalive messages to. + Maps to: "peer-keepalive destination ..." + format: ipv4 + type: string + source: + description: |- + Source is the source IP address for keepalive messages. + This is the local IP address used to send keepalive packets to the peer. + Maps to: "peer-keepalive destination source ..." + format: ipv4 + type: string + vrf: + description: |- + VRFRef is an optional reference to a VRF resource. + If specified, the keepalive will use this VRF for routing keepalive packets. + Typically used when keepalive is over a management VRF. + Maps to: "peer-keepalive destination source vrf " + The VRF must exist on the Device referenced by the parent VPC resource. + If omitted, the default VRF is used. + properties: + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + maxLength: 63 + minLength: 1 + type: string + required: + - name + type: object + x-kubernetes-map-type: atomic + required: + - destination + - source + type: object + router: + description: |- + Router enables Layer 3 peer-router functionality on this peer. + Maps to: "layer3 peer-router" when enabled + properties: + enabled: + description: Enabled indicates whether the feature is administratively + enabled (true) or disabled (false). + type: boolean + required: + - enabled + type: object + switch: + description: |- + Switch enables peer-switch functionality on this peer. + When enabled, both vPC peers use the same spanning-tree bridge ID, allowing both + to forward traffic for all VLANs without blocking any ports. + Maps to: "peer-switch" when enabled + properties: + enabled: + description: Enabled indicates whether the feature is administratively + enabled (true) or disabled (false). + type: boolean + required: + - enabled + type: object + required: + - autoRecovery + - gateway + - interfaceAggregateRef + - keepalive + - router + - switch + type: object + rolePriority: + description: |- + RolePriority is the role priority for this vPC domain (1-65535). + The switch with the lower role priority becomes the operational primary. + Maps to: "role priority " + maximum: 65535 + minimum: 1 + type: integer + systemPriority: + description: |- + SystemPriority is the system priority for this vPC domain (1-65535). + Used to ensure that the vPC devices are primary devices on LACP. Must match on both peers. 
+ Maps to: "system-priority " + maximum: 65535 + minimum: 1 + type: integer + required: + - adminState + - delayRestoreSVI + - delayRestoreVPC + - deviceRef + - domainId + - fastConvergence + - peer + - rolePriority + type: object + status: + description: status defines the observed state of VPC + properties: + conditions: + description: |- + Conditions represent the latest available observations of the VPC's state. + Standard conditions include: + - Ready: overall readiness of the vPC domain + - Configured: whether the vPC configuration was successfully applied to the device + - Operational: whether the vPC domain is operationally up (peer-link and keepalive status) + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. 
+ This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + domainId: + description: DomainID is the vPC domain ID as reported by the device. + type: integer + peerUptime: + description: PeerUptime indicates how long the vPC peer has been up + and reachable via keepalive. + type: string + role: + description: |- + Role indicates the current operational role of this vPC peer. 
+                Possible values:
+                - Primary: This switch is the primary vPC peer (lower role priority or elected)
+                - Secondary: This switch is the secondary vPC peer
+                - Unknown: Role has not been established (e.g., peer-link down, domain not formed)
+              type: string
+          type: object
+        required:
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index 782ca184..d58c08d7 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -19,10 +19,11 @@ resources:
 - bases/networking.metal.ironcore.dev_bgp.yaml
 - bases/networking.metal.ironcore.dev_bgppeers.yaml
 - bases/networking.metal.ironcore.dev_ospf.yaml
 - bases/nx.cisco.networking.metal.ironcore.dev_systems.yaml
 - bases/nx.cisco.networking.metal.ironcore.dev_managementaccessconfigs.yaml
 - bases/networking.metal.ironcore.dev_vlans.yaml
 - bases/networking.metal.ironcore.dev_evpninstances.yaml
+- bases/nx.cisco.networking.metal.ironcore.dev_vpcs.yaml
 # +kubebuilder:scaffold:crdkustomizeresource
 
 patches:
diff --git a/config/rbac/cisco/nx/vpc_admin_role.yaml b/config/rbac/cisco/nx/vpc_admin_role.yaml
new file mode 100644
index 00000000..e8310534
--- /dev/null
+++ b/config/rbac/cisco/nx/vpc_admin_role.yaml
@@ -0,0 +1,21 @@
+# This rule is not used by the project network-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants full permissions ('*') over nx.cisco.networking.metal.ironcore.dev.
+# This role is intended for users authorized to modify roles and bindings within the cluster,
+# enabling them to delegate specific permissions to other users or groups as needed.
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/name: network-operator
+    app.kubernetes.io/managed-by: kustomize
+  name: nx.cisco-vpc-admin-role
+rules:
+- apiGroups:
+  - nx.cisco.networking.metal.ironcore.dev
+  resources:
+  - vpcs
+  verbs:
+  - '*'
diff --git a/config/rbac/cisco/nx/vpc_editor_role.yaml b/config/rbac/cisco/nx/vpc_editor_role.yaml
new file mode 100644
index 00000000..303cf9ee
--- /dev/null
+++ b/config/rbac/cisco/nx/vpc_editor_role.yaml
@@ -0,0 +1,27 @@
+# This rule is not used by the project network-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants permissions to create, update, and delete resources within the nx.cisco.networking.metal.ironcore.dev.
+# This role is intended for users who need to manage these resources
+# but should not control RBAC or manage permissions for others.
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/name: network-operator
+    app.kubernetes.io/managed-by: kustomize
+  name: nx.cisco-vpc-editor-role
+rules:
+- apiGroups:
+  - nx.cisco.networking.metal.ironcore.dev
+  resources:
+  - vpcs
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
diff --git a/config/rbac/cisco/nx/vpc_viewer_role.yaml b/config/rbac/cisco/nx/vpc_viewer_role.yaml
new file mode 100644
index 00000000..ba73403e
--- /dev/null
+++ b/config/rbac/cisco/nx/vpc_viewer_role.yaml
@@ -0,0 +1,32 @@
+# This rule is not used by the project network-operator itself.
+# It is provided to allow the cluster admin to help manage permissions for users.
+#
+# Grants read-only access to nx.cisco.networking.metal.ironcore.dev resources.
+# This role is intended for users who need visibility into these resources
+# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing.
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: network-operator + app.kubernetes.io/managed-by: kustomize + name: nx.cisco-vpcs-viewer-role +rules: +- apiGroups: + - nx.cisco.networking.metal.ironcore.dev + resources: + - vpcs + verbs: + - get + - list + - watch +- apiGroups: + - networking.metal.ironcore.dev + resources: + - vrfs + - interfaces + verbs: + - get + - list + - watch diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index db30d741..4dde4c2b 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -22,6 +22,9 @@ resources: # default, aiding admins in cluster management. Those roles are # not used by the network-operator itself. You can comment the following lines # if you do not want those helpers be installed with your Project. +- cisco/nx/vpc_admin_role.yaml +- cisco/nx/vpc_editor_role.yaml +- cisco/nx/vpc_viewer_role.yaml - device_admin_role.yaml - device_editor_role.yaml - device_viewer_role.yaml diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index d12eafe7..5ecf4403 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -12,13 +12,6 @@ rules: - get - list - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - apiGroups: - "" resources: @@ -28,6 +21,14 @@ rules: - list - update - watch +- apiGroups: + - "" + - nx.cisco.networking.metal.ironcore.dev + resources: + - events + verbs: + - create + - patch - apiGroups: - networking.metal.ironcore.dev resources: @@ -120,6 +121,7 @@ rules: - nx.cisco.networking.metal.ironcore.dev resources: - systems + - vpcs verbs: - create - delete @@ -132,12 +134,14 @@ rules: - nx.cisco.networking.metal.ironcore.dev resources: - systems/finalizers + - vpcs/finalizers verbs: - update - apiGroups: - nx.cisco.networking.metal.ironcore.dev resources: - systems/status + - vpcs/status verbs: - get - patch diff --git 
a/config/samples/cisco/nx/v1alpha1_vpc.yaml b/config/samples/cisco/nx/v1alpha1_vpc.yaml new file mode 100644 index 00000000..b677e3f5 --- /dev/null +++ b/config/samples/cisco/nx/v1alpha1_vpc.yaml @@ -0,0 +1,34 @@ +apiVersion: nx.cisco.networking.metal.ironcore.dev/v1alpha1 +kind: VPC +metadata: + labels: + app.kubernetes.io/name: network-operator + app.kubernetes.io/managed-by: kustomize + networking.metal.ironcore.dev/device-name: leaf1 + name: leaf1-vpc +spec: + deviceRef: + name: leaf1 + domainId: 5 + adminState: enabled + rolePriority: 100 + systemPriority: 10 + delayRestoreSVI: 140 + delayRestoreVPC: 150 + peer: + interfaceAggregateRef: + name: po1 + switch: + enabled: true + gateway: + enabled: true + router: + enabled: true + keepalive: + source: 10.1.1.1 + destination: 10.1.1.2 + autoRecovery: + enabled: true + reloadDelay: 360 + fastConvergence: + enabled: true diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 6cbe9356..5efe2a08 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -21,4 +21,5 @@ resources: - cisco/nx/v1alpha1_system.yaml - cisco/nx/v1alpha1_managementaccessconfig.yaml - v1alpha1_evi.yaml +- cisco/nx/v1alpha1_vpc.yaml # +kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/v1alpha1_interface.yaml b/config/samples/v1alpha1_interface.yaml index 633ba14c..138df392 100644 --- a/config/samples/v1alpha1_interface.yaml +++ b/config/samples/v1alpha1_interface.yaml @@ -113,11 +113,75 @@ metadata: app.kubernetes.io/name: network-operator app.kubernetes.io/managed-by: kustomize networking.metal.ironcore.dev/device-name: leaf1 - name: po-10 + name: eth1-30 +spec: + deviceRef: + name: leaf1 + name: eth1/30 + description: vPC Keep-Alive + adminState: Up + type: Physical + mtu: 1500 + ipv4: + addresses: + - 10.1.1.1/30 + vrfRef: + name: vpc-keepalive +--- +apiVersion: networking.metal.ironcore.dev/v1alpha1 +kind: Interface +metadata: + labels: + 
app.kubernetes.io/name: network-operator + app.kubernetes.io/managed-by: kustomize + networking.metal.ironcore.dev/device-name: leaf1 + name: eth1-31 +spec: + deviceRef: + name: leaf1 + name: eth1/31 + description: vPC Peer-Link + adminState: Up + type: Physical + mtu: 1500 + switchport: + mode: Trunk + nativeVlan: 1 + allowedVlans: [10] +--- +apiVersion: networking.metal.ironcore.dev/v1alpha1 +kind: Interface +metadata: + labels: + app.kubernetes.io/name: network-operator + app.kubernetes.io/managed-by: kustomize + networking.metal.ironcore.dev/device-name: leaf1 + name: eth1-32 spec: deviceRef: name: leaf1 - name: po10 + name: eth1/32 + description: vPC Peer-Link + adminState: Up + type: Physical + mtu: 1500 + switchport: + mode: Trunk + nativeVlan: 1 + allowedVlans: [10] +--- +apiVersion: networking.metal.ironcore.dev/v1alpha1 +kind: Interface +metadata: + labels: + app.kubernetes.io/name: network-operator + app.kubernetes.io/managed-by: kustomize + networking.metal.ironcore.dev/device-name: leaf1 + name: po1 +spec: + deviceRef: + name: leaf1 + name: po1 description: vPC to Host1 adminState: Up type: Aggregate @@ -130,9 +194,11 @@ spec: controlProtocol: mode: Active memberInterfaceRefs: - - name: eth1-10 + - name: eth1-31 + - name: eth1-32 multichassis: id: 10 + enabled: true --- apiVersion: networking.metal.ironcore.dev/v1alpha1 kind: Interface @@ -163,16 +229,24 @@ metadata: app.kubernetes.io/name: network-operator app.kubernetes.io/managed-by: kustomize networking.metal.ironcore.dev/device-name: leaf1 - name: eth1-30 + name: po2 spec: deviceRef: name: leaf1 - name: eth1/30 - description: vPC Keepalive + name: po2 + description: vPC KeepAlive adminState: Up - type: Physical - ipv4: - addresses: - - 10.1.1.1/30 - vrfRef: - name: vpc-keepalive + type: Aggregate + mtu: 1500 + switchport: + mode: Trunk + nativeVlan: 1 + allowedVlans: [10] + aggregation: + controlProtocol: + mode: Active + memberInterfaceRefs: + - name: eth1-10 + multichassis: + enabled: true + id: 
1 diff --git a/config/samples/v1alpha1_vrf.yaml b/config/samples/v1alpha1_vrf.yaml index 1c7ace76..3e521634 100644 --- a/config/samples/v1alpha1_vrf.yaml +++ b/config/samples/v1alpha1_vrf.yaml @@ -37,28 +37,7 @@ metadata: app.kubernetes.io/name: network-operator app.kubernetes.io/managed-by: kustomize networking.metal.ironcore.dev/device-name: leaf1 - name: vrf-cc-prod-001 -spec: - deviceRef: - name: leaf1 - name: CC-PROD-001 - routeDistinguisher: 127.0.0.1:30000 - vni: 100 - routeTargets: - - value: 10.1.1.1:200 - addressFamilies: - - IPv4 - - IPv4EVPN - action: Both ---- -apiVersion: networking.metal.ironcore.dev/v1alpha1 -kind: VRF -metadata: - labels: - app.kubernetes.io/name: network-operator - app.kubernetes.io/managed-by: kustomize - networking.metal.ironcore.dev/device-name: leaf1 - name: vpc-keepalive + name: vrf-vpckeepalive spec: deviceRef: name: leaf1 diff --git a/internal/controller/cisco/nx/provider.go b/internal/controller/cisco/nx/provider.go index 0672e340..961064c5 100644 --- a/internal/controller/cisco/nx/provider.go +++ b/internal/controller/cisco/nx/provider.go @@ -7,6 +7,7 @@ import ( "context" nxv1alpha1 "github.com/ironcore-dev/network-operator/api/cisco/nx/v1alpha1" + v1alpha1 "github.com/ironcore-dev/network-operator/api/core/v1alpha1" "github.com/ironcore-dev/network-operator/internal/provider" "github.com/ironcore-dev/network-operator/internal/provider/cisco/nxos" ) @@ -17,6 +18,10 @@ type Provider interface { EnsureSystemSettings(ctx context.Context, s *nxv1alpha1.System) error ResetSystemSettings(ctx context.Context) error + + EnsureVPC(ctx context.Context, vpc *nxv1alpha1.VPC, vrf *v1alpha1.VRF) error + DeleteVPC(context.Context) error + GetStatusVPC(context.Context) (nxos.VPCStatus, error) } var _ Provider = (*nxos.Provider)(nil) diff --git a/internal/controller/cisco/nx/suite_test.go b/internal/controller/cisco/nx/suite_test.go index 11eb9c5c..9760c1f2 100644 --- a/internal/controller/cisco/nx/suite_test.go +++ 
b/internal/controller/cisco/nx/suite_test.go @@ -9,6 +9,7 @@ import ( "path/filepath" "sync" "testing" + "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -27,6 +28,7 @@ import ( "github.com/ironcore-dev/network-operator/api/core/v1alpha1" "github.com/ironcore-dev/network-operator/internal/deviceutil" "github.com/ironcore-dev/network-operator/internal/provider" + "github.com/ironcore-dev/network-operator/internal/provider/cisco/nxos" // +kubebuilder:scaffold:imports ) @@ -105,6 +107,14 @@ var _ = BeforeSuite(func() { }).SetupWithManager(k8sManager) Expect(err).NotTo(HaveOccurred()) + err = (&VPCReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Recorder: recorder, + Provider: prov, + }).SetupWithManager(ctx, k8sManager) + Expect(err).NotTo(HaveOccurred()) + go func() { defer GinkgoRecover() err = k8sManager.Start(ctx) @@ -151,6 +161,7 @@ type MockProvider struct { sync.Mutex Settings *nxv1alpha1.System + VPC *nxv1alpha1.VPC } var _ Provider = (*MockProvider)(nil) @@ -175,3 +186,26 @@ func (p *MockProvider) ResetSystemSettings(ctx context.Context) error { p.Settings = nil return nil } + +func (p *MockProvider) EnsureVPC(_ context.Context, vpc *nxv1alpha1.VPC, _ *v1alpha1.VRF) error { + p.Lock() + defer p.Unlock() + p.VPC = vpc + return nil +} + +func (p *MockProvider) DeleteVPC(_ context.Context) error { + p.Lock() + defer p.Unlock() + p.VPC = nil + return nil +} + +func (p *MockProvider) GetStatusVPC(_ context.Context) (nxos.VPCStatus, error) { + return nxos.VPCStatus{ + KeepAliveStatus: true, + KeepAliveStatusMessage: "operational", + PeerUptime: 3600 * time.Second, + Role: nxv1alpha1.VPCRolePrimary, + }, nil +} diff --git a/internal/controller/cisco/nx/vpc_controller.go b/internal/controller/cisco/nx/vpc_controller.go new file mode 100644 index 00000000..e63ae191 --- /dev/null +++ b/internal/controller/cisco/nx/vpc_controller.go @@ -0,0 +1,486 @@ +// SPDX-FileCopyrightText: 2025 SAP SE or an SAP affiliate company and 
IronCore contributors +// SPDX-License-Identifier: Apache-2.0 + +package nx + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/ironcore-dev/network-operator/internal/conditions" + "github.com/ironcore-dev/network-operator/internal/provider" + + nxv1 "github.com/ironcore-dev/network-operator/api/cisco/nx/v1alpha1" + corev1 "github.com/ironcore-dev/network-operator/api/core/v1alpha1" + controllercore "github.com/ironcore-dev/network-operator/internal/controller/core" + "github.com/ironcore-dev/network-operator/internal/deviceutil" +) + +// VPCReconciler reconciles a VPC object +type VPCReconciler struct { + client.Client + Scheme *runtime.Scheme + + // WatchFilterValue is the label value used to filter events prior to reconciliation. + WatchFilterValue string + + // Recorder is used to record events for the controller. + // More info: https://book.kubebuilder.io/reference/raising-events + Recorder record.EventRecorder + + // Provider is the driver that will be used to create & delete the vPC + Provider provider.ProviderFunc + + // RequeueInterval is the duration after which the controller should requeue the reconciliation, + // regardless of changes. 
+	RequeueInterval time.Duration
+}
+
+// vpcScope holds the different objects that are read and used during the reconcile.
+type vpcScope struct {
+	Device     *corev1.Device
+	VPC        *nxv1.VPC
+	Connection *deviceutil.Connection
+	Provider   Provider
+	// VRF is the VRF referenced in the KeepAlive configuration
+	VRF *corev1.VRF
+}
+
+// +kubebuilder:rbac:groups=nx.cisco.networking.metal.ironcore.dev,resources=vpcs,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=nx.cisco.networking.metal.ironcore.dev,resources=vpcs/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=nx.cisco.networking.metal.ironcore.dev,resources=vpcs/finalizers,verbs=update
+// +kubebuilder:rbac:groups=nx.cisco.networking.metal.ironcore.dev,resources=events,verbs=create;patch
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/reconcile
+func (r *VPCReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
+	log := logf.FromContext(ctx)
+	log.Info("Reconciling resource")
+
+	obj := new(nxv1.VPC)
+	if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
+		if apierrors.IsNotFound(err) {
+			log.Info("VPC resource not found.
Ignoring since object must be deleted.") + return ctrl.Result{}, nil + } + log.Error(err, "Failed to get resource") + return ctrl.Result{}, err + } + + prov, ok := r.Provider().(Provider) + if !ok { + meta.SetStatusCondition(&obj.Status.Conditions, metav1.Condition{ + Type: corev1.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: corev1.NotImplementedReason, + Message: "Provider does not implement provider.VPCProvider", + }) + return ctrl.Result{}, r.Status().Update(ctx, obj) + } + + device, err := deviceutil.GetDeviceByName(ctx, r, obj.Namespace, obj.Spec.DeviceRef.Name) + if err != nil { + return ctrl.Result{}, err + } + + conn, err := deviceutil.GetDeviceConnection(ctx, r, device) + if err != nil { + return ctrl.Result{}, err + } + + s := &vpcScope{ + Device: device, + VPC: obj, + Connection: conn, + Provider: prov, + } + + if !obj.DeletionTimestamp.IsZero() { + if controllerutil.ContainsFinalizer(obj, nxv1.FinalizerName) { + if err := r.finalize(ctx, s); err != nil { + log.Error(err, "Failed to finalize resource") + return ctrl.Result{}, err + } + controllerutil.RemoveFinalizer(obj, nxv1.FinalizerName) + if err := r.Update(ctx, obj); err != nil { + log.Error(err, "Failed to remove finalizer from resource") + return ctrl.Result{}, err + } + } + log.Info("Resource is being deleted, skipping reconciliation") + return ctrl.Result{}, nil + } + + if !controllerutil.ContainsFinalizer(obj, nxv1.FinalizerName) { + controllerutil.AddFinalizer(obj, nxv1.FinalizerName) + if err := r.Update(ctx, obj); err != nil { + log.Error(err, "Failed to add finalizer to resource") + return ctrl.Result{}, err + } + log.Info("Added finalizer to resource") + return ctrl.Result{}, nil + } + + orig := obj.DeepCopy() + if conditions.InitializeConditions(obj, corev1.ReadyCondition) { + log.Info("Initializing status conditions") + return ctrl.Result{}, r.Status().Update(ctx, obj) + } + + // Always attempt to update the metadata/status after reconciliation + defer func() { + if 
!equality.Semantic.DeepEqual(orig.ObjectMeta, obj.ObjectMeta) { + if err := r.Patch(ctx, obj, client.MergeFrom(orig)); err != nil { + log.Error(err, "Failed to update resource metadata") + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + return + } + + if !equality.Semantic.DeepEqual(orig.Status, obj.Status) { + if err := r.Status().Patch(ctx, obj, client.MergeFrom(orig)); err != nil { + log.Error(err, "Failed to update status") + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + } + }() + + err = r.reconcile(ctx, s) + if err != nil { + log.Error(err, "Failed to reconcile resource") + return ctrl.Result{}, err + } + + return ctrl.Result{RequeueAfter: controllercore.Jitter(r.RequeueInterval)}, nil +} + +// reconcile contains the main reconciliation logic for the VPC resource. +func (r *VPCReconciler) reconcile(ctx context.Context, s *vpcScope) (reterr error) { + if s.VPC.Labels == nil { + s.VPC.Labels = make(map[string]string) + } + s.VPC.Labels[corev1.DeviceLabel] = s.Device.Name + + // Ensure owner reference to device + if !controllerutil.HasControllerReference(s.VPC) { + if err := controllerutil.SetOwnerReference(s.Device, s.VPC, r.Scheme, controllerutil.WithBlockOwnerDeletion(true)); err != nil { + return err + } + } + + defer func() { + conditions.RecomputeReady(s.VPC) + }() + + // Validate refs but don't return early, we want to update .VPC.status fields with data from the remote device state + if err := r.validateInterfaceRef(ctx, s); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, fmt.Errorf("vpc: failed to validate peer interface reference: %w", err)}) + } + if err := r.validateVRFRef(ctx, s); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, fmt.Errorf("vpc: failed to validate KeepAlive VRF reference: %w", err)}) + } + + // Connect to remote device + var connErr error + if connErr = s.Provider.Connect(ctx, s.Connection); connErr != nil { + r.resetStatus(ctx, &s.VPC.Status) + return 
kerrors.NewAggregate([]error{reterr, fmt.Errorf("failed to connect to provider: %w", connErr)})
+	}
+	defer func() {
+		if err := s.Provider.Disconnect(ctx, s.Connection); err != nil {
+			reterr = kerrors.NewAggregate([]error{reterr, err})
+		}
+	}()
+
+	// Realize the vPC via the provider and update configuration status
+	err := s.Provider.EnsureVPC(ctx, s.VPC, s.VRF)
+	cond := conditions.FromError(err)
+	conditions.Set(s.VPC, cond)
+	if err != nil {
+		reterr = kerrors.NewAggregate([]error{reterr, fmt.Errorf("vpc: failed to ensure vPC configuration: %w", err)})
+	}
+
+	// Retrieve and update operational status and nil out the status on error to avoid stale state
+	status, err := s.Provider.GetStatusVPC(ctx)
+	if err != nil {
+		r.resetStatus(ctx, &s.VPC.Status)
+		// NOTE: this queries the vPC status, not an interface status; the message must say so.
+		return kerrors.NewAggregate([]error{reterr, fmt.Errorf("failed to get vPC status: %w", err)})
+	}
+
+	if reterr != nil {
+		return reterr
+	}
+
+	cond = metav1.Condition{
+		Type:    corev1.OperationalCondition,
+		Status:  metav1.ConditionTrue,
+		Reason:  corev1.OperationalReason,
+		Message: "vPC is up",
+	}
+	if !status.KeepAliveStatus {
+		cond.Status = metav1.ConditionFalse
+		cond.Reason = corev1.DegradedReason
+		cond.Message = "vPC is down"
+	}
+	if status.KeepAliveStatusMessage != "" {
+		cond.Message = fmt.Sprintf("%s, device returned %q", cond.Message, status.KeepAliveStatusMessage)
+	}
+	conditions.Set(s.VPC, cond)
+
+	return reterr
+}
+
+// validateInterfaceRef validates that the peer's interface reference exists and is of type Aggregate.
+// Must ignore aggregate status: Port-channels require the domain to be configured first.
+func (r *VPCReconciler) validateInterfaceRef(ctx context.Context, s *vpcScope) error { + intf := new(corev1.Interface) + intf.Name = s.VPC.Spec.Peer.InterfaceAggregateRef.Name + intf.Namespace = s.VPC.Namespace + + if err := r.Get(ctx, client.ObjectKey{Name: intf.Name, Namespace: intf.Namespace}, intf); err != nil { + if apierrors.IsNotFound(err) { + conditions.Set(s.VPC, metav1.Condition{ + Type: corev1.ConfiguredCondition, + Status: metav1.ConditionFalse, + Reason: corev1.WaitingForDependenciesReason, + Message: fmt.Sprintf("interface resource '%s' not found in namespace '%s'", intf.Name, intf.Namespace), + }) + return fmt.Errorf("member interface %q not found", intf.Name) + } + return fmt.Errorf("failed to get member interface %q: %w", intf.Name, err) + } + + if intf.Spec.Type != corev1.InterfaceTypeAggregate { + conditions.Set(s.VPC, metav1.Condition{ + Type: corev1.ConfiguredCondition, + Status: metav1.ConditionFalse, + Reason: corev1.InvalidInterfaceTypeReason, + Message: fmt.Sprintf("interface referenced by '%s' must be of type %q", intf.Name, corev1.InterfaceTypeAggregate), + }) + return fmt.Errorf("interface referenced by '%s' must be of type %q", intf.Name, corev1.InterfaceTypeAggregate) + } + + if s.VPC.Spec.DeviceRef.Name != intf.Spec.DeviceRef.Name { + conditions.Set(s.VPC, metav1.Condition{ + Type: corev1.ConfiguredCondition, + Status: metav1.ConditionFalse, + Reason: corev1.CrossDeviceReferenceReason, + Message: fmt.Sprintf("interface '%s' deviceRef '%s' does not match vPC deviceRef '%s'", intf.Name, intf.Spec.DeviceRef.Name, s.VPC.Spec.DeviceRef.Name), + }) + return fmt.Errorf("interface '%s' deviceRef '%s' does not match vPC deviceRef '%s'", intf.Name, intf.Spec.DeviceRef.Name, s.VPC.Spec.DeviceRef.Name) + } + return nil +} + +// validateVRFRef validates the VRF reference in the KeepAlive configuration, and updates the scope accordingly. 
+func (r *VPCReconciler) validateVRFRef(ctx context.Context, s *vpcScope) error { + if s.VPC.Spec.Peer.KeepAlive.VRFRef == nil { + return nil + } + + vrf := new(corev1.VRF) + vrf.Name = s.VPC.Spec.Peer.KeepAlive.VRFRef.Name + vrf.Namespace = s.VPC.Namespace + + if err := r.Get(ctx, client.ObjectKey{Name: vrf.Name, Namespace: vrf.Namespace}, vrf); err != nil { + if apierrors.IsNotFound(err) { + conditions.Set(s.VPC, metav1.Condition{ + Type: corev1.ConfiguredCondition, + Status: metav1.ConditionFalse, + Reason: corev1.WaitingForDependenciesReason, + Message: fmt.Sprintf("VRF resource '%s' not found in namespace '%s'", vrf.Name, vrf.Namespace), + }) + return fmt.Errorf("VRF %q not found", vrf.Name) + } + return fmt.Errorf("failed to get VRF %q: %w", vrf.Name, err) + } + + if s.VPC.Spec.DeviceRef.Name != vrf.Spec.DeviceRef.Name { + conditions.Set(s.VPC, metav1.Condition{ + Type: corev1.ConfiguredCondition, + Status: metav1.ConditionFalse, + Reason: corev1.CrossDeviceReferenceReason, + Message: fmt.Sprintf("VRF '%s' deviceRef '%s' does not match VPC deviceRef '%s'", vrf.Name, vrf.Spec.DeviceRef.Name, s.VPC.Spec.DeviceRef.Name), + }) + return fmt.Errorf("VRF '%s' deviceRef '%s' does not match VPC deviceRef '%s'", vrf.Name, vrf.Spec.DeviceRef.Name, s.VPC.Spec.DeviceRef.Name) + } + + s.VRF = vrf + return nil +} + +func (r *VPCReconciler) resetStatus(_ context.Context, s *nxv1.VPCStatus) { + s.Role = nxv1.VPCRoleUnknown + s.PeerUptime = metav1.Duration{Duration: 0} +} + +// SetupWithManager sets up the controller with the Manager. 
+func (r *VPCReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + labelSelector := metav1.LabelSelector{} + if r.WatchFilterValue != "" { + labelSelector.MatchLabels = map[string]string{nxv1.WatchLabel: r.WatchFilterValue} + } + + filter, err := predicate.LabelSelectorPredicate(labelSelector) + if err != nil { + return fmt.Errorf("failed to create label selector predicate: %w", err) + } + + // Note: interface type indexer already defined in a different controller + + // Index vPCs by their peer interface reference + if err := mgr.GetFieldIndexer().IndexField(ctx, &nxv1.VPC{}, ".spec.peer.interfaceAggregateRef.name", func(obj client.Object) []string { + vpc := obj.(*nxv1.VPC) + return []string{vpc.Spec.Peer.InterfaceAggregateRef.Name} + }); err != nil { + return err + } + + // Index vPCs by their device reference + if err := mgr.GetFieldIndexer().IndexField(ctx, &nxv1.VPC{}, ".spec.deviceRef.name", func(obj client.Object) []string { + vpc := obj.(*nxv1.VPC) + return []string{vpc.Spec.DeviceRef.Name} + }); err != nil { + return err + } + + return ctrl.NewControllerManagedBy(mgr). + For(&nxv1.VPC{}). + Named("vpc"). + WithEventFilter(filter). 
+ // Trigger reconciliation also for updates, e.g., if port-channel goes down + Watches( + &corev1.Interface{}, + handler.EnqueueRequestsFromMapFunc(r.mapAggregateToVPC), + builder.WithPredicates(predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + // Only trigger for Aggregate type + iface := e.Object.(*corev1.Interface) + return iface.Spec.Type == corev1.InterfaceTypeAggregate + }, + DeleteFunc: func(e event.DeleteEvent) bool { + iface := e.Object.(*corev1.Interface) + return iface.Spec.Type == corev1.InterfaceTypeAggregate + }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldIface := e.ObjectOld.(*corev1.Interface) + newIface := e.ObjectNew.(*corev1.Interface) + return newIface.Spec.Type == corev1.InterfaceTypeAggregate && + !equality.Semantic.DeepEqual(oldIface.Status, newIface.Status) + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + }), + ). + // Trigger reconciliation if the referenced VRF name changes + Watches( + &corev1.VRF{}, + handler.EnqueueRequestsFromMapFunc(r.mapVRFToVPC), + builder.WithPredicates(predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return true + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return true + }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldVRF := e.ObjectOld.(*corev1.VRF) + newVRF := e.ObjectNew.(*corev1.VRF) + return oldVRF.Spec.Name != newVRF.Spec.Name || + !equality.Semantic.DeepEqual(oldVRF.Status, newVRF.Status) + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + }), + ). 
+ Complete(r) +} + +func (r *VPCReconciler) mapAggregateToVPC(ctx context.Context, obj client.Object) []ctrl.Request { + iface, ok := obj.(*corev1.Interface) + if !ok { + panic(fmt.Sprintf("Expected a Interface but got a %T", obj)) + } + + vpc := new(nxv1.VPC) + var vpcs nxv1.VPCList + if err := r.List(ctx, &vpcs, + client.InNamespace(vpc.Namespace), + client.MatchingFields{ + ".spec.peer.interfaceAggregateRef.name": iface.Name, + ".spec.deviceRef.name": iface.Spec.DeviceRef.Name, + }, + ); err != nil { + return nil + } + + requests := make([]reconcile.Request, 0, len(vpcs.Items)) + for i := range vpcs.Items { + requests = append(requests, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(&vpcs.Items[i]), + }) + } + return requests +} + +func (r *VPCReconciler) mapVRFToVPC(ctx context.Context, obj client.Object) []ctrl.Request { + vrf, ok := obj.(*corev1.VRF) + if !ok { + panic(fmt.Sprintf("Expected a VRF but got a %T", obj)) + } + + vpc := new(nxv1.VPC) + var vpcs nxv1.VPCList + if err := r.List(ctx, &vpcs, + client.InNamespace(vpc.Namespace), + client.MatchingFields{ + ".spec.peer.keepAlive.vrfRef.name": vrf.Name, + ".spec.deviceRef.name": vrf.Spec.DeviceRef.Name, + }, + ); err != nil { + return nil + } + + requests := make([]reconcile.Request, 0, len(vpcs.Items)) + for i := range vpcs.Items { + requests = append(requests, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(&vpcs.Items[i]), + }) + } + return requests +} + +func (r *VPCReconciler) finalize(ctx context.Context, s *vpcScope) (reterr error) { + if err := s.Provider.Connect(ctx, s.Connection); err != nil { + return fmt.Errorf("failed to connect to provider: %w", err) + } + defer func() { + if err := s.Provider.Disconnect(ctx, s.Connection); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + }() + return s.Provider.DeleteVPC(ctx) +} diff --git a/internal/controller/cisco/nx/vpc_controller_test.go b/internal/controller/cisco/nx/vpc_controller_test.go new 
file mode 100644 index 00000000..0e9283f2 --- /dev/null +++ b/internal/controller/cisco/nx/vpc_controller_test.go @@ -0,0 +1,217 @@ +// SPDX-FileCopyrightText: 2025 SAP SE or an SAP affiliate company and IronCore contributors +// SPDX-License-Identifier: Apache-2.0 + +package nx + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + nxv1 "github.com/ironcore-dev/network-operator/api/cisco/nx/v1alpha1" + corev1 "github.com/ironcore-dev/network-operator/api/core/v1alpha1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("VPC Controller", func() { + Context("When reconciling a resource", func() { + const ( + vpcName = "vpc1" + deviceName = "leaf1" + poName = "po1" + vrfName = "vrf1" + physName = "eth1-1" + ) + var ( + deviceKey = client.ObjectKey{Name: deviceName, Namespace: metav1.NamespaceDefault} + vpcKey = client.ObjectKey{Name: vpcName, Namespace: metav1.NamespaceDefault} + poKey = client.ObjectKey{Name: poName, Namespace: metav1.NamespaceDefault} + vrfKey = client.ObjectKey{Name: vrfName, Namespace: metav1.NamespaceDefault} + physKey = client.ObjectKey{Name: physName, Namespace: metav1.NamespaceDefault} + ) + + BeforeEach(func() { + By("Creating the custom resource for the Kind Device") + device := &corev1.Device{} + if err := k8sClient.Get(ctx, deviceKey, device); errors.IsNotFound(err) { + resource := &corev1.Device{ + ObjectMeta: metav1.ObjectMeta{ + Name: deviceName, + Namespace: metav1.NamespaceDefault, + }, + Spec: corev1.DeviceSpec{ + Endpoint: corev1.Endpoint{ + Address: "192.168.10.2:9339", + }, + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + + By("Creating the custom resource for the Kind Interface (Physical)") + phyIf := &corev1.Interface{} + if err := k8sClient.Get(ctx, physKey, phyIf); errors.IsNotFound(err) { + resource := 
&corev1.Interface{ + ObjectMeta: metav1.ObjectMeta{ + Name: physName, + Namespace: metav1.NamespaceDefault, + }, + Spec: corev1.InterfaceSpec{ + DeviceRef: corev1.LocalObjectReference{Name: deviceName}, + Name: physName, + Type: corev1.InterfaceTypePhysical, + AdminState: "Up", + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + + By("Creating the custom resource for the Kind Interface (Aggregate)") + aggIf := &corev1.Interface{} + if err := k8sClient.Get(ctx, poKey, aggIf); errors.IsNotFound(err) { + resource := &corev1.Interface{ + ObjectMeta: metav1.ObjectMeta{ + Name: poName, + Namespace: metav1.NamespaceDefault, + }, + Spec: corev1.InterfaceSpec{ + DeviceRef: corev1.LocalObjectReference{Name: deviceName}, + Name: poName, + Type: corev1.InterfaceTypeAggregate, + AdminState: "Up", + Aggregation: &corev1.Aggregation{ + ControlProtocol: corev1.ControlProtocol{Mode: corev1.LACPModeActive}, + MemberInterfaceRefs: []corev1.LocalObjectReference{ + {Name: physName}, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + + By("Creating the custom resource for the Kind VRF") + vrf := &corev1.VRF{} + if err := k8sClient.Get(ctx, vrfKey, vrf); errors.IsNotFound(err) { + resource := &corev1.VRF{ + ObjectMeta: metav1.ObjectMeta{ + Name: vrfName, + Namespace: metav1.NamespaceDefault, + }, + Spec: corev1.VRFSpec{ + DeviceRef: corev1.LocalObjectReference{Name: deviceName}, + Name: vrfName, + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + + By("Creating the custom resource for the Kind VPC") + vpc := &nxv1.VPC{} + if err := k8sClient.Get(ctx, vpcKey, vpc); errors.IsNotFound(err) { + resource := &nxv1.VPC{ + ObjectMeta: metav1.ObjectMeta{ + Name: vpcName, + Namespace: metav1.NamespaceDefault, + }, + Spec: nxv1.VPCSpec{ + DeviceRef: corev1.LocalObjectReference{Name: deviceName}, + DomainID: 2, + RolePriority: 100, + SystemPriority: 10, + DelayRestoreSVI: 140, + DelayRestoreVPC: 150, + Peer: nxv1.Peer{ + 
InterfaceAggregateRef: corev1.LocalObjectReference{Name: poName}, + Switch: nxv1.AdminSt{Enabled: true}, + Gateway: nxv1.AdminSt{Enabled: true}, + KeepAlive: nxv1.KeepAlive{ + Source: "10.114.235.155", + Destination: "10.114.235.156", + VRFRef: &corev1.LocalObjectReference{Name: vrfName}, + }, + AutoRecovery: nxv1.AutoRecovery{ + Enabled: true, + ReloadDelay: 360, + }, + }, + FastConvergence: nxv1.AdminSt{Enabled: true}, + AdminState: "enabled", + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + var resource client.Object = &nxv1.VPC{} + err := k8sClient.Get(ctx, vpcKey, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance VPC") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + + resource = &corev1.Device{} + err = k8sClient.Get(ctx, deviceKey, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance Device") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + + By("Ensuring the resource is deleted from the provider") + Eventually(func(g Gomega) { + g.Expect(testProvider.VPC).To(BeNil(), "Provider VPC should be nil") + }).Should(Succeed()) + }) + + It("Should successfully reconcile the resource", func() { + By("Adding a finalizer to the resource") + Eventually(func(g Gomega) { + resource := &nxv1.VPC{} + g.Expect(k8sClient.Get(ctx, vpcKey, resource)).To(Succeed()) + g.Expect(controllerutil.ContainsFinalizer(resource, nxv1.FinalizerName)).To(BeTrue()) + }).Should(Succeed()) + + By("Adding the device label to the resource") + Eventually(func(g Gomega) { + resource := &nxv1.VPC{} + g.Expect(k8sClient.Get(ctx, vpcKey, resource)).To(Succeed()) + g.Expect(resource.Labels).To(HaveKeyWithValue(corev1.DeviceLabel, deviceName)) + }).Should(Succeed()) + + By("Adding the device as a owner reference") + Eventually(func(g Gomega) { + resource := &nxv1.VPC{} + g.Expect(k8sClient.Get(ctx, vpcKey, resource)).To(Succeed()) + 
g.Expect(resource.OwnerReferences).To(HaveLen(1)) + g.Expect(resource.OwnerReferences[0].Kind).To(Equal("Device")) + g.Expect(resource.OwnerReferences[0].Name).To(Equal(deviceName)) + }).Should(Succeed()) + + By("Updating the resource status") + Eventually(func(g Gomega) { + resource := &nxv1.VPC{} + g.Expect(k8sClient.Get(ctx, vpcKey, resource)).To(Succeed()) + g.Expect(resource.Status.Conditions).To(HaveLen(3)) + g.Expect(resource.Status.Conditions[0].Type).To(Equal(corev1.ReadyCondition)) + g.Expect(resource.Status.Conditions[0].Status).To(Equal(metav1.ConditionTrue)) + g.Expect(resource.Status.Conditions[1].Type).To(Equal(corev1.ConfiguredCondition)) + g.Expect(resource.Status.Conditions[1].Status).To(Equal(metav1.ConditionTrue)) + g.Expect(resource.Status.Conditions[2].Type).To(Equal(corev1.OperationalCondition)) + g.Expect(resource.Status.Conditions[2].Status).To(Equal(metav1.ConditionTrue)) + + }).Should(Succeed()) + + By("Ensuring the resource is created in the provider") + Eventually(func(g Gomega) { + g.Expect(testProvider.VPC).ToNot(BeNil(), "Provider VPC should not be nil") + if testProvider.VPC != nil { + g.Expect(testProvider.VPC.Spec.DomainID).To(Equal(uint16(2))) + } + }).Should(Succeed()) + }) + }) +}) diff --git a/internal/controller/core/interface_controller.go b/internal/controller/core/interface_controller.go index d4852384..e23e87e8 100644 --- a/internal/controller/core/interface_controller.go +++ b/internal/controller/core/interface_controller.go @@ -422,6 +422,9 @@ func (r *InterfaceReconciler) reconcile(ctx context.Context, s *scope) (_ ctrl.R cond.Reason = v1alpha1.DegradedReason cond.Message = "Interface is operationally down" } + if status.OperMessage != "" { + cond.Message = fmt.Sprintf("Device returned %q", status.OperMessage) + } conditions.Set(s.Interface, cond) return ctrl.Result{RequeueAfter: Jitter(r.RequeueInterval)}, nil diff --git a/internal/controller/core/suite_test.go b/internal/controller/core/suite_test.go index 
9835d185..9006b690 100644 --- a/internal/controller/core/suite_test.go +++ b/internal/controller/core/suite_test.go @@ -46,7 +46,7 @@ var ( func TestControllers(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Controller Suite") + RunSpecs(t, "Core Controller Suite") } var _ = BeforeSuite(func() { diff --git a/internal/provider/cisco/gnmiext/v2/client.go b/internal/provider/cisco/gnmiext/v2/client.go index 2f8f8afe..80daf05b 100644 --- a/internal/provider/cisco/gnmiext/v2/client.go +++ b/internal/provider/cisco/gnmiext/v2/client.go @@ -112,8 +112,12 @@ func WithLogger(logger logr.Logger) Option { } } -// ErrNil indicates that the value for a xpath is not defined. -var ErrNil = errors.New("gnmiext: nil") +var ( + // ErrNil indicates that the value for a xpath is not defined. + ErrNil = errors.New("gnmiext: nil") + // ErrInterfaceNotFound indicates that the requested interface does not exist on the device. + ErrInterfaceNotFound = errors.New("gnmiext: The interface does not exist") +) // GetConfig retrieves config and unmarshals it into the provided targets. // If some of the values for the given xpaths are not defined, [ErrNil] is returned. 
diff --git a/internal/provider/cisco/nxos/intf.go b/internal/provider/cisco/nxos/intf.go index b73781ad..89d84b90 100644 --- a/internal/provider/cisco/nxos/intf.go +++ b/internal/provider/cisco/nxos/intf.go @@ -185,8 +185,9 @@ func (p *PortChannel) XPath() string { } type PortChannelOperItems struct { - ID string `json:"-"` - OperSt OperSt `json:"operSt"` + ID string `json:"-"` + OperSt OperSt `json:"operSt"` + OperStQual string `json:"operStQual,omitempty"` } func (p *PortChannelOperItems) XPath() string { diff --git a/internal/provider/cisco/nxos/provider.go b/internal/provider/cisco/nxos/provider.go index d2822575..85d1ab84 100644 --- a/internal/provider/cisco/nxos/provider.go +++ b/internal/provider/cisco/nxos/provider.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "math" + "net" "net/netip" "reflect" "slices" @@ -743,6 +744,8 @@ func (p *Provider) EnsureInterface(ctx context.Context, req *provider.EnsureInte } } + conf = append(conf, pc) + if req.MultiChassisID != nil { v := new(VPCIf) v.ID = int(*req.MultiChassisID) @@ -750,8 +753,6 @@ func (p *Provider) EnsureInterface(ctx context.Context, req *provider.EnsureInte conf = append(conf, v) } - conf = append(conf, pc) - case v1alpha1.InterfaceTypeRoutedVLAN: f := new(Feature) f.Name = "ifvlan" @@ -783,7 +784,6 @@ func (p *Provider) EnsureInterface(ctx context.Context, req *provider.EnsureInte if addr != nil { conf = append(conf, addr) } - return p.client.Update(ctx, conf...) 
} @@ -852,7 +852,10 @@ func (p *Provider) GetInterfaceStatus(ctx context.Context, req *provider.Interfa return provider.InterfaceStatus{}, err } - var operSt OperSt + var ( + operSt OperSt + operMsg string + ) switch req.Interface.Spec.Type { case v1alpha1.InterfaceTypePhysical: phys := new(PhysIfOperItems) @@ -877,6 +880,7 @@ func (p *Provider) GetInterfaceStatus(ctx context.Context, req *provider.Interfa return provider.InterfaceStatus{}, err } operSt = pc.OperSt + operMsg = pc.OperStQual case v1alpha1.InterfaceTypeRoutedVLAN: svi := new(SwitchVirtualInterfaceOperItems) @@ -891,7 +895,8 @@ func (p *Provider) GetInterfaceStatus(ctx context.Context, req *provider.Interfa } return provider.InterfaceStatus{ - OperStatus: operSt == OperStUp, + OperStatus: operSt == OperStUp, + OperMessage: operMsg, }, nil } @@ -1952,6 +1957,148 @@ func (p *Provider) ResetSystemSettings(ctx context.Context) error { ) } +// VPCStatus represents the operational status of a vPC configuration on the device. +type VPCStatus struct { + // KeepAliveStatus indicates whether the keepalive link is operationally up (true) or down (false). + KeepAliveStatus bool + // KeepAliveStatusMessage provides additional human-readable information returned by the device + KeepAliveStatusMessage string + // Role represents the role of the vPC peer. + Role nxv1alpha1.VPCRole + // PeerUptime indicates the uptime of the vPC peer link in human-readable format provided by Cisco. + PeerUptime time.Duration +} + +// EnsureVPC applies the vPC configuration on the device. It also ensures that the vPC feature +// is enabled on the device. If vrf is not nil, the keep-alive link will be configured within the specified VRF. +// The port-channel interface required to establish a vPC are managed by the Interface controller and are not +// configured here. The port-channel status must not be made a dependency here, as they require the vPC domain +// to be configured first (which is realized here). 
+func (p *Provider) EnsureVPC(ctx context.Context, vpc *nxv1alpha1.VPC, vrf *v1alpha1.VRF) (reterr error) {
+	f := new(Feature)
+	f.Name = "vpc"
+	f.AdminSt = AdminStEnabled
+
+	v := new(VPC)
+	v.Id = vpc.Spec.DomainID
+
+	switch vpc.Spec.AdminState {
+	case "enabled":
+		v.AdminSt = AdminStEnabled
+	case "disabled":
+		v.AdminSt = AdminStDisabled
+	default:
+		// Reject unknown admin states; previously the error return sat inside the
+		// "disabled" case (always failing a valid state) while invalid states passed silently.
+		return fmt.Errorf("nxos-vpc: invalid admin state %q", vpc.Spec.AdminState)
+	}
+
+	if vpc.Spec.RolePriority > 0 {
+		v.RolePrio = vpc.Spec.RolePriority
+	}
+
+	if vpc.Spec.SystemPriority > 0 {
+		v.SysPrio = vpc.Spec.SystemPriority
+	}
+
+	if vpc.Spec.DelayRestoreSVI > 0 {
+		v.DelayRestoreSVI = vpc.Spec.DelayRestoreSVI
+	}
+
+	if vpc.Spec.DelayRestoreVPC > 0 {
+		v.DelayRestoreVPC = vpc.Spec.DelayRestoreVPC
+	}
+
+	v.FastConvergence = AdminStDisabled
+	if vpc.Spec.FastConvergence.Enabled {
+		v.FastConvergence = AdminStEnabled
+	}
+
+	peer := vpc.Spec.Peer
+
+	v.PeerSwitch = AdminStDisabled
+	if peer.Switch.Enabled {
+		v.PeerSwitch = AdminStEnabled
+	}
+
+	v.PeerGateway = AdminStDisabled
+	if peer.Gateway.Enabled {
+		v.PeerGateway = AdminStEnabled
+	}
+
+	v.L3PeerRouter = AdminStDisabled
+	if peer.Router.Enabled {
+		v.L3PeerRouter = AdminStEnabled
+	}
+
+	v.AutoRecovery = AdminStDisabled
+	if peer.AutoRecovery.Enabled {
+		v.AutoRecovery = AdminStEnabled
+		v.AutoRecoveryReloadDelay = peer.AutoRecovery.ReloadDelay
+	}
+
+	ipaddr := net.ParseIP(peer.KeepAlive.Destination)
+	if ipaddr == nil {
+		return fmt.Errorf("nxos-vpc: invalid keep-alive destination IP address %q", peer.KeepAlive.Destination)
+	}
+	v.KeepAliveItems.DestIP = peer.KeepAlive.Destination
+
+	ipaddr = net.ParseIP(peer.KeepAlive.Source)
+	if ipaddr == nil {
+		return fmt.Errorf("nxos-vpc: invalid keep-alive source IP address %q", peer.KeepAlive.Source)
+	}
+	v.KeepAliveItems.SrcIP = peer.KeepAlive.Source
+
+	if vrf != nil {
+		v.KeepAliveItems.VRF = vrf.Spec.Name
+	}
+	// Patch operation required: Interface controller adds port-channels into a container
within the vPC XPath. + return p.client.Patch(ctx, f, v) +} + +func (p *Provider) DeleteVPC(ctx context.Context) error { + v := new(VPC) + return p.client.Delete(ctx, v) +} + +// GetStatusVPC retrieves the current status of the vPC configuration on the device. +func (p *Provider) GetStatusVPC(ctx context.Context) (VPCStatus, error) { + vpcOper := new(VPCOper) + if err := p.client.GetState(ctx, vpcOper); err != nil && !errors.Is(err, gnmiext.ErrNil) { + return VPCStatus{}, err + } + + vpcSt := VPCStatus{} + vpcSt.KeepAliveStatusMessage = vpcOper.KeepAliveItems.OperSt + vpcSt.KeepAliveStatus = false + + // Cisco returns a string composed of values coming from a bitmask, which values are: + // https://pubhub.devnetcloud.com/media/dme-docs-10-4-3/docs/System/vpc%3AKeepalive/ + // We only consider the "operational" value to indicate that the keep-alive link is up, any other + // combination indicates that the link is down or in an error state. This assumption might need to + // be revisited. 
+ if vpcOper.KeepAliveItems.OperSt == "operational" { + vpcSt.KeepAliveStatus = true + } + + if uptime, err := parsePeerUptime(vpcOper.KeepAliveItems.PeerUpTime); err == nil { + vpcSt.PeerUptime = uptime + } + + switch vpcOper.Role { + case vpcRolePrimary: + vpcSt.Role = nxv1alpha1.VPCRolePrimary + case vpcRoleSecondary: + vpcSt.Role = nxv1alpha1.VPCRoleSecondary + case vpcRolePrimaryOperationalSecondary: + vpcSt.Role = nxv1alpha1.VPCRolePrimaryOperationalSecondary + case vpcRoleSecondaryOperationalPrimary: + vpcSt.Role = nxv1alpha1.VPCRoleSecondaryOperationalPrimary + default: + vpcSt.Role = nxv1alpha1.VPCRoleUnknown + } + + return vpcSt, nil +} + func init() { provider.Register("cisco-nxos-gnmi", NewProvider) } diff --git a/internal/provider/cisco/nxos/testdata/vpc.json b/internal/provider/cisco/nxos/testdata/vpc.json new file mode 100644 index 00000000..8ea47c09 --- /dev/null +++ b/internal/provider/cisco/nxos/testdata/vpc.json @@ -0,0 +1,25 @@ +{ + "vpc-items": { + "inst-items": { + "dom-items": { + "adminSt": "enabled", + "autoRecovery": "enabled", + "autoRecoveryIntvl": 360, + "delayRestoreSVI": 45, + "delayRestoreVPC": 150, + "fastConvergence": "enabled", + "id": 2, + "l3PeerRouter": "enabled", + "peerGw": "enabled", + "peerSwitch": "enabled", + "rolePrio": 100, + "sysPrio": 10, + "keepalive-items": { + "destIp": "10.114.235.156", + "srcIp": "10.114.235.155", + "vrf": "management" + } + } + } + } +} diff --git a/internal/provider/cisco/nxos/testdata/vpc.json.txt b/internal/provider/cisco/nxos/testdata/vpc.json.txt new file mode 100644 index 00000000..3bbb4ff8 --- /dev/null +++ b/internal/provider/cisco/nxos/testdata/vpc.json.txt @@ -0,0 +1,11 @@ +vpc domain 2 + peer-switch + role priority 100 + system-priority 10 + peer-keepalive destination 10.114.235.156 source 10.114.235.155 + delay restore 150 + peer-gateway + layer3 peer-router + auto-recovery reload-delay 360 + delay restore interface-vlan 45 + fast-convergence diff --git 
a/internal/provider/cisco/nxos/vpc.go b/internal/provider/cisco/nxos/vpc.go index ce5f4749..077a17a1 100644 --- a/internal/provider/cisco/nxos/vpc.go +++ b/internal/provider/cisco/nxos/vpc.go @@ -1,14 +1,48 @@ // SPDX-FileCopyrightText: 2025 SAP SE or an SAP affiliate company and IronCore contributors // SPDX-License-Identifier: Apache-2.0 - package nxos import ( + "fmt" + "regexp" "strconv" + "time" "github.com/ironcore-dev/network-operator/internal/provider/cisco/gnmiext/v2" ) +var ( + _ gnmiext.Configurable = (*VPC)(nil) +) + +// VPC represents a virtual Port Channel (vPC) +type VPC struct { + AdminSt AdminSt `json:"adminSt,omitempty"` + AutoRecovery AdminSt `json:"autoRecovery,omitempty"` + // AutoRecoveryReloadDelay is the time to wait before assuming peer dead and restoring vpcs + AutoRecoveryReloadDelay uint32 `json:"autoRecoveryIntvl,omitempty"` + // DelayRestoreSVI is the delay in bringing up the interface-vlan + DelayRestoreSVI uint16 `json:"delayRestoreSVI,omitempty"` + // DelayRestoreVPC is the delay in bringing up the vPC links after restoring the peer-link + DelayRestoreVPC uint16 `json:"delayRestoreVPC,omitempty"` + FastConvergence AdminSt `json:"fastConvergence,omitempty"` + Id uint16 `json:"id"` + L3PeerRouter AdminSt `json:"l3PeerRouter,omitempty"` + PeerGateway AdminSt `json:"peerGw,omitempty"` + PeerSwitch AdminSt `json:"peerSwitch,omitempty"` + RolePrio uint16 `json:"rolePrio,omitempty"` + SysPrio uint16 `json:"sysPrio,omitempty"` + KeepAliveItems struct { + DestIP string `json:"destIp,omitempty"` + SrcIP string `json:"srcIp,omitempty"` + VRF string `json:"vrf,omitempty"` + } `json:"keepalive-items,omitzero"` +} + +func (v *VPC) XPath() string { + return "System/vpc-items/inst-items/dom-items" +} + var _ gnmiext.Configurable = (*VPCIf)(nil) type VPCIf struct { @@ -44,3 +78,43 @@ func (v *VPCIfItems) GetListItemByInterface(name string) *VPCIf { } return nil } + +// VPCOper represents the operational status of a vPC domain +type VPCOper struct { 
+ KeepAliveItems struct { + OperSt string `json:"operSt,omitempty"` + PeerUpTime string `json:"peerUpTime,omitempty"` + } `json:"keepalive-items,omitzero"` + Role VPCRole `json:"summOperRole,omitempty"` +} + +func (*VPCOper) XPath() string { + return "System/vpc-items/inst-items/dom-items" +} + +// VPCRole represents the role of a vPC peer. The value `election-not-done` +// will be ignored and mapped to `unknown` role. +type VPCRole string + +const ( + vpcRolePrimary VPCRole = "cfg-master-oper-master" + vpcRolePrimaryOperationalSecondary VPCRole = "cfg-master-oper-slave" + vpcRoleSecondary VPCRole = "cfg-slave-oper-slave" + vpcRoleSecondaryOperationalPrimary VPCRole = "cfg-slave-oper-master" +) + +// parsePeerUptime parses the peerUpTime string returned by the device +// Assumes the format is "() seconds", e.g., "(3600) seconds". +// Ignores trailing information, i.e., milliseconds. +func parsePeerUptime(s string) (time.Duration, error) { + re := regexp.MustCompile(`^\((\d+)\)\s*seconds`) + m := re.FindStringSubmatch(s) + if len(m) != 2 { + return 0, fmt.Errorf("invalid peerUpTime format: %s", s) + } + seconds, err := strconv.ParseInt(m[1], 10, 64) + if err != nil { + return 0, err + } + return time.Duration(seconds) * time.Second, nil +} diff --git a/internal/provider/cisco/nxos/vpc_test.go b/internal/provider/cisco/nxos/vpc_test.go index d9a0d263..69d4b3bc 100644 --- a/internal/provider/cisco/nxos/vpc_test.go +++ b/internal/provider/cisco/nxos/vpc_test.go @@ -7,4 +7,23 @@ func init() { v := &VPCIf{ID: 10} v.SetPortChannel("po10") Register("vpc_member", v) + + vpc := &VPC{ + AdminSt: AdminStEnabled, + AutoRecovery: AdminStEnabled, + AutoRecoveryReloadDelay: 360, + DelayRestoreSVI: 45, + DelayRestoreVPC: 150, + FastConvergence: AdminStEnabled, + Id: 2, + L3PeerRouter: AdminStEnabled, + PeerGateway: AdminStEnabled, + PeerSwitch: AdminStEnabled, + RolePrio: 100, + SysPrio: 10, + } + vpc.KeepAliveItems.DestIP = "10.114.235.156" + vpc.KeepAliveItems.SrcIP = 
"10.114.235.155" + vpc.KeepAliveItems.VRF = "management" + Register("vpc", vpc) } diff --git a/internal/provider/provider.go b/internal/provider/provider.go index d0c59822..3985bf96 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -112,6 +112,8 @@ func (IPv4Unnumbered) isIPv4() {} type InterfaceStatus struct { // OperStatus indicates whether the interface is operationally up (true) or down (false). OperStatus bool + // OperMessage provides additional information about the operational status of the interface. + OperMessage string } // BannerProvider is the interface for the realization of the Banner objects over different providers. From db7cc45353fcc78398932392ec07172120249aa5 Mon Sep 17 00:00:00 2001 From: Pujol Date: Sat, 29 Nov 2025 18:11:36 +0100 Subject: [PATCH 2/3] fix: VRF webhook validation path --- api/core/v1alpha1/vrf_types.go | 2 +- config/webhook/service.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/api/core/v1alpha1/vrf_types.go b/api/core/v1alpha1/vrf_types.go index 08c5741c..f91524d6 100644 --- a/api/core/v1alpha1/vrf_types.go +++ b/api/core/v1alpha1/vrf_types.go @@ -118,7 +118,7 @@ type VRFStatus struct { // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // VRF is the Schema for the vrfs API -// +kubebuilder:webhook:path=/validate-networking.metal.ironcore.dev-v1alpha1-vrf,mutating=false,failurePolicy=Fail,sideEffects=None,groups=networking.metal.ironcore.dev,resources=vrfs,verbs=create;update,versions=v1alpha1,name=vvrf.kb.io,admissionReviewVersions=v1 +// +kubebuilder:webhook:path=/validate-networking-metal-ironcore-dev-v1alpha1-vrf,mutating=false,failurePolicy=Fail,sideEffects=None,groups=networking.metal.ironcore.dev,resources=vrfs,verbs=create;update,versions=v1alpha1,name=vvrf.kb.io,admissionReviewVersions=v1 type VRF struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` diff --git 
a/config/webhook/service.yaml b/config/webhook/service.yaml index 91dd42fc..c9c9257d 100644 --- a/config/webhook/service.yaml +++ b/config/webhook/service.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/name: network-operator app.kubernetes.io/managed-by: kustomize name: webhook-service - namespace: system + namespace: network-operator-system spec: ports: - port: 443 From a3d7d0cfa2ea4db014df4710ceb3130687d04776 Mon Sep 17 00:00:00 2001 From: Pujol Date: Mon, 1 Dec 2025 10:06:33 +0100 Subject: [PATCH 3/3] wip --- Tiltfile | 50 ++++++++++------------ config/samples/cisco/nx/v1alpha1_vpc.yaml | 4 +- config/samples/v1alpha1_interface.yaml | 51 +++++++++++------------ 3 files changed, 50 insertions(+), 55 deletions(-) diff --git a/Tiltfile b/Tiltfile index f887cb95..0013498b 100644 --- a/Tiltfile +++ b/Tiltfile @@ -39,76 +39,72 @@ def device_yaml(): return encode_yaml_stream(decoded) k8s_yaml(device_yaml()) -k8s_resource(new_name='leaf1', objects=['leaf1:device', 'secret-basic-auth:secret'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=True) +k8s_resource(new_name='leaf1', objects=['leaf1:device', 'secret-basic-auth:secret'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_interface.yaml') -# k8s_resource(new_name='lo0', objects=['lo0:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) -# k8s_resource(new_name='lo1', objects=['lo1:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) -# k8s_resource(new_name='eth1-1', objects=['eth1-1:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) -# k8s_resource(new_name='eth1-2', objects=['eth1-2:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='lo0', objects=['lo0:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='lo1', objects=['lo1:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='eth1-1', objects=['eth1-1:interface'], 
trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='eth1-2', objects=['eth1-2:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_resource(new_name='eth1-10', objects=['eth1-10:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_resource(new_name='eth1-30', objects=['eth1-30:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_resource(new_name='eth1-31', objects=['eth1-31:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_resource(new_name='eth1-32', objects=['eth1-32:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) -#k8s_resource(new_name='svi-10', objects=['svi-10:interface'], resource_deps=['vlan-10'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='svi-10', objects=['svi-10:interface'], resource_deps=['vlan-10'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_resource(new_name='po1', objects=['po1:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_resource(new_name='po2', objects=['po2:interface'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) - - k8s_yaml('./config/samples/v1alpha1_banner.yaml') -# k8s_resource(new_name='banner', objects=['banner:banner'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='banner', objects=['banner:banner'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_user.yaml') -# k8s_resource(new_name='user', objects=['user:user', 'user-password:secret', 'user-ssh-key:secret'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='user', objects=['user:user', 'user-password:secret', 'user-ssh-key:secret'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_dns.yaml') -# k8s_resource(new_name='dns', objects=['dns:dns'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='dns', objects=['dns:dns'], trigger_mode=TRIGGER_MODE_MANUAL, 
auto_init=False) k8s_yaml('./config/samples/v1alpha1_ntp.yaml') -# k8s_resource(new_name='ntp', objects=['ntp:ntp'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='ntp', objects=['ntp:ntp'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_acl.yaml') -# k8s_resource(new_name='acl', objects=['acl:accesscontrollist'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='acl', objects=['acl:accesscontrollist'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_certificate.yaml') -# k8s_resource(new_name='trustpoint', objects=['network-operator:issuer', 'network-operator-ca:certificate', 'trustpoint:certificate'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='trustpoint', objects=['network-operator:issuer', 'network-operator-ca:certificate', 'trustpoint:certificate'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_snmp.yaml') -# k8s_resource(new_name='snmp', objects=['snmp:snmp'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='snmp', objects=['snmp:snmp'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_syslog.yaml') -# k8s_resource(new_name='syslog', objects=['syslog:syslog'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='syslog', objects=['syslog:syslog'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_managementaccess.yaml') -# k8s_resource(new_name='managementaccess', objects=['managementaccess:managementaccess'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='managementaccess', objects=['managementaccess:managementaccess'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_isis.yaml') -# k8s_resource(new_name='underlay', objects=['underlay:isis'], resource_deps=['lo0', 'lo1', 
'eth1-1', 'eth1-2'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='underlay', objects=['underlay:isis'], resource_deps=['lo0', 'lo1', 'eth1-1', 'eth1-2'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_vrf.yaml') -# k8s_resource(new_name='vrf-admin', objects=['vrf-cc-admin:vrf'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='vrf-admin', objects=['vrf-cc-admin:vrf'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_resource(new_name='vrf-vpckeepalive', objects=['vrf-vpckeepalive:vrf'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_pim.yaml') -# k8s_resource(new_name='pim', objects=['pim:pim'], resource_deps=['lo0', 'lo1', 'eth1-1', 'eth1-2'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='pim', objects=['pim:pim'], resource_deps=['lo0', 'lo1', 'eth1-1', 'eth1-2'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_bgp.yaml') -# k8s_resource(new_name='bgp', objects=['bgp:bgp'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='bgp', objects=['bgp:bgp'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_bgppeer.yaml') -# k8s_resource(new_name='peer-spine1', objects=['leaf1-spine1:bgppeer'], resource_deps=['bgp', 'lo0'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) -# k8s_resource(new_name='peer-spine2', objects=['leaf1-spine2:bgppeer'], resource_deps=['bgp', 'lo0'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='peer-spine1', objects=['leaf1-spine1:bgppeer'], resource_deps=['bgp', 'lo0'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='peer-spine2', objects=['leaf1-spine2:bgppeer'], resource_deps=['bgp', 'lo0'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_ospf.yaml') 
-#k8s_resource(new_name='ospf-underlay', objects=['underlay:ospf'], resource_deps=['lo0', 'lo1', 'eth1-1', 'eth1-2'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='ospf-underlay', objects=['underlay:ospf'], resource_deps=['lo0', 'lo1', 'eth1-1', 'eth1-2'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_vlan.yaml') -# k8s_resource(new_name='vlan-10', objects=['vlan-10:vlan'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) +k8s_resource(new_name='vlan-10', objects=['vlan-10:vlan'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/cisco/nx/v1alpha1_vpc.yaml') -# don't add a resource dependency to the interfaces here, otherwise we create a deadlock as the multichassis ID depends -# on the vPC being created first. k8s_resource(new_name='vpc', objects=['leaf1-vpc:vpc'], trigger_mode=TRIGGER_MODE_MANUAL, auto_init=False) k8s_yaml('./config/samples/v1alpha1_evi.yaml') diff --git a/config/samples/cisco/nx/v1alpha1_vpc.yaml b/config/samples/cisco/nx/v1alpha1_vpc.yaml index b677e3f5..2fd8ee6d 100644 --- a/config/samples/cisco/nx/v1alpha1_vpc.yaml +++ b/config/samples/cisco/nx/v1alpha1_vpc.yaml @@ -10,7 +10,7 @@ spec: deviceRef: name: leaf1 domainId: 5 - adminState: enabled + adminState: enabled rolePriority: 100 systemPriority: 10 delayRestoreSVI: 140 @@ -27,6 +27,8 @@ spec: keepalive: source: 10.1.1.1 destination: 10.1.1.2 + vrfRef: + name: vpc-keepalive autoRecovery: enabled: true reloadDelay: 360 diff --git a/config/samples/v1alpha1_interface.yaml b/config/samples/v1alpha1_interface.yaml index 138df392..b711865a 100644 --- a/config/samples/v1alpha1_interface.yaml +++ b/config/samples/v1alpha1_interface.yaml @@ -182,7 +182,7 @@ spec: deviceRef: name: leaf1 name: po1 - description: vPC to Host1 + description: vPC Peer-Link adminState: Up type: Aggregate mtu: 1500 @@ -196,31 +196,6 @@ spec: memberInterfaceRefs: - name: eth1-31 - name: eth1-32 - multichassis: - id: 10 - 
enabled: true ---- -apiVersion: networking.metal.ironcore.dev/v1alpha1 -kind: Interface -metadata: - labels: - app.kubernetes.io/name: network-operator - app.kubernetes.io/managed-by: kustomize - networking.metal.ironcore.dev/device-name: leaf1 - name: svi-10 -spec: - deviceRef: - name: leaf1 - name: vlan10 - description: SVI for VLAN 10 - adminState: Up - type: RoutedVLAN - mtu: 1500 - vlanRef: - name: vlan-10 - ipv4: - addresses: - - 192.168.10.254/24 --- apiVersion: networking.metal.ironcore.dev/v1alpha1 kind: Interface @@ -249,4 +224,26 @@ spec: - name: eth1-10 multichassis: enabled: true - id: 1 + id: 2 +--- +apiVersion: networking.metal.ironcore.dev/v1alpha1 +kind: Interface +metadata: + labels: + app.kubernetes.io/name: network-operator + app.kubernetes.io/managed-by: kustomize + networking.metal.ironcore.dev/device-name: leaf1 + name: svi-10 +spec: + deviceRef: + name: leaf1 + name: vlan10 + description: SVI for VLAN 10 + adminState: Up + type: RoutedVLAN + mtu: 1500 + vlanRef: + name: vlan-10 + ipv4: + addresses: + - 192.168.10.254/24