From 4b9124fed5e0407480a38e4c271a615bed3be729 Mon Sep 17 00:00:00 2001
From: Sebastian Sch
Date: Sun, 6 Jul 2025 12:37:36 +0300
Subject: [PATCH 1/2] add support for hugepages in virtual CI machines

---
 hack/run-e2e-conformance-virtual-cluster.sh | 12 ++++++++++++
 hack/run-e2e-conformance-virtual-ocp.sh     | 20 ++++++++++++++++++++
 2 files changed, 32 insertions(+)

diff --git a/hack/run-e2e-conformance-virtual-cluster.sh b/hack/run-e2e-conformance-virtual-cluster.sh
index 353c8528d8..8a83a05df1 100755
--- a/hack/run-e2e-conformance-virtual-cluster.sh
+++ b/hack/run-e2e-conformance-virtual-cluster.sh
@@ -214,6 +214,17 @@ systemctl enable --now load-br-netfilter
 
 systemctl restart NetworkManager
 
+echo 'GRUB_TIMEOUT=1
+GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
+GRUB_DEFAULT=saved
+GRUB_DISABLE_SUBMENU=true
+GRUB_TERMINAL_OUTPUT="console"
+GRUB_CMDLINE_LINUX="console=ttyS0,115200n8 no_timer_check net.ifnames=0 hugepagesz=2M hugepages=16 crashkernel=1G-4G:192M,4G-64G:256M,64G-:512M"
+GRUB_DISABLE_RECOVERY="true"
+GRUB_ENABLE_BLSCFG=true' > /etc/default/grub
+
+grub2-mkconfig -o /boot/grub2/grub.cfg
+
 EOF
 }
 
@@ -222,6 +233,7 @@ update_host $cluster_name-ctlplane-0
 
 for ((num=0; num

Date: Sun, 6 Jul 2025 12:38:12 +0300
Subject: [PATCH 2/2] Add webhook test for hugepages mount and downward api

Signed-off-by: Sebastian Sch
---
 hack/run-e2e-conformance-virtual-cluster.sh   |  3 +-
 test/conformance/tests/test_sriov_operator.go | 87 +++++++++++++++++++
 test/util/pod/pod.go                          | 30 ++++++-
 3 files changed, 117 insertions(+), 3 deletions(-)

diff --git a/hack/run-e2e-conformance-virtual-cluster.sh b/hack/run-e2e-conformance-virtual-cluster.sh
index 8a83a05df1..7abd1ad4d2 100755
--- a/hack/run-e2e-conformance-virtual-cluster.sh
+++ b/hack/run-e2e-conformance-virtual-cluster.sh
@@ -233,7 +233,8 @@ update_host $cluster_name-ctlplane-0
 
 for ((num=0; num

diff --git a/test/conformance/tests/test_sriov_operator.go b/test/conformance/tests/test_sriov_operator.go
 0 {
+				hasHugepages = true
+				hugepagesName = string(resourceName)
+				hupagesAmount = resource.Value()
+				break
+			}
+		}
+		if !hasHugepages {
+			Skip("No hugepages found on the node")
+		}
+
+		sriovNetwork := &sriovv1.SriovNetwork{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "test-apivolnetwork",
+				Namespace: operatorNamespace,
+			},
+			Spec: sriovv1.SriovNetworkSpec{
+				ResourceName: resourceName,
+				IPAM: `{"type":"host-local","subnet":"10.10.10.0/24","rangeStart":"10.10.10.171","rangeEnd":"10.10.10.181"}`,
+				NetworkNamespace: namespaces.Test,
+			}}
+		err := clients.Create(context.Background(), sriovNetwork)
+		Expect(err).ToNot(HaveOccurred())
+
+		waitForNetAttachDef("test-apivolnetwork", namespaces.Test)
+
+		podDefinition := pod.RedefineWithHugepages(pod.DefineWithNetworks([]string{sriovNetwork.Name}), hugepagesName, hupagesAmount)
+		created, err := clients.Pods(namespaces.Test).Create(context.Background(), podDefinition, metav1.CreateOptions{})
+		Expect(err).ToNot(HaveOccurred())
+
+		runningPod := waitForPodRunning(created)
+
+		var downwardVolume *corev1.Volume
+		for _, v := range runningPod.Spec.Volumes {
+			if v.Name == volumePodNetInfo {
+				downwardVolume = v.DeepCopy()
+				break
+			}
+		}
+
+		// In the downward API the resource injector renames the hugepage resource:
+		// the dash becomes an underscore and the trailing "i" is dropped,
+		// e.g. hugepages-1Gi -> hugepages_1G
+		result := strings.Replace(hugepagesName, "-", "_", 1)
+		if len(result) > 0 {
+			result = result[:len(result)-1]
+		}
+
+		Expect(downwardVolume).ToNot(BeNil(), "Downward volume not found")
+		Expect(downwardVolume.DownwardAPI).ToNot(BeNil(), "Downward api not found in volume")
+		Expect(downwardVolume.DownwardAPI.Items).To(SatisfyAll(
+			ContainElement(MatchFields(IgnoreExtras, Fields{
+				"Path": Equal(fmt.Sprintf("%s_request_test", result)),
+				"ResourceFieldRef": PointTo(MatchFields(IgnoreExtras, Fields{
+					"ContainerName": Equal("test"),
+					"Resource": Equal(fmt.Sprintf("requests.%s", hugepagesName)),
+				})),
+			})), ContainElement(MatchFields(IgnoreExtras, Fields{
+				"Path": Equal(fmt.Sprintf("%s_limit_test", result)),
+				"ResourceFieldRef": PointTo(MatchFields(IgnoreExtras, Fields{
+					"ContainerName": Equal("test"),
+					"Resource": Equal(fmt.Sprintf("limits.%s", hugepagesName)),
+				})),
+			})), ContainElement(MatchFields(IgnoreExtras, Fields{
+				"Path": Equal("annotations"),
+				"FieldRef": PointTo(MatchFields(IgnoreExtras, Fields{
+					"APIVersion": Equal("v1"),
+					"FieldPath": Equal("metadata.annotations"),
+				})),
+			}))))
 	})
 })

diff --git a/test/util/pod/pod.go b/test/util/pod/pod.go
index c99ae00b2d..c833ac3a55 100644
--- a/test/util/pod/pod.go
+++ b/test/util/pod/pod.go
@@ -8,10 +8,11 @@ import (
 	"time"
 
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/tools/remotecommand"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 
 	testclient "github.com/k8snetworkplumbingwg/sriov-network-operator/test/util/client"
 	"github.com/k8snetworkplumbingwg/sriov-network-operator/test/util/images"
@@ -26,7 +27,7 @@ func GetDefinition() *corev1.Pod {
 			GenerateName: "testpod-",
 			Namespace:    namespaces.Test},
 		Spec: corev1.PodSpec{
-			TerminationGracePeriodSeconds: pointer.Int64Ptr(0),
+			TerminationGracePeriodSeconds: ptr.To[int64](0),
 			Containers: []corev1.Container{{Name: "test",
 				Image: images.Test(),
 				SecurityContext: &corev1.SecurityContext{
@@ -103,6 +104,31 @@ func RedefineWithCapabilities(pod *corev1.Pod, capabilitiesList []corev1.Capabil
 	return pod
 }
 
+func RedefineWithHugepages(pod *corev1.Pod, hugepagesName string, hugepagesAmount int64) *corev1.Pod {
+	pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{
+		Name: "hugepages",
+		VolumeSource: corev1.VolumeSource{
+			EmptyDir: &corev1.EmptyDirVolumeSource{
+				Medium: corev1.StorageMediumHugePages,
+			},
+		},
+	})
+	pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
+		Name:      "hugepages",
+		MountPath: "/hugepages",
+	})
+
+	resources := corev1.ResourceList{
+		corev1.ResourceName(hugepagesName): *resource.NewQuantity(hugepagesAmount, resource.BinarySI),
+		corev1.ResourceCPU:                 *resource.NewMilliQuantity(50, resource.DecimalSI),
+	}
+
+	pod.Spec.Containers[0].Resources.Requests = resources
+	pod.Spec.Containers[0].Resources.Limits = resources
+
+	return pod
+}
+
 // ExecCommand runs command in the pod and returns buffer output
 func ExecCommand(cs *testclient.ClientSet, pod *corev1.Pod, command ...string) (string, string, error) {
 	var buf, errbuf bytes.Buffer