13 changes: 13 additions & 0 deletions hack/run-e2e-conformance-virtual-cluster.sh
@@ -214,6 +214,17 @@ systemctl enable --now load-br-netfilter

systemctl restart NetworkManager

echo 'GRUB_TIMEOUT=1
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
GRUB_DEFAULT=saved
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL_OUTPUT="console"
GRUB_CMDLINE_LINUX="console=ttyS0,115200n8 no_timer_check net.ifnames=0 hugepagesz=2M hugepages=16 crashkernel=1G-4G:192M,4G-64G:256M,64G-:512M"
GRUB_DISABLE_RECOVERY="true"
GRUB_ENABLE_BLSCFG=true' > /etc/default/grub

grub2-mkconfig -o /boot/grub2/grub.cfg

EOF

}
@@ -222,6 +233,8 @@ update_host $cluster_name-ctlplane-0
for ((num=0; num<NUM_OF_WORKERS; num++))
do
update_host $cluster_name-worker-$num
sleep 5
kcli ssh $cluster_name-worker-$num sudo reboot || true
done

# remove the patch after multus bug is fixed
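A quick way to confirm the new hugepages kernel arguments took effect after the worker reboot (an illustrative check against the same kcli VMs, not part of the change above):

# the rebooted worker should now report 2M hugepages on its boot line and in meminfo
kcli ssh $cluster_name-worker-0 "cat /proc/cmdline"
kcli ssh $cluster_name-worker-0 "grep -i HugePages_Total /proc/meminfo"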
20 changes: 20 additions & 0 deletions hack/run-e2e-conformance-virtual-ocp.sh
@@ -139,6 +139,26 @@ if [ `cat /etc/hosts | grep ${api_ip} | grep "default-route-openshift-image-regi
sed -i "s/${api_ip}/${api_ip} default-route-openshift-image-registry.apps.${cluster_name}.${domain_name}/g" /etc/hosts
fi

# Create performance profile to allocate hugepages
cat <<EOF | oc apply -f -
apiVersion: performance.openshift.io/v1
kind: PerformanceProfile
metadata:
name: performance
spec:
cpu:
isolated: "2-5"
reserved: "0-1"
hugepages:
defaultHugepagesSize: 2M
pages:
- count: 16
size: 2M
nodeSelector:
node-role.kubernetes.io/worker: ""
realTimeKernel:
enabled: false
EOF

cat <<EOF | oc apply -f -
apiVersion: v1
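Once the PerformanceProfile above has rolled out, 2Mi hugepages should show up as allocatable on the workers. A sketch of such a check, assuming the standard worker MachineConfigPool name:

# wait for the profile to be applied, then read the allocatable 2Mi hugepages
oc wait mcp worker --for condition=Updated --timeout=30m
oc get nodes -l node-role.kubernetes.io/worker -o jsonpath='{.items[*].status.allocatable.hugepages-2Mi}'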
87 changes: 87 additions & 0 deletions test/conformance/tests/test_sriov_operator.go
@@ -228,6 +228,93 @@ var _ = Describe("[sriov] operator", Ordered, func() {
FieldPath: "metadata.annotations",
},
})))

By("checking the label is present in the pod")
stdout, stderr, err := pod.ExecCommand(clients, runningPod, "/bin/bash", "-c", "cat /etc/podnetinfo/labels")
Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("stdout: %s, stderr: %s", stdout, stderr))
Expect(stdout).To(ContainSubstring("anyname=\"anyvalue\""))
})

It("should inject also hugepages if requested in the pod", func() {
var hugepagesName string
var hupagesAmount int64

hasHugepages := false
nodeObj := &corev1.Node{}
Eventually(func() error {
return clients.Get(context.Background(), runtimeclient.ObjectKey{Name: node}, nodeObj)
}, 10*time.Second, 1*time.Second).ShouldNot(HaveOccurred())

for resourceName, resource := range nodeObj.Status.Allocatable {
if strings.HasPrefix(string(resourceName), "hugepages") && resource.Value() > 0 {
hasHugepages = true
hugepagesName = string(resourceName)
hugepagesAmount = resource.Value()
break
}
}
if !hasHugepages {
Skip("No hugepages found on the node")
}

sriovNetwork := &sriovv1.SriovNetwork{
ObjectMeta: metav1.ObjectMeta{
Name: "test-apivolnetwork",
Namespace: operatorNamespace,
},
Spec: sriovv1.SriovNetworkSpec{
ResourceName: resourceName,
IPAM: `{"type":"host-local","subnet":"10.10.10.0/24","rangeStart":"10.10.10.171","rangeEnd":"10.10.10.181"}`,
NetworkNamespace: namespaces.Test,
}}
err := clients.Create(context.Background(), sriovNetwork)
Expect(err).ToNot(HaveOccurred())

waitForNetAttachDef("test-apivolnetwork", namespaces.Test)

podDefinition := pod.RedefineWithHugepages(pod.DefineWithNetworks([]string{sriovNetwork.Name}), hugepagesName, hugepagesAmount)
created, err := clients.Pods(namespaces.Test).Create(context.Background(), podDefinition, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())

runningPod := waitForPodRunning(created)

var downwardVolume *corev1.Volume
for _, v := range runningPod.Spec.Volumes {
if v.Name == volumePodNetInfo {
downwardVolume = v.DeepCopy()
break
}
}

// In the Downward API the resource injector renames the hugepage resource: the dash becomes
// an underscore and the trailing "i" is dropped, e.g. hugepages-1Gi -> hugepages_1G
result := strings.Replace(hugepagesName, "-", "_", 1)
if len(result) > 0 {
result = result[:len(result)-1]
}

Expect(downwardVolume).ToNot(BeNil(), "Downward volume not found")
Expect(downwardVolume.DownwardAPI).ToNot(BeNil(), "Downward api not found in volume")
Expect(downwardVolume.DownwardAPI.Items).To(SatisfyAll(
ContainElement(MatchFields(IgnoreExtras, Fields{
"Path": Equal(fmt.Sprintf("%s_request_test", result)),
"ResourceFieldRef": PointTo(MatchFields(IgnoreExtras, Fields{
"ContainerName": Equal("test"),
"Resource": Equal(fmt.Sprintf("requests.%s", hugepagesName)),
})),
})), ContainElement(MatchFields(IgnoreExtras, Fields{
"Path": Equal(fmt.Sprintf("%s_limit_test", result)),
"ResourceFieldRef": PointTo(MatchFields(IgnoreExtras, Fields{
"ContainerName": Equal("test"),
"Resource": Equal(fmt.Sprintf("limits.%s", hugepagesName)),
})),
})), ContainElement(MatchFields(IgnoreExtras, Fields{
"Path": Equal("annotations"),
"FieldRef": PointTo(MatchFields(IgnoreExtras, Fields{
"APIVersion": Equal("v1"),
"FieldPath": Equal("metadata.annotations"),
})),
}))))
})
})

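For reference, the downward API files the new test asserts on can also be inspected from inside a running test pod; a manual spot-check might look like the following (pod name and namespace are illustrative):

# list the files mounted by the network resources injector
kubectl exec -n sriov-conformance-testing testpod-xxxxx -- ls /etc/podnetinfo
# expected entries when 2Mi hugepages are requested (assumption): annotations  labels  hugepages_2M_request_test  hugepages_2M_limit_test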
30 changes: 28 additions & 2 deletions test/util/pod/pod.go
@@ -8,10 +8,11 @@ import (
"time"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/utils/pointer"
"k8s.io/utils/ptr"

testclient "github.com/k8snetworkplumbingwg/sriov-network-operator/test/util/client"
"github.com/k8snetworkplumbingwg/sriov-network-operator/test/util/images"
@@ -26,7 +27,7 @@ func GetDefinition() *corev1.Pod {
GenerateName: "testpod-",
Namespace: namespaces.Test},
Spec: corev1.PodSpec{
TerminationGracePeriodSeconds: pointer.Int64Ptr(0),
TerminationGracePeriodSeconds: ptr.To[int64](0),
Containers: []corev1.Container{{Name: "test",
Image: images.Test(),
SecurityContext: &corev1.SecurityContext{
@@ -103,6 +104,31 @@ func RedefineWithCapabilities(pod *corev1.Pod, capabilitiesList []corev1.Capabil
return pod
}

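// RedefineWithHugepages mounts an emptyDir hugepages volume into the first container and
// sets the given hugepages resource, plus a small CPU request, as both requests and limits.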
func RedefineWithHugepages(pod *corev1.Pod, hugepagesName string, hugepagesAmount int64) *corev1.Pod {
pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{
Name: "hugepages",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumHugePages,
},
},
})
pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
Name: "hugepages",
MountPath: "/hugepages",
})

resources := corev1.ResourceList{
corev1.ResourceName(hugepagesName): *resource.NewQuantity(hugepagesAmount, resource.BinarySI),
corev1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI),
}

pod.Spec.Containers[0].Resources.Requests = resources
pod.Spec.Containers[0].Resources.Limits = resources

return pod
}

// ExecCommand runs command in the pod and returns buffer output
func ExecCommand(cs *testclient.ClientSet, pod *corev1.Pod, command ...string) (string, string, error) {
var buf, errbuf bytes.Buffer