Commit ccb071f

Merge pull request #1399 from SargunNarula/refactor_getsmtlevel
OCPBUGS-62605: e2e: refactor GetSMTLevel to remove Gomega assertions
2 parents 4970228 + 0be8ef3 · commit ccb071f

2 files changed (+19, −11 lines)


test/e2e/performanceprofile/functests/1_performance/cpu_management.go

Lines changed: 11 additions & 7 deletions
@@ -109,7 +109,8 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
     onlineCPUSet, err = nodes.GetOnlineCPUsSet(ctx, workerRTNode)
     Expect(err).ToNot(HaveOccurred())
     cpuID := onlineCPUSet.UnsortedList()[0]
-    smtLevel = nodes.GetSMTLevel(ctx, cpuID, workerRTNode)
+    smtLevel, err = nodes.GetSMTLevel(ctx, cpuID, workerRTNode)
+    Expect(err).ToNot(HaveOccurred(), "Unable to fetch SMT level on node %s, Error: %v", workerRTNode.Name, err)
     getter, err = cgroup.BuildGetter(ctx, testclient.DataPlaneClient, testclient.K8sClient)
     Expect(err).ToNot(HaveOccurred())
     cgroupV2, err = cgroup.IsVersion2(ctx, testclient.DataPlaneClient)
@@ -251,7 +252,8 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
 
 DescribeTable("Verify CPU usage by stress PODs", func(ctx context.Context, guaranteed bool) {
     cpuID := onlineCPUSet.UnsortedList()[0]
-    smtLevel := nodes.GetSMTLevel(ctx, cpuID, workerRTNode)
+    smtLevel, err := nodes.GetSMTLevel(ctx, cpuID, workerRTNode)
+    Expect(err).ToNot(HaveOccurred(), "Unable to fetch SMT level on node %s, Error: %v", workerRTNode.Name, err)
     if smtLevel < 2 {
         Skip(fmt.Sprintf("designated worker node %q has SMT level %d - minimum required 2", workerRTNode.Name, smtLevel))
     }
@@ -281,7 +283,6 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
     }
 
     By(fmt.Sprintf("create a %s QoS stress pod requesting %d cpus", expectedQos, cpuRequest))
-    var err error
     err = testclient.DataPlaneClient.Create(ctx, testpod)
     Expect(err).ToNot(HaveOccurred())
 
@@ -414,7 +415,8 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
     }
 
     cpuID := onlineCPUSet.UnsortedList()[0]
-    smtLevel = nodes.GetSMTLevel(context.TODO(), cpuID, workerRTNode)
+    smtLevel, err = nodes.GetSMTLevel(context.TODO(), cpuID, workerRTNode)
+    Expect(err).ToNot(HaveOccurred(), "Unable to fetch SMT level on node %s, Error: %v", workerRTNode.Name, err)
 })
 
 AfterEach(func() {
@@ -584,7 +586,8 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
     // also covers Hyper-thread aware sheduling [test_id:46545] Odd number of isolated CPU threads
     // any random existing cpu is fine
     cpuID := onlineCPUSet.UnsortedList()[0]
-    smtLevel := nodes.GetSMTLevel(context.TODO(), cpuID, workerRTNode)
+    smtLevel, err := nodes.GetSMTLevel(context.TODO(), cpuID, workerRTNode)
+    Expect(err).ToNot(HaveOccurred(), "Unable to fetch SMT level on node %s, Error: %v", workerRTNode.Name, err)
     if smtLevel < 2 {
         Skip(fmt.Sprintf("designated worker node %q has SMT level %d - minimum required 2", workerRTNode.Name, smtLevel))
     }
@@ -593,7 +596,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
     testpod = promotePodToGuaranteed(getStressPod(workerRTNode.Name, cpuCount))
     testpod.Namespace = testutils.NamespaceTesting
 
-    err := testclient.DataPlaneClient.Create(context.TODO(), testpod)
+    err = testclient.DataPlaneClient.Create(context.TODO(), testpod)
     Expect(err).ToNot(HaveOccurred())
 
     currentPod, err := pods.WaitForPredicate(context.TODO(), client.ObjectKeyFromObject(testpod), 10*time.Minute, func(pod *corev1.Pod) (bool, error) {
@@ -643,7 +646,8 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
     // Check for SMT enabled
     // any random existing cpu is fine
     cpuID := onlineCPUSet.UnsortedList()[0]
-    smtLevel := nodes.GetSMTLevel(ctx, cpuID, workerRTNode)
+    smtLevel, err := nodes.GetSMTLevel(ctx, cpuID, workerRTNode)
+    Expect(err).ToNot(HaveOccurred(), "Unable to fetch SMT level on node %s, Error: %v", workerRTNode.Name, err)
     hasWP := checkForWorkloadPartitioning(ctx)
 
     // Following checks are required to map test_id scenario correctly to the type of node under test
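
Every updated call site in this file follows the same shape: fetch the SMT level, assert on the returned error with a descriptive message, and skip the test when the node cannot meet the SMT requirement. A minimal sketch of that pattern, assuming a Ginkgo test body where ctx, onlineCPUSet, and workerRTNode have already been populated by the suite setup:

    // Call-site pattern after the refactor; ctx, onlineCPUSet, and
    // workerRTNode are assumed to come from the surrounding suite setup.
    cpuID := onlineCPUSet.UnsortedList()[0]
    smtLevel, err := nodes.GetSMTLevel(ctx, cpuID, workerRTNode)
    Expect(err).ToNot(HaveOccurred(), "Unable to fetch SMT level on node %s, Error: %v", workerRTNode.Name, err)
    if smtLevel < 2 {
        Skip(fmt.Sprintf("designated worker node %q has SMT level %d - minimum required 2", workerRTNode.Name, smtLevel))
    }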

test/e2e/performanceprofile/functests/utils/nodes/nodes.go

Lines changed: 8 additions & 4 deletions
@@ -262,16 +262,20 @@ func GetOnlineCPUsSet(ctx context.Context, node *corev1.Node) (cpuset.CPUSet, er
 
 // GetSMTLevel returns the SMT level on the node using the given cpuID as target
 // Use a random cpuID from the return value of GetOnlineCPUsSet if not sure
-func GetSMTLevel(ctx context.Context, cpuID int, node *corev1.Node) int {
+func GetSMTLevel(ctx context.Context, cpuID int, node *corev1.Node) (int, error) {
     cmd := []string{"/bin/sh", "-c", fmt.Sprintf("cat /sys/devices/system/cpu/cpu%d/topology/thread_siblings_list | tr -d \"\n\r\"", cpuID)}
     out, err := ExecCommand(ctx, node, cmd)
-    ExpectWithOffset(1, err).ToNot(HaveOccurred())
+    if err != nil {
+        return 0, err
+    }
     threadSiblingsList := testutils.ToString(out)
     // how many thread sibling you have = SMT level
     // example: 2-way SMT means 2 threads sibling for each thread
     cpus, err := cpuset.Parse(strings.TrimSpace(threadSiblingsList))
-    ExpectWithOffset(1, err).ToNot(HaveOccurred())
-    return cpus.Size()
+    if err != nil {
+        return 0, err
+    }
+    return cpus.Size(), nil
 }
 
 // GetNumaNodes returns the number of numa nodes and the associated cpus as list on the node
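
With the ExpectWithOffset assertions gone, GetSMTLevel no longer has to run inside a Ginkgo leaf node; callers decide how to handle the failure. A minimal plain-Go sketch of a caller that follows the doc comment's advice to take a cpuID from GetOnlineCPUsSet; the wrapper name nodeSMTLevel and the module half of the nodes import path are hypothetical:

    package example

    import (
        "context"
        "fmt"

        corev1 "k8s.io/api/core/v1"

        // Hypothetical module root; the package is the utils/nodes package
        // changed in this commit (test/e2e/performanceprofile/functests/utils/nodes).
        "example.com/repo/test/e2e/performanceprofile/functests/utils/nodes"
    )

    // nodeSMTLevel picks any online CPU on the node and returns its SMT level,
    // propagating errors instead of asserting, so it can run outside Ginkgo.
    func nodeSMTLevel(ctx context.Context, node *corev1.Node) (int, error) {
        onlineCPUSet, err := nodes.GetOnlineCPUsSet(ctx, node)
        if err != nil {
            return 0, fmt.Errorf("listing online CPUs on node %s: %w", node.Name, err)
        }
        // Any online CPU works as the probe target, per the GetSMTLevel doc comment.
        cpuID := onlineCPUSet.UnsortedList()[0]
        return nodes.GetSMTLevel(ctx, cpuID, node)
    }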
