
Commit 056a9b4

Topology constraint at any subgroup hierarchy level (#560)
* Topology constraint at any subgroup hierarchy level
* Sort selected domains based on the number of non-allocated GPUs
* TopologyConstraint is now checked in the topology plugin instead of the allocate algorithm
* Added combined required and preferred topology constraint tests
1 parent 304392a commit 056a9b4
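The second bullet in the commit message above mentions sorting selected domains by the number of non-allocated GPUs; that logic lives in files whose diffs are not rendered on this page. As a rough illustration of that kind of ordering, using a hypothetical Domain type (the sort direction, ascending so the tightest fit is tried first, is an assumption, not taken from the scheduler code):

package main

import (
    "fmt"
    "sort"
)

// Domain is a hypothetical stand-in for a topology domain; the real
// scheduler tracks considerably more per-domain state.
type Domain struct {
    Name             string
    NonAllocatedGPUs int
}

// sortByNonAllocatedGPUs orders candidate domains by how many GPUs are still
// free, one plausible reading of the commit message bullet.
func sortByNonAllocatedGPUs(domains []Domain) {
    sort.Slice(domains, func(i, j int) bool {
        return domains[i].NonAllocatedGPUs < domains[j].NonAllocatedGPUs
    })
}

func main() {
    domains := []Domain{{"rack-a", 6}, {"rack-b", 2}, {"rack-c", 4}}
    sortByNonAllocatedGPUs(domains)
    fmt.Println(domains) // [{rack-b 2} {rack-c 4} {rack-a 6}]
}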

11 files changed: +1269 -58 lines changed

pkg/scheduler/actions/allocate/allocateTopology_test.go

Lines changed: 1102 additions & 0 deletions
Large diffs are not rendered by default.

pkg/scheduler/actions/allocate/allocate_subgroups_test.go

Lines changed: 8 additions & 8 deletions
@@ -49,7 +49,7 @@ func getAllocationSubGroupsTestsMetadata() []integration_tests_utils.TestTopolog
     RequiredGPUsPerTask: 1,
     QueueName: "queue0",
     Priority: constants.PriorityTrainNumber,
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub0": subgroup_info.NewPodSet("sub0", 1, nil),
         "sub1": subgroup_info.NewPodSet("sub1", 1, nil),
     },
@@ -105,7 +105,7 @@ func getAllocationSubGroupsTestsMetadata() []integration_tests_utils.TestTopolog
     RequiredGPUsPerTask: 1,
     QueueName: "queue0",
     Priority: constants.PriorityTrainNumber,
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub0": subgroup_info.NewPodSet("sub0", 1, nil),
         "sub1": subgroup_info.NewPodSet("sub1", 1, nil),
     },
@@ -176,7 +176,7 @@ func getAllocationSubGroupsTestsMetadata() []integration_tests_utils.TestTopolog
     Name: "job0",
     QueueName: "queue0",
     Priority: constants.PriorityTrainNumber,
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub0": subgroup_info.NewPodSet("sub0", 2, nil),
         "sub1": subgroup_info.NewPodSet("sub1", 2, nil),
     },
@@ -264,7 +264,7 @@ func getAllocationSubGroupsTestsMetadata() []integration_tests_utils.TestTopolog
     Name: "job0",
     QueueName: "queue0",
     Priority: constants.PriorityTrainNumber,
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub0": subgroup_info.NewPodSet("sub0", 2, nil),
         "sub1": subgroup_info.NewPodSet("sub1", 1, nil),
     },
@@ -368,7 +368,7 @@ func getAllocationSubGroupsTestsMetadata() []integration_tests_utils.TestTopolog
     Name: "job0",
     QueueName: "queue0",
     Priority: constants.PriorityTrainNumber,
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub0": subgroup_info.NewPodSet("sub0", 2, nil),
         "sub1": subgroup_info.NewPodSet("sub1", 1, nil),
     },
@@ -472,7 +472,7 @@ func getAllocationSubGroupsTestsMetadata() []integration_tests_utils.TestTopolog
     Name: "pending_job0",
     QueueName: "queue0",
     Priority: constants.PriorityTrainNumber,
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub0": subgroup_info.NewPodSet("sub0", 1, nil),
         "sub1": subgroup_info.NewPodSet("sub1", 2, nil),
     },
@@ -543,7 +543,7 @@ func getAllocationSubGroupsTestsMetadata() []integration_tests_utils.TestTopolog
     Name: "pending_job0",
     QueueName: "queue0",
     Priority: constants.PriorityTrainNumber,
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub0": subgroup_info.NewPodSet("sub0", 1, nil),
         "sub1": subgroup_info.NewPodSet("sub1", 1, nil),
     },
@@ -575,7 +575,7 @@ func getAllocationSubGroupsTestsMetadata() []integration_tests_utils.TestTopolog
     Name: "pending_job1",
     QueueName: "queue0",
     Priority: constants.PriorityTrainNumber,
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub0": subgroup_info.NewPodSet("sub0", 1, nil),
         "sub1": subgroup_info.NewPodSet("sub1", 1, nil),
     },
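Across these test fixtures the change is purely a field rename: the map that keys pod sets by subgroup name moves from SubGroups to PodSets. A minimal standalone sketch of that shape, using simplified stand-in types rather than the repository's real subgroup_info.PodSet and test metadata structs:

package main

import "fmt"

// PodSet is a simplified stand-in for subgroup_info.PodSet; the real
// constructor is subgroup_info.NewPodSet(name, minAvailable, nil).
type PodSet struct {
    Name         string
    MinAvailable int32
}

// TestJob is a hypothetical fixture struct showing the renamed field.
type TestJob struct {
    Name    string
    PodSets map[string]*PodSet // previously named SubGroups
}

func main() {
    job := TestJob{
        Name: "job0",
        PodSets: map[string]*PodSet{
            "sub0": {Name: "sub0", MinAvailable: 1},
            "sub1": {Name: "sub1", MinAvailable: 1},
        },
    }
    for name, ps := range job.PodSets {
        fmt.Printf("pod set %q requires at least %d pod(s)\n", name, ps.MinAvailable)
    }
}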

pkg/scheduler/actions/common/allocate.go

Lines changed: 81 additions & 9 deletions
@@ -10,6 +10,7 @@ import (
     "github.com/NVIDIA/KAI-scheduler/pkg/scheduler/api/node_info"
     "github.com/NVIDIA/KAI-scheduler/pkg/scheduler/api/pod_info"
     "github.com/NVIDIA/KAI-scheduler/pkg/scheduler/api/podgroup_info"
+    "github.com/NVIDIA/KAI-scheduler/pkg/scheduler/api/podgroup_info/subgroup_info"
     "github.com/NVIDIA/KAI-scheduler/pkg/scheduler/framework"
     "github.com/NVIDIA/KAI-scheduler/pkg/scheduler/gpu_sharing"
     "github.com/NVIDIA/KAI-scheduler/pkg/scheduler/log"
@@ -26,32 +27,85 @@ func AllocateJob(ssn *framework.Session, stmt *framework.Statement, nodes []*nod
         }
         return false
     }
+    return allocateSubGroupSet(ssn, stmt, nodes, job, job.RootSubGroupSet, tasksToAllocate, isPipelineOnly)
+}
 
-    podSets := job.RootSubGroupSet.GetAllPodSets()
-    nodeSets, err := ssn.SubsetNodesFn(job, &job.RootSubGroupSet.SubGroupInfo, podSets, tasksToAllocate, nodes)
+func allocateSubGroupSet(ssn *framework.Session, stmt *framework.Statement, nodes []*node_info.NodeInfo,
+    job *podgroup_info.PodGroupInfo, subGroupSet *subgroup_info.SubGroupSet, tasksToAllocate []*pod_info.PodInfo,
+    isPipelineOnly bool,
+) bool {
+    nodeSets, err := ssn.SubsetNodesFn(job, &subGroupSet.SubGroupInfo, subGroupSet.GetAllPodSets(), tasksToAllocate, nodes)
     if err != nil {
         log.InfraLogger.Errorf(
             "Failed to run SubsetNodes on job <%s/%s>: %v", job.Namespace, job.Namespace, err)
         return false
     }
+
     for _, nodeSet := range nodeSets {
+        cp := stmt.Checkpoint()
+        if allocateSubGroupSetOnNodes(ssn, stmt, nodeSet, job, subGroupSet, tasksToAllocate, isPipelineOnly) {
+            return true
+        }
+        if err := stmt.Rollback(cp); err != nil {
+            log.InfraLogger.Errorf("Failed to rollback statement in session %v, err: %v", ssn.UID, err)
+        }
+    }
+
+    return false
+}
+
+func allocateSubGroupSetOnNodes(ssn *framework.Session, stmt *framework.Statement, nodes node_info.NodeSet,
+    job *podgroup_info.PodGroupInfo, subGroupSet *subgroup_info.SubGroupSet, tasksToAllocate []*pod_info.PodInfo,
+    isPipelineOnly bool,
+) bool {
+    for _, childSubGroupSet := range subGroupSet.GetChildGroups() {
+        podSets := childSubGroupSet.GetAllPodSets()
+        subGroupTasks := filterTasksForPodSets(podSets, tasksToAllocate)
+        if !allocateSubGroupSet(ssn, stmt, nodes, job, childSubGroupSet, subGroupTasks, isPipelineOnly) {
+            return false
+        }
+    }
+
+    for _, podSet := range subGroupSet.GetChildPodSets() {
+        podSetTasks := filterTasksForPodSet(podSet, tasksToAllocate)
+        if !allocatePodSet(ssn, stmt, nodes, job, podSet, podSetTasks, isPipelineOnly) {
+            return false
+        }
+    }
+    return true
+}
+
+func allocatePodSet(ssn *framework.Session, stmt *framework.Statement, nodes node_info.NodeSet,
+    job *podgroup_info.PodGroupInfo, podSet *subgroup_info.PodSet, tasksToAllocate []*pod_info.PodInfo,
+    isPipelineOnly bool,
+) bool {
+    podSets := map[string]*subgroup_info.PodSet{
+        podSet.GetName(): podSet,
+    }
+    nodeSets, err := ssn.SubsetNodesFn(job, &podSet.SubGroupInfo, podSets, tasksToAllocate, nodes)
+    if err != nil {
+        log.InfraLogger.Errorf(
+            "Failed to run SubsetNodes on job <%s/%s>: %v", job.Namespace, job.Namespace, err)
+        return false
+    }
+
+    for _, nodeSet := range nodeSets {
+        cp := stmt.Checkpoint()
         if allocateTasksOnNodeSet(ssn, stmt, nodeSet, job, tasksToAllocate, isPipelineOnly) {
             return true
         }
+        if err := stmt.Rollback(cp); err != nil {
+            log.InfraLogger.Errorf("Failed to rollback statement in session %v, err: %v", ssn.UID, err)
+        }
     }
     return false
 }
 
-func allocateTasksOnNodeSet(ssn *framework.Session, stmt *framework.Statement, nodeSet node_info.NodeSet,
+func allocateTasksOnNodeSet(ssn *framework.Session, stmt *framework.Statement, nodes node_info.NodeSet,
     job *podgroup_info.PodGroupInfo, tasksToAllocate []*pod_info.PodInfo, isPipelineOnly bool) bool {
-    cp := stmt.Checkpoint()
     for index, task := range tasksToAllocate {
-        success := allocateTask(ssn, stmt, nodeSet, task, isPipelineOnly)
+        success := allocateTask(ssn, stmt, nodes, task, isPipelineOnly)
         if !success {
-            if err := stmt.Rollback(cp); err != nil {
-                log.InfraLogger.Errorf("Failed to rollback statement in session %v, err: %v", ssn.UID, err)
-            }
-
             handleFailedTaskAllocation(job, task, index)
             return false
         }
@@ -181,3 +235,21 @@ func isGangScheduling(job *podgroup_info.PodGroupInfo) bool {
     }
     return false
 }
+
+func filterTasksForPodSet(podSet *subgroup_info.PodSet, tasks []*pod_info.PodInfo) []*pod_info.PodInfo {
+    return filterTasksForPodSets(map[string]*subgroup_info.PodSet{podSet.GetName(): podSet}, tasks)
+}
+
+func filterTasksForPodSets(podSets map[string]*subgroup_info.PodSet, tasks []*pod_info.PodInfo) []*pod_info.PodInfo {
+    var result []*pod_info.PodInfo
+    for _, task := range tasks {
+        subGroupName := task.SubGroupName
+        if len(subGroupName) == 0 {
+            subGroupName = podgroup_info.DefaultSubGroup
+        }
+        if _, found := podSets[subGroupName]; found {
+            result = append(result, task)
+        }
+    }
+    return result
+}
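In outline, AllocateJob now delegates to a depth-first walk of the subgroup hierarchy: each SubGroupSet first recurses into its child groups, then allocates its own pod sets, and every candidate node set is tried under a statement checkpoint that is rolled back on failure. The standalone sketch below mirrors that control flow with simplified stand-in types (SubGroupSet, PodSet, Statement and the tryPodSet callback are illustrative placeholders, not the scheduler's real API, and the per-node-set loop is collapsed into a single attempt):

package main

import "fmt"

// Simplified stand-ins for illustration only; the real scheduler types
// (subgroup_info.SubGroupSet, subgroup_info.PodSet, framework.Statement)
// carry far more state than this.
type PodSet struct{ Name string }

type SubGroupSet struct {
    Name     string
    Children []*SubGroupSet
    PodSets  []*PodSet
}

// Statement mimics the checkpoint/rollback behaviour used in the diff above.
type Statement struct{ ops []string }

func (s *Statement) Checkpoint() int  { return len(s.ops) }
func (s *Statement) Rollback(cp int)  { s.ops = s.ops[:cp] }
func (s *Statement) Record(op string) { s.ops = append(s.ops, op) }

// allocateSubGroupSet walks the hierarchy depth-first: child groups first,
// then this level's own pod sets, rolling back to the checkpoint on failure.
func allocateSubGroupSet(stmt *Statement, sg *SubGroupSet, tryPodSet func(*PodSet) bool) bool {
    cp := stmt.Checkpoint()
    for _, child := range sg.Children {
        if !allocateSubGroupSet(stmt, child, tryPodSet) {
            stmt.Rollback(cp)
            return false
        }
    }
    for _, ps := range sg.PodSets {
        if !tryPodSet(ps) {
            stmt.Rollback(cp)
            return false
        }
        stmt.Record("allocated " + ps.Name)
    }
    return true
}

func main() {
    root := &SubGroupSet{
        Name: "root",
        Children: []*SubGroupSet{
            {Name: "leaders", PodSets: []*PodSet{{Name: "sub0"}}},
            {Name: "workers", PodSets: []*PodSet{{Name: "sub1"}, {Name: "sub2"}}},
        },
    }
    stmt := &Statement{}
    ok := allocateSubGroupSet(stmt, root, func(ps *PodSet) bool { return ps.Name != "" })
    fmt.Println("allocated:", ok, "ops:", stmt.ops)
}

The checkpoint-per-attempt pattern is what lets a failure at a deeper level unwind cleanly, so partial allocations do not leak into the next candidate node set.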

pkg/scheduler/actions/consolidation/consolidation_subgroups_test.go

Lines changed: 6 additions & 6 deletions
@@ -70,7 +70,7 @@ func getSubGroupsConsolidationTestsMetadata() []integration_tests_utils.TestTopo
     Name: "pending_job0",
     Priority: constants.PriorityTrainNumber,
     QueueName: "queue0",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -160,7 +160,7 @@ func getSubGroupsConsolidationTestsMetadata() []integration_tests_utils.TestTopo
     Name: "pending_job0",
     Priority: constants.PriorityTrainNumber,
     QueueName: "queue0",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -291,7 +291,7 @@ func getSubGroupsConsolidationTestsMetadata() []integration_tests_utils.TestTopo
     Name: "pending_job0",
     Priority: constants.PriorityTrainNumber,
     QueueName: "queue0",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -353,7 +353,7 @@ func getSubGroupsConsolidationTestsMetadata() []integration_tests_utils.TestTopo
     Name: "running_job",
     Priority: constants.PriorityTrainNumber,
     QueueName: "queue0",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -445,7 +445,7 @@ func getSubGroupsConsolidationTestsMetadata() []integration_tests_utils.TestTopo
     Name: "running_job1",
     Priority: constants.PriorityTrainNumber,
     QueueName: "queue0",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -540,7 +540,7 @@ func getSubGroupsConsolidationTestsMetadata() []integration_tests_utils.TestTopo
     Name: "running_job0",
     Priority: constants.PriorityTrainNumber,
     QueueName: "queue0",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
     },
     Tasks: []*tasks_fake.TestTaskBasic{

pkg/scheduler/actions/preempt/preempt_subgroups_test.go

Lines changed: 6 additions & 6 deletions
@@ -65,7 +65,7 @@ func getPreemptSubGroupsTestsMetadata() []integration_tests_utils.TestTopologyMe
     RequiredGPUsPerTask: 1,
     Priority: constants.PriorityBuildNumber,
     QueueName: "queue0",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -145,7 +145,7 @@ func getPreemptSubGroupsTestsMetadata() []integration_tests_utils.TestTopologyMe
     RequiredGPUsPerTask: 2,
     Priority: constants.PriorityBuildNumber,
     QueueName: "queue0",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -224,7 +224,7 @@ func getPreemptSubGroupsTestsMetadata() []integration_tests_utils.TestTopologyMe
     RequiredGPUsPerTask: 1,
     Priority: constants.PriorityBuildNumber,
     QueueName: "queue0",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -324,7 +324,7 @@ func getPreemptSubGroupsTestsMetadata() []integration_tests_utils.TestTopologyMe
     RequiredGPUsPerTask: 1,
     Priority: constants.PriorityBuildNumber,
     QueueName: "queue0",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -407,7 +407,7 @@ func getPreemptSubGroupsTestsMetadata() []integration_tests_utils.TestTopologyMe
     RequiredGPUsPerTask: 1,
     Priority: constants.PriorityTrainNumber,
     QueueName: "queue0",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -503,7 +503,7 @@ func getPreemptSubGroupsTestsMetadata() []integration_tests_utils.TestTopologyMe
     RequiredGPUsPerTask: 1,
     Priority: constants.PriorityTrainNumber,
     QueueName: "queue0",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 2, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 2, nil),
     },

pkg/scheduler/actions/reclaim/reclaim_sub_group_test.go

Lines changed: 6 additions & 6 deletions
@@ -66,7 +66,7 @@ func getReclaimSubGroupsTestsMetadata() []integration_tests_utils.TestTopologyMe
     RequiredGPUsPerTask: 1,
     Priority: constants.PriorityTrainNumber,
     QueueName: "queue1",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -159,7 +159,7 @@ func getReclaimSubGroupsTestsMetadata() []integration_tests_utils.TestTopologyMe
     RequiredGPUsPerTask: 1,
     Priority: constants.PriorityTrainNumber,
     QueueName: "queue1",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -270,7 +270,7 @@ func getReclaimSubGroupsTestsMetadata() []integration_tests_utils.TestTopologyMe
     RequiredGPUsPerTask: 1,
     Priority: constants.PriorityTrainNumber,
     QueueName: "queue1",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -385,7 +385,7 @@ func getReclaimSubGroupsTestsMetadata() []integration_tests_utils.TestTopologyMe
     RequiredGPUsPerTask: 2,
     Priority: constants.PriorityTrainNumber,
     QueueName: "queue1",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -456,7 +456,7 @@ func getReclaimSubGroupsTestsMetadata() []integration_tests_utils.TestTopologyMe
     RequiredGPUsPerTask: 1,
     Priority: constants.PriorityTrainNumber,
     QueueName: "queue0",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 1, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 1, nil),
     },
@@ -562,7 +562,7 @@ func getReclaimSubGroupsTestsMetadata() []integration_tests_utils.TestTopologyMe
     RequiredGPUsPerTask: 1,
     Priority: constants.PriorityTrainNumber,
    QueueName: "queue0",
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-0": subgroup_info.NewPodSet("sub-0", 2, nil),
         "sub-1": subgroup_info.NewPodSet("sub-1", 2, nil),
     },

pkg/scheduler/actions/stalegangeviction/stalegangeviction_test.go

Lines changed: 2 additions & 2 deletions
@@ -249,7 +249,7 @@ func TestStaleGangEviction(t *testing.T) {
     Name: "job-1",
     QueueName: "q-1",
     MinAvailable: pointer.Int32(3),
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-group-0": subgroup_info.NewPodSet("sub-group-0", 2, nil),
         "sub-group-1": subgroup_info.NewPodSet("sub-group-1", 1, nil),
     },
@@ -329,7 +329,7 @@ func TestStaleGangEviction(t *testing.T) {
     Name: "job-1",
     QueueName: "q-1",
     MinAvailable: pointer.Int32(3),
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-group-0": subgroup_info.NewPodSet("sub-group-0", 2, nil),
         "sub-group-1": subgroup_info.NewPodSet("sub-group-1", 1, nil),
     },

pkg/scheduler/cache/status_updater/default_status_updater_test.go

Lines changed: 1 addition & 1 deletion
@@ -569,7 +569,7 @@ func TestDefaultStatusUpdater_RecordJobStatusEvent(t *testing.T) {
             State: pod_status.Pending,
         },
     },
-    SubGroups: map[string]*subgroup_info.PodSet{
+    PodSets: map[string]*subgroup_info.PodSet{
         "sub-group-1": func() *subgroup_info.PodSet {
             subGroup := subgroup_info.NewPodSet("sub-group-1", 1, nil)
             subGroup.AssignTask(&pod_info.PodInfo{UID: "test-task1", Status: pod_status.Pending})
