
Commit b746453

Revert "add JSON serialization tags to scheduler api structs (#11)"
This reverts commit 045a8dc.
1 parent 7c43481 commit b746453

35 files changed: +417 -397 lines

Makefile

Lines changed: 2 additions & 2 deletions
@@ -33,7 +33,7 @@ $(SERVICE_NAMES):
     $(MAKE) docker-build-generic SERVICE_NAME=$@
 
 .PHONY: validate
-validate: generate manifests clients gen-license generate-mocks lint
+validate: generate manifests clients gen-license generate-mocks
     git diff --exit-code
 
 .PHONY: generate-mocks
@@ -97,4 +97,4 @@ KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/k
 .PHONY: kustomize
 kustomize: $(KUSTOMIZE)
 $(KUSTOMIZE): $(LOCALBIN)
-    test -s $(LOCALBIN)/kustomize || { curl -Ss $(KUSTOMIZE_INSTALL_SCRIPT) --output install_kustomize.sh && bash install_kustomize.sh $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); rm install_kustomize.sh; }
+    test -s $(LOCALBIN)/kustomize || { curl -Ss $(KUSTOMIZE_INSTALL_SCRIPT) --output install_kustomize.sh && bash install_kustomize.sh $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); rm install_kustomize.sh; }

pkg/podgrouper/podgrouper/plugins/skiptopowner/skiptopowner_test.go

Lines changed: 1 addition & 1 deletion
@@ -7,9 +7,9 @@ import (
     "context"
     "testing"
 
-    "github.com/NVIDIA/KAI-scheduler/pkg/podgrouper/podgrouper/plugins"
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
+    "github.com/NVIDIA/KAI-scheduler/pkg/podgrouper/podgrouper/plugins"
     appsv1 "k8s.io/api/apps/v1"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

pkg/scheduler/api/cluster_info.go

Lines changed: 12 additions & 12 deletions
@@ -22,18 +22,18 @@ import (
 
 // ClusterInfo is a snapshot of cluster by cache.
 type ClusterInfo struct {
-    Pods []*v1.Pod `json:"pods,omitempty"`
-    PodGroupInfos map[common_info.PodGroupID]*podgroup_info.PodGroupInfo `json:"podGroupInfos,omitempty"`
-    Nodes map[string]*node_info.NodeInfo `json:"nodes,omitempty"`
-    BindRequests bindrequest_info.BindRequestMap `json:"bindRequests,omitempty"`
-    BindRequestsForDeletedNodes []*bindrequest_info.BindRequestInfo `json:"bindRequestsForDeletedNodes,omitempty"`
-    Queues map[common_info.QueueID]*queue_info.QueueInfo `json:"queues,omitempty"`
-    Departments map[common_info.QueueID]*queue_info.QueueInfo `json:"departments,omitempty"`
-    StorageClaims map[storageclaim_info.Key]*storageclaim_info.StorageClaimInfo `json:"storageClaims,omitempty"`
-    StorageCapacities map[common_info.StorageCapacityID]*storagecapacity_info.StorageCapacityInfo `json:"storageCapacities,omitempty"`
-    CSIDrivers map[common_info.CSIDriverID]*csidriver_info.CSIDriverInfo `json:"csiDrivers,omitempty"`
-    StorageClasses map[common_info.StorageClassID]*storageclass_info.StorageClassInfo `json:"storageClasses,omitempty"`
-    ConfigMaps map[common_info.ConfigMapID]*configmap_info.ConfigMapInfo `json:"configMaps,omitempty"`
+    Pods []*v1.Pod
+    PodGroupInfos map[common_info.PodGroupID]*podgroup_info.PodGroupInfo
+    Nodes map[string]*node_info.NodeInfo
+    BindRequests bindrequest_info.BindRequestMap
+    BindRequestsForDeletedNodes []*bindrequest_info.BindRequestInfo
+    Queues map[common_info.QueueID]*queue_info.QueueInfo
+    Departments map[common_info.QueueID]*queue_info.QueueInfo
+    StorageClaims map[storageclaim_info.Key]*storageclaim_info.StorageClaimInfo
+    StorageCapacities map[common_info.StorageCapacityID]*storagecapacity_info.StorageCapacityInfo
+    CSIDrivers map[common_info.CSIDriverID]*csidriver_info.CSIDriverInfo
+    StorageClasses map[common_info.StorageClassID]*storageclass_info.StorageClassInfo
+    ConfigMaps map[common_info.ConfigMapID]*configmap_info.ConfigMapInfo
 }
 
 func NewClusterInfo() *ClusterInfo {
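
Note: the practical effect of dropping these tags is visible with encoding/json alone. A minimal sketch with toy types (not the scheduler's real structs): without `json:"...,omitempty"` tags, Marshal falls back to the exported Go field names and never omits empty fields.

    // Toy illustration of what the reverted tags changed for JSON output.
    package main

    import (
        "encoding/json"
        "fmt"
    )

    type tagged struct {
        Nodes map[string]int `json:"nodes,omitempty"`
    }

    type untagged struct {
        Nodes map[string]int
    }

    func main() {
        a, _ := json.Marshal(tagged{})   // prints {} since the nil map is omitted and the key is renamed
        b, _ := json.Marshal(untagged{}) // prints {"Nodes":null} with the Go field name, nothing omitted
        fmt.Println(string(a), string(b))
    }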

pkg/scheduler/api/common_info/errors.go

Lines changed: 23 additions & 23 deletions
@@ -67,19 +67,19 @@ func NewFitErrorInsufficientResource(
     var shortMessages []string
     var detailedMessages []string
 
-    if len(resourceRequested.MIGResources) > 0 {
-        for migProfile, quant := range resourceRequested.MIGResources {
+    if len(resourceRequested.MigResources()) > 0 {
+        for migProfile, quant := range resourceRequested.MigResources() {
             availableMigProfilesQuant := int64(0)
             capacityMigProfilesQuant := int64(0)
-            if _, found := availableResource.ScalarResources[migProfile]; found {
-                availableMigProfilesQuant = availableResource.ScalarResources[migProfile]
-                capacityMigProfilesQuant = capacityResource.ScalarResources[migProfile]
+            if _, found := availableResource.ScalarResources()[migProfile]; found {
+                availableMigProfilesQuant = availableResource.ScalarResources()[migProfile]
+                capacityMigProfilesQuant = capacityResource.ScalarResources()[migProfile]
             }
             if availableMigProfilesQuant < quant {
                 detailedMessages = append(detailedMessages, k8s_internal.NewInsufficientResourceErrorScalarResources(
                     migProfile,
                     quant,
-                    usedResource.ScalarResources[migProfile],
+                    usedResource.ScalarResources()[migProfile],
                     capacityMigProfilesQuant,
                     gangSchedulingJob))
                 shortMessages = append(shortMessages, fmt.Sprintf("node(s) didn't have enough of mig profile: %s",
@@ -88,13 +88,13 @@ func NewFitErrorInsufficientResource(
         }
     } else {
         requestedGPUs := resourceRequested.GPUs()
-        availableGPUs := availableResource.GPUs
+        availableGPUs := availableResource.GPUs()
         if requestedGPUs > availableGPUs {
             detailedMessages = append(detailedMessages, k8s_internal.NewInsufficientResourceError(
                 "GPUs",
                 generateRequestedGpuString(resourceRequested),
-                strconv.FormatFloat(usedResource.GPUs, 'g', 3, 64),
-                strconv.FormatFloat(capacityResource.GPUs, 'g', 3, 64),
+                strconv.FormatFloat(usedResource.GPUs(), 'g', 3, 64),
+                strconv.FormatFloat(capacityResource.GPUs(), 'g', 3, 64),
                 gangSchedulingJob))
             shortMessages = append(shortMessages, "node(s) didn't have enough resources: GPUs")
         }
@@ -106,40 +106,40 @@ func NewFitErrorInsufficientResource(
         }
     }
 
-    requestedCPUs := int64(resourceRequested.CPUMilliCores)
-    availableCPUs := int64(availableResource.CPUMilliCores)
+    requestedCPUs := int64(resourceRequested.Cpu())
+    availableCPUs := int64(availableResource.Cpu())
     if requestedCPUs > availableCPUs {
         detailedMessages = append(detailedMessages, k8s_internal.NewInsufficientResourceError(
             "CPU cores",
-            humanize.FtoaWithDigits(resourceRequested.CPUMilliCores/resource_info.MilliCPUToCores, 3),
-            humanize.FtoaWithDigits(usedResource.CPUMilliCores/resource_info.MilliCPUToCores, 3),
-            humanize.FtoaWithDigits(capacityResource.CPUMilliCores/resource_info.MilliCPUToCores, 3),
+            humanize.FtoaWithDigits(resourceRequested.Cpu()/resource_info.MilliCPUToCores, 3),
+            humanize.FtoaWithDigits(usedResource.Cpu()/resource_info.MilliCPUToCores, 3),
+            humanize.FtoaWithDigits(capacityResource.Cpu()/resource_info.MilliCPUToCores, 3),
             gangSchedulingJob))
         shortMessages = append(shortMessages, "node(s) didn't have enough resources: CPU cores")
     }
 
-    if resourceRequested.MemoryBytes > availableResource.MemoryBytes {
+    if resourceRequested.Memory() > availableResource.Memory() {
         detailedMessages = append(detailedMessages, k8s_internal.NewInsufficientResourceError(
             "memory",
-            humanize.FtoaWithDigits(resourceRequested.MemoryBytes/resource_info.MemoryToGB, 3),
-            humanize.FtoaWithDigits(usedResource.MemoryBytes/resource_info.MemoryToGB, 3),
-            humanize.FtoaWithDigits(capacityResource.MemoryBytes/resource_info.MemoryToGB, 3),
+            humanize.FtoaWithDigits(resourceRequested.Memory()/resource_info.MemoryToGB, 3),
+            humanize.FtoaWithDigits(usedResource.Memory()/resource_info.MemoryToGB, 3),
+            humanize.FtoaWithDigits(capacityResource.Memory()/resource_info.MemoryToGB, 3),
             gangSchedulingJob))
         shortMessages = append(shortMessages, "node(s) didn't have enough resources: memory")
     }
 
-    for requestedResourceName, requestedResourceQuant := range resourceRequested.ScalarResources {
+    for requestedResourceName, requestedResourceQuant := range resourceRequested.ScalarResources() {
         availableResourceQuant := int64(0)
         capacityResourceQuant := int64(0)
-        if _, found := availableResource.ScalarResources[requestedResourceName]; found {
-            availableResourceQuant = availableResource.ScalarResources[requestedResourceName]
-            capacityResourceQuant = capacityResource.ScalarResources[requestedResourceName]
+        if _, found := availableResource.ScalarResources()[requestedResourceName]; found {
+            availableResourceQuant = availableResource.ScalarResources()[requestedResourceName]
+            capacityResourceQuant = capacityResource.ScalarResources()[requestedResourceName]
         }
         if availableResourceQuant < requestedResourceQuant {
             detailedMessages = append(detailedMessages, k8s_internal.NewInsufficientResourceErrorScalarResources(
                 requestedResourceName,
                 requestedResourceQuant,
-                usedResource.ScalarResources[requestedResourceName], capacityResourceQuant,
+                usedResource.ScalarResources()[requestedResourceName], capacityResourceQuant,
                 gangSchedulingJob))
             shortMessages = append(shortMessages, fmt.Sprintf("node(s) didn't have enough resources: %s",
                 requestedResourceName))
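
Note: the + lines above consistently swap direct field access (GPUs, CPUMilliCores, MemoryBytes, ScalarResources, MIGResources) for accessor methods. A minimal sketch of that getter pattern, with invented struct internals; the real resource_info.Resource type differs and this only illustrates the API shape the revert restores.

    // Sketch of an accessor-style resource type. Field names and units are
    // assumptions made for illustration, not the scheduler's implementation.
    package main

    import "fmt"

    type Resource struct {
        gpus            float64
        cpuMilliCores   float64
        memoryBytes     float64
        scalarResources map[string]int64
    }

    func (r *Resource) GPUs() float64                     { return r.gpus }
    func (r *Resource) Cpu() float64                      { return r.cpuMilliCores }
    func (r *Resource) Memory() float64                   { return r.memoryBytes }
    func (r *Resource) ScalarResources() map[string]int64 { return r.scalarResources }

    func main() {
        requested := &Resource{cpuMilliCores: 4000, gpus: 1}
        available := &Resource{cpuMilliCores: 2000, gpus: 2}
        // Comparisons go through the getters, mirroring the + lines in the diff.
        if requested.Cpu() > available.Cpu() || requested.GPUs() > available.GPUs() {
            fmt.Println("node(s) didn't have enough resources: CPU cores")
        }
    }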

pkg/scheduler/api/configmap_info/configmap_info.go

Lines changed: 3 additions & 3 deletions
@@ -11,9 +11,9 @@ import (
 )
 
 type ConfigMapInfo struct {
-    UID       common_info.ConfigMapID `json:"uid,omitempty"`
-    Name      string                  `json:"name,omitempty"`
-    Namespace string                  `json:"namespace,omitempty"`
+    UID       common_info.ConfigMapID
+    Name      string
+    Namespace string
 }
 
 func NewConfigMapInfo(configMap *v1.ConfigMap) *ConfigMapInfo {

pkg/scheduler/api/csidriver_info/csidriver_info.go

Lines changed: 2 additions & 2 deletions
@@ -6,6 +6,6 @@ package csidriver_info
 import "github.com/NVIDIA/KAI-scheduler/pkg/scheduler/api/common_info"
 
 type CSIDriverInfo struct {
-    ID              common_info.CSIDriverID `json:"id,omitempty"`
-    CapacityEnabled bool                    `json:"capacityEnabled,omitempty"`
+    ID              common_info.CSIDriverID
+    CapacityEnabled bool
 }

pkg/scheduler/api/node_info/gpu_sharing_node_info.go

Lines changed: 7 additions & 7 deletions
@@ -60,8 +60,8 @@ func getAcceptedTaskResourceWithoutSharedGPU(task *pod_info.PodInfo) *resource_i
     requestedResourceWithoutSharedGPU := resource_info.EmptyResource()
     requestedResourceWithoutSharedGPU.BaseResource = *task.AcceptedResource.BaseResource.Clone()
     requestedResourceWithoutSharedGPU.SetGPUs(task.AcceptedResource.GPUs())
-    maps.Copy(requestedResourceWithoutSharedGPU.ScalarResources, task.AcceptedResource.MIGResources)
-    maps.Copy(requestedResourceWithoutSharedGPU.ScalarResources, task.AcceptedResource.ScalarResources)
+    maps.Copy(requestedResourceWithoutSharedGPU.ScalarResources(), task.AcceptedResource.MigResources())
+    maps.Copy(requestedResourceWithoutSharedGPU.ScalarResources(), task.AcceptedResource.ScalarResources())
     if task.IsSharedGPUAllocation() {
         requestedResourceWithoutSharedGPU.SetGPUs(0)
     }
@@ -106,7 +106,7 @@ func (ni *NodeInfo) addSharedTaskResourcesPerPodGroup(task *pod_info.PodInfo, gp
         ni.Releasing.AddGPUs(1)
         ni.markSharedGpuAsReleasing(gpuGroup)
     }
-    if int(ni.GetNumberOfGPUsInNode()) < int(ni.Idle.GPUs)+ni.getNumberOfUsedGPUs() {
+    if int(ni.GetNumberOfGPUsInNode()) < int(ni.Idle.GPUs())+ni.getNumberOfUsedGPUs() {
         ni.Idle.SubGPUs(1)
     }
 }
@@ -122,7 +122,7 @@ func (ni *NodeInfo) addSharedTaskResourcesPerPodGroup(task *pod_info.PodInfo, gp
 
     if ni.UsedSharedGPUsMemory[gpuGroup] <= ni.GetResourceGpuMemory(task.ResReq) {
         // no other fractional was allocated here yet
-        if int(ni.GetNumberOfGPUsInNode()) < int(ni.Idle.GPUs)+ni.getNumberOfUsedGPUs() {
+        if int(ni.GetNumberOfGPUsInNode()) < int(ni.Idle.GPUs())+ni.getNumberOfUsedGPUs() {
             ni.Idle.SubGPUs(1)
         }
     }
@@ -181,7 +181,7 @@ func (ni *NodeInfo) removeSharedTaskResourcesPerPodGroup(task *pod_info.PodInfo,
 
     if ni.UsedSharedGPUsMemory[gpuGroup] <= 0 {
         // is this the last releasing task for this gpu
-        if int(ni.GetNumberOfGPUsInNode()) >= int(ni.Idle.GPUs)+ni.getNumberOfUsedGPUs() {
+        if int(ni.GetNumberOfGPUsInNode()) >= int(ni.Idle.GPUs())+ni.getNumberOfUsedGPUs() {
             ni.Idle.AddGPUs(1)
         }
         if ni.isSharedGpuMarkedAsReleasing(gpuGroup) {
@@ -211,7 +211,7 @@ func (ni *NodeInfo) removeSharedTaskResourcesPerPodGroup(task *pod_info.PodInfo,
 
     if ni.UsedSharedGPUsMemory[gpuGroup] <= 0 {
         // no other fractional was allocated here yet
-        if int(ni.GetNumberOfGPUsInNode()) >= int(ni.Idle.GPUs)+ni.getNumberOfUsedGPUs() {
+        if int(ni.GetNumberOfGPUsInNode()) >= int(ni.Idle.GPUs())+ni.getNumberOfUsedGPUs() {
             ni.Idle.AddGPUs(1)
         }
     }
@@ -271,7 +271,7 @@ func (ni *NodeInfo) getNumberOfUsedSharedGPUs() int {
 }
 
 func (ni *NodeInfo) getNumberOfUsedGPUs() int {
-    return int(ni.Used.GPUs) + ni.getNumberOfUsedSharedGPUs()
+    return int(ni.Used.GPUs()) + ni.getNumberOfUsedSharedGPUs()
 }
 
 func (ni *NodeInfo) GetNumberOfAllocatedSharedGPUs() int {
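
Note: maps.Copy in the first hunk comes from the Go 1.21 standard library `maps` package and copies the source's key/value pairs into the destination map. A small standalone sketch with toy maps, not the scheduler's types; whether copying into ScalarResources() actually mutates the receiver depends on the getter returning the live underlying map, which this diff does not show.

    // Minimal maps.Copy example (requires Go 1.21+).
    package main

    import (
        "fmt"
        "maps"
    )

    func main() {
        scalar := map[string]int64{"nvidia.com/gpu": 2}
        mig := map[string]int64{"nvidia.com/mig-1g.5gb": 1}

        merged := map[string]int64{}
        maps.Copy(merged, scalar) // copies key/value pairs from src into dst
        maps.Copy(merged, mig)
        fmt.Println(merged) // map[nvidia.com/gpu:2 nvidia.com/mig-1g.5gb:1]
    }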

pkg/scheduler/api/node_info/node_info.go

Lines changed: 27 additions & 27 deletions
@@ -49,30 +49,30 @@ const (
 
 // NodeInfo is node level aggregated information.
 type NodeInfo struct {
-    Name string `json:"name,omitempty"`
-    Node *v1.Node `json:"node,omitempty"`
+    Name string
+    Node *v1.Node
 
     // The releasing resource on that node (excluding shared GPUs)
-    Releasing *resource_info.Resource `json:"releasing,omitempty"`
+    Releasing *resource_info.Resource
     // The idle resource on that node (excluding shared GPUs)
-    Idle *resource_info.Resource `json:"idle,omitempty"`
+    Idle *resource_info.Resource
     // The used resource on that node, including running and terminating
     // pods (excluding shared GPUs)
-    Used *resource_info.Resource `json:"used,omitempty"`
+    Used *resource_info.Resource
 
-    Allocatable *resource_info.Resource `json:"allocatable,omitempty"`
+    Allocatable *resource_info.Resource
 
-    AccessibleStorageCapacities map[common_info.StorageClassID][]*sc_info.StorageCapacityInfo `json:"accessibleStorageCapacities,omitempty"`
+    AccessibleStorageCapacities map[common_info.StorageClassID][]*sc_info.StorageCapacityInfo
 
-    PodInfos map[common_info.PodID]*pod_info.PodInfo `json:"podInfos,omitempty"`
-    MaxTaskNum int `json:"maxTaskNum,omitempty"`
-    MemoryOfEveryGpuOnNode int64 `json:"memoryOfEveryGpuOnNode,omitempty"`
-    GpuMemorySynced bool `json:"gpuMemorySynced,omitempty"`
-    LegacyMIGTasks map[common_info.PodID]string `json:"legacyMigTasks,omitempty"`
+    PodInfos map[common_info.PodID]*pod_info.PodInfo
+    MaxTaskNum int
+    MemoryOfEveryGpuOnNode int64
+    GpuMemorySynced bool
+    LegacyMIGTasks map[common_info.PodID]string
 
-    PodAffinityInfo pod_affinity.NodePodAffinityInfo `json:"podAffinityInfo,omitempty"`
+    PodAffinityInfo pod_affinity.NodePodAffinityInfo
 
-    GpuSharingNodeInfo `json:"gpuSharingNodeInfo,omitempty"`
+    GpuSharingNodeInfo
 }
 
 func NewNodeInfo(node *v1.Node, podAffinityInfo pod_affinity.NodePodAffinityInfo) *NodeInfo {
@@ -103,7 +103,7 @@ func NewNodeInfo(node *v1.Node, podAffinityInfo pod_affinity.NodePodAffinityInfo
     nodeInfo.MaxTaskNum = int(numTasks.Value())
 
     capacity := resource_info.ResourceFromResourceList(node.Status.Capacity)
-    if capacity.GPUs != nodeInfo.Allocatable.GPUs {
+    if capacity.GPUs() != nodeInfo.Allocatable.GPUs() {
         log.InfraLogger.V(2).Warnf(
             "For node %s, the capacity and allocatable are different. Capacity %v, Allocatable %v",
             node.Name, capacity.DetailedString(), nodeInfo.Allocatable.DetailedString())
@@ -307,7 +307,7 @@ func (ni *NodeInfo) isTaskAllocatableOnNonAllocatedResources(
     if !ni.isValidGpuPortion(task.ResReq) {
         return false
     }
-    nodeIdleOrReleasingWholeGpus := int64(math.Floor(nodeNonAllocatedResources.GPUs))
+    nodeIdleOrReleasingWholeGpus := int64(math.Floor(nodeNonAllocatedResources.GPUs()))
     nodeNonAllocatedResourcesMatchingSharedGpus := ni.fractionTaskGpusAllocatableDeviceCount(task)
     if nodeIdleOrReleasingWholeGpus+nodeNonAllocatedResourcesMatchingSharedGpus >= task.ResReq.GetNumOfGpuDevices() {
         return true
@@ -515,9 +515,9 @@ func (ni *NodeInfo) String() string {
 
 func (ni *NodeInfo) GetSumOfIdleGPUs() (float64, int64) {
     sumOfSharedGPUs, sumOfSharedGPUsMemory := ni.getSumOfAvailableSharedGPUs()
-    idleGPUs := ni.Idle.GPUs
+    idleGPUs := ni.Idle.GPUs()
 
-    for resourceName, qty := range ni.Idle.ScalarResources {
+    for resourceName, qty := range ni.Idle.ScalarResources() {
         if !isMigResource(resourceName.String()) {
             continue
         }
@@ -534,9 +534,9 @@ func (ni *NodeInfo) GetSumOfIdleGPUs() (float64, int64) {
 
 func (ni *NodeInfo) GetSumOfReleasingGPUs() (float64, int64) {
     sumOfSharedGPUs, sumOfSharedGPUsMemory := ni.getSumOfReleasingSharedGPUs()
-    releasingGPUs := ni.Releasing.GPUs
+    releasingGPUs := ni.Releasing.GPUs()
 
-    for resourceName, qty := range ni.Releasing.ScalarResources {
+    for resourceName, qty := range ni.Releasing.ScalarResources() {
         if !isMigResource(resourceName.String()) {
             continue
         }
@@ -569,7 +569,7 @@ func (ni *NodeInfo) GetNumberOfGPUsInNode() int64 {
     numberOfGPUs, err := ni.getNodeGpuCountLabelValue()
     if err != nil {
         log.InfraLogger.V(6).Infof("Node: <%v> had no annotations of nvidia.com/gpu.count", ni.Name)
-        return int64(ni.Allocatable.GPUs)
+        return int64(ni.Allocatable.GPUs())
     }
     return int64(numberOfGPUs)
 }
@@ -629,7 +629,7 @@ func (ni *NodeInfo) IsCPUOnlyNode() bool {
     if ni.IsMIGEnabled() {
         return false
     }
-    return ni.Allocatable.GPUs == 0
+    return ni.Allocatable.GPUs() == 0
 }
 
 func (ni *NodeInfo) IsMIGEnabled() bool {
@@ -638,7 +638,7 @@ func (ni *NodeInfo) IsMIGEnabled() bool {
         isMig, err := strconv.ParseBool(enabled)
         return err == nil && isMig
     }
-    for nodeResource := range ni.Allocatable.ScalarResources {
+    for nodeResource := range ni.Allocatable.ScalarResources() {
         if isMigResource(nodeResource.String()) {
             return true
         }
@@ -663,13 +663,13 @@ func (ni *NodeInfo) GetMigStrategy() MigStrategy {
 
 func (ni *NodeInfo) GetRequiredInitQuota(pi *pod_info.PodInfo) *podgroup_info.JobRequirement {
     quota := podgroup_info.JobRequirement{}
-    if len(pi.ResReq.MIGResources) != 0 {
+    if len(pi.ResReq.MigResources()) != 0 {
         quota.GPU = pi.ResReq.GetSumGPUs()
     } else {
         quota.GPU = ni.getGpuMemoryFractionalOnNode(ni.GetResourceGpuMemory(pi.ResReq))
     }
-    quota.MilliCPU = pi.ResReq.CPUMilliCores
-    quota.Memory = pi.ResReq.MemoryBytes
+    quota.MilliCPU = pi.ResReq.Cpu()
+    quota.Memory = pi.ResReq.Memory()
     return &quota
 }
 
@@ -682,7 +682,7 @@ func (ni *NodeInfo) setAcceptedResources(pi *pod_info.PodInfo) {
     if pi.IsMigCandidate() {
        pi.ResourceReceivedType = pod_info.ReceivedTypeMigInstance
        pi.AcceptedResource.GpuResourceRequirement =
-           *resource_info.NewGpuResourceRequirementWithMig(pi.ResReq.MIGResources)
+           *resource_info.NewGpuResourceRequirementWithMig(pi.ResReq.MigResources())
     } else if pi.IsFractionCandidate() {
        pi.ResourceReceivedType = pod_info.ReceivedTypeFraction
        pi.AcceptedResource.GpuResourceRequirement = *resource_info.NewGpuResourceRequirementWithMultiFraction(
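
Note: GetSumOfIdleGPUs and GetSumOfReleasingGPUs above add whole idle or releasing GPUs to any MIG profile instances found among the node's scalar resources. A rough sketch of that summation pattern; the `nvidia.com/mig-` prefix check stands in for the real isMigResource helper, whose exact rule this diff does not show, and the shared-GPU terms are omitted.

    // Simplified idle-GPU summation: whole GPUs plus idle MIG profile instances.
    package main

    import (
        "fmt"
        "strings"
    )

    func sumIdleGPUs(idleGPUs float64, idleScalar map[string]int64) float64 {
        sum := idleGPUs
        for name, qty := range idleScalar {
            if !strings.HasPrefix(name, "nvidia.com/mig-") { // assumed MIG resource naming
                continue
            }
            sum += float64(qty)
        }
        return sum
    }

    func main() {
        idle := map[string]int64{"nvidia.com/mig-1g.5gb": 3, "example.com/fpga": 1}
        fmt.Println(sumIdleGPUs(2, idle)) // 5
    }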
