Commit a0e1cce

Added unit tests covering the method after the change.
1 parent 6146e33 commit a0e1cce

2 files changed: +219 -1 lines changed


core/task-executor/lib/reconcile/normalize.js

Lines changed: 1 addition & 1 deletion
@@ -321,7 +321,7 @@ const normalizeResources = ({ pods, nodes } = {}) => {
         accumulator[nodeName].requests.gpu += requestGpu;

         accumulator[nodeName].limits.cpu += limitsCpu;
-        accumulator[nodeName].limits.memory += requestMem;
+        accumulator[nodeName].limits.memory += limitsMem;
         accumulator[nodeName].limits.gpu += limitsGpu;

         // Use actual requests value for worker and other pods accounting
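
The one-line fix: the per-node limits.memory bucket was previously incremented with requestMem (the pod's memory request) rather than limitsMem (its memory limit), so any pod whose limit exceeds its request caused the node's memory limits to be under-counted. A minimal sketch of the intended accumulation after the fix, assuming requestMem/limitsMem already hold the pod's parsed memory values in MiB (the variable names come from the diff; everything else is illustrative):

// Sketch only, not the repo's code: per-node accumulation of one pod's resources.
const node = { requests: { cpu: 0, memory: 0, gpu: 0 }, limits: { cpu: 0, memory: 0, gpu: 0 } };
const requestCpu = 0.1, requestMem = 128;   // from container requests
const limitsCpu = 0.2, limitsMem = 256;     // from container limits

node.requests.cpu += requestCpu;      // 0.1
node.requests.memory += requestMem;   // 128
node.limits.cpu += limitsCpu;         // 0.2
node.limits.memory += limitsMem;      // 256 (the old code added requestMem here, i.e. 128)

This is exactly what the new 'should accumulate limits separately from requests' test below asserts: requests.memory of 128 alongside limits.memory of 256.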

core/task-executor/tests/normalizeTests.js

Lines changed: 218 additions & 0 deletions
@@ -448,6 +448,224 @@ describe('normalize', () => {
             expect(res.nodeList[1].free.cpu).to.eq(7.55);
             expect(res.nodeList[2].free.cpu).to.eq(7.8);
         });
+
+        it('should filter out nodes with NoSchedule taint', () => {
+            const nodesWithTaint = {
+                body: {
+                    items: [
+                        { metadata: { name: 'node1', labels: {} }, spec: { taints: [{ effect: 'NoSchedule' }] }, status: { allocatable: { cpu: '4', memory: '8Gi' } } },
+                        { metadata: { name: 'node2', labels: {} }, status: { allocatable: { cpu: '2', memory: '4Gi' } } }
+                    ]
+                }
+            };
+            const podsEmpty = { body: { items: [] } };
+
+            const res = normalizeResources({ pods: podsEmpty, nodes: nodesWithTaint });
+            expect(res.nodeList.length).to.eq(1);
+            expect(res.nodeList[0].name).to.eq('node2');
+        });
+
+        it('should ignore pods not in Running or Pending state', () => {
+            const nodes = {
+                body: {
+                    items: [
+                        { metadata: { name: 'node1', labels: {} }, status: { allocatable: { cpu: '4', memory: '8Gi' } } }
+                    ]
+                }
+            };
+            const pods = {
+                body: {
+                    items: [
+                        { status: { phase: 'Succeeded' }, spec: { nodeName: 'node1', containers: [] } },
+                        { status: { phase: 'Failed' }, spec: { nodeName: 'node1', containers: [] } }
+                    ]
+                }
+            };
+
+            const res = normalizeResources({ pods, nodes });
+            expect(res.nodeList[0].requests.cpu).to.eq(0);
+            expect(res.nodeList[0].requests.memory).to.eq(0);
+        });
+
+        it('should account worker pods separately from other pods', () => {
+            const nodes = {
+                body: {
+                    items: [
+                        { metadata: { name: 'node1', labels: {} }, status: { allocatable: { cpu: '4', memory: '8Gi' } } }
+                    ]
+                }
+            };
+            const pods = {
+                body: {
+                    items: [
+                        {
+                            status: { phase: 'Running' },
+                            spec: { nodeName: 'node1', containers: [{ resources: { requests: { cpu: '100m', memory: '128Mi' } } }] },
+                            metadata: { labels: { type: 'worker', 'algorithm-name': 'algo1' }, name: 'workerPod1' }
+                        },
+                        {
+                            status: { phase: 'Running' },
+                            spec: { nodeName: 'node1', containers: [{ resources: { requests: { cpu: '200m', memory: '256Mi' } } }] },
+                            metadata: { labels: { type: 'other' }, name: 'otherPod1' }
+                        }
+                    ]
+                }
+            };
+
+            const res = normalizeResources({ pods, nodes });
+            const node = res.nodeList[0];
+            expect(node.workersTotal.cpu).to.eq(0.1);
+            expect(node.other.cpu).to.eq(0.2);
+            expect(node.workers.length).to.eq(1);
+            expect(node.workers[0].algorithmName).to.eq('algo1');
+        });
+
+        it('should calculate GPU resources correctly', () => {
+            const nodes = {
+                body: {
+                    items: [
+                        { metadata: { name: 'node1', labels: {} }, status: { allocatable: { cpu: '4', memory: '8Gi', 'nvidia.com/gpu': '2' } } }
+                    ]
+                }
+            };
+            const pods = {
+                body: {
+                    items: [
+                        {
+                            status: { phase: 'Running' },
+                            spec: { nodeName: 'node1', containers: [{ resources: { limits: { 'nvidia.com/gpu': '1' } } }] },
+                            metadata: { labels: {}, name: 'gpuPod1' }
+                        }
+                    ]
+                }
+            };
+
+            const res = normalizeResources({ pods, nodes });
+            const node = res.nodeList[0];
+            expect(node.total.gpu).to.eq(2);
+            expect(node.requests.gpu).to.eq(1);
+            expect(node.free.gpu).to.eq(1);
+            expect(node.ratio.gpu).to.eq(0.5);
+        });
+
+        it('should accumulate limits separately from requests', () => {
+            const nodes = {
+                body: {
+                    items: [
+                        { metadata: { name: 'node1', labels: {} }, status: { allocatable: { cpu: '4', memory: '8Gi' } } }
+                    ]
+                }
+            };
+            const pods = {
+                body: {
+                    items: [
+                        {
+                            status: { phase: 'Running' },
+                            spec: {
+                                nodeName: 'node1',
+                                containers: [{
+                                    resources: {
+                                        requests: { cpu: '100m', memory: '128Mi' },
+                                        limits: { cpu: '200m', memory: '256Mi' }
+                                    }
+                                }]
+                            },
+                            metadata: { labels: {}, name: 'pod1' }
+                        }
+                    ]
+                }
+            };
+
+            const res = normalizeResources({ pods, nodes });
+            const node = res.nodeList[0];
+            expect(node.requests.cpu).to.eq(0.1);
+            expect(node.limits.cpu).to.eq(0.2);
+            expect(node.requests.memory).to.eq(128);
+            expect(node.limits.memory).to.eq(256);
+        });
+
+        it('should include nodes with no pods in nodeList with zero requests', () => {
+            const nodes = {
+                body: {
+                    items: [
+                        { metadata: { name: 'node1', labels: {} }, status: { allocatable: { cpu: '4', memory: '8Gi' } } }
+                    ]
+                }
+            };
+            const pods = { body: { items: [] } };
+
+            const res = normalizeResources({ pods, nodes });
+            expect(res.nodeList[0].requests.cpu).to.eq(0);
+            expect(res.nodeList[0].free.cpu).to.eq(4);
+        });
+
+        it('should still use actual requests for worker pods when useResourceLimits=true', () => {
+            globalSettings.useResourceLimits = true;
+            const nodes = {
+                body: { items: [{ metadata: { name: 'node1', labels: {} }, status: { allocatable: { cpu: '4', memory: '8Gi' } } }] }
+            };
+            const pods = {
+                body: {
+                    items: [
+                        {
+                            status: { phase: 'Running' },
+                            spec: {
+                                nodeName: 'node1',
+                                containers: [{
+                                    resources: {
+                                        requests: { cpu: '100m', memory: '128Mi' },
+                                        limits: { cpu: '1000m', memory: '512Mi' }
+                                    }
+                                }]
+                            },
+                            metadata: { labels: { type: 'worker', 'algorithm-name': 'algoX' }, name: 'workerPod1' }
+                        }
+                    ]
+                }
+            };
+
+            const res = normalizeResources({ pods, nodes });
+            const node = res.nodeList[0];
+
+            // requests bucket uses limit (1 core), not request
+            expect(node.requests.cpu).to.eq(1);
+            // workersTotal should use actual request (0.1 cores)
+            expect(node.workersTotal.cpu).to.eq(0.1);
+        });
+
+        it('should still use actual requests for "other" pods when useResourceLimits=true', () => {
+            globalSettings.useResourceLimits = true;
+            const nodes = {
+                body: { items: [{ metadata: { name: 'node1', labels: {} }, status: { allocatable: { cpu: '4', memory: '8Gi' } } }] }
+            };
+            const pods = {
+                body: {
+                    items: [
+                        {
+                            status: { phase: 'Running' },
+                            spec: {
+                                nodeName: 'node1',
+                                containers: [{
+                                    resources: {
+                                        requests: { cpu: '200m', memory: '256Mi' },
+                                        limits: { cpu: '2000m', memory: '1Gi' }
+                                    }
+                                }]
+                            },
+                            metadata: { labels: { type: 'other' }, name: 'otherPod1' }
+                        }
+                    ]
+                }
+            };
+
+            const res = normalizeResources({ pods, nodes });
+            const node = res.nodeList[0];
+
+            // requests bucket uses limit (2 cores)
+            expect(node.requests.cpu).to.eq(2);
+            // "other" bucket should still use actual request (0.2 cores)
+            expect(node.other.cpu).to.eq(0.2);
+        });
     });

     describe('merge workers', () => {
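
The last two tests pin down the asymmetry in the accounting when globalSettings.useResourceLimits is enabled: the per-node requests bucket is driven by container limits, while the workersTotal and other buckets keep using the actual requests (matching the "Use actual requests value for worker and other pods accounting" comment in normalize.js). A minimal sketch of that split per pod; the accountPodCpu helper and its field names are illustrative, not the repo's code:

// Illustrative sketch only: how CPU accounting is expected to split per pod
// when useResourceLimits is true. Field names here are assumptions for the example.
function accountPodCpu(node, pod, useResourceLimits) {
    // requests bucket: driven by the limit when the flag is set and a limit exists
    const cpuForRequests = (useResourceLimits && pod.limitsCpu) ? pod.limitsCpu : pod.requestCpu;
    node.requests.cpu += cpuForRequests;

    // worker / "other" buckets: always the actual request
    const bucket = pod.type === 'worker' ? node.workersTotal : node.other;
    bucket.cpu += pod.requestCpu;
}

// Matches the expectations above: a worker requesting 100m with a 1000m limit adds
// 1 core to node.requests.cpu but only 0.1 core to node.workersTotal.cpu.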
