
Commit be49ad9

Merge pull request #612 from knarayan/trimaran1
Trimaran minor bug fixes
2 parents a3eb6cd + 2b59cf7 commit be49ad9

File tree: 4 files changed, +187 -12 lines changed

pkg/trimaran/collector.go
pkg/trimaran/collector_test.go
pkg/trimaran/resourcestats.go
pkg/trimaran/resourcestats_test.go

pkg/trimaran/collector.go

Lines changed: 1 addition & 1 deletion

@@ -123,7 +123,7 @@ func checkSpecs(trimaranSpec *pluginConfig.TrimaranSpec) error {
 		metricProviderType == string(pluginConfig.Prometheus) ||
 		metricProviderType == string(pluginConfig.SignalFx)
 		if !validMetricProviderType {
-			return fmt.Errorf("invalid MetricProvider.Type, got %T", trimaranSpec.MetricProvider.Type)
+			return fmt.Errorf("invalid MetricProvider.Type, got %v", trimaranSpec.MetricProvider.Type)
 		}
 	}
 	return nil
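
As context for this one-character fix: with a string-backed config type, the %T verb prints the Go type name (identical for every bad value), while %v prints the offending value itself. A minimal standalone sketch, using a hypothetical MetricProviderType alias rather than the real plugin-config type:

package main

import "fmt"

// MetricProviderType is a hypothetical stand-in for the string-backed type
// used in the plugin config; it exists here only to illustrate the format verbs.
type MetricProviderType string

func main() {
	var badType MetricProviderType = "Foo"
	// %T reports only the Go type, so every invalid value yields the same message.
	fmt.Printf("invalid MetricProvider.Type, got %T\n", badType) // ... got main.MetricProviderType
	// %v reports the value itself, which is what the fixed error message returns.
	fmt.Printf("invalid MetricProvider.Type, got %v\n", badType) // ... got Foo
}

This is also why the new TestNewCollectorSpecs below can build the expected error string by concatenating the literal prefix with string(metricProvider.Type).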

pkg/trimaran/collector_test.go

Lines changed: 78 additions & 1 deletion

@@ -64,6 +64,13 @@ var (
 			},
 		},
 	}
+
+	noWatcherResponseForNode = watcher.WatcherMetrics{
+		Window: watcher.Window{},
+		Data: watcher.Data{
+			NodeMetricsMap: map[string]watcher.NodeMetrics{},
+		},
+	}
 )
 
 func TestNewCollector(t *testing.T) {
@@ -72,6 +79,28 @@ func TestNewCollector(t *testing.T) {
 	assert.Nil(t, err)
 }
 
+func TestNewCollectorSpecs(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
+		bytes, err := json.Marshal(watcherResponse)
+		assert.Nil(t, err)
+		resp.Write(bytes)
+	}))
+	defer server.Close()
+
+	metricProvider := pluginConfig.MetricProviderSpec{
+		Type: "",
+	}
+	trimaranSpec := pluginConfig.TrimaranSpec{
+		WatcherAddress: "",
+		MetricProvider: metricProvider,
+	}
+
+	col, err := NewCollector(&trimaranSpec)
+	assert.Nil(t, col)
+	expectedErr := "invalid MetricProvider.Type, got " + string(metricProvider.Type)
+	assert.EqualError(t, err, expectedErr)
+}
+
 func TestGetAllMetrics(t *testing.T) {
 	server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
 		bytes, err := json.Marshal(watcherResponse)
@@ -127,7 +156,55 @@ func TestGetNodeMetrics(t *testing.T) {
 	assert.NotNil(t, collector)
 	assert.Nil(t, err)
 	nodeName := "node-1"
-	metrics, _ := collector.GetNodeMetrics(nodeName)
+	metrics, allMetrics := collector.GetNodeMetrics(nodeName)
 	expectedMetrics := watcherResponse.Data.NodeMetricsMap[nodeName].Metrics
 	assert.EqualValues(t, expectedMetrics, metrics)
+	expectedAllMetrics := &watcherResponse
+	assert.EqualValues(t, expectedAllMetrics, allMetrics)
+}
+
+func TestGetNodeMetricsNilForNode(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
+		bytes, err := json.Marshal(noWatcherResponseForNode)
+		assert.Nil(t, err)
+		resp.Write(bytes)
+	}))
+	defer server.Close()
+
+	trimaranSpec := pluginConfig.TrimaranSpec{
+		WatcherAddress: server.URL,
+	}
+	collector, err := NewCollector(&trimaranSpec)
+	assert.NotNil(t, collector)
+	assert.Nil(t, err)
+	nodeName := "node-1"
+	metrics, allMetrics := collector.GetNodeMetrics(nodeName)
+	expectedMetrics := noWatcherResponseForNode.Data.NodeMetricsMap[nodeName].Metrics
+	assert.EqualValues(t, expectedMetrics, metrics)
+	assert.NotNil(t, allMetrics)
+	expectedAllMetrics := &noWatcherResponseForNode
+	assert.EqualValues(t, expectedAllMetrics, allMetrics)
+}
+
+func TestNewCollectorLoadWatcher(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
+		bytes, err := json.Marshal(watcherResponse)
+		assert.Nil(t, err)
+		resp.Write(bytes)
+	}))
+	defer server.Close()
+
+	metricProvider := pluginConfig.MetricProviderSpec{
+		Type:    watcher.SignalFxClientName,
+		Address: server.URL,
+		Token:   "PWNED",
+	}
+	trimaranSpec := pluginConfig.TrimaranSpec{
+		WatcherAddress: "",
+		MetricProvider: metricProvider,
+	}
+
+	col, err := NewCollector(&trimaranSpec)
+	assert.NotNil(t, col)
+	assert.Nil(t, err)
 }
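
The new tests all follow the same stub-server pattern: marshal a canned watcher response, serve it from an httptest server, and point the collector at server.URL. A standalone sketch of that pattern using only the standard library (fakeMetrics is a hypothetical stand-in for watcher.WatcherMetrics):

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// fakeMetrics is a hypothetical stand-in for the watcher.WatcherMetrics payload.
type fakeMetrics struct {
	Data map[string][]float64 `json:"data"`
}

func main() {
	payload := fakeMetrics{Data: map[string][]float64{"node-1": {0.5, 0.7}}}

	// Serve the canned payload, as the tests in this commit do before
	// handing server.URL to the collector via WatcherAddress.
	server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		bytes, err := json.Marshal(payload)
		if err != nil {
			http.Error(resp, err.Error(), http.StatusInternalServerError)
			return
		}
		resp.Write(bytes)
	}))
	defer server.Close()

	res, err := http.Get(server.URL)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	body, _ := io.ReadAll(res.Body)
	fmt.Println(string(body)) // {"data":{"node-1":[0.5,0.7]}}
}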

pkg/trimaran/resourcestats.go

Lines changed: 5 additions & 8 deletions

@@ -198,15 +198,12 @@ func GetNodeRequestsAndLimits(podInfosOnNode []*framework.PodInfo, node *v1.Node
 			// make sure limits not less than requests
 			SetMaxLimits(requested, limits)
 		}
-		reqCpu := requested.MilliCPU
-		reqMem := requested.Memory
-		limitCpu := limits.MilliCPU
-		limitMem := limits.Memory
+
 		// accumulate
-		nodeRequest.MilliCPU += reqCpu
-		nodeRequest.Memory += reqMem
-		nodeLimit.MilliCPU += limitCpu
-		nodeLimit.Memory += limitMem
+		nodeRequest.MilliCPU += requested.MilliCPU
+		nodeRequest.Memory += requested.Memory
+		nodeLimit.MilliCPU += limits.MilliCPU
+		nodeLimit.Memory += limits.Memory
 	}
 	// cap requests by node capacity
 	setMin(&nodeRequest.MilliCPU, capCpu)
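
The hunk above only drops the intermediate reqCpu/reqMem/limitCpu/limitMem locals; the accumulate-then-cap behaviour is unchanged. A self-contained sketch of that pattern with local stand-ins (resource and setMin here are hypothetical, not the scheduler framework types):

package main

import "fmt"

// resource is a hypothetical stand-in for framework.Resource.
type resource struct {
	MilliCPU int64
	Memory   int64
}

// setMin caps *v at capacity, mirroring the helper called after the loop.
func setMin(v *int64, capacity int64) {
	if *v > capacity {
		*v = capacity
	}
}

func main() {
	podRequests := []resource{{MilliCPU: 500, Memory: 1 << 20}, {MilliCPU: 1200, Memory: 2 << 20}}
	podLimits := []resource{{MilliCPU: 800, Memory: 2 << 20}, {MilliCPU: 1200, Memory: 2 << 20}}
	capCpu, capMem := int64(1600), int64(3<<20)

	var nodeRequest, nodeLimit resource
	for i := range podRequests {
		// accumulate directly from the per-pod values; no intermediate locals needed
		nodeRequest.MilliCPU += podRequests[i].MilliCPU
		nodeRequest.Memory += podRequests[i].Memory
		nodeLimit.MilliCPU += podLimits[i].MilliCPU
		nodeLimit.Memory += podLimits[i].Memory
	}
	// cap requests by node capacity
	setMin(&nodeRequest.MilliCPU, capCpu)
	setMin(&nodeRequest.Memory, capMem)
	fmt.Printf("nodeRequest=%+v nodeLimit=%+v\n", nodeRequest, nodeLimit)
}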

pkg/trimaran/resourcestats_test.go

Lines changed: 103 additions & 2 deletions

@@ -17,6 +17,7 @@ limitations under the License.
 package trimaran
 
 import (
+	"math"
 	"reflect"
 	"strconv"
 	"testing"
@@ -375,6 +376,13 @@ func TestGetNodeRequestsAndLimits(t *testing.T) {
 
 	testNode := st.MakeNode().Name("test-node").Capacity(nr).Obj()
 
+	nr = map[v1.ResourceName]string{
+		v1.ResourceCPU:    "1600m",
+		v1.ResourceMemory: "6Ki",
+	}
+
+	testNodeWithLowNodeCapacity := st.MakeNode().Name("test-node").Capacity(nr).Obj()
+
 	var initCPUReq int64 = 100
 	var initMemReq int64 = 2048
 	contCPUReq := []int64{1000, 500}
@@ -406,6 +414,20 @@ func TestGetNodeRequestsAndLimits(t *testing.T) {
 	podRequests := GetResourceRequested(pod)
 	podLimits := GetResourceLimits(pod)
 
+	allocatableResources := testNodeWithLowNodeCapacity.Status.Allocatable
+	amCpu := allocatableResources[v1.ResourceCPU]
+	capCpu := amCpu.MilliValue()
+	amMem := allocatableResources[v1.ResourceMemory]
+	capMem := amMem.Value()
+
+	contCPULimit = []int64{1000, 400}
+
+	pod4 := getPodWithContainersAndOverhead(0, initCPUReq, initMemReq, contCPUReq, contMemReq)
+	pod4 = getPodWithLimits(pod4, initCPULimit, initMemLimit, contCPULimit, contMemLimit)
+
+	pod4Requests := GetResourceRequested(pod4)
+	pod4Limits := GetResourceLimits(pod4)
+
 	type args struct {
 		podsOnNode []*framework.PodInfo
 		node       *v1.Node
@@ -485,12 +507,91 @@ func TestGetNodeRequestsAndLimits(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "test-2",
+			// Test case for Node with low capacity than the requested Pod limits
+			args: args{
+				podsOnNode:  []*framework.PodInfo{},
+				node:        testNodeWithLowNodeCapacity,
+				pod:         pod,
+				podRequests: podRequests,
+				podLimits:   podLimits,
+			},
+			want: &NodeRequestsAndLimits{
+				NodeRequest: &framework.Resource{
+					MilliCPU: int64(math.Min(float64(GetResourceRequested(pod).MilliCPU), float64(capCpu))),
+					Memory:   int64(math.Min(float64(GetResourceRequested(pod).Memory), float64(capMem))),
+				},
+				NodeLimit: &framework.Resource{
+					MilliCPU: int64(math.Max(float64(GetResourceRequested(pod).MilliCPU), float64(GetResourceLimits(pod).MilliCPU))),
+					Memory:   int64(math.Max(float64(GetResourceRequested(pod).Memory), float64(GetResourceLimits(pod).Memory))),
+				},
+				NodeRequestMinusPod: &framework.Resource{
+					MilliCPU: 0,
+					Memory:   0,
+				},
+				NodeLimitMinusPod: &framework.Resource{
+					MilliCPU: 0,
+					Memory:   0,
+				},
+				Nodecapacity: &framework.Resource{
+					MilliCPU: capCpu,
+					Memory:   capMem,
+				},
+			},
+		},
+		{
+			name: "test-3",
+			// Test case for Pod with more Requests than Limits
+			args: args{
+				podsOnNode:  []*framework.PodInfo{},
+				node:        testNodeWithLowNodeCapacity,
+				pod:         pod4,
+				podRequests: pod4Requests,
+				podLimits:   pod4Limits,
+			},
+			want: &NodeRequestsAndLimits{
+				NodeRequest: &framework.Resource{
+					MilliCPU: int64(math.Min(float64(GetResourceRequested(pod4).MilliCPU), float64(capCpu))),
+					Memory:   int64(math.Min(float64(GetResourceRequested(pod4).Memory), float64(capMem))),
+				},
+				NodeLimit: &framework.Resource{
+					MilliCPU: int64(math.Min(
+						math.Max(float64(GetResourceRequested(pod4).MilliCPU), float64(GetResourceLimits(pod4).MilliCPU)),
+						float64(capCpu))),
+					Memory: int64(math.Min(
+						math.Max(float64(GetResourceRequested(pod4).Memory), float64(GetResourceLimits(pod4).Memory)),
+						float64(capMem))),
+				},
+				NodeRequestMinusPod: &framework.Resource{
+					MilliCPU: 0,
+					Memory:   0,
+				},
+				NodeLimitMinusPod: &framework.Resource{
+					MilliCPU: 0,
+					Memory:   0,
+				},
+				Nodecapacity: &framework.Resource{
+					MilliCPU: capCpu,
+					Memory:   capMem,
+				},
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := GetNodeRequestsAndLimits(tt.args.podsOnNode, tt.args.node, tt.args.pod,
+			var got *NodeRequestsAndLimits
+			SetMaxLimits(tt.args.podRequests, tt.args.podLimits)
+			if got = GetNodeRequestsAndLimits(tt.args.podsOnNode, tt.args.node, tt.args.pod,
 				tt.args.podRequests, tt.args.podLimits); !reflect.DeepEqual(got, tt.want) {
-				t.Errorf("GetNodeRequestsAndLimits() = %v, want %v", got, tt.want)
+				t.Errorf("GetNodeRequestsAndLimits(): got = {%+v, %+v, %+v, %+v, %+v}, want = {%+v, %+v, %+v, %+v, %+v}",
+					*got.NodeRequest, *got.NodeLimit, *got.NodeRequestMinusPod, *got.NodeLimitMinusPod, *got.Nodecapacity,
+					*tt.want.NodeRequest, *tt.want.NodeLimit, *tt.want.NodeRequestMinusPod, *tt.want.NodeLimitMinusPod, *tt.want.Nodecapacity)
+			}
+			// The below asserts should hold good for all test cases, ideally; "test-3" exercises this by setting NodeLimit less than NodeRequest
+			if tt.name == "test-3" {
+				assert.LessOrEqual(t, (*got.NodeRequest).MilliCPU, (*got.NodeLimit).MilliCPU)
+				assert.LessOrEqual(t, (*got.NodeRequest).Memory, (*got.NodeLimit).Memory)
 			}
 		})
 	}
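
"test-3" feeds in a pod whose requests exceed its limits and then checks that NodeRequest never ends up above NodeLimit, the invariant the SetMaxLimits normalization is meant to guarantee. A hypothetical sketch of that normalization step with local types (not the repository's implementation):

package main

import "fmt"

// resource is a hypothetical stand-in for framework.Resource.
type resource struct {
	MilliCPU int64
	Memory   int64
}

// setMaxLimits raises limits so they are never below requests, preserving the
// NodeRequest <= NodeLimit invariant that "test-3" asserts.
func setMaxLimits(requests, limits *resource) {
	if limits.MilliCPU < requests.MilliCPU {
		limits.MilliCPU = requests.MilliCPU
	}
	if limits.Memory < requests.Memory {
		limits.Memory = requests.Memory
	}
}

func main() {
	requests := &resource{MilliCPU: 1600, Memory: 4096}
	limits := &resource{MilliCPU: 1400, Memory: 4096} // limits below requests, as with pod4's CPU
	setMaxLimits(requests, limits)
	fmt.Printf("limits after normalization: %+v\n", *limits) // MilliCPU raised to 1600
}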
