Commit b8f4089 (1 parent: 296badd)

Create and use LatencyTracker interface for better testability

File tree: 3 files changed (+8 −5 lines)


cluster-autoscaler/core/scaledown/actuation/actuator.go
Lines changed: 2 additions & 2 deletions

@@ -59,7 +59,7 @@ const (
 type Actuator struct {
 	ctx                   *context.AutoscalingContext
 	nodeDeletionTracker   *deletiontracker.NodeDeletionTracker
-	nodeLatencyTracker    *latencytracker.NodeLatencyTracker
+	nodeLatencyTracker    latencytracker.LatencyTracker
 	nodeDeletionScheduler *GroupDeletionScheduler
 	deleteOptions         options.NodeDeleteOptions
 	drainabilityRules     rules.Rules
@@ -80,7 +80,7 @@ type actuatorNodeGroupConfigGetter interface {
 }

 // NewActuator returns a new instance of Actuator.
-func NewActuator(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupchange.NodeGroupChangeObserver, ndt *deletiontracker.NodeDeletionTracker, nlt *latencytracker.NodeLatencyTracker, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, configGetter actuatorNodeGroupConfigGetter) *Actuator {
+func NewActuator(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupchange.NodeGroupChangeObserver, ndt *deletiontracker.NodeDeletionTracker, nlt latencytracker.LatencyTracker, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, configGetter actuatorNodeGroupConfigGetter) *Actuator {
 	ndb := NewNodeDeletionBatcher(ctx, scaleStateNotifier, ndt, ctx.NodeDeletionBatcherInterval)
 	legacyFlagDrainConfig := SingleRuleDrainConfig(ctx.MaxGracefulTerminationSec)
 	var evictor Evictor

cluster-autoscaler/core/scaledown/latencytracker/node_latency_tracker.go
Lines changed: 4 additions & 1 deletion

@@ -4,10 +4,13 @@ import (
 	"time"

 	"k8s.io/autoscaler/cluster-autoscaler/metrics"
-
 	"k8s.io/klog/v2"
 )

+type LatencyTracker interface {
+	ObserveDeletion(nodeName string, timestamp time.Time)
+	UpdateStateWithUnneededList(list []NodeInfo, timestamp time.Time)
+}
 type NodeInfo struct {
 	Name          string
 	UnneededSince time.Time
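The extracted interface is what the commit title means by "better testability": anything implementing these two methods can stand in for the concrete NodeLatencyTracker. A minimal sketch of such a test double follows; fakeLatencyTracker and its fields are hypothetical test helpers, not part of this commit, and the import path is assumed from the file path above.

package latencytracker_test

import (
	"time"

	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/latencytracker"
)

// fakeLatencyTracker records calls instead of tracking real latency,
// so tests can assert on what the Planner and Actuator reported.
type fakeLatencyTracker struct {
	deletions map[string]time.Time
	unneeded  [][]latencytracker.NodeInfo
}

func newFakeLatencyTracker() *fakeLatencyTracker {
	return &fakeLatencyTracker{deletions: map[string]time.Time{}}
}

// ObserveDeletion remembers when each node's deletion was observed.
func (f *fakeLatencyTracker) ObserveDeletion(nodeName string, timestamp time.Time) {
	f.deletions[nodeName] = timestamp
}

// UpdateStateWithUnneededList appends each unneeded-node snapshot.
func (f *fakeLatencyTracker) UpdateStateWithUnneededList(list []latencytracker.NodeInfo, timestamp time.Time) {
	f.unneeded = append(f.unneeded, list)
}

// Compile-time check that the fake satisfies the new interface.
var _ latencytracker.LatencyTracker = (*fakeLatencyTracker)(nil)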

cluster-autoscaler/core/scaledown/planner/planner.go
Lines changed: 2 additions & 2 deletions

@@ -77,11 +77,11 @@ type Planner struct {
 	cc                    controllerReplicasCalculator
 	scaleDownSetProcessor nodes.ScaleDownSetProcessor
 	scaleDownContext      *nodes.ScaleDownContext
-	nodeLatencyTracker    *latencytracker.NodeLatencyTracker
+	nodeLatencyTracker    latencytracker.LatencyTracker
 }

 // New creates a new Planner object.
-func New(context *context.AutoscalingContext, processors *processors.AutoscalingProcessors, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, nlt *latencytracker.NodeLatencyTracker) *Planner {
+func New(context *context.AutoscalingContext, processors *processors.AutoscalingProcessors, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, nlt latencytracker.LatencyTracker) *Planner {
 	resourceLimitsFinder := resource.NewLimitsFinder(processors.CustomResourcesProcessor)
 	minUpdateInterval := context.AutoscalingOptions.NodeGroupDefaults.ScaleDownUnneededTime
 	if minUpdateInterval == 0*time.Nanosecond {
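With both constructors now taking the interface rather than the concrete pointer type, a test can exercise the LatencyTracker contract directly, with no metrics or klog side effects. A rough sketch, reusing the hypothetical fake above:

package latencytracker_test

import (
	"testing"
	"time"

	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/latencytracker"
)

// TestFakeTrackerObservesDeletion drives the fake through the same
// two calls the Planner and Actuator would make, then asserts on
// what was recorded.
func TestFakeTrackerObservesDeletion(t *testing.T) {
	var nlt latencytracker.LatencyTracker = newFakeLatencyTracker()

	now := time.Now()
	nlt.UpdateStateWithUnneededList([]latencytracker.NodeInfo{
		{Name: "node-1", UnneededSince: now.Add(-time.Minute)},
	}, now)
	nlt.ObserveDeletion("node-1", now)

	fake := nlt.(*fakeLatencyTracker)
	if got := fake.deletions["node-1"]; !got.Equal(now) {
		t.Errorf("deletion timestamp = %v, want %v", got, now)
	}
}

The same fake could then be passed as the nlt argument to planner.New or actuation.NewActuator, which is the substitution this commit enables.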
