
Commit f32d6cd

Move DRA provider to autoscaling context.
1 parent 34115f8 commit f32d6cd

File tree

4 files changed, 19 insertions(+), 19 deletions(-)

4 files changed

+19
-19
lines changed

cluster-autoscaler/context/autoscaling_context.go

Lines changed: 5 additions & 0 deletions
@@ -27,6 +27,7 @@ import (
     "k8s.io/autoscaler/cluster-autoscaler/expander"
     processor_callbacks "k8s.io/autoscaler/cluster-autoscaler/processors/callbacks"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
+    draprovider "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/provider"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
     kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
     "k8s.io/client-go/informers"
@@ -62,6 +63,8 @@ type AutoscalingContext struct {
     ClusterStateRegistry *clusterstate.ClusterStateRegistry
     //ProvisionRequstScaleUpMode indicates whether ClusterAutoscaler tries to accommodate ProvisioningRequest in current scale up iteration.
     ProvisioningRequestScaleUpMode bool
+    // DraProvider is the provider for dynamic resources allocation.
+    DraProvider *draprovider.Provider
 }

 // AutoscalingKubeClients contains all Kubernetes API clients,
@@ -108,6 +111,7 @@ func NewAutoscalingContext(
     debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter,
     remainingPdbTracker pdb.RemainingPdbTracker,
     clusterStateRegistry *clusterstate.ClusterStateRegistry,
+    draProvider *draprovider.Provider,
 ) *AutoscalingContext {
     return &AutoscalingContext{
         AutoscalingOptions: options,
@@ -120,6 +124,7 @@ func NewAutoscalingContext(
         DebuggingSnapshotter: debuggingSnapshotter,
         RemainingPdbTracker: remainingPdbTracker,
         ClusterStateRegistry: clusterStateRegistry,
+        DraProvider: draProvider,
     }
 }

cluster-autoscaler/core/scaledown/actuation/actuator.go

Lines changed: 3 additions & 6 deletions
@@ -37,7 +37,6 @@ import (
     "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/predicate"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/store"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules"
-    draprovider "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/provider"
     drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/options"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/utilization"
@@ -69,7 +68,6 @@ type Actuator struct {
     configGetter actuatorNodeGroupConfigGetter
     nodeDeleteDelayAfterTaint time.Duration
     pastLatencies *expiring.List
-    draProvider *draprovider.Provider
 }

 // actuatorNodeGroupConfigGetter is an interface to limit the functions that can be used
@@ -80,7 +78,7 @@ type actuatorNodeGroupConfigGetter interface {
 }

 // NewActuator returns a new instance of Actuator.
-func NewActuator(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupchange.NodeGroupChangeObserver, ndt *deletiontracker.NodeDeletionTracker, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, configGetter actuatorNodeGroupConfigGetter, draProvider *draprovider.Provider) *Actuator {
+func NewActuator(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupchange.NodeGroupChangeObserver, ndt *deletiontracker.NodeDeletionTracker, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, configGetter actuatorNodeGroupConfigGetter) *Actuator {
     ndb := NewNodeDeletionBatcher(ctx, scaleStateNotifier, ndt, ctx.NodeDeletionBatcherInterval)
     legacyFlagDrainConfig := SingleRuleDrainConfig(ctx.MaxGracefulTerminationSec)
     var evictor Evictor
@@ -99,7 +97,6 @@ func NewActuator(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupch
         configGetter: configGetter,
         nodeDeleteDelayAfterTaint: ctx.NodeDeleteDelayAfterTaint,
         pastLatencies: expiring.NewList(),
-        draProvider: draProvider,
     }
 }

@@ -410,8 +407,8 @@ func (a *Actuator) createSnapshot(nodes []*apiv1.Node) (clustersnapshot.ClusterS
     nonExpendableScheduledPods := utils.FilterOutExpendablePods(scheduledPods, a.ctx.ExpendablePodsPriorityCutoff)

     var draSnapshot drasnapshot.Snapshot
-    if a.ctx.DynamicResourceAllocationEnabled && a.draProvider != nil {
-        draSnapshot, err = a.draProvider.Snapshot()
+    if a.ctx.DynamicResourceAllocationEnabled && a.ctx.DraProvider != nil {
+        draSnapshot, err = a.ctx.DraProvider.Snapshot()
         if err != nil {
             return nil, err
         }
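
For review context, this hunk and the RunOnce hunk below show the access pattern the new field enables: components read the provider from the AutoscalingContext instead of holding their own copy. A minimal sketch of that pattern, assuming the imports resolve against the cluster-autoscaler module; the helper name snapshotDRAObjects is hypothetical, while the guard and the Snapshot() call mirror the changed code:

// snapshotDRAObjects is a hypothetical helper showing how a consumer reads the DRA
// provider from the context after this change. Assumed imports:
//   "k8s.io/autoscaler/cluster-autoscaler/context"
//   drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
func snapshotDRAObjects(ctx *context.AutoscalingContext) (drasnapshot.Snapshot, error) {
    // Same guard as Actuator.createSnapshot above and StaticAutoscaler.RunOnce below:
    // only snapshot DRA objects when the feature is enabled and a provider was wired in.
    if ctx.DynamicResourceAllocationEnabled && ctx.DraProvider != nil {
        return ctx.DraProvider.Snapshot()
    }
    // DRA disabled or no provider configured: fall back to an empty snapshot.
    var empty drasnapshot.Snapshot
    return empty, nil
}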

cluster-autoscaler/core/static_autoscaler.go

Lines changed: 5 additions & 6 deletions
@@ -93,7 +93,6 @@ type StaticAutoscaler struct {
     processorCallbacks *staticAutoscalerProcessorCallbacks
     initialized bool
     taintConfig taints.TaintConfig
-    draProvider *draprovider.Provider
 }

 type staticAutoscalerProcessorCallbacks struct {
@@ -167,7 +166,8 @@ func NewStaticAutoscaler(
         processorCallbacks,
         debuggingSnapshotter,
         remainingPdbTracker,
-        clusterStateRegistry)
+        clusterStateRegistry,
+        draProvider)

     taintConfig := taints.NewTaintConfig(opts)
     processors.ScaleDownCandidatesNotifier.Register(clusterStateRegistry)
@@ -179,7 +179,7 @@ func NewStaticAutoscaler(
     processorCallbacks.scaleDownPlanner = scaleDownPlanner

     ndt := deletiontracker.NewNodeDeletionTracker(0 * time.Second)
-    scaleDownActuator := actuation.NewActuator(autoscalingContext, processors.ScaleStateNotifier, ndt, deleteOptions, drainabilityRules, processors.NodeGroupConfigProcessor, draProvider)
+    scaleDownActuator := actuation.NewActuator(autoscalingContext, processors.ScaleStateNotifier, ndt, deleteOptions, drainabilityRules, processors.NodeGroupConfigProcessor)
     autoscalingContext.ScaleDownActuator = scaleDownActuator

     if scaleUpOrchestrator == nil {
@@ -203,7 +203,6 @@ func NewStaticAutoscaler(
         processorCallbacks: processorCallbacks,
         clusterStateRegistry: clusterStateRegistry,
         taintConfig: taintConfig,
-        draProvider: draProvider,
     }
 }

@@ -337,8 +336,8 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr
     nonExpendableScheduledPods := core_utils.FilterOutExpendablePods(originalScheduledPods, a.ExpendablePodsPriorityCutoff)

     var draSnapshot drasnapshot.Snapshot
-    if a.AutoscalingContext.DynamicResourceAllocationEnabled && a.draProvider != nil {
-        draSnapshot, err = a.draProvider.Snapshot()
+    if a.AutoscalingContext.DynamicResourceAllocationEnabled && a.AutoscalingContext.DraProvider != nil {
+        draSnapshot, err = a.AutoscalingContext.DraProvider.Snapshot()
         if err != nil {
             return caerrors.ToAutoscalerError(caerrors.ApiCallError, err)
         }
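
At the call sites the net effect is that the provider is attached to the context once and NewActuator loses its trailing parameter. A minimal wiring sketch, mirroring this file and the test updates below; the lister, notifier and processor variables are placeholders assumed to be in scope, not names from this commit:

// Sketch only: ctx is a *context.AutoscalingContext; the three listers and the
// remaining NewActuator arguments stand in for values built elsewhere.
ctx.DraProvider = draprovider.NewProvider(resourceClaimLister, resourceSliceLister, deviceClassLister)
ndt := deletiontracker.NewNodeDeletionTracker(0 * time.Second)
// NewActuator no longer takes a draProvider argument; it reads ctx.DraProvider instead.
scaleDownActuator := actuation.NewActuator(ctx, scaleStateNotifier, ndt, deleteOptions, drainabilityRules, nodeGroupConfigProcessor)
ctx.ScaleDownActuator = scaleDownActuator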

cluster-autoscaler/core/static_autoscaler_test.go

Lines changed: 6 additions & 7 deletions
@@ -164,7 +164,7 @@ func (m *onNodeGroupDeleteMock) Delete(id string) error {

 func setUpScaleDownActuator(ctx *context.AutoscalingContext, autoscalingOptions config.AutoscalingOptions) {
     deleteOptions := options.NewNodeDeleteOptions(autoscalingOptions)
-    ctx.ScaleDownActuator = actuation.NewActuator(ctx, nil, deletiontracker.NewNodeDeletionTracker(0*time.Second), deleteOptions, rules.Default(deleteOptions), processorstest.NewTestProcessors(ctx).NodeGroupConfigProcessor, nil)
+    ctx.ScaleDownActuator = actuation.NewActuator(ctx, nil, deletiontracker.NewNodeDeletionTracker(0*time.Second), deleteOptions, rules.Default(deleteOptions), processorstest.NewTestProcessors(ctx).NodeGroupConfigProcessor)
 }

 type nodeGroup struct {
@@ -316,12 +316,12 @@ func setupAutoscaler(config *autoscalerSetupConfig) (*StaticAutoscaler, error) {

     deleteOptions := options.NewNodeDeleteOptions(ctx.AutoscalingOptions)
     drainabilityRules := rules.Default(deleteOptions)
-    draProvider := draprovider.NewProvider(config.mocks.resourceClaimLister, config.mocks.resourceSliceLister, config.mocks.deviceClassLister)
+    ctx.DraProvider = draprovider.NewProvider(config.mocks.resourceClaimLister, config.mocks.resourceSliceLister, config.mocks.deviceClassLister)
     nodeDeletionTracker := config.mocks.nodeDeletionTracker
     if nodeDeletionTracker == nil {
         nodeDeletionTracker = deletiontracker.NewNodeDeletionTracker(0 * time.Second)
     }
-    ctx.ScaleDownActuator = actuation.NewActuator(&ctx, clusterState, nodeDeletionTracker, deleteOptions, drainabilityRules, processors.NodeGroupConfigProcessor, draProvider)
+    ctx.ScaleDownActuator = actuation.NewActuator(&ctx, clusterState, nodeDeletionTracker, deleteOptions, drainabilityRules, processors.NodeGroupConfigProcessor)
     sdPlanner := planner.New(&ctx, processors, deleteOptions, drainabilityRules)

     processorCallbacks.scaleDownPlanner = sdPlanner
@@ -335,7 +335,6 @@ func setupAutoscaler(config *autoscalerSetupConfig) (*StaticAutoscaler, error) {
         processors: processors,
         loopStartNotifier: loopstart.NewObserversList(nil),
         processorCallbacks: processorCallbacks,
-        draProvider: draProvider,
     }

     return autoscaler, nil
@@ -2089,7 +2088,7 @@ func TestStaticAutoscalerUpcomingScaleDownCandidates(t *testing.T) {
     csr := clusterstate.NewClusterStateRegistry(provider, csrConfig, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), processors.AsyncNodeGroupStateChecker)

     // Setting the Actuator is necessary for testing any scale-down logic, it shouldn't have anything to do in this test.
-    actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), options.NodeDeleteOptions{}, nil, processorstest.NewTestProcessors(&ctx).NodeGroupConfigProcessor, nil)
+    actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), options.NodeDeleteOptions{}, nil, processorstest.NewTestProcessors(&ctx).NodeGroupConfigProcessor)
     ctx.ScaleDownActuator = actuator

     // Fake planner that keeps track of the scale-down candidates passed to UpdateClusterState.
@@ -2720,7 +2719,7 @@ func newScaleDownPlannerAndActuator(ctx *context.AutoscalingContext, p *ca_proce
         nodeDeletionTracker = deletiontracker.NewNodeDeletionTracker(0 * time.Second)
     }
     planner := planner.New(ctx, p, deleteOptions, nil)
-    actuator := actuation.NewActuator(ctx, cs, nodeDeletionTracker, deleteOptions, nil, p.NodeGroupConfigProcessor, nil)
+    actuator := actuation.NewActuator(ctx, cs, nodeDeletionTracker, deleteOptions, nil, p.NodeGroupConfigProcessor)
     return planner, actuator
 }

@@ -2836,7 +2835,7 @@ func buildStaticAutoscaler(t *testing.T, provider cloudprovider.CloudProvider, a
     processors.ScaleDownNodeProcessor = cp

     csr := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{OkTotalUnreadyCount: 1}, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), processors.AsyncNodeGroupStateChecker)
-    actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), options.NodeDeleteOptions{}, nil, processors.NodeGroupConfigProcessor, nil)
+    actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), options.NodeDeleteOptions{}, nil, processors.NodeGroupConfigProcessor)
     ctx.ScaleDownActuator = actuator

     deleteOptions := options.NewNodeDeleteOptions(ctx.AutoscalingOptions)

0 commit comments
