diff --git a/src/autoscaler/api/policyvalidator/policy_json.schema.json b/src/autoscaler/api/policyvalidator/policy_json.schema.json index 0c1e0a1e45..8c42a0089a 100644 --- a/src/autoscaler/api/policyvalidator/policy_json.schema.json +++ b/src/autoscaler/api/policyvalidator/policy_json.schema.json @@ -58,6 +58,12 @@ "type": "integer", "title": "Maximum how many instances of application can be provisioned as part of application scaling" }, +"scaling_rule_evaluation": { + "type": "string", + "enum": ["first_matching", "biased_to_scale_out"], + "default": "first_matching", + "title": "The Scaling Rule Evaluation Schema" + }, "scaling_rules": { "$id": "#/properties/scaling_rules", "type": "array", diff --git a/src/autoscaler/eventgenerator/aggregator/metric_poller_test.go b/src/autoscaler/eventgenerator/aggregator/metric_poller_test.go index 6d9229af76..ac4e04d989 100644 --- a/src/autoscaler/eventgenerator/aggregator/metric_poller_test.go +++ b/src/autoscaler/eventgenerator/aggregator/metric_poller_test.go @@ -53,7 +53,7 @@ var _ = Describe("MetricPoller", func() { BeforeEach(func() { metricFetcher, err := metric.NewLogCacheFetcherFactory(metric.StandardLogCacheFetcherCreator).CreateFetcher(logger, config.Config{ MetricCollector: config.MetricCollectorConfig{ - MetricCollectorURL: "this.endpoint.does.not.exist:1234", + MetricCollectorURL: "this.endpoint.is.invalid:1234", }, }) Expect(err).ToNot(HaveOccurred()) diff --git a/src/autoscaler/eventgenerator/cmd/eventgenerator/main.go b/src/autoscaler/eventgenerator/cmd/eventgenerator/main.go index ed557b90be..2d2a46f4cd 100644 --- a/src/autoscaler/eventgenerator/cmd/eventgenerator/main.go +++ b/src/autoscaler/eventgenerator/cmd/eventgenerator/main.go @@ -66,7 +66,7 @@ func main() { appManager := aggregator.NewAppManager(logger, egClock, conf.Aggregator.PolicyPollerInterval, len(conf.Server.NodeAddrs), conf.Server.NodeIndex, conf.Aggregator.MetricCacheSizePerApp, policyDb, appMetricDB) - triggersChan := make(chan []*models.Trigger, conf.Evaluator.TriggerArrayChannelSize) + triggersChan := make(chan *models.DynamicScalingRules, conf.Evaluator.TriggerArrayChannelSize) evaluationManager, err := generator.NewAppEvaluationManager(logger, conf.Evaluator.EvaluationManagerInterval, egClock, triggersChan, appManager.GetPolicies, conf.CircuitBreaker) if err != nil { @@ -190,7 +190,7 @@ func loadConfig(path string) (*config.Config, error) { return conf, nil } -func createEvaluators(logger lager.Logger, conf *config.Config, triggersChan chan []*models.Trigger, queryMetrics aggregator.QueryAppMetricsFunc, getBreaker func(string) *circuit.Breaker, setCoolDownExpired func(string, int64)) ([]*generator.Evaluator, error) { +func createEvaluators(logger lager.Logger, conf *config.Config, triggersChan chan *models.DynamicScalingRules, queryMetrics aggregator.QueryAppMetricsFunc, getBreaker func(string) *circuit.Breaker, setCoolDownExpired func(string, int64)) ([]*generator.Evaluator, error) { count := conf.Evaluator.EvaluatorCount seClient, err := helpers.CreateHTTPSClient(&conf.ScalingEngine.TLSClientCerts, helpers.DefaultClientConfig(), logger.Session("scaling_client")) diff --git a/src/autoscaler/eventgenerator/generator/appEvaluationManager.go b/src/autoscaler/eventgenerator/generator/appEvaluationManager.go index e30ff2322b..c9634e3dab 100644 --- a/src/autoscaler/eventgenerator/generator/appEvaluationManager.go +++ b/src/autoscaler/eventgenerator/generator/appEvaluationManager.go @@ -14,14 +14,14 @@ import ( circuit "github.com/rubyist/circuitbreaker" ) -type 
ConsumeAppMonitorMap func(map[string][]*models.Trigger, chan []*models.Trigger) +type ConsumeAppMonitorMap func(map[string]*models.DynamicScalingRules, chan *models.DynamicScalingRules) type AppEvaluationManager struct { evaluateInterval time.Duration logger lager.Logger emClock clock.Clock doneChan chan bool - triggerChan chan []*models.Trigger + triggerChan chan *models.DynamicScalingRules getPolicies aggregator.GetPoliciesFunc breakerConfig config.CircuitBreakerConfig breakers map[string]*circuit.Breaker @@ -31,7 +31,7 @@ type AppEvaluationManager struct { } func NewAppEvaluationManager(logger lager.Logger, evaluateInterval time.Duration, emClock clock.Clock, - triggerChan chan []*models.Trigger, getPolicies aggregator.GetPoliciesFunc, + triggerChan chan *models.DynamicScalingRules, getPolicies aggregator.GetPoliciesFunc, breakerConfig config.CircuitBreakerConfig) (*AppEvaluationManager, error) { return &AppEvaluationManager{ evaluateInterval: evaluateInterval, @@ -47,11 +47,11 @@ func NewAppEvaluationManager(logger lager.Logger, evaluateInterval time.Duration }, nil } -func (a *AppEvaluationManager) getTriggers(policyMap map[string]*models.AppPolicy) map[string][]*models.Trigger { +func (a *AppEvaluationManager) getTriggers(policyMap map[string]*models.AppPolicy) map[string]*models.DynamicScalingRules { if policyMap == nil { return nil } - triggersByApp := make(map[string][]*models.Trigger) + triggersByApp := make(map[string]*models.DynamicScalingRules) for appID, policy := range policyMap { now := a.emClock.Now().UnixNano() a.cooldownLock.RLock() @@ -74,7 +74,10 @@ func (a *AppEvaluationManager) getTriggers(policyMap map[string]*models.AppPolic Adjustment: rule.Adjustment, }) } - triggersByApp[appID] = triggers + triggersByApp[appID] = &models.DynamicScalingRules{ + Triggers: triggers, + ScalingRuleEvaluation: policy.ScalingPolicy.ScalingRuleEvaluation, + } } return triggersByApp } @@ -122,8 +125,8 @@ func (a *AppEvaluationManager) doEvaluate() { a.breakerLock.Unlock() triggers := a.getTriggers(policies) - for _, triggerArray := range triggers { - a.triggerChan <- triggerArray + for _, dynamicScalingRules := range triggers { + a.triggerChan <- dynamicScalingRules } } } diff --git a/src/autoscaler/eventgenerator/generator/appEvaluationManager_test.go b/src/autoscaler/eventgenerator/generator/appEvaluationManager_test.go index ecf90e81aa..bee6140b88 100644 --- a/src/autoscaler/eventgenerator/generator/appEvaluationManager_test.go +++ b/src/autoscaler/eventgenerator/generator/appEvaluationManager_test.go @@ -25,7 +25,7 @@ var _ = Describe("AppEvaluationManager", func() { fclock *fakeclock.FakeClock manager *AppEvaluationManager testEvaluateInterval time.Duration - triggerArrayChan chan []*models.Trigger + triggerArrayChan chan *models.DynamicScalingRules testAppId1 = "testAppId1" testAppId2 = "testAppId2" testMetricName = "Test-Metric-Name" @@ -73,7 +73,7 @@ var _ = Describe("AppEvaluationManager", func() { fclock = fakeclock.NewFakeClock(fakeTime) testEvaluateInterval = 1 * time.Second logger = lagertest.NewTestLogger("ApplicationManager-test") - triggerArrayChan = make(chan []*models.Trigger, 10) + triggerArrayChan = make(chan *models.DynamicScalingRules, 10) }) Describe("Start", func() { @@ -103,14 +103,14 @@ var _ = Describe("AppEvaluationManager", func() { It("should add triggers to evaluate", func() { fclock.Increment(10 * testEvaluateInterval) - var arr []*models.Trigger - var triggerArray = [][]*models.Trigger{} + var arr *models.DynamicScalingRules + var triggerArray = 
[]*models.DynamicScalingRules{} Eventually(triggerArrayChan).Should(Receive(&arr)) triggerArray = append(triggerArray, arr) Eventually(triggerArrayChan).Should(Receive(&arr)) triggerArray = append(triggerArray, arr) Expect(triggerArray).Should(ContainElement( - []*models.Trigger{{ + &models.DynamicScalingRules{Triggers: []*models.Trigger{{ AppId: testAppId1, MetricType: testMetricName, BreachDurationSeconds: 200, @@ -118,9 +118,9 @@ var _ = Describe("AppEvaluationManager", func() { Threshold: 80, Operator: ">=", Adjustment: "1", - }})) + }}})) Expect(triggerArray).Should(ContainElement( - []*models.Trigger{{ + &models.DynamicScalingRules{Triggers: []*models.Trigger{{ AppId: testAppId2, MetricType: testMetricName, BreachDurationSeconds: 300, @@ -128,7 +128,7 @@ var _ = Describe("AppEvaluationManager", func() { Threshold: 20, Operator: "<=", Adjustment: "-1", - }})) + }}})) }) }) @@ -147,8 +147,8 @@ var _ = Describe("AppEvaluationManager", func() { }) It("should add triggers to evaluate after cooldown expired", func() { - var arr []*models.Trigger - var triggerArray = [][]*models.Trigger{} + var arr *models.DynamicScalingRules + var triggerArray = []*models.DynamicScalingRules{} fclock.Increment(10 * testEvaluateInterval) Consistently(triggerArrayChan).ShouldNot(Receive()) @@ -160,7 +160,7 @@ var _ = Describe("AppEvaluationManager", func() { triggerArray = append(triggerArray, arr) Expect(triggerArray).Should(ContainElement( - []*models.Trigger{{ + &models.DynamicScalingRules{Triggers: []*models.Trigger{{ AppId: testAppId2, MetricType: testMetricName, BreachDurationSeconds: 300, @@ -168,7 +168,7 @@ var _ = Describe("AppEvaluationManager", func() { Threshold: 20, Operator: "<=", Adjustment: "-1", - }})) + }}})) }) }) }) diff --git a/src/autoscaler/eventgenerator/generator/evaluator.go b/src/autoscaler/eventgenerator/generator/evaluator.go index 1aaa443fa0..952e3a657e 100644 --- a/src/autoscaler/eventgenerator/generator/evaluator.go +++ b/src/autoscaler/eventgenerator/generator/evaluator.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "net/http" + "slices" "strconv" "time" @@ -24,7 +25,7 @@ type Evaluator struct { logger lager.Logger httpClient *http.Client scalingEngineUrl string - triggerChan chan []*models.Trigger + triggerChan chan *models.DynamicScalingRules doneChan chan bool defaultBreachDurationSecs int queryAppMetrics aggregator.QueryAppMetricsFunc @@ -32,7 +33,7 @@ type Evaluator struct { setCoolDownExpired func(string, int64) } -func NewEvaluator(logger lager.Logger, httpClient *http.Client, scalingEngineUrl string, triggerChan chan []*models.Trigger, +func NewEvaluator(logger lager.Logger, httpClient *http.Client, scalingEngineUrl string, triggerChan chan *models.DynamicScalingRules, defaultBreachDurationSecs int, queryAppMetrics aggregator.QueryAppMetricsFunc, getBreaker func(string) *circuit.Breaker, setCoolDownExpired func(string, int64)) *Evaluator { return &Evaluator{ logger: logger.Session("Evaluator"), @@ -57,8 +58,8 @@ func (e *Evaluator) start() { select { case <-e.doneChan: return - case triggerArray := <-e.triggerChan: - e.doEvaluate(triggerArray) + case dynamicScalingRules := <-e.triggerChan: + e.doEvaluate(dynamicScalingRules) } } } @@ -69,18 +70,45 @@ func (e *Evaluator) Stop() { e.logger.Info("stopped") } -func (e *Evaluator) doEvaluate(triggerArray []*models.Trigger) { - for _, trigger := range triggerArray { +func (e *Evaluator) filterOutInvalidTriggers(rules *models.DynamicScalingRules) { + validTriggers := rules.Triggers[:0] + for _, trigger := range rules.Triggers { if 
trigger.BreachDurationSeconds <= 0 { trigger.BreachDurationSeconds = e.defaultBreachDurationSecs } - threshold := trigger.Threshold - operator := trigger.Operator - if !e.isValidOperator(operator) { + if !e.hasValidOperator(trigger) { e.logger.Error("operator-is-invalid", nil, lager.Data{"trigger": trigger}) continue } + validTriggers = append(validTriggers, trigger) + } + rules.Triggers = validTriggers +} + +func (e *Evaluator) doEvaluate(rules *models.DynamicScalingRules) { + e.filterOutInvalidTriggers(rules) + if rules.ScalingRuleEvaluation == models.BiasedToScaleOut { + slices.SortStableFunc(rules.Triggers, func(a, b *models.Trigger) int { + adj1, err1 := models.ParseAdjustment(a.Adjustment) + if err1 != nil { + e.logger.Error("do-evaluate", err1) + } + adj2, err2 := models.ParseAdjustment(b.Adjustment) + if err2 != nil { + e.logger.Error("do-evaluate", err2) + } + + // sort in descending order, so that the biggest scale out adjustment is evaluated first + return -models.CompareAdjustments(adj1, adj2) + }) + } + + e.evaluateFirstMatching(rules) +} + +func (e *Evaluator) evaluateFirstMatching(rules *models.DynamicScalingRules) { + for _, trigger := range rules.Triggers { appMetricList, err := e.retrieveAppMetrics(trigger) if err != nil { continue @@ -90,66 +118,70 @@ func (e *Evaluator) doEvaluate(triggerArray []*models.Trigger) { continue } - isBreached, appMetric := checkForBreach(appMetricList, e, trigger, operator, threshold) - - if isBreached { + if e.isBreached(trigger, appMetricList) { trigger.MetricUnit = appMetricList[0].Unit - e.logger.Info("send trigger alarm to scaling engine", lager.Data{"trigger": trigger, "last_metric": appMetric}) - - if appBreaker := e.getBreaker(trigger.AppId); appBreaker != nil { - if appBreaker.Tripped() { - e.logger.Info("circuit-tripped", lager.Data{"appId": trigger.AppId, "consecutiveFailures": appBreaker.ConsecFailures()}) - } - err = appBreaker.Call(func() error { return e.sendTriggerAlarm(trigger) }, 0) - if err != nil { - e.logger.Error("circuit-alarm-failed", err, lager.Data{"appId": trigger.AppId}) - } - } else { - err = e.sendTriggerAlarm(trigger) - if err != nil { - e.logger.Error("circuit-alarm-failed", err, lager.Data{"appId": trigger.AppId}) - } - } + e.logger.Info("send trigger alarm to scaling engine", lager.Data{"trigger": trigger, "last_metric": appMetricList[len(appMetricList)-1]}) + e.sendToScalingEngine(trigger) return } } } -func checkForBreach(appMetricList []*models.AppMetric, e *Evaluator, trigger *models.Trigger, operator string, threshold int64) (bool, *models.AppMetric) { +func (e *Evaluator) sendToScalingEngine(trigger *models.Trigger) { + if appBreaker := e.getBreaker(trigger.AppId); appBreaker != nil { + if appBreaker.Tripped() { + e.logger.Info("circuit-tripped", lager.Data{"appId": trigger.AppId, "consecutiveFailures": appBreaker.ConsecFailures()}) + } + err := appBreaker.Call(func() error { return e.sendTriggerAlarm(trigger) }, 0) + if err != nil { + e.logger.Error("circuit-alarm-failed", err, lager.Data{"appId": trigger.AppId}) + } + } else { + err := e.sendTriggerAlarm(trigger) + if err != nil { + e.logger.Error("circuit-alarm-failed", err, lager.Data{"appId": trigger.AppId}) + } + } +} + +func (e *Evaluator) isBreached(trigger *models.Trigger, appMetricList []*models.AppMetric) bool { + operator := trigger.Operator + threshold := trigger.Threshold + var appMetric *models.AppMetric for _, appMetric = range appMetricList { if appMetric.Value == "" { e.logger.Debug("should not send trigger alarm to scaling engine 
because there is empty value metric", lager.Data{"trigger": trigger, "appMetric": appMetric}) - return false, appMetric + return false } value, err := strconv.ParseInt(appMetric.Value, 10, 64) if err != nil { e.logger.Debug("should not send trigger alarm to scaling engine because parse metric value fails", lager.Data{"trigger": trigger, "appMetric": appMetric}) - return false, appMetric + return false } if operator == ">" { if value <= threshold { e.logger.Debug("should not send trigger alarm to scaling engine", lager.Data{"trigger": trigger, "appMetric": appMetric}) - return false, appMetric + return false } } else if operator == ">=" { if value < threshold { e.logger.Debug("should not send trigger alarm to scaling engine", lager.Data{"trigger": trigger, "appMetric": appMetric}) - return false, appMetric + return false } } else if operator == "<" { if value >= threshold { e.logger.Debug("should not send trigger alarm to scaling engine", lager.Data{"trigger": trigger, "appMetric": appMetric}) - return false, appMetric + return false } } else if operator == "<=" { if value > threshold { e.logger.Debug("should not send trigger alarm to scaling engine", lager.Data{"trigger": trigger, "appMetric": appMetric}) - return false, appMetric + return false } } } - return true, appMetric + return true } func (e *Evaluator) retrieveAppMetrics(trigger *models.Trigger) ([]*models.AppMetric, error) { @@ -202,7 +234,7 @@ func (e *Evaluator) sendTriggerAlarm(trigger *models.Trigger) error { return err } - defer resp.Body.Close() + defer func() { _ = resp.Body.Close() }() respBody, err := io.ReadAll(resp.Body) if err != nil { @@ -227,9 +259,9 @@ func (e *Evaluator) sendTriggerAlarm(trigger *models.Trigger) error { return err } -func (e *Evaluator) isValidOperator(operator string) bool { +func (e *Evaluator) hasValidOperator(trigger *models.Trigger) bool { for _, o := range validOperators { - if o == operator { + if o == trigger.Operator { return true } } diff --git a/src/autoscaler/eventgenerator/generator/evaluator_test.go b/src/autoscaler/eventgenerator/generator/evaluator_test.go index e24a27478e..accb34d7d8 100644 --- a/src/autoscaler/eventgenerator/generator/evaluator_test.go +++ b/src/autoscaler/eventgenerator/generator/evaluator_test.go @@ -25,7 +25,7 @@ var _ = Describe("Evaluator", func() { var ( logger *lagertest.TestLogger httpClient *http.Client - triggerChan chan []*models.Trigger + triggerChan chan *models.DynamicScalingRules scalingEngine *ghttp.Server evaluator *Evaluator testAppId = "testAppId" @@ -41,23 +41,23 @@ var _ = Describe("Evaluator", func() { fakeTime = time.Now() lock = &sync.Mutex{} scalingResult *models.AppScalingResult - triggerArrayGT = []*models.Trigger{{ + triggerArrayGT = &models.DynamicScalingRules{Triggers: []*models.Trigger{{ AppId: testAppId, MetricType: testMetricType, CoolDownSeconds: 300, Threshold: 500, Operator: ">", Adjustment: "+1", - }} - triggerArrayGE = []*models.Trigger{{ + }}} + triggerArrayGE = &models.DynamicScalingRules{Triggers: []*models.Trigger{{ AppId: testAppId, MetricType: testMetricType, CoolDownSeconds: 300, Threshold: 500, Operator: ">=", Adjustment: "+1", - }} - triggerArrayLT = []*models.Trigger{{ + }}} + triggerArrayLT = &models.DynamicScalingRules{Triggers: []*models.Trigger{{ AppId: testAppId, MetricType: testMetricType, BreachDurationSeconds: breachDurationSecs, @@ -65,8 +65,8 @@ var _ = Describe("Evaluator", func() { Threshold: 500, Operator: "<", Adjustment: "-1", - }} - triggerArrayLE = []*models.Trigger{{ + }}} + triggerArrayLE = 
&models.DynamicScalingRules{Triggers: []*models.Trigger{{ AppId: testAppId, MetricType: testMetricType, BreachDurationSeconds: breachDurationSecs, @@ -74,9 +74,9 @@ var _ = Describe("Evaluator", func() { Threshold: 500, Operator: "<=", Adjustment: "-1", - }} + }}} - firstTrigger = models.Trigger{ + scaleOutWhenGTE = models.Trigger{ AppId: testAppId, MetricType: testMetricType, MetricUnit: testMetricUnit, @@ -87,7 +87,7 @@ var _ = Describe("Evaluator", func() { Adjustment: "+1", } - secondTrigger = models.Trigger{ + scaleInWhenLTE = models.Trigger{ AppId: testAppId, MetricType: testMetricType, MetricUnit: testMetricUnit, @@ -97,12 +97,16 @@ var _ = Describe("Evaluator", func() { Operator: "<=", Adjustment: "-1", } - triggerArrayMultipleTriggers = []*models.Trigger{&firstTrigger, &secondTrigger} + multipleTriggers = &models.DynamicScalingRules{Triggers: []*models.Trigger{&scaleOutWhenGTE, &scaleInWhenLTE}} + multipleTriggersBiasedToScaleout = &models.DynamicScalingRules{ + Triggers: []*models.Trigger{&scaleInWhenLTE, &scaleOutWhenGTE}, + ScalingRuleEvaluation: models.BiasedToScaleOut, + } ) BeforeEach(func() { logger = lagertest.NewTestLogger("Evaluator-test") httpClient = cfhttp.NewClient() - triggerChan = make(chan []*models.Trigger, 1) + triggerChan = make(chan *models.DynamicScalingRules, 1) r := routes.NewRouter() path, err := r.CreateScalingEngineRoutes().Get(routes.ScaleRouteName).URLPath("appid", testAppId) @@ -150,9 +154,9 @@ var _ = Describe("Evaluator", func() { scalingEngine.Close() }) - Context("when evaluator is started", func() { + When("evaluator is started", func() { - Context("when the appMetrics are not enough", func() { + When("the appMetrics are not enough", func() { BeforeEach(func() { scalingEngine.RouteToHandler("POST", urlPath, ghttp.RespondWith(http.StatusOK, "successful")) Expect(triggerChan).To(BeSent(triggerArrayGT)) @@ -174,7 +178,7 @@ var _ = Describe("Evaluator", func() { BeforeEach(func() { Expect(triggerChan).To(BeSent(triggerArrayGT)) }) - Context("when the appMetrics breach the trigger", func() { + When("the appMetrics breach the trigger", func() { BeforeEach(func() { appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{600, 650, 620}, breachDurationSecs, true) queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { @@ -201,7 +205,7 @@ var _ = Describe("Evaluator", func() { }) }) - Context("when the appMetrics do not breach the trigger", func() { + When("the appMetrics do not breach the trigger", func() { BeforeEach(func() { appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{200, 150, 600}, breachDurationSecs, true) queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { @@ -214,7 +218,7 @@ var _ = Describe("Evaluator", func() { }) }) - Context("when appMetrics is empty", func() { + When("appMetrics is empty", func() { BeforeEach(func() { queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { return []*models.AppMetric{}, nil @@ -225,7 +229,7 @@ var _ = Describe("Evaluator", func() { Consistently(scalingEngine.ReceivedRequests).Should(HaveLen(0)) }) }) - Context("when the appMetrics contain empty value elements", func() { + When("the appMetrics contain empty value elements", func() { BeforeEach(func() { appMetrics := generateTestAppMetrics(testAppId, 
testMetricType, testMetricUnit, []int64{600, 650, 620}, breachDurationSecs, true) appMetrics = append(appMetrics, &models.AppMetric{AppId: testAppId, @@ -248,7 +252,7 @@ var _ = Describe("Evaluator", func() { Expect(triggerChan).To(BeSent(triggerArrayGE)) }) - Context("when the appMetrics breach the trigger", func() { + When("the appMetrics breach the trigger", func() { BeforeEach(func() { appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{600, 500, 500}, breachDurationSecs, true) queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { @@ -261,7 +265,7 @@ var _ = Describe("Evaluator", func() { }) }) - Context("when the appMetrics do not breach the trigger", func() { + When("the appMetrics do not breach the trigger", func() { BeforeEach(func() { appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{200, 150, 600}, breachDurationSecs, true) queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { @@ -274,7 +278,7 @@ var _ = Describe("Evaluator", func() { }) }) - Context("when appMetrics is empty", func() { + When("appMetrics is empty", func() { BeforeEach(func() { queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { return []*models.AppMetric{}, nil @@ -286,7 +290,7 @@ var _ = Describe("Evaluator", func() { Consistently(scalingEngine.ReceivedRequests).Should(HaveLen(0)) }) }) - Context("when the appMetrics contain empty value elements", func() { + When("the appMetrics contain empty value elements", func() { BeforeEach(func() { appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{600, 500, 500}, breachDurationSecs, true) appMetrics = append(appMetrics, &models.AppMetric{AppId: testAppId, @@ -308,7 +312,7 @@ var _ = Describe("Evaluator", func() { BeforeEach(func() { Expect(triggerChan).To(BeSent(triggerArrayLT)) }) - Context("when the appMetrics breach the trigger", func() { + When("the appMetrics breach the trigger", func() { BeforeEach(func() { appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{200, 300, 400}, breachDurationSecs, true) queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { @@ -321,7 +325,7 @@ var _ = Describe("Evaluator", func() { }) }) - Context("when the appMetrics do not breach the trigger", func() { + When("the appMetrics do not breach the trigger", func() { BeforeEach(func() { appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{500, 550, 600}, breachDurationSecs, true) queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { @@ -334,7 +338,7 @@ var _ = Describe("Evaluator", func() { }) }) - Context("when appMetrics is empty", func() { + When("appMetrics is empty", func() { BeforeEach(func() { queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { return []*models.AppMetric{}, nil @@ -346,7 +350,7 @@ var _ = Describe("Evaluator", func() { Consistently(scalingEngine.ReceivedRequests).Should(HaveLen(0)) }) }) - Context("when the appMetrics contain empty value elements", func() { + When("the appMetrics contain empty value 
elements", func() { BeforeEach(func() { appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{200, 300, 400}, breachDurationSecs, true) appMetrics = append(appMetrics, &models.AppMetric{AppId: testAppId, @@ -368,7 +372,7 @@ var _ = Describe("Evaluator", func() { BeforeEach(func() { Expect(triggerChan).To(BeSent(triggerArrayLE)) }) - Context("when the appMetrics breach the trigger", func() { + When("the appMetrics breach the trigger", func() { BeforeEach(func() { appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{200, 500, 500}, breachDurationSecs, true) queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { @@ -382,7 +386,7 @@ var _ = Describe("Evaluator", func() { }) }) - Context("when the appMetrics do not breach the trigger", func() { + When("the appMetrics do not breach the trigger", func() { BeforeEach(func() { appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{500, 550, 600}, breachDurationSecs, true) queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { @@ -395,7 +399,7 @@ var _ = Describe("Evaluator", func() { }) }) - Context("when appMetrics is empty", func() { + When("appMetrics is empty", func() { BeforeEach(func() { queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { return []*models.AppMetric{}, nil @@ -407,7 +411,7 @@ var _ = Describe("Evaluator", func() { Consistently(scalingEngine.ReceivedRequests).Should(HaveLen(0)) }) }) - Context("when the appMetrics contain empty value elements", func() { + When("the appMetrics contain empty value elements", func() { BeforeEach(func() { appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{200, 500, 500}, breachDurationSecs, true) appMetrics = append(appMetrics, &models.AppMetric{AppId: testAppId, @@ -428,72 +432,146 @@ var _ = Describe("Evaluator", func() { }) Context("multiple triggers", func() { - BeforeEach(func() { - Expect(triggerChan).To(BeSent(triggerArrayMultipleTriggers)) - }) - Context("when only the first trigger breaches", func() { + When("evaluating default first matching", func() { BeforeEach(func() { - scalingEngine.AppendHandlers( - ghttp.CombineHandlers( - ghttp.VerifyRequest("POST", urlPath), - ghttp.VerifyJSONRepresenting(firstTrigger), - ghttp.RespondWithJSONEncoded(http.StatusOK, &scalingResult), - ), - ) - appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{500, 550, 600}, breachDurationSecs, true) - queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { - return appMetrics, nil - } + Expect(triggerChan).To(BeSent(multipleTriggers)) }) - It("should send alarm of first trigger to scaling engine", func() { - Eventually(scalingEngine.ReceivedRequests).Should(HaveLen(1)) - Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("send trigger alarm to scaling engine"))) - Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("successfully-send-trigger-alarm with trigger"))) + When("only the first trigger breaches", func() { + BeforeEach(func() { + scalingEngine.AppendHandlers( + ghttp.CombineHandlers( + ghttp.VerifyRequest("POST", urlPath), + ghttp.VerifyJSONRepresenting(scaleOutWhenGTE), + 
ghttp.RespondWithJSONEncoded(http.StatusOK, &scalingResult), + ), + ) + appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{500, 550, 600}, breachDurationSecs, true) + queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { + return appMetrics, nil + } + }) + It("should send alarm of first trigger to scaling engine", func() { + Eventually(scalingEngine.ReceivedRequests).Should(HaveLen(1)) + Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("send trigger alarm to scaling engine"))) + Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("successfully-send-trigger-alarm with trigger"))) + }) }) - }) - Context("when only second tigger breaches", func() { - BeforeEach(func() { - scalingEngine.AppendHandlers( - ghttp.CombineHandlers( - ghttp.VerifyRequest("POST", urlPath), - ghttp.VerifyJSONRepresenting(secondTrigger), - ghttp.RespondWithJSONEncoded(http.StatusOK, &scalingResult), - ), - ) - appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{300, 400, 500}, breachDurationSecs, true) - queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { - return appMetrics, nil - } + When("only second trigger breaches", func() { + BeforeEach(func() { + scalingEngine.AppendHandlers( + ghttp.CombineHandlers( + ghttp.VerifyRequest("POST", urlPath), + ghttp.VerifyJSONRepresenting(scaleInWhenLTE), + ghttp.RespondWithJSONEncoded(http.StatusOK, &scalingResult), + ), + ) + appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{300, 400, 500}, breachDurationSecs, true) + queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { + return appMetrics, nil + } + }) + It("should send alarm of second trigger to scaling engine", func() { + Eventually(scalingEngine.ReceivedRequests).Should(HaveLen(1)) + Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("send trigger alarm to scaling engine"))) + Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("successfully-send-trigger-alarm with trigger"))) + + }) }) - It("should send alarm of second trigger to scaling engine", func() { - Eventually(scalingEngine.ReceivedRequests).Should(HaveLen(1)) - Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("send trigger alarm to scaling engine"))) - Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("successfully-send-trigger-alarm with trigger"))) + When("both triggers breach", func() { + BeforeEach(func() { + scalingEngine.AppendHandlers( + ghttp.CombineHandlers( + ghttp.VerifyRequest("POST", urlPath), + ghttp.VerifyJSONRepresenting(scaleOutWhenGTE), + ghttp.RespondWithJSONEncoded(http.StatusOK, &scalingResult), + ), + ) + appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{500, 500, 500}, breachDurationSecs, true) + queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { + return appMetrics, nil + } + }) + It("should send alarm of first trigger to scaling engine", func() { + Eventually(scalingEngine.ReceivedRequests).Should(HaveLen(1)) + Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("send trigger alarm to scaling engine"))) + 
Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("successfully-send-trigger-alarm with trigger"))) + }) }) + }) - Context("when both tiggers breach", func() { + When("evaluating biased to scaleout", func() { BeforeEach(func() { - scalingEngine.AppendHandlers( - ghttp.CombineHandlers( - ghttp.VerifyRequest("POST", urlPath), - ghttp.VerifyJSONRepresenting(firstTrigger), - ghttp.RespondWithJSONEncoded(http.StatusOK, &scalingResult), - ), - ) - appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{500, 500, 500}, breachDurationSecs, true) - queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { - return appMetrics, nil - } + Expect(triggerChan).To(BeSent(multipleTriggersBiasedToScaleout)) }) - It("should send alarm of first trigger to scaling engine", func() { - Eventually(scalingEngine.ReceivedRequests).Should(HaveLen(1)) - Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("send trigger alarm to scaling engine"))) - Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("successfully-send-trigger-alarm with trigger"))) + When("only the first trigger breaches", func() { + BeforeEach(func() { + scalingEngine.AppendHandlers( + ghttp.CombineHandlers( + ghttp.VerifyRequest("POST", urlPath), + ghttp.VerifyJSONRepresenting(scaleOutWhenGTE), + ghttp.RespondWithJSONEncoded(http.StatusOK, &scalingResult), + ), + ) + appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{500, 550, 600}, breachDurationSecs, true) + queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { + return appMetrics, nil + } + }) + It("should send alarm of first trigger to scaling engine", func() { + Eventually(scalingEngine.ReceivedRequests).Should(HaveLen(1)) + Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("send trigger alarm to scaling engine"))) + Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("successfully-send-trigger-alarm with trigger"))) + + }) + }) + + When("only second trigger breaches", func() { + BeforeEach(func() { + scalingEngine.AppendHandlers( + ghttp.CombineHandlers( + ghttp.VerifyRequest("POST", urlPath), + ghttp.VerifyJSONRepresenting(scaleInWhenLTE), + ghttp.RespondWithJSONEncoded(http.StatusOK, &scalingResult), + ), + ) + appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{300, 400, 500}, breachDurationSecs, true) + queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { + return appMetrics, nil + } + }) + It("should send alarm of second trigger to scaling engine", func() { + Eventually(scalingEngine.ReceivedRequests).Should(HaveLen(1)) + Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("send trigger alarm to scaling engine"))) + Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("successfully-send-trigger-alarm with trigger"))) + + }) + }) + + When("both triggers breach", func() { + BeforeEach(func() { + scalingEngine.AppendHandlers( + ghttp.CombineHandlers( + ghttp.VerifyRequest("POST", urlPath), + ghttp.VerifyJSONRepresenting(scaleOutWhenGTE), + ghttp.RespondWithJSONEncoded(http.StatusOK, &scalingResult), + ), + ) + appMetrics := generateTestAppMetrics(testAppId, testMetricType, testMetricUnit, []int64{500, 500, 500}, 
breachDurationSecs, true) + queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { + return appMetrics, nil + } + }) + It("should send alarm of scaleout trigger to scaling engine", func() { + Eventually(scalingEngine.ReceivedRequests).Should(HaveLen(1)) + Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("send trigger alarm to scaling engine"))) + Eventually(logger.LogMessages).Should(ContainElement(ContainSubstring("successfully-send-trigger-alarm with trigger"))) + }) }) + }) }) @@ -507,8 +585,8 @@ var _ = Describe("Evaluator", func() { Expect(triggerChan).To(BeSent(triggerArrayGT)) }) - Context("when the scaling engine returns 200 with different scalingResults", func() { - Context("when cooldownExpiredAt is set to a valid timestamp in scalingResult ", func() { + When("the scaling engine returns 200 with different scalingResults", func() { + When("cooldownExpiredAt is set to a valid timestamp in scalingResult ", func() { BeforeEach(func() { scalingEngine.AppendHandlers( ghttp.CombineHandlers( @@ -529,7 +607,7 @@ var _ = Describe("Evaluator", func() { }) }) - Context("when cooldownExpiredAt is 0 in scalingResult", func() { + When("cooldownExpiredAt is 0 in scalingResult", func() { BeforeEach(func() { scalingResult.CooldownExpiredAt = 0 @@ -551,7 +629,7 @@ var _ = Describe("Evaluator", func() { }) }) - Context("when cooldownExpiredAt is not set in scalingResult", func() { + When("cooldownExpiredAt is not set in scalingResult", func() { BeforeEach(func() { scalingEngine.AppendHandlers( ghttp.CombineHandlers( @@ -571,7 +649,7 @@ var _ = Describe("Evaluator", func() { }) }) - Context("when response is not a valid type scalingResult", func() { + When("response is not a valid type scalingResult", func() { BeforeEach(func() { scalingEngine.AppendHandlers( ghttp.CombineHandlers( @@ -592,7 +670,7 @@ var _ = Describe("Evaluator", func() { }) }) - Context("when the scaling engine returns error", func() { + When("the scaling engine returns error", func() { BeforeEach(func() { scalingEngine.RouteToHandler("POST", urlPath, ghttp.RespondWithJSONEncoded(http.StatusBadRequest, "error")) }) @@ -603,7 +681,7 @@ var _ = Describe("Evaluator", func() { }) }) - PContext("when the scaling engine's response is too long", func() { + PWhen("the scaling engine's response is too long", func() { BeforeEach(func() { tmp := "" errorStr := "" @@ -712,7 +790,7 @@ var _ = Describe("Evaluator", func() { }) }) - Context("when retrieving appMetrics failed", func() { + When("retrieving appMetrics failed", func() { BeforeEach(func() { queryAppMetrics = func(appID string, metricType string, start int64, end int64, orderType db.OrderType) ([]*models.AppMetric, error) { return nil, errors.New("an error") @@ -724,9 +802,9 @@ var _ = Describe("Evaluator", func() { }) }) - Context("when there are invalid operators in trigger", func() { + When("there are invalid operators in trigger", func() { BeforeEach(func() { - invalidTriggerArray := []*models.Trigger{{ + invalidTriggerArray := &models.DynamicScalingRules{Triggers: []*models.Trigger{{ AppId: testAppId, MetricType: testMetricType, BreachDurationSeconds: breachDurationSecs, @@ -734,8 +812,8 @@ var _ = Describe("Evaluator", func() { Threshold: 500, Operator: "invalid_operator", Adjustment: "1", - }} - triggerChan = make(chan []*models.Trigger, 1) + }}} + triggerChan = make(chan *models.DynamicScalingRules, 1) Eventually(triggerChan).Should(BeSent(invalidTriggerArray)) }) diff --git 
a/src/autoscaler/models/adjustment.go b/src/autoscaler/models/adjustment.go new file mode 100644 index 0000000000..e4ecaada20 --- /dev/null +++ b/src/autoscaler/models/adjustment.go @@ -0,0 +1,98 @@ +package models + +import ( + "strconv" + "strings" +) + +type Adjustment struct { + IsScaleOut bool + IsRelative bool + Value int +} + +// ParseAdjustment parses an adjustment such as "+1", "-2" or "+10%" into its direction, relative flag and numeric value. +func ParseAdjustment(adjustment string) (Adjustment, error) { + isScaleOut := strings.HasPrefix(adjustment, "+") + isRelative := strings.HasSuffix(adjustment, "%") + + if isScaleOut || strings.HasPrefix(adjustment, "-") { + adjustment = adjustment[1:] + } + + if isRelative { + adjustment = adjustment[:len(adjustment)-1] + } + + value, err := strconv.Atoi(adjustment) + if err != nil { + return Adjustment{}, err + } + + return Adjustment{ + IsScaleOut: isScaleOut, + IsRelative: isRelative, + Value: value, + }, nil +} + +// compareScaleOut orders scale-in adjustments before scale-out adjustments. +func compareScaleOut(a, b Adjustment) int { + if a.IsScaleOut != b.IsScaleOut { + if a.IsScaleOut { + return 1 + } + return -1 + } + return 0 +} + +// compareRelative orders a relative adjustment as further towards its scaling direction than an absolute one. +func compareRelative(a, b Adjustment) int { + if a.IsRelative != b.IsRelative { + if a.IsScaleOut { + if a.IsRelative { + return 1 + } + return -1 + } + if a.IsRelative { + return -1 + } + return 1 + } + return 0 +} + +// compareValue orders an adjustment with a larger value as further towards its scaling direction. +func compareValue(a, b Adjustment) int { + if a.Value != b.Value { + if a.IsScaleOut { + return a.Value - b.Value + } + return b.Value - a.Value + } + return 0 +} + +// CompareAdjustments is a comparison function that orders adjustments from the strongest scale-in to the strongest scale-out. +func CompareAdjustments(a, b Adjustment) int { + // See [slices.SortFunc](https://pkg.go.dev/slices#SortFunc): + // cmp(a, b) should return a negative number when a < b, a positive number when + // a > b and zero when a == b or a and b are incomparable in the sense of + // a strict weak ordering. + + if differsByScaleOut := compareScaleOut(a, b); differsByScaleOut != 0 { + return differsByScaleOut + } + + if differsByRelative := compareRelative(a, b); differsByRelative != 0 { + return differsByRelative + } + + if differsByValue := compareValue(a, b); differsByValue != 0 { + return differsByValue + } + + return 0 +} diff --git a/src/autoscaler/models/adjustment_test.go b/src/autoscaler/models/adjustment_test.go new file mode 100644 index 0000000000..e8d94ade1e --- /dev/null +++ b/src/autoscaler/models/adjustment_test.go @@ -0,0 +1,68 @@ +package models_test + +import ( + . "code.cloudfoundry.org/app-autoscaler/src/autoscaler/models" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "slices" +) + +var _ = Describe("Adjustment Parsing", func() { + DescribeTable("ParseAdjustment", + func(input string, expected Adjustment, expectError bool) { + result, err := ParseAdjustment(input) + if expectError { + Expect(err).To(HaveOccurred()) + } else { + Expect(err).NotTo(HaveOccurred()) + Expect(result).To(Equal(expected)) + } + }, + Entry("positive absolute value", "+10", Adjustment{IsScaleOut: true, IsRelative: false, Value: 10}, false), + Entry("negative absolute value", "-10", Adjustment{IsScaleOut: false, IsRelative: false, Value: 10}, false), + Entry("positive relative value", "+10%", Adjustment{IsScaleOut: true, IsRelative: true, Value: 10}, false), + Entry("negative relative value", "-10%", Adjustment{IsScaleOut: false, IsRelative: true, Value: 10}, false), + Entry("invalid value", "abc", Adjustment{}, true), + ) +}) + +var _ = Describe("Adjustment Comparison", func() { + DescribeTable("CompareAdjustments", + func(adj1, adj2 Adjustment, expected int) { + result := CompareAdjustments(adj1, adj2) + Expect(result).To(Equal(expected)) + }, + Entry("both positive absolute values, equal", Adjustment{IsScaleOut: true, IsRelative: false, Value: 10}, Adjustment{IsScaleOut: true, IsRelative: false, Value: 10}, 0), + Entry("both positive absolute values, different", Adjustment{IsScaleOut: true, IsRelative: false, Value: 10}, Adjustment{IsScaleOut: true, IsRelative: false, Value: 5}, 5), + Entry("positive vs negative absolute values", Adjustment{IsScaleOut: true, IsRelative: false, Value: 10}, Adjustment{IsScaleOut: false, IsRelative: false, Value: 10}, 1), + Entry("both negative absolute values, equal", Adjustment{IsScaleOut: false, IsRelative: false, Value: 10}, Adjustment{IsScaleOut: false, IsRelative: false, Value: 10}, 0), + Entry("both positive relative values, equal", Adjustment{IsScaleOut: true, IsRelative: true, Value: 10}, Adjustment{IsScaleOut: true, IsRelative: true, Value: 10}, 0), + Entry("positive vs negative relative values", Adjustment{IsScaleOut: true, IsRelative: true, Value: 10}, Adjustment{IsScaleOut: false, IsRelative: true, Value: 10}, 1), + Entry("absolute vs relative values", Adjustment{IsScaleOut: true, IsRelative: false, Value: 10}, Adjustment{IsScaleOut: true, IsRelative: true, Value: 10}, -1), + Entry("relative vs absolute values", Adjustment{IsScaleOut: true, IsRelative: true, Value: 10}, Adjustment{IsScaleOut: true, IsRelative: false, Value: 10}, 1), + ) +}) + +var _ = Describe("Adjustment Sorting", func() { + It("should sort adjustments using CompareAdjustments", func() { + adjustments := []Adjustment{ + {IsScaleOut: true, IsRelative: false, Value: 5}, + {IsScaleOut: false, IsRelative: true, Value: 10}, + {IsScaleOut: true, IsRelative: true, Value: 10}, + {IsScaleOut: false, IsRelative: false, Value: 5}, + } + + expected := []Adjustment{ + {IsScaleOut: false, IsRelative: true, Value: 10}, + {IsScaleOut: false, IsRelative: false, Value: 5}, + {IsScaleOut: true, IsRelative: false, Value: 5}, + {IsScaleOut: true, IsRelative: true, Value: 10}, + } + + slices.SortStableFunc(adjustments, func(a, b Adjustment) int { + return CompareAdjustments(a, b) + }) + + Expect(adjustments).To(Equal(expected)) + }) +}) diff --git a/src/autoscaler/models/policy.go b/src/autoscaler/models/policy.go index 4ae132f22f..db0aab2465 100644 --- a/src/autoscaler/models/policy.go +++ b/src/autoscaler/models/policy.go @@ -38,14 +38,22 @@ func (p *PolicyJson) GetAppPolicy() (*AppPolicy, error) { return &AppPolicy{AppId: p.AppId, ScalingPolicy: 
&scalingPolicy}, nil } +type ScalingRuleEvaluation string + +const ( + FirstMatching ScalingRuleEvaluation = "first_matching" + BiasedToScaleOut ScalingRuleEvaluation = "biased_to_scale_out" +) + // ScalingPolicy is a customer facing entity and represents the scaling policy for an application. // It can be created/deleted/retrieved by the user via the binding process and public api. If a change is required in the policy, // the corresponding endpoints should also be updated in the public api server. type ScalingPolicy struct { - InstanceMin int `json:"instance_min_count"` - InstanceMax int `json:"instance_max_count"` - ScalingRules []*ScalingRule `json:"scaling_rules,omitempty"` - Schedules *ScalingSchedules `json:"schedules,omitempty"` + InstanceMin int `json:"instance_min_count"` + InstanceMax int `json:"instance_max_count"` + ScalingRules []*ScalingRule `json:"scaling_rules,omitempty"` + Schedules *ScalingSchedules `json:"schedules,omitempty"` + ScalingRuleEvaluation ScalingRuleEvaluation `json:"scaling_rule_evaluation,omitempty"` } func (s ScalingPolicy) String() string { @@ -117,6 +125,11 @@ func (r *ScalingRule) CoolDown(defaultCoolDownSecs int) time.Duration { return time.Duration(r.CoolDownSeconds) * time.Second } +type DynamicScalingRules struct { + Triggers []*Trigger `json:"triggers"` + ScalingRuleEvaluation ScalingRuleEvaluation `json:"scaling_rule_evaluation"` +} + type Trigger struct { AppId string `json:"app_id"` MetricType string `json:"metric_type"`
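Example (illustrative only, not part of the diff above): a policy that opts into the new evaluation mode could look like the following sketch. The "scaling_rule_evaluation" field and its two allowed values come from the schema change above ("first_matching" is the default); the per-rule fields are shown as in the existing autoscaler policy format and the concrete numbers are made up.

{
  "instance_min_count": 1,
  "instance_max_count": 5,
  "scaling_rule_evaluation": "biased_to_scale_out",
  "scaling_rules": [
    { "metric_type": "memoryused", "breach_duration_secs": 120, "threshold": 80, "operator": ">=", "cool_down_secs": 300, "adjustment": "+2" },
    { "metric_type": "memoryused", "breach_duration_secs": 120, "threshold": 30, "operator": "<=", "cool_down_secs": 300, "adjustment": "-1" }
  ]
}

With "biased_to_scale_out", the evaluator sorts an app's triggers by their adjustment (via models.ParseAdjustment and models.CompareAdjustments) so that the largest scale-out adjustment is evaluated first; with the default "first_matching", the rules are evaluated in the order they appear in the policy and the first breached rule wins.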