diff --git a/.github/workflows/pr-issue-validator.yaml b/.github/workflows/pr-issue-validator.yaml index ca7dee58f..7b6b64cc8 100644 --- a/.github/workflows/pr-issue-validator.yaml +++ b/.github/workflows/pr-issue-validator.yaml @@ -12,6 +12,7 @@ on: - 'release-**' - 'develop' - 'hotfix-**' + - 'kubecon-**' # paths-ignore: # - 'docs/**' # - '.github/' @@ -42,4 +43,4 @@ jobs: run: | wget https://raw.githubusercontent.com/devtron-labs/utilities/feat/central-pr-validator/.github/workflows/validateIssue.sh chmod +x validateIssue.sh - ./validateIssue.sh \ No newline at end of file + ./validateIssue.sh diff --git a/chart-sync/go.mod b/chart-sync/go.mod index 24baef510..986f2c440 100644 --- a/chart-sync/go.mod +++ b/chart-sync/go.mod @@ -4,7 +4,7 @@ go 1.24.0 toolchain go1.24.3 -replace github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +replace github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 require ( github.com/caarlos0/env v3.5.0+incompatible diff --git a/chart-sync/go.sum b/chart-sync/go.sum index 957f4c175..781eb41b9 100644 --- a/chart-sync/go.sum +++ b/chart-sync/go.sum @@ -43,8 +43,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be h1:ZufKGk1RMcJsTDgpCfdWcrca90K3s1+88KlVL/4dEmU= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be/go.mod h1:+CUhxuWB8uMYIoiXwofuLIXPyiNnwmoZlH90KWAE5Ew= +github.com/devtron-labs/devtron-services/common-lib 
v0.0.0-20251205120949-62ef158e4713 h1:xDbz5etX5h6foQDgpT9ukTo2I65b6q32Nu9do5nBPk8= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713/go.mod h1:CqHnvltrng2O40hNGVl56qcAhv7tiXK3SKx47LKyE/A= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= diff --git a/chart-sync/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go b/chart-sync/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go new file mode 100644 index 000000000..372765014 --- /dev/null +++ b/chart-sync/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils + +import ( + "fmt" + "strings" + "time" +) + +type TimeRangeRequest struct { + From *time.Time `json:"from" schema:"from"` + To *time.Time `json:"to" schema:"to"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=today yesterday week month quarter lastWeek lastMonth lastQuarter last24Hours last7Days last30Days last90Days"` +} + +func NewTimeRangeRequest(from *time.Time, to *time.Time) *TimeRangeRequest { + return &TimeRangeRequest{ + From: from, + To: to, + } +} + +func NewTimeWindowRequest(timeWindow TimeWindows) *TimeRangeRequest { + return &TimeRangeRequest{ + TimeWindow: &timeWindow, + } +} + +// TimeWindows is a string type that represents different time windows +type TimeWindows string + +func (timeRange TimeWindows) String() string { + return string(timeRange) +} + +// Define constants for different time windows +const ( + Today TimeWindows = "today" + Yesterday TimeWindows = "yesterday" + Week TimeWindows = "week" + Month TimeWindows = "month" + Quarter TimeWindows = "quarter" + LastWeek TimeWindows = "lastWeek" + LastMonth TimeWindows = "lastMonth" + Year TimeWindows = "year" + LastQuarter TimeWindows = "lastQuarter" + Last24Hours TimeWindows = "last24Hours" + Last7Days TimeWindows = "last7Days" + Last30Days TimeWindows = "last30Days" + Last90Days TimeWindows = "last90Days" +) + +func (timeRange *TimeRangeRequest) ParseAndValidateTimeRange() (*TimeRangeRequest, error) { + if timeRange == nil { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("invalid time range request. 
either from/to or timeWindow must be provided") + } + now := time.Now() + // If timeWindow is provided, it takes preference over from/to + if timeRange.TimeWindow != nil { + switch *timeRange.TimeWindow { + case Today: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Yesterday: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()).Add(-24 * time.Hour) + end := start.Add(24 * time.Hour) + return NewTimeRangeRequest(&start, &end), nil + case Week: + // Current week (Monday to Sunday) + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + start := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Month: + start := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Quarter: + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterStart := time.Month((quarter-1)*3 + 1) + start := time.Date(now.Year(), quarterStart, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case LastWeek: + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + thisWeekStart := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + lastWeekStart := thisWeekStart.AddDate(0, 0, -7) + lastWeekEnd := thisWeekStart.Add(-time.Second) + return NewTimeRangeRequest(&lastWeekStart, &lastWeekEnd), nil + case LastMonth: + thisMonthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + lastMonthStart := thisMonthStart.AddDate(0, -1, 0) + lastMonthEnd := thisMonthStart.Add(-time.Second) + return NewTimeRangeRequest(&lastMonthStart, &lastMonthEnd), nil + case LastQuarter: + // Calculate current quarter + currentQuarter := ((int(now.Month()) - 1) / 3) + 1 + + // Calculate previous quarter + var prevQuarter int + var prevYear int + if currentQuarter == 1 
{ + // If current quarter is Q1, previous quarter is Q4 of previous year + prevQuarter = 4 + prevYear = now.Year() - 1 + } else { + // Otherwise, previous quarter is in the same year + prevQuarter = currentQuarter - 1 + prevYear = now.Year() + } + + // Calculate start and end of previous quarter + prevQuarterStartMonth := time.Month((prevQuarter-1)*3 + 1) + prevQuarterStart := time.Date(prevYear, prevQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + + // End of previous quarter is the start of current quarter minus 1 second + currentQuarterStartMonth := time.Month((currentQuarter-1)*3 + 1) + currentQuarterStart := time.Date(now.Year(), currentQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + if currentQuarter == 1 { + // If current quarter is Q1, we need to calculate Q4 end of previous year + currentQuarterStart = time.Date(now.Year(), time.January, 1, 0, 0, 0, 0, now.Location()) + } + prevQuarterEnd := currentQuarterStart.Add(-time.Second) + + return NewTimeRangeRequest(&prevQuarterStart, &prevQuarterEnd), nil + case Year: + start := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Last24Hours: + start := now.Add(-24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Last7Days: + start := now.AddDate(0, 0, -7) + return NewTimeRangeRequest(&start, &now), nil + case Last30Days: + start := now.AddDate(0, 0, -30) + return NewTimeRangeRequest(&start, &now), nil + case Last90Days: + start := now.AddDate(0, 0, -90) + return NewTimeRangeRequest(&start, &now), nil + default: + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("unsupported time window: %q", *timeRange.TimeWindow) + } + } + + // Use from/to dates if provided + if timeRange.From != nil && timeRange.To != nil { + if timeRange.From.After(*timeRange.To) { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from date cannot be after to date") + } + return NewTimeRangeRequest(timeRange.From, 
timeRange.To), nil + } else { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from and to dates are required if time window is not provided") + } +} + +// TimeBoundariesRequest represents the request for time boundary frames +type TimeBoundariesRequest struct { + TimeWindowBoundaries []string `json:"timeWindowBoundaries" schema:"timeWindowBoundaries" validate:"omitempty,min=1"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=week month quarter year"` // week, month, quarter, year + Iterations int `json:"iterations" schema:"iterations" validate:"omitempty,min=1"` +} + +// TimeWindowBoundaries represents the start and end times for a time window +type TimeWindowBoundaries struct { + StartTime time.Time + EndTime time.Time +} + +func (timeBoundaries *TimeBoundariesRequest) ParseAndValidateTimeBoundaries() ([]TimeWindowBoundaries, error) { + if timeBoundaries == nil { + return []TimeWindowBoundaries{}, fmt.Errorf("invalid time boundaries request") + } + // If timeWindow is provided, it takes preference over timeWindowBoundaries + if timeBoundaries.TimeWindow != nil { + switch *timeBoundaries.TimeWindow { + case Week: + return GetWeeklyTimeBoundaries(timeBoundaries.Iterations), nil + case Month: + return GetMonthlyTimeBoundaries(timeBoundaries.Iterations), nil + case Quarter: + return GetQuarterlyTimeBoundaries(timeBoundaries.Iterations), nil + case Year: + return GetYearlyTimeBoundaries(timeBoundaries.Iterations), nil + default: + return []TimeWindowBoundaries{}, fmt.Errorf("unsupported time window: %q", *timeBoundaries.TimeWindow) + } + } else if len(timeBoundaries.TimeWindowBoundaries) != 0 { + // Validate time window + return DecodeAndValidateTimeWindowBoundaries(timeBoundaries.TimeWindowBoundaries) + } else { + return []TimeWindowBoundaries{}, fmt.Errorf("time window boundaries are required if time window is not provided") + } +} + +func GetWeeklyTimeBoundaries(iterations int) []TimeWindowBoundaries { 
+ if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + weekday := int(now.Weekday()) + if weekday == 0 { + weekday = 7 + } + // Get start of this week (Monday) + weekStart := now.AddDate(0, 0, -(weekday - 1)) + // Set time to midnight + weekStart = time.Date(weekStart.Year(), weekStart.Month(), weekStart.Day(), 0, 0, 0, 0, weekStart.Location()) + + for i := 0; i < iterations; i++ { + start := weekStart.AddDate(0, 0, -7*i) + end := start.AddDate(0, 0, 7) + // For the current week, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetMonthlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this month (1st) + monthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := monthStart.AddDate(0, -i, 0) + end := start.AddDate(0, 1, 0) + // For the current month, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetQuarterlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterMonth := time.Month((quarter-1)*3 + 1) + // Get start of this quarter (1st of the month) + quarterStart := time.Date(now.Year(), quarterMonth, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := quarterStart.AddDate(0, -3*i, 0) + end := start.AddDate(0, 3, 0) + // For the current quarter, if now < 
end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetYearlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this year (1st of January) + yearStart := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := yearStart.AddDate(-i, 0, 0) + end := start.AddDate(1, 0, 0) + // For the current year, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func DecodeAndValidateTimeWindowBoundaries(timeWindowBoundaries []string) ([]TimeWindowBoundaries, error) { + boundaries := make([]TimeWindowBoundaries, 0, len(timeWindowBoundaries)) + for _, boundary := range timeWindowBoundaries { + parts := strings.Split(boundary, "|") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid time window boundary format: %q", boundary) + } + startTime, err := time.Parse(time.RFC3339, parts[0]) + if err != nil { + return nil, fmt.Errorf("invalid start time format: %q. expected format: %q", parts[0], time.RFC3339) + } + endTime, err := time.Parse(time.RFC3339, parts[1]) + if err != nil { + return nil, fmt.Errorf("invalid end time format: %q. 
expected format: %q", parts[1], time.RFC3339) + } + if startTime.After(endTime) { + return nil, fmt.Errorf("start time cannot be after end time: %q", boundary) + } + boundaries = append(boundaries, TimeWindowBoundaries{ + StartTime: startTime, + EndTime: endTime, + }) + } + return boundaries, nil +} diff --git a/chart-sync/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go b/chart-sync/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go index ea16a2f72..2bbfa1dc1 100644 --- a/chart-sync/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go +++ b/chart-sync/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go @@ -74,7 +74,9 @@ type PgQueryMonitoringConfig struct { } func GetPgQueryMonitoringConfig(serviceName string) (PgQueryMonitoringConfig, error) { - cfg := &PgQueryMonitoringConfig{} + cfg := &PgQueryMonitoringConfig{ + ServiceName: serviceName, + } err := env.Parse(cfg) return *cfg, err } diff --git a/chart-sync/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go b/chart-sync/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go index fa6858e5d..5c9cb23bf 100644 --- a/chart-sync/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go +++ b/chart-sync/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go @@ -34,7 +34,7 @@ type Config struct { User string `env:"PG_USER" envDefault:"" description:"user for postgres" example:"postgres"` Password string `env:"PG_PASSWORD" envDefault:"" secretData:"-" description:"password for postgres, associated with PG_USER" example:"confidential ;)"` Database string `env:"PG_DATABASE" envDefault:"orchestrator" description:"postgres database to be made connection with" example:"orchestrator, casbin, git_sensor, lens"` - CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin""` + CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin"` ApplicationName string `env:"APP" envDefault:"orchestrator" description:"Application name"` 
ReadTimeout int64 `env:"PG_READ_TIMEOUT" envDefault:"30"` WriteTimeout int64 `env:"PG_WRITE_TIMEOUT" envDefault:"30"` @@ -71,10 +71,10 @@ func NewDbConnection(cfg *Config, logger *zap.SugaredLogger) (*pg.DB, error) { _, err := dbConnection.QueryOne(&test, `SELECT 1`) if err != nil { - logger.Errorw("error in connecting db ", "db", obfuscateSecretTags(cfg), "err", err) + logger.Errorw("error in connecting db ", "db", ObfuscateSecretTags(cfg), "err", err) return nil, err } else { - logger.Infow("connected with db", "db", obfuscateSecretTags(cfg)) + logger.Infow("connected with db", "db", ObfuscateSecretTags(cfg)) } // -------------- @@ -82,7 +82,7 @@ func NewDbConnection(cfg *Config, logger *zap.SugaredLogger) (*pg.DB, error) { return dbConnection, err } -func obfuscateSecretTags(cfg interface{}) interface{} { +func ObfuscateSecretTags(cfg interface{}) interface{} { cfgDpl := reflect.New(reflect.ValueOf(cfg).Elem().Type()).Interface() cfgDplElm := reflect.ValueOf(cfgDpl).Elem() diff --git a/chart-sync/vendor/modules.txt b/chart-sync/vendor/modules.txt index 3e81a62f7..f725a0b27 100644 --- a/chart-sync/vendor/modules.txt +++ b/chart-sync/vendor/modules.txt @@ -95,7 +95,7 @@ github.com/containerd/platforms # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew -# github.com/devtron-labs/common-lib v0.19.1 => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +# github.com/devtron-labs/common-lib v0.19.1 => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 ## explicit; go 1.24.0 github.com/devtron-labs/common-lib/constants github.com/devtron-labs/common-lib/fetchAllEnv @@ -975,4 +975,4 @@ sigs.k8s.io/structured-merge-diff/v4/value sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 -# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +# 
github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 diff --git a/ci-runner/go.mod b/ci-runner/go.mod index 16239ab11..160609a9a 100644 --- a/ci-runner/go.mod +++ b/ci-runner/go.mod @@ -4,7 +4,7 @@ go 1.24.0 toolchain go1.24.3 -replace github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +replace github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 require ( github.com/Knetic/govaluate v3.0.0+incompatible diff --git a/ci-runner/go.sum b/ci-runner/go.sum index 18e48eb18..f846bdf8f 100644 --- a/ci-runner/go.sum +++ b/ci-runner/go.sum @@ -114,8 +114,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be h1:ZufKGk1RMcJsTDgpCfdWcrca90K3s1+88KlVL/4dEmU= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be/go.mod h1:+CUhxuWB8uMYIoiXwofuLIXPyiNnwmoZlH90KWAE5Ew= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 h1:xDbz5etX5h6foQDgpT9ukTo2I65b6q32Nu9do5nBPk8= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713/go.mod h1:CqHnvltrng2O40hNGVl56qcAhv7tiXK3SKx47LKyE/A= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/cli v28.1.1+incompatible 
h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k= diff --git a/ci-runner/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go b/ci-runner/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go index b7434bb27..cfd7f98e4 100644 --- a/ci-runner/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go +++ b/ci-runner/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go @@ -124,6 +124,21 @@ const ( INFRA_HELM_RELEASE_ACTION_TOPIC string = "INFRA_HELM_RELEASE_ACTION_TOPIC" INFRA_HELM_RELEASE_ACTION_GROUP string = "INFRA_HELM_RELEASE_ACTION_GROUP" INFRA_HELM_RELEASE_ACTION_DURABLE string = "INFRA_HELM_RELEASE_ACTION_DURABLE" + COST_MODULE_INSTALLATION_TOPIC string = "COST_MODULE_INSTALLATION_TOPIC" + COST_MODULE_INSTALLATION_GROUP string = "COST_MODULE_INSTALLATION_GROUP" + COST_MODULE_INSTALLATION_DURABLE string = "COST_MODULE_INSTALLATION_DURABLE" + COST_MODULE_GPU_INSTALLATION_TOPIC string = "COST_MODULE_GPU_INSTALLATION_TOPIC" + COST_MODULE_GPU_INSTALLATION_GROUP string = "COST_MODULE_GPU_INSTALLATION_GROUP" + COST_MODULE_GPU_INSTALLATION_DURABLE string = "COST_MODULE_GPU_INSTALLATION_DURABLE" + STORAGE_MODULE_TOPIC string = "STORAGE_MODULE_TOPIC" + STORAGE_MODULE_GROUP string = "STORAGE_MODULE_GROUP" + STORAGE_MODULE_DURABLE string = "STORAGE_MODULE_DURABLE" + STORAGE_VELERO_INSTALL_TOPIC string = "STORAGE_VELERO_INSTALL_TOPIC" + STORAGE_VELERO_INSTALL_GROUP string = "STORAGE_VELERO_INSTALL_GROUP" + STORAGE_VELERO_INSTALL_DURABLE string = "STORAGE_VELERO_INSTALL_DURABLE" + STORAGE_VELERO_POST_INSTALLATION_TOPIC string = "STORAGE_VELERO_POST_INSTALLATION_TOPIC" + STORAGE_VELERO_POST_INSTALLATION_GROUP string = "STORAGE_VELERO_POST_INSTALLATION_GROUP" + STORAGE_VELERO_POST_INSTALLATION_DURABLE string = "STORAGE_VELERO_POST_INSTALLATION_DURABLE" ) type NatsTopic struct { @@ -179,6 +194,11 @@ var natsTopicMapping = map[string]NatsTopic{ INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC: {topicName: 
INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE}, INFRA_HELM_RELEASE_ACTION_TOPIC: {topicName: INFRA_HELM_RELEASE_ACTION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRA_HELM_RELEASE_ACTION_GROUP, consumerName: INFRA_HELM_RELEASE_ACTION_DURABLE}, + COST_MODULE_INSTALLATION_TOPIC: {topicName: COST_MODULE_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_INSTALLATION_GROUP, consumerName: COST_MODULE_INSTALLATION_DURABLE}, + COST_MODULE_GPU_INSTALLATION_TOPIC: {topicName: COST_MODULE_GPU_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_GPU_INSTALLATION_GROUP, consumerName: COST_MODULE_GPU_INSTALLATION_DURABLE}, + STORAGE_MODULE_TOPIC: {topicName: STORAGE_MODULE_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_MODULE_GROUP, consumerName: STORAGE_MODULE_DURABLE}, + STORAGE_VELERO_INSTALL_TOPIC: {topicName: STORAGE_VELERO_INSTALL_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_VELERO_INSTALL_GROUP, consumerName: STORAGE_VELERO_INSTALL_DURABLE}, + STORAGE_VELERO_POST_INSTALLATION_TOPIC: {topicName: STORAGE_VELERO_POST_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_VELERO_POST_INSTALLATION_GROUP, consumerName: STORAGE_VELERO_POST_INSTALLATION_DURABLE}, } var NatsStreamWiseConfigMapping = map[string]NatsStreamConfig{ @@ -221,6 +241,11 @@ var NatsConsumerWiseConfigMapping = map[string]NatsConsumerConfig{ INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE: {}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE: {}, INFRA_HELM_RELEASE_ACTION_DURABLE: {}, + 
COST_MODULE_INSTALLATION_DURABLE: {}, + COST_MODULE_GPU_INSTALLATION_DURABLE: {}, + STORAGE_MODULE_DURABLE: {}, + STORAGE_VELERO_INSTALL_DURABLE: {}, + STORAGE_VELERO_POST_INSTALLATION_DURABLE: {}, } // getConsumerConfigMap will fetch the consumer wise config from the json string diff --git a/ci-runner/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go b/ci-runner/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go new file mode 100644 index 000000000..372765014 --- /dev/null +++ b/ci-runner/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils + +import ( + "fmt" + "strings" + "time" +) + +type TimeRangeRequest struct { + From *time.Time `json:"from" schema:"from"` + To *time.Time `json:"to" schema:"to"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=today yesterday week month quarter lastWeek lastMonth lastQuarter last24Hours last7Days last30Days last90Days"` +} + +func NewTimeRangeRequest(from *time.Time, to *time.Time) *TimeRangeRequest { + return &TimeRangeRequest{ + From: from, + To: to, + } +} + +func NewTimeWindowRequest(timeWindow TimeWindows) *TimeRangeRequest { + return &TimeRangeRequest{ + TimeWindow: &timeWindow, + } +} + +// TimeWindows is a string type that represents different time windows +type TimeWindows string + +func (timeRange TimeWindows) String() string { + return string(timeRange) +} + +// Define constants for different time windows +const ( + Today TimeWindows = "today" + Yesterday TimeWindows = "yesterday" + Week TimeWindows = "week" + Month TimeWindows = "month" + Quarter TimeWindows = "quarter" + LastWeek TimeWindows = "lastWeek" + LastMonth TimeWindows = "lastMonth" + Year TimeWindows = "year" + LastQuarter TimeWindows = "lastQuarter" + Last24Hours TimeWindows = "last24Hours" + Last7Days TimeWindows = "last7Days" + Last30Days TimeWindows = "last30Days" + Last90Days TimeWindows = "last90Days" +) + +func (timeRange *TimeRangeRequest) ParseAndValidateTimeRange() (*TimeRangeRequest, error) { + if timeRange == nil { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("invalid time range request. 
either from/to or timeWindow must be provided") + } + now := time.Now() + // If timeWindow is provided, it takes preference over from/to + if timeRange.TimeWindow != nil { + switch *timeRange.TimeWindow { + case Today: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Yesterday: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()).Add(-24 * time.Hour) + end := start.Add(24 * time.Hour) + return NewTimeRangeRequest(&start, &end), nil + case Week: + // Current week (Monday to Sunday) + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + start := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Month: + start := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Quarter: + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterStart := time.Month((quarter-1)*3 + 1) + start := time.Date(now.Year(), quarterStart, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case LastWeek: + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + thisWeekStart := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + lastWeekStart := thisWeekStart.AddDate(0, 0, -7) + lastWeekEnd := thisWeekStart.Add(-time.Second) + return NewTimeRangeRequest(&lastWeekStart, &lastWeekEnd), nil + case LastMonth: + thisMonthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + lastMonthStart := thisMonthStart.AddDate(0, -1, 0) + lastMonthEnd := thisMonthStart.Add(-time.Second) + return NewTimeRangeRequest(&lastMonthStart, &lastMonthEnd), nil + case LastQuarter: + // Calculate current quarter + currentQuarter := ((int(now.Month()) - 1) / 3) + 1 + + // Calculate previous quarter + var prevQuarter int + var prevYear int + if currentQuarter == 1 
{ + // If current quarter is Q1, previous quarter is Q4 of previous year + prevQuarter = 4 + prevYear = now.Year() - 1 + } else { + // Otherwise, previous quarter is in the same year + prevQuarter = currentQuarter - 1 + prevYear = now.Year() + } + + // Calculate start and end of previous quarter + prevQuarterStartMonth := time.Month((prevQuarter-1)*3 + 1) + prevQuarterStart := time.Date(prevYear, prevQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + + // End of previous quarter is the start of current quarter minus 1 second + currentQuarterStartMonth := time.Month((currentQuarter-1)*3 + 1) + currentQuarterStart := time.Date(now.Year(), currentQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + if currentQuarter == 1 { + // If current quarter is Q1, we need to calculate Q4 end of previous year + currentQuarterStart = time.Date(now.Year(), time.January, 1, 0, 0, 0, 0, now.Location()) + } + prevQuarterEnd := currentQuarterStart.Add(-time.Second) + + return NewTimeRangeRequest(&prevQuarterStart, &prevQuarterEnd), nil + case Year: + start := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Last24Hours: + start := now.Add(-24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Last7Days: + start := now.AddDate(0, 0, -7) + return NewTimeRangeRequest(&start, &now), nil + case Last30Days: + start := now.AddDate(0, 0, -30) + return NewTimeRangeRequest(&start, &now), nil + case Last90Days: + start := now.AddDate(0, 0, -90) + return NewTimeRangeRequest(&start, &now), nil + default: + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("unsupported time window: %q", *timeRange.TimeWindow) + } + } + + // Use from/to dates if provided + if timeRange.From != nil && timeRange.To != nil { + if timeRange.From.After(*timeRange.To) { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from date cannot be after to date") + } + return NewTimeRangeRequest(timeRange.From, 
timeRange.To), nil + } else { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from and to dates are required if time window is not provided") + } +} + +// TimeBoundariesRequest represents the request for time boundary frames +type TimeBoundariesRequest struct { + TimeWindowBoundaries []string `json:"timeWindowBoundaries" schema:"timeWindowBoundaries" validate:"omitempty,min=1"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=week month quarter year"` // week, month, quarter, year + Iterations int `json:"iterations" schema:"iterations" validate:"omitempty,min=1"` +} + +// TimeWindowBoundaries represents the start and end times for a time window +type TimeWindowBoundaries struct { + StartTime time.Time + EndTime time.Time +} + +func (timeBoundaries *TimeBoundariesRequest) ParseAndValidateTimeBoundaries() ([]TimeWindowBoundaries, error) { + if timeBoundaries == nil { + return []TimeWindowBoundaries{}, fmt.Errorf("invalid time boundaries request") + } + // If timeWindow is provided, it takes preference over timeWindowBoundaries + if timeBoundaries.TimeWindow != nil { + switch *timeBoundaries.TimeWindow { + case Week: + return GetWeeklyTimeBoundaries(timeBoundaries.Iterations), nil + case Month: + return GetMonthlyTimeBoundaries(timeBoundaries.Iterations), nil + case Quarter: + return GetQuarterlyTimeBoundaries(timeBoundaries.Iterations), nil + case Year: + return GetYearlyTimeBoundaries(timeBoundaries.Iterations), nil + default: + return []TimeWindowBoundaries{}, fmt.Errorf("unsupported time window: %q", *timeBoundaries.TimeWindow) + } + } else if len(timeBoundaries.TimeWindowBoundaries) != 0 { + // Validate time window + return DecodeAndValidateTimeWindowBoundaries(timeBoundaries.TimeWindowBoundaries) + } else { + return []TimeWindowBoundaries{}, fmt.Errorf("time window boundaries are required if time window is not provided") + } +} + +func GetWeeklyTimeBoundaries(iterations int) []TimeWindowBoundaries { 
+ if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + weekday := int(now.Weekday()) + if weekday == 0 { + weekday = 7 + } + // Get start of this week (Monday) + weekStart := now.AddDate(0, 0, -(weekday - 1)) + // Set time to midnight + weekStart = time.Date(weekStart.Year(), weekStart.Month(), weekStart.Day(), 0, 0, 0, 0, weekStart.Location()) + + for i := 0; i < iterations; i++ { + start := weekStart.AddDate(0, 0, -7*i) + end := start.AddDate(0, 0, 7) + // For the current week, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetMonthlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this month (1st) + monthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := monthStart.AddDate(0, -i, 0) + end := start.AddDate(0, 1, 0) + // For the current month, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetQuarterlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterMonth := time.Month((quarter-1)*3 + 1) + // Get start of this quarter (1st of the month) + quarterStart := time.Date(now.Year(), quarterMonth, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := quarterStart.AddDate(0, -3*i, 0) + end := start.AddDate(0, 3, 0) + // For the current quarter, if now < 
end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetYearlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this year (1st of January) + yearStart := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := yearStart.AddDate(-i, 0, 0) + end := start.AddDate(1, 0, 0) + // For the current year, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func DecodeAndValidateTimeWindowBoundaries(timeWindowBoundaries []string) ([]TimeWindowBoundaries, error) { + boundaries := make([]TimeWindowBoundaries, 0, len(timeWindowBoundaries)) + for _, boundary := range timeWindowBoundaries { + parts := strings.Split(boundary, "|") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid time window boundary format: %q", boundary) + } + startTime, err := time.Parse(time.RFC3339, parts[0]) + if err != nil { + return nil, fmt.Errorf("invalid start time format: %q. expected format: %q", parts[0], time.RFC3339) + } + endTime, err := time.Parse(time.RFC3339, parts[1]) + if err != nil { + return nil, fmt.Errorf("invalid end time format: %q. 
expected format: %q", parts[1], time.RFC3339) + } + if startTime.After(endTime) { + return nil, fmt.Errorf("start time cannot be after end time: %q", boundary) + } + boundaries = append(boundaries, TimeWindowBoundaries{ + StartTime: startTime, + EndTime: endTime, + }) + } + return boundaries, nil +} diff --git a/ci-runner/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go b/ci-runner/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go index ea16a2f72..2bbfa1dc1 100644 --- a/ci-runner/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go +++ b/ci-runner/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go @@ -74,7 +74,9 @@ type PgQueryMonitoringConfig struct { } func GetPgQueryMonitoringConfig(serviceName string) (PgQueryMonitoringConfig, error) { - cfg := &PgQueryMonitoringConfig{} + cfg := &PgQueryMonitoringConfig{ + ServiceName: serviceName, + } err := env.Parse(cfg) return *cfg, err } diff --git a/ci-runner/vendor/modules.txt b/ci-runner/vendor/modules.txt index edea8f2a0..783276238 100644 --- a/ci-runner/vendor/modules.txt +++ b/ci-runner/vendor/modules.txt @@ -298,7 +298,7 @@ github.com/cncf/xds/go/xds/type/v3 # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew -# github.com/devtron-labs/common-lib v0.19.1 => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +# github.com/devtron-labs/common-lib v0.19.1 => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 ## explicit; go 1.24.0 github.com/devtron-labs/common-lib/blob-storage github.com/devtron-labs/common-lib/constants @@ -1199,4 +1199,4 @@ sigs.k8s.io/structured-merge-diff/v4/value ## explicit; go 1.22 sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 -# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +# github.com/devtron-labs/common-lib => 
github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 diff --git a/common-lib/go.mod b/common-lib/go.mod index 47d519654..be2ca3d1d 100644 --- a/common-lib/go.mod +++ b/common-lib/go.mod @@ -32,6 +32,7 @@ require ( github.com/prometheus/client_golang v1.22.0 github.com/robfig/cron/v3 v3.0.1 github.com/stretchr/testify v1.10.0 + github.com/vmware-tanzu/velero v1.14.1 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 @@ -97,7 +98,6 @@ require ( github.com/moby/sys/atomicwriter v0.1.0 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/morikuni/aec v1.0.0 // indirect - github.com/nxadm/tail v1.4.8 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect github.com/stretchr/objx v0.5.2 // indirect diff --git a/common-lib/go.sum b/common-lib/go.sum index d8ebf6d85..30bc000af 100644 --- a/common-lib/go.sum +++ b/common-lib/go.sum @@ -169,7 +169,6 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -375,6 +374,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.2/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/vmware-tanzu/velero v1.14.1 h1:HYj73scn7ZqtfTanjW/X4W0Hn3w/qcfoRbrHCWM52iI= +github.com/vmware-tanzu/velero v1.14.1/go.mod h1:/OzHzTvbevkkX+bK/BS4AgYMv6nKuOgsybuuvLWkSS0= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= @@ -494,7 +495,6 @@ golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/common-lib/pubsub-lib/JetStreamUtil.go b/common-lib/pubsub-lib/JetStreamUtil.go index b7434bb27..cfd7f98e4 100644 --- a/common-lib/pubsub-lib/JetStreamUtil.go +++ b/common-lib/pubsub-lib/JetStreamUtil.go @@ -124,6 +124,21 @@ const ( INFRA_HELM_RELEASE_ACTION_TOPIC string = "INFRA_HELM_RELEASE_ACTION_TOPIC" INFRA_HELM_RELEASE_ACTION_GROUP string = "INFRA_HELM_RELEASE_ACTION_GROUP" INFRA_HELM_RELEASE_ACTION_DURABLE string = "INFRA_HELM_RELEASE_ACTION_DURABLE" + COST_MODULE_INSTALLATION_TOPIC string = 
"COST_MODULE_INSTALLATION_TOPIC" + COST_MODULE_INSTALLATION_GROUP string = "COST_MODULE_INSTALLATION_GROUP" + COST_MODULE_INSTALLATION_DURABLE string = "COST_MODULE_INSTALLATION_DURABLE" + COST_MODULE_GPU_INSTALLATION_TOPIC string = "COST_MODULE_GPU_INSTALLATION_TOPIC" + COST_MODULE_GPU_INSTALLATION_GROUP string = "COST_MODULE_GPU_INSTALLATION_GROUP" + COST_MODULE_GPU_INSTALLATION_DURABLE string = "COST_MODULE_GPU_INSTALLATION_DURABLE" + STORAGE_MODULE_TOPIC string = "STORAGE_MODULE_TOPIC" + STORAGE_MODULE_GROUP string = "STORAGE_MODULE_GROUP" + STORAGE_MODULE_DURABLE string = "STORAGE_MODULE_DURABLE" + STORAGE_VELERO_INSTALL_TOPIC string = "STORAGE_VELERO_INSTALL_TOPIC" + STORAGE_VELERO_INSTALL_GROUP string = "STORAGE_VELERO_INSTALL_GROUP" + STORAGE_VELERO_INSTALL_DURABLE string = "STORAGE_VELERO_INSTALL_DURABLE" + STORAGE_VELERO_POST_INSTALLATION_TOPIC string = "STORAGE_VELERO_POST_INSTALLATION_TOPIC" + STORAGE_VELERO_POST_INSTALLATION_GROUP string = "STORAGE_VELERO_POST_INSTALLATION_GROUP" + STORAGE_VELERO_POST_INSTALLATION_DURABLE string = "STORAGE_VELERO_POST_INSTALLATION_DURABLE" ) type NatsTopic struct { @@ -179,6 +194,11 @@ var natsTopicMapping = map[string]NatsTopic{ INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE}, INFRA_HELM_RELEASE_ACTION_TOPIC: {topicName: INFRA_HELM_RELEASE_ACTION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRA_HELM_RELEASE_ACTION_GROUP, consumerName: INFRA_HELM_RELEASE_ACTION_DURABLE}, + COST_MODULE_INSTALLATION_TOPIC: {topicName: COST_MODULE_INSTALLATION_TOPIC, 
streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_INSTALLATION_GROUP, consumerName: COST_MODULE_INSTALLATION_DURABLE}, + COST_MODULE_GPU_INSTALLATION_TOPIC: {topicName: COST_MODULE_GPU_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_GPU_INSTALLATION_GROUP, consumerName: COST_MODULE_GPU_INSTALLATION_DURABLE}, + STORAGE_MODULE_TOPIC: {topicName: STORAGE_MODULE_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_MODULE_GROUP, consumerName: STORAGE_MODULE_DURABLE}, + STORAGE_VELERO_INSTALL_TOPIC: {topicName: STORAGE_VELERO_INSTALL_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_VELERO_INSTALL_GROUP, consumerName: STORAGE_VELERO_INSTALL_DURABLE}, + STORAGE_VELERO_POST_INSTALLATION_TOPIC: {topicName: STORAGE_VELERO_POST_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_VELERO_POST_INSTALLATION_GROUP, consumerName: STORAGE_VELERO_POST_INSTALLATION_DURABLE}, } var NatsStreamWiseConfigMapping = map[string]NatsStreamConfig{ @@ -221,6 +241,11 @@ var NatsConsumerWiseConfigMapping = map[string]NatsConsumerConfig{ INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE: {}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE: {}, INFRA_HELM_RELEASE_ACTION_DURABLE: {}, + COST_MODULE_INSTALLATION_DURABLE: {}, + COST_MODULE_GPU_INSTALLATION_DURABLE: {}, + STORAGE_MODULE_DURABLE: {}, + STORAGE_VELERO_INSTALL_DURABLE: {}, + STORAGE_VELERO_POST_INSTALLATION_DURABLE: {}, } // getConsumerConfigMap will fetch the consumer wise config from the json string diff --git a/common-lib/utils/TimeUtils.go b/common-lib/utils/TimeUtils.go new file mode 100644 index 000000000..372765014 --- /dev/null +++ b/common-lib/utils/TimeUtils.go @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "fmt" + "strings" + "time" +) + +type TimeRangeRequest struct { + From *time.Time `json:"from" schema:"from"` + To *time.Time `json:"to" schema:"to"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=today yesterday week month quarter lastWeek lastMonth lastQuarter last24Hours last7Days last30Days last90Days"` +} + +func NewTimeRangeRequest(from *time.Time, to *time.Time) *TimeRangeRequest { + return &TimeRangeRequest{ + From: from, + To: to, + } +} + +func NewTimeWindowRequest(timeWindow TimeWindows) *TimeRangeRequest { + return &TimeRangeRequest{ + TimeWindow: &timeWindow, + } +} + +// TimeWindows is a string type that represents different time windows +type TimeWindows string + +func (timeRange TimeWindows) String() string { + return string(timeRange) +} + +// Define constants for different time windows +const ( + Today TimeWindows = "today" + Yesterday TimeWindows = "yesterday" + Week TimeWindows = "week" + Month TimeWindows = "month" + Quarter TimeWindows = "quarter" + LastWeek TimeWindows = "lastWeek" + LastMonth TimeWindows = "lastMonth" + Year TimeWindows = "year" + LastQuarter TimeWindows = "lastQuarter" + Last24Hours TimeWindows = "last24Hours" + Last7Days TimeWindows = "last7Days" + Last30Days TimeWindows = "last30Days" + Last90Days TimeWindows = "last90Days" +) + +func (timeRange *TimeRangeRequest) ParseAndValidateTimeRange() (*TimeRangeRequest, error) { + if timeRange == nil { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("invalid time 
range request. either from/to or timeWindow must be provided") + } + now := time.Now() + // If timeWindow is provided, it takes preference over from/to + if timeRange.TimeWindow != nil { + switch *timeRange.TimeWindow { + case Today: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Yesterday: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()).Add(-24 * time.Hour) + end := start.Add(24 * time.Hour) + return NewTimeRangeRequest(&start, &end), nil + case Week: + // Current week (Monday to Sunday) + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + start := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Month: + start := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Quarter: + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterStart := time.Month((quarter-1)*3 + 1) + start := time.Date(now.Year(), quarterStart, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case LastWeek: + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + thisWeekStart := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + lastWeekStart := thisWeekStart.AddDate(0, 0, -7) + lastWeekEnd := thisWeekStart.Add(-time.Second) + return NewTimeRangeRequest(&lastWeekStart, &lastWeekEnd), nil + case LastMonth: + thisMonthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + lastMonthStart := thisMonthStart.AddDate(0, -1, 0) + lastMonthEnd := thisMonthStart.Add(-time.Second) + return NewTimeRangeRequest(&lastMonthStart, &lastMonthEnd), nil + case LastQuarter: + // Calculate current quarter + currentQuarter := ((int(now.Month()) - 1) / 3) + 1 + + // Calculate previous quarter + var prevQuarter int + var prevYear int + if 
currentQuarter == 1 { + // If current quarter is Q1, previous quarter is Q4 of previous year + prevQuarter = 4 + prevYear = now.Year() - 1 + } else { + // Otherwise, previous quarter is in the same year + prevQuarter = currentQuarter - 1 + prevYear = now.Year() + } + + // Calculate start and end of previous quarter + prevQuarterStartMonth := time.Month((prevQuarter-1)*3 + 1) + prevQuarterStart := time.Date(prevYear, prevQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + + // End of previous quarter is the start of current quarter minus 1 second + currentQuarterStartMonth := time.Month((currentQuarter-1)*3 + 1) + currentQuarterStart := time.Date(now.Year(), currentQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + if currentQuarter == 1 { + // If current quarter is Q1, we need to calculate Q4 end of previous year + currentQuarterStart = time.Date(now.Year(), time.January, 1, 0, 0, 0, 0, now.Location()) + } + prevQuarterEnd := currentQuarterStart.Add(-time.Second) + + return NewTimeRangeRequest(&prevQuarterStart, &prevQuarterEnd), nil + case Year: + start := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Last24Hours: + start := now.Add(-24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Last7Days: + start := now.AddDate(0, 0, -7) + return NewTimeRangeRequest(&start, &now), nil + case Last30Days: + start := now.AddDate(0, 0, -30) + return NewTimeRangeRequest(&start, &now), nil + case Last90Days: + start := now.AddDate(0, 0, -90) + return NewTimeRangeRequest(&start, &now), nil + default: + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("unsupported time window: %q", *timeRange.TimeWindow) + } + } + + // Use from/to dates if provided + if timeRange.From != nil && timeRange.To != nil { + if timeRange.From.After(*timeRange.To) { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from date cannot be after to date") + } + return 
NewTimeRangeRequest(timeRange.From, timeRange.To), nil + } else { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from and to dates are required if time window is not provided") + } +} + +// TimeBoundariesRequest represents the request for time boundary frames +type TimeBoundariesRequest struct { + TimeWindowBoundaries []string `json:"timeWindowBoundaries" schema:"timeWindowBoundaries" validate:"omitempty,min=1"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=week month quarter year"` // week, month, quarter, year + Iterations int `json:"iterations" schema:"iterations" validate:"omitempty,min=1"` +} + +// TimeWindowBoundaries represents the start and end times for a time window +type TimeWindowBoundaries struct { + StartTime time.Time + EndTime time.Time +} + +func (timeBoundaries *TimeBoundariesRequest) ParseAndValidateTimeBoundaries() ([]TimeWindowBoundaries, error) { + if timeBoundaries == nil { + return []TimeWindowBoundaries{}, fmt.Errorf("invalid time boundaries request") + } + // If timeWindow is provided, it takes preference over timeWindowBoundaries + if timeBoundaries.TimeWindow != nil { + switch *timeBoundaries.TimeWindow { + case Week: + return GetWeeklyTimeBoundaries(timeBoundaries.Iterations), nil + case Month: + return GetMonthlyTimeBoundaries(timeBoundaries.Iterations), nil + case Quarter: + return GetQuarterlyTimeBoundaries(timeBoundaries.Iterations), nil + case Year: + return GetYearlyTimeBoundaries(timeBoundaries.Iterations), nil + default: + return []TimeWindowBoundaries{}, fmt.Errorf("unsupported time window: %q", *timeBoundaries.TimeWindow) + } + } else if len(timeBoundaries.TimeWindowBoundaries) != 0 { + // Validate time window + return DecodeAndValidateTimeWindowBoundaries(timeBoundaries.TimeWindowBoundaries) + } else { + return []TimeWindowBoundaries{}, fmt.Errorf("time window boundaries are required if time window is not provided") + } +} + +func 
GetWeeklyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + weekday := int(now.Weekday()) + if weekday == 0 { + weekday = 7 + } + // Get start of this week (Monday) + weekStart := now.AddDate(0, 0, -(weekday - 1)) + // Set time to midnight + weekStart = time.Date(weekStart.Year(), weekStart.Month(), weekStart.Day(), 0, 0, 0, 0, weekStart.Location()) + + for i := 0; i < iterations; i++ { + start := weekStart.AddDate(0, 0, -7*i) + end := start.AddDate(0, 0, 7) + // For the current week, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetMonthlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this month (1st) + monthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := monthStart.AddDate(0, -i, 0) + end := start.AddDate(0, 1, 0) + // For the current month, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetQuarterlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterMonth := time.Month((quarter-1)*3 + 1) + // Get start of this quarter (1st of the month) + quarterStart := time.Date(now.Year(), quarterMonth, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := quarterStart.AddDate(0, -3*i, 0) + end := 
start.AddDate(0, 3, 0) + // For the current quarter, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetYearlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this year (1st of January) + yearStart := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := yearStart.AddDate(-i, 0, 0) + end := start.AddDate(1, 0, 0) + // For the current year, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func DecodeAndValidateTimeWindowBoundaries(timeWindowBoundaries []string) ([]TimeWindowBoundaries, error) { + boundaries := make([]TimeWindowBoundaries, 0, len(timeWindowBoundaries)) + for _, boundary := range timeWindowBoundaries { + parts := strings.Split(boundary, "|") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid time window boundary format: %q", boundary) + } + startTime, err := time.Parse(time.RFC3339, parts[0]) + if err != nil { + return nil, fmt.Errorf("invalid start time format: %q. expected format: %q", parts[0], time.RFC3339) + } + endTime, err := time.Parse(time.RFC3339, parts[1]) + if err != nil { + return nil, fmt.Errorf("invalid end time format: %q. 
expected format: %q", parts[1], time.RFC3339) + } + if startTime.After(endTime) { + return nil, fmt.Errorf("start time cannot be after end time: %q", boundary) + } + boundaries = append(boundaries, TimeWindowBoundaries{ + StartTime: startTime, + EndTime: endTime, + }) + } + return boundaries, nil +} diff --git a/common-lib/utils/TimeUtils_test.go b/common-lib/utils/TimeUtils_test.go new file mode 100644 index 000000000..0c2c7586b --- /dev/null +++ b/common-lib/utils/TimeUtils_test.go @@ -0,0 +1,729 @@ +package utils + +import ( + "testing" + "time" +) + +func TestParseAndValidateTimeRange(t *testing.T) { + // Test nil input + t.Run("nil input", func(t *testing.T) { + var timeRange *TimeRangeRequest + result, err := timeRange.ParseAndValidateTimeRange() + if err == nil { + t.Error("Expected error for nil input") + } + if result == nil { + t.Error("Expected non-nil result") + } + }) + + // Test TimeWindow cases + t.Run("Today timeWindow", func(t *testing.T) { + timeWindow := Today + timeRange := &TimeRangeRequest{TimeWindow: &timeWindow} + result, err := timeRange.ParseAndValidateTimeRange() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if result.From == nil || result.To == nil { + t.Error("Expected non-nil From and To times") + } + }) + + t.Run("Yesterday timeWindow", func(t *testing.T) { + timeWindow := Yesterday + timeRange := &TimeRangeRequest{TimeWindow: &timeWindow} + result, err := timeRange.ParseAndValidateTimeRange() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if result.From == nil || result.To == nil { + t.Error("Expected non-nil From and To times") + } + }) + + t.Run("Week timeWindow", func(t *testing.T) { + timeWindow := Week + timeRange := &TimeRangeRequest{TimeWindow: &timeWindow} + result, err := timeRange.ParseAndValidateTimeRange() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if result.From == nil || result.To == nil { + t.Error("Expected non-nil From and To times") + } + }) + + t.Run("Month 
timeWindow", func(t *testing.T) { + timeWindow := Month + timeRange := &TimeRangeRequest{TimeWindow: &timeWindow} + result, err := timeRange.ParseAndValidateTimeRange() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if result.From == nil || result.To == nil { + t.Error("Expected non-nil From and To times") + } + }) + + t.Run("Quarter timeWindow", func(t *testing.T) { + timeWindow := Quarter + timeRange := &TimeRangeRequest{TimeWindow: &timeWindow} + result, err := timeRange.ParseAndValidateTimeRange() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if result.From == nil || result.To == nil { + t.Error("Expected non-nil From and To times") + } + }) + + t.Run("LastWeek timeWindow", func(t *testing.T) { + timeWindow := LastWeek + timeRange := &TimeRangeRequest{TimeWindow: &timeWindow} + result, err := timeRange.ParseAndValidateTimeRange() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if result.From == nil || result.To == nil { + t.Error("Expected non-nil From and To times") + } + }) + + t.Run("LastMonth timeWindow", func(t *testing.T) { + timeWindow := LastMonth + timeRange := &TimeRangeRequest{TimeWindow: &timeWindow} + result, err := timeRange.ParseAndValidateTimeRange() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if result.From == nil || result.To == nil { + t.Error("Expected non-nil From and To times") + } + }) + + t.Run("invalid timeWindow", func(t *testing.T) { + invalidWindow := TimeWindows("invalid") + timeRange := &TimeRangeRequest{TimeWindow: &invalidWindow} + _, err := timeRange.ParseAndValidateTimeRange() + if err == nil { + t.Error("Expected error for invalid timeWindow") + } + }) + + // Test From/To date cases + t.Run("valid From and To dates", func(t *testing.T) { + from := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) + to := time.Date(2023, 1, 31, 23, 59, 59, 0, time.UTC) + timeRange := &TimeRangeRequest{From: &from, To: &to} + result, err := timeRange.ParseAndValidateTimeRange() + 
if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if !result.From.Equal(from) || !result.To.Equal(to) { + t.Error("From and To dates should match input") + } + }) + + t.Run("From date after To date", func(t *testing.T) { + from := time.Date(2023, 1, 31, 0, 0, 0, 0, time.UTC) + to := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) + timeRange := &TimeRangeRequest{From: &from, To: &to} + _, err := timeRange.ParseAndValidateTimeRange() + if err == nil { + t.Error("Expected error when From date is after To date") + } + }) + + t.Run("missing From date", func(t *testing.T) { + to := time.Date(2023, 1, 31, 0, 0, 0, 0, time.UTC) + timeRange := &TimeRangeRequest{To: &to} + _, err := timeRange.ParseAndValidateTimeRange() + if err == nil { + t.Error("Expected error when From date is missing") + } + }) + + t.Run("missing To date", func(t *testing.T) { + from := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) + timeRange := &TimeRangeRequest{From: &from} + _, err := timeRange.ParseAndValidateTimeRange() + if err == nil { + t.Error("Expected error when To date is missing") + } + }) + + t.Run("no timeWindow and no From/To dates", func(t *testing.T) { + timeRange := &TimeRangeRequest{} + _, err := timeRange.ParseAndValidateTimeRange() + if err == nil { + t.Error("Expected error when neither timeWindow nor From/To dates are provided") + } + }) +} + +func TestGetWeeklyTimeBoundaries(t *testing.T) { + t.Run("zero iterations", func(t *testing.T) { + result := GetWeeklyTimeBoundaries(0) + if len(result) != 0 { + t.Errorf("Expected empty slice for 0 iterations, got %d boundaries", len(result)) + } + }) + + t.Run("single iteration", func(t *testing.T) { + result := GetWeeklyTimeBoundaries(1) + if len(result) != 1 { + t.Errorf("Expected 1 boundary for 1 iteration, got %d", len(result)) + } + + boundary := result[0] + if boundary.StartTime.After(boundary.EndTime) { + t.Error("StartTime should not be after EndTime") + } + + // Check that the week starts on Monday + if 
boundary.StartTime.Weekday() != time.Monday { + t.Errorf("Expected week to start on Monday, got %v", boundary.StartTime.Weekday()) + } + }) + + t.Run("multiple iterations", func(t *testing.T) { + iterations := 3 + result := GetWeeklyTimeBoundaries(iterations) + + if len(result) != iterations { + t.Errorf("Expected %d boundaries, got %d", iterations, len(result)) + } + + // Check that boundaries are consecutive weeks going backwards + for i := 0; i < len(result)-1; i++ { + currentWeek := result[i] + nextWeek := result[i+1] + + // Current week's start should equal next week's end + if !currentWeek.StartTime.Equal(nextWeek.EndTime) { + t.Errorf("Week %d start (%v) should equal week %d end (%v)", + i, currentWeek.StartTime, i+1, nextWeek.EndTime) + } + + // Each week should start on Monday + if currentWeek.StartTime.Weekday() != time.Monday { + t.Errorf("Week %d should start on Monday, got %v", i, currentWeek.StartTime.Weekday()) + } + } + }) + + t.Run("negative iterations", func(t *testing.T) { + result := GetWeeklyTimeBoundaries(-1) + if len(result) != 0 { + t.Errorf("Expected empty slice for negative iterations, got %d boundaries", len(result)) + } + }) + + t.Run("boundary validation", func(t *testing.T) { + result := GetWeeklyTimeBoundaries(2) + + for i, boundary := range result { + // StartTime should not be after EndTime + if boundary.StartTime.After(boundary.EndTime) { + t.Errorf("Boundary %d: StartTime (%v) should not be after EndTime (%v)", + i, boundary.StartTime, boundary.EndTime) + } + + // StartTime should be on Monday + if boundary.StartTime.Weekday() != time.Monday { + t.Errorf("Boundary %d: StartTime should be on Monday, got %v", + i, boundary.StartTime.Weekday()) + } + + // Time difference should be less than 7 days (one week) + duration := boundary.EndTime.Sub(boundary.StartTime) + if duration >= 7*24*time.Hour && boundary.EndTime.Sub(time.Now()) > 10*time.Minute { + t.Errorf("Boundary %d: Duration (%v) should be less than 7 days", i, duration) + } + 
} + }) + + t.Run("chronological order", func(t *testing.T) { + result := GetWeeklyTimeBoundaries(4) + + // Boundaries should be in reverse chronological order (most recent first) + for i := 0; i < len(result)-1; i++ { + current := result[i] + next := result[i+1] + + if current.StartTime.Before(next.StartTime) { + t.Errorf("Boundaries should be in reverse chronological order: boundary %d start (%v) should be after boundary %d start (%v)", + i, current.StartTime, i+1, next.StartTime) + } + } + }) +} + +func TestGetMonthlyTimeBoundaries(t *testing.T) { + t.Run("zero iterations", func(t *testing.T) { + result := GetMonthlyTimeBoundaries(0) + if len(result) != 0 { + t.Errorf("Expected empty slice for 0 iterations, got %d boundaries", len(result)) + } + }) + + t.Run("single iteration", func(t *testing.T) { + result := GetMonthlyTimeBoundaries(1) + if len(result) != 1 { + t.Errorf("Expected 1 boundary for 1 iteration, got %d", len(result)) + } + + boundary := result[0] + if boundary.StartTime.After(boundary.EndTime) { + t.Error("StartTime should not be after EndTime") + } + + // Check that the month starts on the 1st + if boundary.StartTime.Day() != 1 { + t.Errorf("Expected month to start on the 1st, got day %d", boundary.StartTime.Day()) + } + }) + + t.Run("multiple iterations", func(t *testing.T) { + iterations := 3 + result := GetMonthlyTimeBoundaries(iterations) + + if len(result) != iterations { + t.Errorf("Expected %d boundaries, got %d", iterations, len(result)) + } + + // Check that boundaries are consecutive months going backwards + for i := 0; i < len(result)-1; i++ { + currentMonth := result[i] + nextMonth := result[i+1] + + // Current month's start should equal next month's end + if !currentMonth.StartTime.Equal(nextMonth.EndTime) { + t.Errorf("Month %d start (%v) should equal month %d end (%v)", + i, currentMonth.StartTime, i+1, nextMonth.EndTime) + } + + // Each month should start on the 1st + if currentMonth.StartTime.Day() != 1 { + t.Errorf("Month %d 
should start on the 1st, got day %d", i, currentMonth.StartTime.Day()) + } + } + }) + + t.Run("negative iterations", func(t *testing.T) { + result := GetMonthlyTimeBoundaries(-1) + if len(result) != 0 { + t.Errorf("Expected empty slice for negative iterations, got %d boundaries", len(result)) + } + }) + + t.Run("boundary validation", func(t *testing.T) { + result := GetMonthlyTimeBoundaries(2) + + for i, boundary := range result { + // StartTime should not be after EndTime + if boundary.StartTime.After(boundary.EndTime) { + t.Errorf("Boundary %d: StartTime (%v) should not be after EndTime (%v)", + i, boundary.StartTime, boundary.EndTime) + } + + // StartTime should be on the 1st + if boundary.StartTime.Day() != 1 { + t.Errorf("Boundary %d: StartTime should be on the 1st, got day %d", + i, boundary.StartTime.Day()) + } + + // EndTime should be the 1st of the next month + expectedEnd := boundary.StartTime.AddDate(0, 1, 0) + now := time.Now() + // For current month, end might be 'now' if we're still in the month + if i == 0 && now.Before(expectedEnd) { + expectedEnd = now + } + if !boundary.EndTime.Equal(expectedEnd) && boundary.EndTime.Sub(time.Now()) > 10*time.Minute { + t.Errorf("Boundary %d: EndTime (%v) should be %v", + i, boundary.EndTime, expectedEnd) + } + } + }) + + t.Run("chronological order", func(t *testing.T) { + result := GetMonthlyTimeBoundaries(4) + + // Boundaries should be in reverse chronological order (most recent first) + for i := 0; i < len(result)-1; i++ { + current := result[i] + next := result[i+1] + + if current.StartTime.Before(next.StartTime) { + t.Errorf("Boundaries should be in reverse chronological order: boundary %d start (%v) should be after boundary %d start (%v)", + i, current.StartTime, i+1, next.StartTime) + } + } + }) + + t.Run("current month adjustment", func(t *testing.T) { + result := GetMonthlyTimeBoundaries(1) + boundary := result[0] + now := time.Now() + + // For the current month, end time should not exceed now + if 
boundary.EndTime.After(now) { + t.Errorf("Current month end time (%v) should not be after now (%v)", + boundary.EndTime, now) + } + + // Start time should be the beginning of current month + expectedStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + if !boundary.StartTime.Equal(expectedStart) { + t.Errorf("Current month start time (%v) should be %v", + boundary.StartTime, expectedStart) + } + }) +} + +func TestGetQuarterlyTimeBoundaries(t *testing.T) { + t.Run("zero iterations", func(t *testing.T) { + result := GetQuarterlyTimeBoundaries(0) + if len(result) != 0 { + t.Errorf("Expected empty slice for 0 iterations, got %d boundaries", len(result)) + } + }) + + t.Run("single iteration", func(t *testing.T) { + result := GetQuarterlyTimeBoundaries(1) + if len(result) != 1 { + t.Errorf("Expected 1 boundary for 1 iteration, got %d", len(result)) + } + + boundary := result[0] + if boundary.StartTime.After(boundary.EndTime) { + t.Error("StartTime should not be after EndTime") + } + + // Check that the quarter starts on the 1st + if boundary.StartTime.Day() != 1 { + t.Errorf("Expected quarter to start on the 1st, got day %d", boundary.StartTime.Day()) + } + + // Check that it starts at the beginning of a quarter month (Jan, Apr, Jul, Oct) + expectedQuarterMonths := []time.Month{time.January, time.April, time.July, time.October} + found := false + for _, month := range expectedQuarterMonths { + if boundary.StartTime.Month() == month { + found = true + break + } + } + if !found { + t.Errorf("Expected quarter to start in Jan/Apr/Jul/Oct, got %v", boundary.StartTime.Month()) + } + }) + + t.Run("multiple iterations", func(t *testing.T) { + iterations := 3 + result := GetQuarterlyTimeBoundaries(iterations) + + if len(result) != iterations { + t.Errorf("Expected %d boundaries, got %d", iterations, len(result)) + } + + // Check that boundaries are consecutive quarters going backwards + for i := 0; i < len(result)-1; i++ { + currentQuarter := result[i] + 
nextQuarter := result[i+1] + + // Current quarter's start should equal next quarter's end + if !currentQuarter.StartTime.Equal(nextQuarter.EndTime) { + t.Errorf("Quarter %d start (%v) should equal quarter %d end (%v)", + i, currentQuarter.StartTime, i+1, nextQuarter.EndTime) + } + + // Each quarter should start on the 1st + if currentQuarter.StartTime.Day() != 1 { + t.Errorf("Quarter %d should start on the 1st, got day %d", i, currentQuarter.StartTime.Day()) + } + } + }) + + t.Run("negative iterations", func(t *testing.T) { + result := GetQuarterlyTimeBoundaries(-1) + if len(result) != 0 { + t.Errorf("Expected empty slice for negative iterations, got %d boundaries", len(result)) + } + }) + + t.Run("boundary validation", func(t *testing.T) { + result := GetQuarterlyTimeBoundaries(2) + + for i, boundary := range result { + // StartTime should not be after EndTime + if boundary.StartTime.After(boundary.EndTime) { + t.Errorf("Boundary %d: StartTime (%v) should not be after EndTime (%v)", + i, boundary.StartTime, boundary.EndTime) + } + + // StartTime should be on the 1st + if boundary.StartTime.Day() != 1 { + t.Errorf("Boundary %d: StartTime should be on the 1st, got day %d", + i, boundary.StartTime.Day()) + } + + // StartTime should be at beginning of quarter (Jan/Apr/Jul/Oct) + expectedQuarterMonths := []time.Month{time.January, time.April, time.July, time.October} + found := false + for _, month := range expectedQuarterMonths { + if boundary.StartTime.Month() == month { + found = true + break + } + } + if !found { + t.Errorf("Boundary %d: StartTime should be in Jan/Apr/Jul/Oct, got %v", + i, boundary.StartTime.Month()) + } + + // Duration should be approximately 3 months + expectedEnd := boundary.StartTime.AddDate(0, 3, 0) + now := time.Now() + // For current quarter, end might be 'now' if we're still in the quarter + if i == 0 && now.Before(expectedEnd) { + expectedEnd = now + } + if !boundary.EndTime.Equal(expectedEnd) && boundary.EndTime.Sub(time.Now()) > 
10*time.Minute { + t.Errorf("Boundary %d: EndTime (%v) should be %v", + i, boundary.EndTime, expectedEnd) + } + } + }) + + t.Run("chronological order", func(t *testing.T) { + result := GetQuarterlyTimeBoundaries(4) + + // Boundaries should be in reverse chronological order (most recent first) + for i := 0; i < len(result)-1; i++ { + current := result[i] + next := result[i+1] + + if current.StartTime.Before(next.StartTime) { + t.Errorf("Boundaries should be in reverse chronological order: boundary %d start (%v) should be after boundary %d start (%v)", + i, current.StartTime, i+1, next.StartTime) + } + } + }) + + t.Run("current quarter adjustment", func(t *testing.T) { + result := GetQuarterlyTimeBoundaries(1) + boundary := result[0] + now := time.Now() + + // For the current quarter, end time should not exceed now + if boundary.EndTime.After(now) { + t.Errorf("Current quarter end time (%v) should not be after now (%v)", + boundary.EndTime, now) + } + + // Start time should be the beginning of current quarter + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterMonth := time.Month((quarter-1)*3 + 1) + expectedStart := time.Date(now.Year(), quarterMonth, 1, 0, 0, 0, 0, now.Location()) + if !boundary.StartTime.Equal(expectedStart) { + t.Errorf("Current quarter start time (%v) should be %v", + boundary.StartTime, expectedStart) + } + }) + + t.Run("quarter calculation accuracy", func(t *testing.T) { + result := GetQuarterlyTimeBoundaries(4) + + for i, boundary := range result { + // Verify quarter months are correct + month := boundary.StartTime.Month() + switch month { + case time.January: + // Q1: Jan-Mar + expectedEnd := boundary.StartTime.AddDate(0, 3, 0) + if i == 0 && time.Now().Before(expectedEnd) { + expectedEnd = time.Now() + } + if expectedEnd.Month() != time.April && expectedEnd.Sub(time.Now()) > 10*time.Minute { + t.Errorf("Q1 boundary %d should end in April, got %v", i, expectedEnd.Month()) + } + case time.April: + // Q2: Apr-Jun + expectedEnd := 
boundary.StartTime.AddDate(0, 3, 0) + if i == 0 && time.Now().Before(expectedEnd) { + expectedEnd = time.Now() + } + if expectedEnd.Month() != time.July && expectedEnd.Sub(time.Now()) > 10*time.Minute { + t.Errorf("Q2 boundary %d should end in July, got %v", i, expectedEnd.Month()) + } + case time.July: + // Q3: Jul-Sep + expectedEnd := boundary.StartTime.AddDate(0, 3, 0) + if i == 0 && time.Now().Before(expectedEnd) { + expectedEnd = time.Now() + } + if expectedEnd.Month() != time.October && expectedEnd.Sub(time.Now()) > 10*time.Minute { + t.Errorf("Q3 boundary %d should end in October, got %v", i, expectedEnd.Month()) + } + case time.October: + // Q4: Oct-Dec + expectedEnd := boundary.StartTime.AddDate(0, 3, 0) + if i == 0 && time.Now().Before(expectedEnd) { + expectedEnd = time.Now() + } + if expectedEnd.Month() != time.January && expectedEnd.Sub(time.Now()) > 10*time.Minute { + t.Errorf("Q4 boundary %d should end in January of next year, got %v", i, expectedEnd.Month()) + } + default: + t.Errorf("Boundary %d starts in invalid quarter month: %v", i, month) + } + } + }) +} + +func TestGetYearlyTimeBoundaries(t *testing.T) { + t.Run("zero iterations", func(t *testing.T) { + result := GetYearlyTimeBoundaries(0) + if len(result) != 0 { + t.Errorf("Expected empty slice for 0 iterations, got %d boundaries", len(result)) + } + }) + + t.Run("single iteration", func(t *testing.T) { + result := GetYearlyTimeBoundaries(1) + if len(result) != 1 { + t.Errorf("Expected 1 boundary for 1 iteration, got %d", len(result)) + } + + boundary := result[0] + if boundary.StartTime.After(boundary.EndTime) { + t.Error("StartTime should not be after EndTime") + } + + // Check that the year starts on January 1st + if boundary.StartTime.Month() != time.January || boundary.StartTime.Day() != 1 { + t.Errorf("Expected year to start on January 1st, got %v %d", boundary.StartTime.Month(), boundary.StartTime.Day()) + } + }) + + t.Run("multiple iterations", func(t *testing.T) { + iterations := 3 + 
result := GetYearlyTimeBoundaries(iterations) + + if len(result) != iterations { + t.Errorf("Expected %d boundaries, got %d", iterations, len(result)) + } + + // Check that boundaries are consecutive years going backwards + for i := 0; i < len(result)-1; i++ { + currentYear := result[i] + nextYear := result[i+1] + + // Current year's start should equal next year's end + if !currentYear.StartTime.Equal(nextYear.EndTime) { + t.Errorf("Year %d start (%v) should equal year %d end (%v)", + i, currentYear.StartTime, i+1, nextYear.EndTime) + } + + // Each year should start on January 1st + if currentYear.StartTime.Month() != time.January || currentYear.StartTime.Day() != 1 { + t.Errorf("Year %d should start on January 1st, got %v %d", i, currentYear.StartTime.Month(), currentYear.StartTime.Day()) + } + } + }) + + t.Run("negative iterations", func(t *testing.T) { + result := GetYearlyTimeBoundaries(-1) + if len(result) != 0 { + t.Errorf("Expected empty slice for negative iterations, got %d boundaries", len(result)) + } + }) + + t.Run("boundary validation", func(t *testing.T) { + result := GetYearlyTimeBoundaries(2) + + for i, boundary := range result { + // StartTime should not be after EndTime + if boundary.StartTime.After(boundary.EndTime) { + t.Errorf("Boundary %d: StartTime (%v) should not be after EndTime (%v)", + i, boundary.StartTime, boundary.EndTime) + } + + // StartTime should be on January 1st + if boundary.StartTime.Month() != time.January || boundary.StartTime.Day() != 1 { + t.Errorf("Boundary %d: StartTime should be on January 1st, got %v %d", + i, boundary.StartTime.Month(), boundary.StartTime.Day()) + } + + // EndTime should be January 1st of the next year + expectedEnd := boundary.StartTime.AddDate(1, 0, 0) + now := time.Now() + // For current year, end might be 'now' if we're still in the year + if i == 0 && now.Before(expectedEnd) { + expectedEnd = now + } + if !boundary.EndTime.Equal(expectedEnd) && boundary.EndTime.Sub(time.Now()) > 10*time.Minute { + 
t.Errorf("Boundary %d: EndTime (%v) should be %v", + i, boundary.EndTime, expectedEnd) + } + } + }) + + t.Run("chronological order", func(t *testing.T) { + result := GetYearlyTimeBoundaries(4) + + // Boundaries should be in reverse chronological order (most recent first) + for i := 0; i < len(result)-1; i++ { + current := result[i] + next := result[i+1] + + if current.StartTime.Before(next.StartTime) { + t.Errorf("Boundaries should be in reverse chronological order: boundary %d start (%v) should be after boundary %d start (%v)", + i, current.StartTime, i+1, next.StartTime) + } + } + }) + + t.Run("current year adjustment", func(t *testing.T) { + result := GetYearlyTimeBoundaries(1) + boundary := result[0] + now := time.Now() + + // For the current year, end time should not exceed now + if boundary.EndTime.After(now) { + t.Errorf("Current year end time (%v) should not be after now (%v)", + boundary.EndTime, now) + } + + // Start time should be the beginning of current year + expectedStart := time.Date(now.Year(), time.January, 1, 0, 0, 0, 0, now.Location()) + if !boundary.StartTime.Equal(expectedStart) { + t.Errorf("Current year start time (%v) should be %v", + boundary.StartTime, expectedStart) + } + }) + + t.Run("year progression accuracy", func(t *testing.T) { + result := GetYearlyTimeBoundaries(5) + now := time.Now() + + for i, boundary := range result { + expectedYear := now.Year() - i + if boundary.StartTime.Year() != expectedYear { + t.Errorf("Boundary %d should be for year %d, got %d", + i, expectedYear, boundary.StartTime.Year()) + } + + // Verify it's exactly January 1st of that year + expectedStart := time.Date(expectedYear, time.January, 1, 0, 0, 0, 0, now.Location()) + if !boundary.StartTime.Equal(expectedStart) { + t.Errorf("Boundary %d start time (%v) should be exactly %v", + i, boundary.StartTime, expectedStart) + } + } + }) +} diff --git a/common-lib/utils/bean/bean.go b/common-lib/utils/bean/bean.go index ea16a2f72..2bbfa1dc1 100644 --- 
a/common-lib/utils/bean/bean.go +++ b/common-lib/utils/bean/bean.go @@ -74,7 +74,9 @@ type PgQueryMonitoringConfig struct { } func GetPgQueryMonitoringConfig(serviceName string) (PgQueryMonitoringConfig, error) { - cfg := &PgQueryMonitoringConfig{} + cfg := &PgQueryMonitoringConfig{ + ServiceName: serviceName, + } err := env.Parse(cfg) return *cfg, err } diff --git a/common-lib/utils/k8s/K8sService.go b/common-lib/utils/k8s/K8sService.go index c8f583ff3..681e96ad2 100644 --- a/common-lib/utils/k8s/K8sService.go +++ b/common-lib/utils/k8s/K8sService.go @@ -105,8 +105,7 @@ type K8sService interface { GetResourceIf(restConfig *rest.Config, groupVersionKind schema.GroupVersionKind) (resourceIf dynamic.NamespaceableResourceInterface, namespaced bool, err error) FetchConnectionStatusForCluster(k8sClientSet *kubernetes.Clientset) error CreateK8sClientSet(restConfig *rest.Config) (*kubernetes.Clientset, error) - CreateOrUpdateSecretByName(client *v12.CoreV1Client, namespace, uniqueSecretName string, secretLabel map[string]string, secretData map[string]string) error - + CreateOrUpdateSecretByName(client *v12.CoreV1Client, namespace, uniqueSecretName string, secretLabel map[string]string, dataString map[string]string, data map[string][]byte) error // below functions are exposed for K8sUtilExtended CreateNsWithLabels(namespace string, labels map[string]string, client *v12.CoreV1Client) (ns *v1.Namespace, err error) diff --git a/common-lib/utils/k8s/K8sUtil.go b/common-lib/utils/k8s/K8sUtil.go index f621257bd..434fb4874 100644 --- a/common-lib/utils/k8s/K8sUtil.go +++ b/common-lib/utils/k8s/K8sUtil.go @@ -1351,7 +1351,7 @@ func (impl *K8sServiceImpl) GetPodListByLabel(namespace, label string, clientSet return podList.Items, nil } -func (impl *K8sServiceImpl) CreateOrUpdateSecretByName(client *v12.CoreV1Client, namespace, uniqueSecretName string, secretLabel map[string]string, secretData map[string]string) error { +func (impl *K8sServiceImpl) CreateOrUpdateSecretByName(client 
*v12.CoreV1Client, namespace, uniqueSecretName string, secretLabel map[string]string, dataString map[string]string, data map[string][]byte) error { secret, err := impl.GetSecret(namespace, uniqueSecretName, client) statusError, ok := err.(*errors.StatusError) @@ -1361,13 +1361,18 @@ func (impl *K8sServiceImpl) CreateOrUpdateSecretByName(client *v12.CoreV1Client, } if ok && statusError != nil && statusError.Status().Code == http.StatusNotFound { - _, err = impl.CreateSecret(namespace, nil, uniqueSecretName, "", client, secretLabel, secretData) + _, err = impl.CreateSecret(namespace, data, uniqueSecretName, "", client, secretLabel, dataString) if err != nil { impl.logger.Errorw("Error in creating secret for chart repo", "uniqueSecretName", uniqueSecretName, "err", err) return err } } else { - secret.StringData = secretData + if len(data) > 0 { + secret.Data = data + } + if len(dataString) > 0 { + secret.StringData = dataString + } _, err = impl.UpdateSecret(namespace, secret, client) if err != nil { impl.logger.Errorw("Error in creating secret for chart repo", "uniqueSecretName", uniqueSecretName, "err", err) diff --git a/common-lib/utils/sql/connection.go b/common-lib/utils/sql/connection.go index fa6858e5d..5c9cb23bf 100644 --- a/common-lib/utils/sql/connection.go +++ b/common-lib/utils/sql/connection.go @@ -34,7 +34,7 @@ type Config struct { User string `env:"PG_USER" envDefault:"" description:"user for postgres" example:"postgres"` Password string `env:"PG_PASSWORD" envDefault:"" secretData:"-" description:"password for postgres, associated with PG_USER" example:"confidential ;)"` Database string `env:"PG_DATABASE" envDefault:"orchestrator" description:"postgres database to be made connection with" example:"orchestrator, casbin, git_sensor, lens"` - CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin""` + CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin"` ApplicationName string `env:"APP" envDefault:"orchestrator" 
description:"Application name"` ReadTimeout int64 `env:"PG_READ_TIMEOUT" envDefault:"30"` WriteTimeout int64 `env:"PG_WRITE_TIMEOUT" envDefault:"30"` @@ -71,10 +71,10 @@ func NewDbConnection(cfg *Config, logger *zap.SugaredLogger) (*pg.DB, error) { _, err := dbConnection.QueryOne(&test, `SELECT 1`) if err != nil { - logger.Errorw("error in connecting db ", "db", obfuscateSecretTags(cfg), "err", err) + logger.Errorw("error in connecting db ", "db", ObfuscateSecretTags(cfg), "err", err) return nil, err } else { - logger.Infow("connected with db", "db", obfuscateSecretTags(cfg)) + logger.Infow("connected with db", "db", ObfuscateSecretTags(cfg)) } // -------------- @@ -82,7 +82,7 @@ func NewDbConnection(cfg *Config, logger *zap.SugaredLogger) (*pg.DB, error) { return dbConnection, err } -func obfuscateSecretTags(cfg interface{}) interface{} { +func ObfuscateSecretTags(cfg interface{}) interface{} { cfgDpl := reflect.New(reflect.ValueOf(cfg).Elem().Type()).Interface() cfgDplElm := reflect.ValueOf(cfgDpl).Elem() diff --git a/common-lib/utils/storage/bean.go b/common-lib/utils/storage/bean.go new file mode 100644 index 000000000..040504a29 --- /dev/null +++ b/common-lib/utils/storage/bean.go @@ -0,0 +1,146 @@ +package storage + +import ( + veleroBean "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" +) + +// EventType represents the type of event +type EventType string + +func (e EventType) String() string { + return string(e) +} + +func (e EventType) IsCreated() bool { + return e == EventTypeAdded +} + +func (e EventType) IsUpdated() bool { + return e == EventTypeUpdated +} + +func (e EventType) IsDeleted() bool { + return e == EventTypeDeleted +} + +const ( + EventTypeAdded EventType = "ADDED" + EventTypeUpdated EventType = "UPDATED" + EventTypeDeleted EventType = "DELETED" +) + +// ResourceKind represents the kind of resource +type ResourceKind string + +func (r ResourceKind) String() string { + return string(r) +} + +func (r ResourceKind) IsBackup() bool { + return 
r == ResourceBackup +} + +func (r ResourceKind) IsRestore() bool { + return r == ResourceRestore +} + +func (r ResourceKind) IsBackupStorageLocation() bool { + return r == ResourceBackupStorageLocation +} + +func (r ResourceKind) IsVolumeSnapshotLocation() bool { + return r == ResourceVolumeSnapshotLocation +} + +func (r ResourceKind) IsBackupSchedule() bool { + return r == ResourceBackupSchedule +} + +const ( + ResourceBackup ResourceKind = "Backup" + ResourceRestore ResourceKind = "Restore" + ResourceBackupStorageLocation ResourceKind = "BackupStorageLocation" + ResourceVolumeSnapshotLocation ResourceKind = "VolumeSnapshotLocation" + ResourceBackupSchedule ResourceKind = "BackupSchedule" +) + +// LocationsStatus represents the status of a location +// NOTE: status is only available in case of BSL +type LocationsStatus struct { + *veleroBean.BackupStorageLocationStatus +} + +// BackupStatus represents the status of a backup +type BackupStatus struct { + *veleroBean.BackupStatus +} + +// RestoreStatus represents the status of a restore +type RestoreStatus struct { + *veleroBean.RestoreStatus +} + +// BackupScheduleStatus represents the status of a backup schedule +type BackupScheduleStatus struct { + *veleroBean.ScheduleStatus +} + +// VeleroResourceEvent represents the event sent by velero +type VeleroResourceEvent struct { + EventType EventType `json:"eventType"` + ResourceKind ResourceKind `json:"kind"` + ClusterId int `json:"clusterId"` + ResourceName string `json:"resourceName"` +} + +func NewVeleroResourceEvent() *VeleroResourceEvent { + return &VeleroResourceEvent{} +} + +// Getters + +// GetEventType returns the EventType +func (e *VeleroResourceEvent) GetEventType() EventType { + return e.EventType +} + +// GetResourceKind returns the ResourceKind +func (e *VeleroResourceEvent) GetResourceKind() ResourceKind { + return e.ResourceKind +} + +// GetClusterId returns the ClusterId +func (e *VeleroResourceEvent) GetClusterId() int { + return e.ClusterId +} + +// 
GetResourceName returns the ResourceName +func (e *VeleroResourceEvent) GetResourceName() string { + return e.ResourceName +} + +// Setters + +// SetEventType sets the EventType +func (e *VeleroResourceEvent) SetEventType(eventType EventType) *VeleroResourceEvent { + e.EventType = eventType + return e +} + +// SetClusterId sets the ClusterId +func (e *VeleroResourceEvent) SetClusterId(clusterId int) *VeleroResourceEvent { + e.ClusterId = clusterId + return e +} + +// SetResourceKind sets the ResourceKind +func (e *VeleroResourceEvent) SetResourceKind(resourceKind ResourceKind) *VeleroResourceEvent { + e.ResourceKind = resourceKind + return e +} + +// SetResourceName sets the ResourceName +func (e *VeleroResourceEvent) SetResourceName(resourceName string) *VeleroResourceEvent { + e.ResourceName = resourceName + return e +} diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/LICENSE b/common-lib/vendor/github.com/vmware-tanzu/velero/LICENSE new file mode 100644 index 000000000..5e0fd33cb --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/LICENSE @@ -0,0 +1,201 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. 
+ +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. For the purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. 
For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. 
If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. 
You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. + +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "{}" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. 
+ +Copyright {yyyy} {name of copyright owner} + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/shared/data_move_operation_progress.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/shared/data_move_operation_progress.go new file mode 100644 index 000000000..f92b3e533 --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/shared/data_move_operation_progress.go @@ -0,0 +1,29 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package shared + +// DataMoveOperationProgress represents the progress of a +// data movement operation + +// +k8s:deepcopy-gen=true +type DataMoveOperationProgress struct { + // +optional + TotalBytes int64 `json:"totalBytes,omitempty"` + + // +optional + BytesDone int64 `json:"bytesDone,omitempty"` +} diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_repository_types.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_repository_types.go new file mode 100644 index 000000000..6a062c4fe --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_repository_types.go @@ -0,0 +1,114 @@ +/* +Copyright 2018 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// BackupRepositorySpec is the specification for a BackupRepository. +type BackupRepositorySpec struct { + // VolumeNamespace is the namespace this backup repository contains + // pod volume backups for. + VolumeNamespace string `json:"volumeNamespace"` + + // BackupStorageLocation is the name of the BackupStorageLocation + // that should contain this repository. 
+ BackupStorageLocation string `json:"backupStorageLocation"` + + // RepositoryType indicates the type of the backend repository + // +kubebuilder:validation:Enum=kopia;restic;"" + // +optional + RepositoryType string `json:"repositoryType"` + + // ResticIdentifier is the full restic-compatible string for identifying + // this repository. + ResticIdentifier string `json:"resticIdentifier"` + + // MaintenanceFrequency is how often maintenance should be run. + MaintenanceFrequency metav1.Duration `json:"maintenanceFrequency"` +} + +// BackupRepositoryPhase represents the lifecycle phase of a BackupRepository. +// +kubebuilder:validation:Enum=New;Ready;NotReady +type BackupRepositoryPhase string + +const ( + BackupRepositoryPhaseNew BackupRepositoryPhase = "New" + BackupRepositoryPhaseReady BackupRepositoryPhase = "Ready" + BackupRepositoryPhaseNotReady BackupRepositoryPhase = "NotReady" + + BackupRepositoryTypeRestic string = "restic" + BackupRepositoryTypeKopia string = "kopia" +) + +// BackupRepositoryStatus is the current status of a BackupRepository. +type BackupRepositoryStatus struct { + // Phase is the current state of the BackupRepository. + // +optional + Phase BackupRepositoryPhase `json:"phase,omitempty"` + + // Message is a message about the current status of the BackupRepository. + // +optional + Message string `json:"message,omitempty"` + + // LastMaintenanceTime is the last time maintenance was run. + // +optional + // +nullable + LastMaintenanceTime *metav1.Time `json:"lastMaintenanceTime,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the genclient and k8s:deepcopy markers will no longer be needed and should be removed. 
+// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Repository Type",type="string",JSONPath=".spec.repositoryType" +// + +type BackupRepository struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec BackupRepositorySpec `json:"spec,omitempty"` + + // +optional + Status BackupRepositoryStatus `json:"status,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the k8s:deepcopy marker will no longer be needed and should be removed. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=backuprepositories,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=backuprepositories/status,verbs=get;update;patch + +// BackupRepositoryList is a list of BackupRepositories. +type BackupRepositoryList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []BackupRepository `json:"items"` +} diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_types.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_types.go new file mode 100644 index 000000000..858894dc7 --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_types.go @@ -0,0 +1,517 @@ +/* +Copyright 2020 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Metadata struct { + Labels map[string]string `json:"labels,omitempty"` +} + +// BackupSpec defines the specification for a Velero backup. +type BackupSpec struct { + // +optional + Metadata `json:"metadata,omitempty"` + // IncludedNamespaces is a slice of namespace names to include objects + // from. If empty, all namespaces are included. + // +optional + // +nullable + IncludedNamespaces []string `json:"includedNamespaces,omitempty"` + + // ExcludedNamespaces contains a list of namespaces that are not + // included in the backup. + // +optional + // +nullable + ExcludedNamespaces []string `json:"excludedNamespaces,omitempty"` + + // IncludedResources is a slice of resource names to include + // in the backup. If empty, all resources are included. + // +optional + // +nullable + IncludedResources []string `json:"includedResources,omitempty"` + + // ExcludedResources is a slice of resource names that are not + // included in the backup. + // +optional + // +nullable + ExcludedResources []string `json:"excludedResources,omitempty"` + + // IncludedClusterScopedResources is a slice of cluster-scoped + // resource type names to include in the backup. + // If set to "*", all cluster-scoped resource types are included. + // The default value is empty, which means only related + // cluster-scoped resources are included. 
+ // +optional + // +nullable + IncludedClusterScopedResources []string `json:"includedClusterScopedResources,omitempty"` + + // ExcludedClusterScopedResources is a slice of cluster-scoped + // resource type names to exclude from the backup. + // If set to "*", all cluster-scoped resource types are excluded. + // The default value is empty. + // +optional + // +nullable + ExcludedClusterScopedResources []string `json:"excludedClusterScopedResources,omitempty"` + + // IncludedNamespaceScopedResources is a slice of namespace-scoped + // resource type names to include in the backup. + // The default value is "*". + // +optional + // +nullable + IncludedNamespaceScopedResources []string `json:"includedNamespaceScopedResources,omitempty"` + + // ExcludedNamespaceScopedResources is a slice of namespace-scoped + // resource type names to exclude from the backup. + // If set to "*", all namespace-scoped resource types are excluded. + // The default value is empty. + // +optional + // +nullable + ExcludedNamespaceScopedResources []string `json:"excludedNamespaceScopedResources,omitempty"` + + // LabelSelector is a metav1.LabelSelector to filter with + // when adding individual objects to the backup. If empty + // or nil, all objects are included. Optional. + // +optional + // +nullable + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + + // OrLabelSelectors is list of metav1.LabelSelector to filter with + // when adding individual objects to the backup. If multiple provided + // they will be joined by the OR operator. LabelSelector as well as + // OrLabelSelectors cannot co-exist in backup request, only one of them + // can be used. + // +optional + // +nullable + OrLabelSelectors []*metav1.LabelSelector `json:"orLabelSelectors,omitempty"` + + // SnapshotVolumes specifies whether to take snapshots + // of any PV's referenced in the set of objects included + // in the Backup. 
+ // +optional + // +nullable + SnapshotVolumes *bool `json:"snapshotVolumes,omitempty"` + + // TTL is a time.Duration-parseable string describing how long + // the Backup should be retained for. + // +optional + TTL metav1.Duration `json:"ttl,omitempty"` + + // IncludeClusterResources specifies whether cluster-scoped resources + // should be included for consideration in the backup. + // +optional + // +nullable + IncludeClusterResources *bool `json:"includeClusterResources,omitempty"` + + // Hooks represent custom behaviors that should be executed at different phases of the backup. + // +optional + Hooks BackupHooks `json:"hooks,omitempty"` + + // StorageLocation is a string containing the name of a BackupStorageLocation where the backup should be stored. + // +optional + StorageLocation string `json:"storageLocation,omitempty"` + + // VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup. + // +optional + VolumeSnapshotLocations []string `json:"volumeSnapshotLocations,omitempty"` + + // DefaultVolumesToRestic specifies whether restic should be used to take a + // backup of all pod volumes by default. + // + // Deprecated: this field is no longer used and will be removed entirely in future. Use DefaultVolumesToFsBackup instead. + // +optional + // +nullable + DefaultVolumesToRestic *bool `json:"defaultVolumesToRestic,omitempty"` + + // DefaultVolumesToFsBackup specifies whether pod volume file system backup should be used + // for all volumes by default. + // +optional + // +nullable + DefaultVolumesToFsBackup *bool `json:"defaultVolumesToFsBackup,omitempty"` + + // OrderedResources specifies the backup order of resources of specific Kind. + // The map key is the resource name and value is a list of object names separated by commas. + // Each resource name has format "namespace/objectname". For cluster resources, simply use "objectname". 
+ // +optional + // +nullable + OrderedResources map[string]string `json:"orderedResources,omitempty"` + + // CSISnapshotTimeout specifies the time used to wait for CSI VolumeSnapshot status turns to + // ReadyToUse during creation, before returning error as timeout. + // The default value is 10 minute. + // +optional + CSISnapshotTimeout metav1.Duration `json:"csiSnapshotTimeout,omitempty"` + + // ItemOperationTimeout specifies the time used to wait for asynchronous BackupItemAction operations + // The default value is 4 hour. + // +optional + ItemOperationTimeout metav1.Duration `json:"itemOperationTimeout,omitempty"` + // ResourcePolicy specifies the referenced resource policies that backup should follow + // +optional + ResourcePolicy *v1.TypedLocalObjectReference `json:"resourcePolicy,omitempty"` + + // SnapshotMoveData specifies whether snapshot data should be moved + // +optional + // +nullable + SnapshotMoveData *bool `json:"snapshotMoveData,omitempty"` + + // DataMover specifies the data mover to be used by the backup. + // If DataMover is "" or "velero", the built-in data mover will be used. + // +optional + DataMover string `json:"datamover,omitempty"` + + // UploaderConfig specifies the configuration for the uploader. + // +optional + // +nullable + UploaderConfig *UploaderConfigForBackup `json:"uploaderConfig,omitempty"` +} + +// UploaderConfigForBackup defines the configuration for the uploader when doing backup. +type UploaderConfigForBackup struct { + // ParallelFilesUpload is the number of files parallel uploads to perform when using the uploader. + // +optional + ParallelFilesUpload int `json:"parallelFilesUpload,omitempty"` +} + +// BackupHooks contains custom behaviors that should be executed at different phases of the backup. +type BackupHooks struct { + // Resources are hooks that should be executed when backing up individual instances of a resource. 
+ // +optional + // +nullable + Resources []BackupResourceHookSpec `json:"resources,omitempty"` +} + +// BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on +// the rules defined for namespaces, resources, and label selector. +type BackupResourceHookSpec struct { + // Name is the name of this hook. + Name string `json:"name"` + + // IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies + // to all namespaces. + // +optional + // +nullable + IncludedNamespaces []string `json:"includedNamespaces,omitempty"` + + // ExcludedNamespaces specifies the namespaces to which this hook spec does not apply. + // +optional + // +nullable + ExcludedNamespaces []string `json:"excludedNamespaces,omitempty"` + + // IncludedResources specifies the resources to which this hook spec applies. If empty, it applies + // to all resources. + // +optional + // +nullable + IncludedResources []string `json:"includedResources,omitempty"` + + // ExcludedResources specifies the resources to which this hook spec does not apply. + // +optional + // +nullable + ExcludedResources []string `json:"excludedResources,omitempty"` + + // LabelSelector, if specified, filters the resources to which this hook spec applies. + // +optional + // +nullable + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + + // PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup. + // These are executed before any "additional items" from item actions are processed. + // +optional + PreHooks []BackupResourceHook `json:"pre,omitempty"` + + // PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup. + // These are executed after all "additional items" from item actions are processed. + // +optional + PostHooks []BackupResourceHook `json:"post,omitempty"` +} + +// BackupResourceHook defines a hook for a resource. 
+type BackupResourceHook struct { + // Exec defines an exec hook. + Exec *ExecHook `json:"exec"` +} + +// ExecHook is a hook that uses the pod exec API to execute a command in a container in a pod. +type ExecHook struct { + // Container is the container in the pod where the command should be executed. If not specified, + // the pod's first container is used. + // +optional + Container string `json:"container,omitempty"` + + // Command is the command and arguments to execute. + // +kubebuilder:validation:MinItems=1 + Command []string `json:"command"` + + // OnError specifies how Velero should behave if it encounters an error executing this hook. + // +optional + OnError HookErrorMode `json:"onError,omitempty"` + + // Timeout defines the maximum amount of time Velero should wait for the hook to complete before + // considering the execution a failure. + // +optional + Timeout metav1.Duration `json:"timeout,omitempty"` +} + +// HookErrorMode defines how Velero should treat an error from a hook. +// +kubebuilder:validation:Enum=Continue;Fail +type HookErrorMode string + +const ( + // HookErrorModeContinue means that an error from a hook is acceptable and the backup/restore can + // proceed with the rest of hooks' execution. This backup/restore should be in `PartiallyFailed` status. + HookErrorModeContinue HookErrorMode = "Continue" + + // HookErrorModeFail means that an error from a hook is problematic and Velero should stop executing following hooks. + // This backup/restore should be in `PartiallyFailed` status. + HookErrorModeFail HookErrorMode = "Fail" +) + +// BackupPhase is a string representation of the lifecycle phase +// of a Velero backup. 
+// +kubebuilder:validation:Enum=New;FailedValidation;InProgress;WaitingForPluginOperations;WaitingForPluginOperationsPartiallyFailed;Finalizing;FinalizingPartiallyFailed;Completed;PartiallyFailed;Failed;Deleting +type BackupPhase string + +const ( + // BackupPhaseNew means the backup has been created but not + // yet processed by the BackupController. + BackupPhaseNew BackupPhase = "New" + + // BackupPhaseFailedValidation means the backup has failed + // the controller's validations and therefore will not run. + BackupPhaseFailedValidation BackupPhase = "FailedValidation" + + // BackupPhaseInProgress means the backup is currently executing. + BackupPhaseInProgress BackupPhase = "InProgress" + + // BackupPhaseWaitingForPluginOperations means the backup of + // Kubernetes resources, creation of snapshots, and other + // async plugin operations was successful and snapshot data is + // currently uploading or other plugin operations are still + // ongoing. The backup is not usable yet. + BackupPhaseWaitingForPluginOperations BackupPhase = "WaitingForPluginOperations" + + // BackupPhaseWaitingForPluginOperationsPartiallyFailed means + // the backup of Kubernetes resources, creation of snapshots, + // and other async plugin operations partially failed (final + // phase will be PartiallyFailed) and snapshot data is + // currently uploading or other plugin operations are still + // ongoing. The backup is not usable yet. + BackupPhaseWaitingForPluginOperationsPartiallyFailed BackupPhase = "WaitingForPluginOperationsPartiallyFailed" + + // BackupPhaseFinalizing means the backup of + // Kubernetes resources, creation of snapshots, and other + // async plugin operations were successful and snapshot upload and + // other plugin operations are now complete, but the Backup is awaiting + // final update of resources modified during async operations. + // The backup is not usable yet. 
+ BackupPhaseFinalizing BackupPhase = "Finalizing" + + // BackupPhaseFinalizingPartiallyFailed means the backup of + // Kubernetes resources, creation of snapshots, and other + // async plugin operations were successful and snapshot upload and + // other plugin operations are now complete, but one or more errors + // occurred during backup or async operation processing, and the + // Backup is awaiting final update of resources modified during async + // operations. The backup is not usable yet. + BackupPhaseFinalizingPartiallyFailed BackupPhase = "FinalizingPartiallyFailed" + + // BackupPhaseCompleted means the backup has run successfully without + // errors. + BackupPhaseCompleted BackupPhase = "Completed" + + // BackupPhasePartiallyFailed means the backup has run to completion + // but encountered 1+ errors backing up individual items. + BackupPhasePartiallyFailed BackupPhase = "PartiallyFailed" + + // BackupPhaseFailed means the backup ran but encountered an error that + // prevented it from completing successfully. + BackupPhaseFailed BackupPhase = "Failed" + + // BackupPhaseDeleting means the backup and all its associated data are being deleted. + BackupPhaseDeleting BackupPhase = "Deleting" +) + +// BackupStatus captures the current status of a Velero backup. +type BackupStatus struct { + // Version is the backup format major version. + // Deprecated: Please see FormatVersion + // +optional + Version int `json:"version,omitempty"` + + // FormatVersion is the backup format version, including major, minor, and patch version. + // +optional + FormatVersion string `json:"formatVersion,omitempty"` + + // Expiration is when this Backup is eligible for garbage-collection. + // +optional + // +nullable + Expiration *metav1.Time `json:"expiration,omitempty"` + + // Phase is the current state of the Backup. + // +optional + Phase BackupPhase `json:"phase,omitempty"` + + // ValidationErrors is a slice of all validation errors (if + // applicable). 
+ // +optional + // +nullable + ValidationErrors []string `json:"validationErrors,omitempty"` + + // StartTimestamp records the time a backup was started. + // Separate from CreationTimestamp, since that value changes + // on restores. + // The server's time is used for StartTimestamps + // +optional + // +nullable + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + + // CompletionTimestamp records the time a backup was completed. + // Completion time is recorded even on failed backups. + // Completion time is recorded before uploading the backup object. + // The server's time is used for CompletionTimestamps + // +optional + // +nullable + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` + + // VolumeSnapshotsAttempted is the total number of attempted + // volume snapshots for this backup. + // +optional + VolumeSnapshotsAttempted int `json:"volumeSnapshotsAttempted,omitempty"` + + // VolumeSnapshotsCompleted is the total number of successfully + // completed volume snapshots for this backup. + // +optional + VolumeSnapshotsCompleted int `json:"volumeSnapshotsCompleted,omitempty"` + + // FailureReason is an error that caused the entire backup to fail. + // +optional + FailureReason string `json:"failureReason,omitempty"` + + // Warnings is a count of all warning messages that were generated during + // execution of the backup. The actual warnings are in the backup's log + // file in object storage. + // +optional + Warnings int `json:"warnings,omitempty"` + + // Errors is a count of all error messages that were generated during + // execution of the backup. The actual errors are in the backup's log + // file in object storage. + // +optional + Errors int `json:"errors,omitempty"` + + // Progress contains information about the backup's execution progress. Note + // that this information is best-effort only -- if Velero fails to update it + // during a backup for any reason, it may be inaccurate/stale. 
+ // +optional + // +nullable + Progress *BackupProgress `json:"progress,omitempty"` + + // CSIVolumeSnapshotsAttempted is the total number of attempted + // CSI VolumeSnapshots for this backup. + // +optional + CSIVolumeSnapshotsAttempted int `json:"csiVolumeSnapshotsAttempted,omitempty"` + + // CSIVolumeSnapshotsCompleted is the total number of successfully + // completed CSI VolumeSnapshots for this backup. + // +optional + CSIVolumeSnapshotsCompleted int `json:"csiVolumeSnapshotsCompleted,omitempty"` + + // BackupItemOperationsAttempted is the total number of attempted + // async BackupItemAction operations for this backup. + // +optional + BackupItemOperationsAttempted int `json:"backupItemOperationsAttempted,omitempty"` + + // BackupItemOperationsCompleted is the total number of successfully completed + // async BackupItemAction operations for this backup. + // +optional + BackupItemOperationsCompleted int `json:"backupItemOperationsCompleted,omitempty"` + + // BackupItemOperationsFailed is the total number of async + // BackupItemAction operations for this backup which ended with an error. + // +optional + BackupItemOperationsFailed int `json:"backupItemOperationsFailed,omitempty"` + + // HookStatus contains information about the status of the hooks. + // +optional + // +nullable + HookStatus *HookStatus `json:"hookStatus,omitempty"` +} + +// BackupProgress stores information about the progress of a Backup's execution. +type BackupProgress struct { + // TotalItems is the total number of items to be backed up. This number may change + // throughout the execution of the backup due to plugins that return additional related + // items to back up, the velero.io/exclude-from-backup label, and various other + // filters that happen as items are processed. + // +optional + TotalItems int `json:"totalItems,omitempty"` + + // ItemsBackedUp is the number of items that have actually been written to the + // backup tarball so far. 
+ // +optional + ItemsBackedUp int `json:"itemsBackedUp,omitempty"` +} + +// HookStatus stores information about the status of the hooks. +type HookStatus struct { + // HooksAttempted is the total number of attempted hooks + // Specifically, HooksAttempted represents the number of hooks that failed to execute + // and the number of hooks that executed successfully. + // +optional + HooksAttempted int `json:"hooksAttempted,omitempty"` + + // HooksFailed is the total number of hooks which ended with an error + // +optional + HooksFailed int `json:"hooksFailed,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion +// +kubebuilder:rbac:groups=velero.io,resources=backups,verbs=create;delete;get;list;patch;update;watch +// +kubebuilder:rbac:groups=velero.io,resources=backups/status,verbs=get;update;patch + +// Backup is a Velero resource that represents the capture of Kubernetes +// cluster state at a point in time (API objects and associated volume state). +type Backup struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec BackupSpec `json:"spec,omitempty"` + + // +optional + Status BackupStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupList is a list of Backups. 
+type BackupList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []Backup `json:"items"` +} diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backupstoragelocation_types.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backupstoragelocation_types.go new file mode 100644 index 000000000..e44671222 --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backupstoragelocation_types.go @@ -0,0 +1,179 @@ +/* +Copyright 2017, 2020 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// BackupStorageLocationSpec defines the desired state of a Velero BackupStorageLocation +type BackupStorageLocationSpec struct { + // Provider is the provider of the backup storage. + Provider string `json:"provider"` + + // Config is for provider-specific configuration fields. + // +optional + Config map[string]string `json:"config,omitempty"` + + // Credential contains the credential information intended to be used with this location + // +optional + Credential *corev1api.SecretKeySelector `json:"credential,omitempty"` + + StorageType `json:",inline"` + + // Default indicates this location is the default backup storage location. 
+ // +optional + Default bool `json:"default,omitempty"` + + // AccessMode defines the permissions for the backup storage location. + // +optional + AccessMode BackupStorageLocationAccessMode `json:"accessMode,omitempty"` + + // BackupSyncPeriod defines how frequently to sync backup API objects from object storage. A value of 0 disables sync. + // +optional + // +nullable + BackupSyncPeriod *metav1.Duration `json:"backupSyncPeriod,omitempty"` + + // ValidationFrequency defines how frequently to validate the corresponding object storage. A value of 0 disables validation. + // +optional + // +nullable + ValidationFrequency *metav1.Duration `json:"validationFrequency,omitempty"` +} + +// BackupStorageLocationStatus defines the observed state of BackupStorageLocation +type BackupStorageLocationStatus struct { + // Phase is the current state of the BackupStorageLocation. + // +optional + Phase BackupStorageLocationPhase `json:"phase,omitempty"` + + // LastSyncedTime is the last time the contents of the location were synced into + // the cluster. + // +optional + // +nullable + LastSyncedTime *metav1.Time `json:"lastSyncedTime,omitempty"` + + // LastValidationTime is the last time the backup store location was validated + // the cluster. + // +optional + // +nullable + LastValidationTime *metav1.Time `json:"lastValidationTime,omitempty"` + + // Message is a message about the backup storage location's status. + // +optional + Message string `json:"message,omitempty"` + + // LastSyncedRevision is the value of the `metadata/revision` file in the backup + // storage location the last time the BSL's contents were synced into the cluster. + // + // Deprecated: this field is no longer updated or used for detecting changes to + // the location's contents and will be removed entirely in v2.0. + // +optional + LastSyncedRevision types.UID `json:"lastSyncedRevision,omitempty"` + + // AccessMode is an unused field. 
+ // + // Deprecated: there is now an AccessMode field on the Spec and this field + // will be removed entirely as of v2.0. + // +optional + AccessMode BackupStorageLocationAccessMode `json:"accessMode,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the genclient and k8s:deepcopy markers will no longer be needed and should be removed. +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=bsl +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Backup Storage Location status such as Available/Unavailable" +// +kubebuilder:printcolumn:name="Last Validated",type="date",JSONPath=".status.lastValidationTime",description="LastValidationTime is the last time the backup store location was validated" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Default",type="boolean",JSONPath=".spec.default",description="Default backup storage location" + +// BackupStorageLocation is a location where Velero stores backup objects +type BackupStorageLocation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BackupStorageLocationSpec `json:"spec,omitempty"` + Status BackupStorageLocationStatus `json:"status,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the k8s:deepcopy marker will no longer be needed and should be removed. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=backupstoragelocations,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=backupstoragelocations/status,verbs=get;update;patch + +// BackupStorageLocationList contains a list of BackupStorageLocation +type BackupStorageLocationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupStorageLocation `json:"items"` +} + +// StorageType represents the type of storage that a backup location uses. +// ObjectStorage must be non-nil, since it is currently the only supported StorageType. +type StorageType struct { + ObjectStorage *ObjectStorageLocation `json:"objectStorage"` +} + +// ObjectStorageLocation specifies the settings necessary to connect to a provider's object storage. +type ObjectStorageLocation struct { + // Bucket is the bucket to use for object storage. + Bucket string `json:"bucket"` + + // Prefix is the path inside a bucket to use for Velero storage. Optional. + // +optional + Prefix string `json:"prefix,omitempty"` + + // CACert defines a CA bundle to use when verifying TLS connections to the provider. + // +optional + CACert []byte `json:"caCert,omitempty"` +} + +// BackupStorageLocationPhase is the lifecycle phase of a Velero BackupStorageLocation. +// +kubebuilder:validation:Enum=Available;Unavailable +// +kubebuilder:default=Unavailable +type BackupStorageLocationPhase string + +const ( + // BackupStorageLocationPhaseAvailable means the location is available to read and write from. + BackupStorageLocationPhaseAvailable BackupStorageLocationPhase = "Available" + + // BackupStorageLocationPhaseUnavailable means the location is unavailable to read and write from. 
+ BackupStorageLocationPhaseUnavailable BackupStorageLocationPhase = "Unavailable" +) + +// BackupStorageLocationAccessMode represents the permissions for a BackupStorageLocation. +// +kubebuilder:validation:Enum=ReadOnly;ReadWrite +type BackupStorageLocationAccessMode string + +const ( + // BackupStorageLocationAccessModeReadOnly represents read-only access to a BackupStorageLocation. + BackupStorageLocationAccessModeReadOnly BackupStorageLocationAccessMode = "ReadOnly" + + // BackupStorageLocationAccessModeReadWrite represents read and write access to a BackupStorageLocation. + BackupStorageLocationAccessModeReadWrite BackupStorageLocationAccessMode = "ReadWrite" +) + +// TODO(2.0): remove the AccessMode field from BackupStorageLocationStatus. +// TODO(2.0): remove the LastSyncedRevision field from BackupStorageLocationStatus. diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/constants.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/constants.go new file mode 100644 index 000000000..a7292d568 --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/constants.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // DefaultNamespace is the Kubernetes namespace that is used by default for + // the Velero server and API objects. 
+ DefaultNamespace = "velero" + + // ResourcesDir is a top-level directory expected in backups which contains sub-directories + // for each resource type in the backup. + ResourcesDir = "resources" + + // MetadataDir is a top-level directory expected in backups which contains + // files that store metadata about the backup, such as the backup version. + MetadataDir = "metadata" + + // ClusterScopedDir is the name of the directory containing cluster-scoped + // resources within a Velero backup. + ClusterScopedDir = "cluster" + + // NamespaceScopedDir is the name of the directory containing namespace-scoped + // resource within a Velero backup. + NamespaceScopedDir = "namespaces" + + // CSIFeatureFlag is the feature flag string that defines whether or not CSI features are being used. + CSIFeatureFlag = "EnableCSI" + + // PreferredVersionDir is the suffix name of the directory containing the preferred version of the API group + // resource within a Velero backup. + PreferredVersionDir = "-preferredversion" + + // APIGroupVersionsFeatureFlag is the feature flag string that defines whether or not to handle multiple API Group Versions + APIGroupVersionsFeatureFlag = "EnableAPIGroupVersions" +) diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/delete_backup_request_types.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/delete_backup_request_types.go new file mode 100644 index 000000000..8c7b1fa09 --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/delete_backup_request_types.go @@ -0,0 +1,87 @@ +/* +Copyright 2018 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// DeleteBackupRequestSpec is the specification for which backups to delete. +type DeleteBackupRequestSpec struct { + BackupName string `json:"backupName"` +} + +// DeleteBackupRequestPhase represents the lifecycle phase of a DeleteBackupRequest. +// +kubebuilder:validation:Enum=New;InProgress;Processed +type DeleteBackupRequestPhase string + +const ( + // DeleteBackupRequestPhaseNew means the DeleteBackupRequest has not been processed yet. + DeleteBackupRequestPhaseNew DeleteBackupRequestPhase = "New" + + // DeleteBackupRequestPhaseInProgress means the DeleteBackupRequest is being processed. + DeleteBackupRequestPhaseInProgress DeleteBackupRequestPhase = "InProgress" + + // DeleteBackupRequestPhaseProcessed means the DeleteBackupRequest has been processed. + DeleteBackupRequestPhaseProcessed DeleteBackupRequestPhase = "Processed" +) + +// DeleteBackupRequestStatus is the current status of a DeleteBackupRequest. +type DeleteBackupRequestStatus struct { + // Phase is the current state of the DeleteBackupRequest. + // +optional + Phase DeleteBackupRequestPhase `json:"phase,omitempty"` + + // Errors contains any errors that were encountered during the deletion process. + // +optional + // +nullable + Errors []string `json:"errors,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, the genclient and k8s:deepcopy markers will no longer be needed and should be removed. 
+// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="BackupName",type="string",JSONPath=".spec.backupName",description="The name of the backup to be deleted" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="The status of the deletion request" + +// DeleteBackupRequest is a request to delete one or more backups. +type DeleteBackupRequest struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec DeleteBackupRequestSpec `json:"spec,omitempty"` + + // +optional + Status DeleteBackupRequestStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// DeleteBackupRequestList is a list of DeleteBackupRequests. +type DeleteBackupRequestList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []DeleteBackupRequest `json:"items"` +} diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/doc.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/doc.go new file mode 100644 index 000000000..4431315c2 --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package v1 is the v1 version of the API. +// +groupName=velero.io +package v1 diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/download_request_types.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/download_request_types.go new file mode 100644 index 000000000..f23118fe5 --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/download_request_types.go @@ -0,0 +1,126 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// DownloadRequestSpec is the specification for a download request. +type DownloadRequestSpec struct { + // Target is what to download (e.g. logs for a backup). + Target DownloadTarget `json:"target"` +} + +// DownloadTargetKind represents what type of file to download. 
+// +kubebuilder:validation:Enum=BackupLog;BackupContents;BackupVolumeSnapshots;BackupItemOperations;BackupResourceList;BackupResults;RestoreLog;RestoreResults;RestoreResourceList;RestoreItemOperations;CSIBackupVolumeSnapshots;CSIBackupVolumeSnapshotContents;BackupVolumeInfos;RestoreVolumeInfo +type DownloadTargetKind string + +const ( + DownloadTargetKindBackupLog DownloadTargetKind = "BackupLog" + DownloadTargetKindBackupContents DownloadTargetKind = "BackupContents" + DownloadTargetKindBackupVolumeSnapshots DownloadTargetKind = "BackupVolumeSnapshots" + DownloadTargetKindBackupItemOperations DownloadTargetKind = "BackupItemOperations" + DownloadTargetKindBackupResourceList DownloadTargetKind = "BackupResourceList" + DownloadTargetKindBackupResults DownloadTargetKind = "BackupResults" + DownloadTargetKindRestoreLog DownloadTargetKind = "RestoreLog" + DownloadTargetKindRestoreResults DownloadTargetKind = "RestoreResults" + DownloadTargetKindRestoreResourceList DownloadTargetKind = "RestoreResourceList" + DownloadTargetKindRestoreItemOperations DownloadTargetKind = "RestoreItemOperations" + DownloadTargetKindCSIBackupVolumeSnapshots DownloadTargetKind = "CSIBackupVolumeSnapshots" + DownloadTargetKindCSIBackupVolumeSnapshotContents DownloadTargetKind = "CSIBackupVolumeSnapshotContents" + DownloadTargetKindBackupVolumeInfos DownloadTargetKind = "BackupVolumeInfos" + DownloadTargetKindRestoreVolumeInfo DownloadTargetKind = "RestoreVolumeInfo" +) + +// DownloadTarget is the specification for what kind of file to download, and the name of the +// resource with which it's associated. +type DownloadTarget struct { + // Kind is the type of file to download. + Kind DownloadTargetKind `json:"kind"` + + // Name is the name of the Kubernetes resource with which the file is associated. + Name string `json:"name"` +} + +// DownloadRequestPhase represents the lifecycle phase of a DownloadRequest. 
+// +kubebuilder:validation:Enum=New;Processed +type DownloadRequestPhase string + +const ( + // DownloadRequestPhaseNew means the DownloadRequest has not been processed by the + // DownloadRequestController yet. + DownloadRequestPhaseNew DownloadRequestPhase = "New" + + // DownloadRequestPhaseProcessed means the DownloadRequest has been processed by the + // DownloadRequestController. + DownloadRequestPhaseProcessed DownloadRequestPhase = "Processed" +) + +// DownloadRequestStatus is the current status of a DownloadRequest. +type DownloadRequestStatus struct { + // Phase is the current state of the DownloadRequest. + // +optional + Phase DownloadRequestPhase `json:"phase,omitempty"` + + // DownloadURL contains the pre-signed URL for the target file. + // +optional + DownloadURL string `json:"downloadURL,omitempty"` + + // Expiration is when this DownloadRequest expires and can be deleted by the system. + // +optional + // +nullable + Expiration *metav1.Time `json:"expiration,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the k8s:deepcopy marker will no longer be needed and should be removed. +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion + +// DownloadRequest is a request to download an artifact from backup object storage, such as a backup +// log file. +type DownloadRequest struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec DownloadRequestSpec `json:"spec,omitempty"` + + // +optional + Status DownloadRequestStatus `json:"status,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the k8s:deepcopy marker will no longer be needed and should be removed. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=downloadrequests,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=downloadrequests/status,verbs=get;update;patch + +// DownloadRequestList is a list of DownloadRequests. +type DownloadRequestList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []DownloadRequest `json:"items"` +} diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/groupversion_info.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/groupversion_info.go new file mode 100644 index 000000000..ab5b20433 --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2020 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1 contains API Schema definitions for the velero v1 API group +// +kubebuilder:object:generate=true +// +groupName=velero.io +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "velero.io", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/labels_annotations.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/labels_annotations.go new file mode 100644 index 000000000..c86b4e91b --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/labels_annotations.go @@ -0,0 +1,151 @@ +/* +Copyright 2018 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // BackupNameLabel is the label key used to identify a backup by name. + BackupNameLabel = "velero.io/backup-name" + + // BackupUIDLabel is the label key used to identify a backup by uid. + BackupUIDLabel = "velero.io/backup-uid" + + // RestoreNameLabel is the label key used to identify a restore by name. 
+ RestoreNameLabel = "velero.io/restore-name" + + // ScheduleNameLabel is the label key used to identify a schedule by name. + ScheduleNameLabel = "velero.io/schedule-name" + + // RestoreUIDLabel is the label key used to identify a restore by uid. + RestoreUIDLabel = "velero.io/restore-uid" + + // PodUIDLabel is the label key used to identify a pod by uid. + PodUIDLabel = "velero.io/pod-uid" + + // PVCUIDLabel is the label key used to identify a PVC by uid. + PVCUIDLabel = "velero.io/pvc-uid" + + // PodVolumeOperationTimeoutAnnotation is the annotation key used to apply + // a backup/restore-specific timeout value for pod volume operations (i.e. + // pod volume backups/restores). + PodVolumeOperationTimeoutAnnotation = "velero.io/pod-volume-timeout" + + // StorageLocationLabel is the label key used to identify the storage + // location of a backup. + StorageLocationLabel = "velero.io/storage-location" + + // VolumeNamespaceLabel is the label key used to identify which + // namespace a repository stores backups for. + VolumeNamespaceLabel = "velero.io/volume-namespace" + + // RepositoryTypeLabel is the label key used to identify the type of a repository + RepositoryTypeLabel = "velero.io/repository-type" + + // DataUploadLabel is the label key used to identify the dataupload for snapshot backup pod + DataUploadLabel = "velero.io/data-upload" + + // DataUploadSnapshotInfoLabel is used to identify the configmap that contains the snapshot info of a data upload + // normally the value of the label should the "true" or "false" + DataUploadSnapshotInfoLabel = "velero.io/data-upload-snapshot-info" + + // DataDownloadLabel is the label key used to identify the datadownload for snapshot restore pod + DataDownloadLabel = "velero.io/data-download" + + // SourceClusterK8sVersionAnnotation is the label key used to identify the k8s + // git version of the backup , i.e. 
v1.16.4 + SourceClusterK8sGitVersionAnnotation = "velero.io/source-cluster-k8s-gitversion" + + // SourceClusterK8sMajorVersionAnnotation is the label key used to identify the k8s + // major version of the backup , i.e. 1 + SourceClusterK8sMajorVersionAnnotation = "velero.io/source-cluster-k8s-major-version" + + // SourceClusterK8sMajorVersionAnnotation is the label key used to identify the k8s + // minor version of the backup , i.e. 16 + SourceClusterK8sMinorVersionAnnotation = "velero.io/source-cluster-k8s-minor-version" + + // ResourceTimeoutAnnotation is the annotation key used to carry the global resource + // timeout value for backup to plugins. + ResourceTimeoutAnnotation = "velero.io/resource-timeout" + + // AsyncOperationIDLabel is the label key used to identify the async operation ID + AsyncOperationIDLabel = "velero.io/async-operation-id" + + // PVCNameLabel is the label key used to identify the PVC's namespace and name. + // The format is /. + PVCNamespaceNameLabel = "velero.io/pvc-namespace-name" + + // ResourceUsageLabel is the label key to explain the Velero resource usage. + ResourceUsageLabel = "velero.io/resource-usage" + + // VolumesToBackupAnnotation is the annotation on a pod whose mounted volumes + // need to be backed up using pod volume backup. + VolumesToBackupAnnotation = "backup.velero.io/backup-volumes" + + // VolumesToExcludeAnnotation is the annotation on a pod whose mounted volumes + // should be excluded from pod volume backup. + VolumesToExcludeAnnotation = "backup.velero.io/backup-volumes-excludes" + + // ExcludeFromBackupLabel is the label to exclude k8s resource from backup, + // even if the resource contains a matching selector label. 
+ ExcludeFromBackupLabel = "velero.io/exclude-from-backup" +) + +type AsyncOperationIDPrefix string + +const ( + AsyncOperationIDPrefixDataDownload AsyncOperationIDPrefix = "dd-" + AsyncOperationIDPrefixDataUpload AsyncOperationIDPrefix = "du-" +) + +type VeleroResourceUsage string + +const ( + VeleroResourceUsageDataUploadResult VeleroResourceUsage = "DataUpload" +) + +// CSI related plugin actions' constant variable +const ( + VolumeSnapshotLabel = "velero.io/volume-snapshot-name" + VolumeSnapshotHandleAnnotation = "velero.io/csi-volumesnapshot-handle" + VolumeSnapshotRestoreSize = "velero.io/csi-volumesnapshot-restore-size" + DriverNameAnnotation = "velero.io/csi-driver-name" + VSCDeletionPolicyAnnotation = "velero.io/csi-vsc-deletion-policy" + VolumeSnapshotClassSelectorLabel = "velero.io/csi-volumesnapshot-class" + VolumeSnapshotClassDriverBackupAnnotationPrefix = "velero.io/csi-volumesnapshot-class" + VolumeSnapshotClassDriverPVCAnnotation = "velero.io/csi-volumesnapshot-class" + + // There is no release w/ these constants exported. Using the strings for now. + // CSI Annotation volumesnapshotclass + // https://github.com/kubernetes-csi/external-snapshotter/blob/master/pkg/utils/util.go#L59-L60 + PrefixedListSecretNameAnnotation = "csi.storage.k8s.io/snapshotter-list-secret-name" // #nosec G101 + PrefixedListSecretNamespaceAnnotation = "csi.storage.k8s.io/snapshotter-list-secret-namespace" // #nosec G101 + + // CSI Annotation volumesnapshotcontents + PrefixedSecretNameAnnotation = "csi.storage.k8s.io/snapshotter-secret-name" // #nosec G101 + PrefixedSecretNamespaceAnnotation = "csi.storage.k8s.io/snapshotter-secret-namespace" // #nosec G101 + + // Velero checks this annotation to determine whether to skip resource excluding check. 
+ MustIncludeAdditionalItemAnnotation = "backup.velero.io/must-include-additional-items" + // SkippedNoCSIPVAnnotation - Velero checks this annotation on processed PVC to + // find out if the snapshot was skipped b/c the PV is not provisioned via CSI + SkippedNoCSIPVAnnotation = "backup.velero.io/skipped-no-csi-pv" + + // DynamicPVRestoreLabel is the label key for dynamic PV restore + DynamicPVRestoreLabel = "velero.io/dynamic-pv-restore" + + // DataUploadNameAnnotation is the label key for the DataUpload name + DataUploadNameAnnotation = "velero.io/data-upload-name" +) diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_backup_types.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_backup_types.go new file mode 100644 index 000000000..b3070e3dd --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_backup_types.go @@ -0,0 +1,157 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/vmware-tanzu/velero/pkg/apis/velero/shared" +) + +// PodVolumeBackupSpec is the specification for a PodVolumeBackup. +type PodVolumeBackupSpec struct { + // Node is the name of the node that the Pod is running on. + Node string `json:"node"` + + // Pod is a reference to the pod containing the volume to be backed up. 
+ Pod corev1api.ObjectReference `json:"pod"` + + // Volume is the name of the volume within the Pod to be backed + // up. + Volume string `json:"volume"` + + // BackupStorageLocation is the name of the backup storage location + // where the backup repository is stored. + BackupStorageLocation string `json:"backupStorageLocation"` + + // RepoIdentifier is the backup repository identifier. + RepoIdentifier string `json:"repoIdentifier"` + + // UploaderType is the type of the uploader to handle the data transfer. + // +kubebuilder:validation:Enum=kopia;restic;"" + // +optional + UploaderType string `json:"uploaderType"` + + // Tags are a map of key-value pairs that should be applied to the + // volume backup as tags. + // +optional + Tags map[string]string `json:"tags,omitempty"` + + // UploaderSettings are a map of key-value pairs that should be applied to the + // uploader configuration. + // +optional + // +nullable + UploaderSettings map[string]string `json:"uploaderSettings,omitempty"` +} + +// PodVolumeBackupPhase represents the lifecycle phase of a PodVolumeBackup. +// +kubebuilder:validation:Enum=New;InProgress;Completed;Failed +type PodVolumeBackupPhase string + +const ( + PodVolumeBackupPhaseNew PodVolumeBackupPhase = "New" + PodVolumeBackupPhaseInProgress PodVolumeBackupPhase = "InProgress" + PodVolumeBackupPhaseCompleted PodVolumeBackupPhase = "Completed" + PodVolumeBackupPhaseFailed PodVolumeBackupPhase = "Failed" +) + +// PodVolumeBackupStatus is the current status of a PodVolumeBackup. +type PodVolumeBackupStatus struct { + // Phase is the current state of the PodVolumeBackup. + // +optional + Phase PodVolumeBackupPhase `json:"phase,omitempty"` + + // Path is the full path within the controller pod being backed up. + // +optional + Path string `json:"path,omitempty"` + + // SnapshotID is the identifier for the snapshot of the pod volume. 
+ // +optional + SnapshotID string `json:"snapshotID,omitempty"` + + // Message is a message about the pod volume backup's status. + // +optional + Message string `json:"message,omitempty"` + + // StartTimestamp records the time a backup was started. + // Separate from CreationTimestamp, since that value changes + // on restores. + // The server's time is used for StartTimestamps + // +optional + // +nullable + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + + // CompletionTimestamp records the time a backup was completed. + // Completion time is recorded even on failed backups. + // Completion time is recorded before uploading the backup object. + // The server's time is used for CompletionTimestamps + // +optional + // +nullable + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` + + // Progress holds the total number of bytes of the volume and the current + // number of backed up bytes. This can be used to display progress information + // about the backup operation. + // +optional + Progress shared.DataMoveOperationProgress `json:"progress,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runttime-controller client, +// the genclient and k8s:deepcopy markers will no longer be needed and should be removed. 
+// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Pod Volume Backup status such as New/InProgress" +// +kubebuilder:printcolumn:name="Created",type="date",JSONPath=".status.startTimestamp",description="Time when this backup was started" +// +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".spec.pod.namespace",description="Namespace of the pod containing the volume to be backed up" +// +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.pod.name",description="Name of the pod containing the volume to be backed up" +// +kubebuilder:printcolumn:name="Volume",type="string",JSONPath=".spec.volume",description="Name of the volume to be backed up" +// +kubebuilder:printcolumn:name="Uploader Type",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer" +// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:object:root=true +// +kubebuilder:object:generate=true + +type PodVolumeBackup struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec PodVolumeBackupSpec `json:"spec,omitempty"` + + // +optional + Status PodVolumeBackupStatus `json:"status,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the k8s:deepcopy marker will no longer be needed and should be removed. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups/status,verbs=get;update;patch + +// PodVolumeBackupList is a list of PodVolumeBackups. +type PodVolumeBackupList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []PodVolumeBackup `json:"items"` +} diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_restore_type.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_restore_type.go new file mode 100644 index 000000000..34bc7e530 --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_restore_type.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/vmware-tanzu/velero/pkg/apis/velero/shared" +) + +// PodVolumeRestoreSpec is the specification for a PodVolumeRestore. +type PodVolumeRestoreSpec struct { + // Pod is a reference to the pod containing the volume to be restored. + Pod corev1api.ObjectReference `json:"pod"` + + // Volume is the name of the volume within the Pod to be restored. 
+ Volume string `json:"volume"`
+
+ // BackupStorageLocation is the name of the backup storage location
+ // where the backup repository is stored.
+ BackupStorageLocation string `json:"backupStorageLocation"`
+
+ // RepoIdentifier is the backup repository identifier.
+ RepoIdentifier string `json:"repoIdentifier"`
+
+ // UploaderType is the type of the uploader to handle the data transfer.
+ // +kubebuilder:validation:Enum=kopia;restic;""
+ // +optional
+ UploaderType string `json:"uploaderType"`
+
+ // SnapshotID is the ID of the volume snapshot to be restored.
+ SnapshotID string `json:"snapshotID"`
+
+ // SourceNamespace is the original namespace for namespace mapping.
+ SourceNamespace string `json:"sourceNamespace"`
+
+ // UploaderSettings are a map of key-value pairs that should be applied to the
+ // uploader configuration.
+ // +optional
+ // +nullable
+ UploaderSettings map[string]string `json:"uploaderSettings,omitempty"`
+}
+
+// PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore.
+// +kubebuilder:validation:Enum=New;InProgress;Completed;Failed
+type PodVolumeRestorePhase string
+
+const (
+ PodVolumeRestorePhaseNew PodVolumeRestorePhase = "New"
+ PodVolumeRestorePhaseInProgress PodVolumeRestorePhase = "InProgress"
+ PodVolumeRestorePhaseCompleted PodVolumeRestorePhase = "Completed"
+ PodVolumeRestorePhaseFailed PodVolumeRestorePhase = "Failed"
+)
+
+// PodVolumeRestoreStatus is the current status of a PodVolumeRestore.
+type PodVolumeRestoreStatus struct {
+ // Phase is the current state of the PodVolumeRestore.
+ // +optional
+ Phase PodVolumeRestorePhase `json:"phase,omitempty"`
+
+ // Message is a message about the pod volume restore's status.
+ // +optional
+ Message string `json:"message,omitempty"`
+
+ // StartTimestamp records the time a restore was started.
+ // The server's time is used for StartTimestamps + // +optional + // +nullable + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + + // CompletionTimestamp records the time a restore was completed. + // Completion time is recorded even on failed restores. + // The server's time is used for CompletionTimestamps + // +optional + // +nullable + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` + + // Progress holds the total number of bytes of the snapshot and the current + // number of restored bytes. This can be used to display progress information + // about the restore operation. + // +optional + Progress shared.DataMoveOperationProgress `json:"progress,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, the genclient and k8s:deepcopy markers will no longer be needed and should be removed. +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:generate=true +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".spec.pod.namespace",description="Namespace of the pod containing the volume to be restored" +// +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.pod.name",description="Name of the pod containing the volume to be restored" +// +kubebuilder:printcolumn:name="Uploader Type",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer" +// +kubebuilder:printcolumn:name="Volume",type="string",JSONPath=".spec.volume",description="Name of the volume to be restored" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Pod Volume Restore status such as New/InProgress" +// +kubebuilder:printcolumn:name="TotalBytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Pod Volume Restore status such as New/InProgress" 
+// +kubebuilder:printcolumn:name="BytesDone",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Pod Volume Restore status such as New/InProgress" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +type PodVolumeRestore struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec PodVolumeRestoreSpec `json:"spec,omitempty"` + + // +optional + Status PodVolumeRestoreStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:generate=true +// +kubebuilder:object:root=true + +// PodVolumeRestoreList is a list of PodVolumeRestores. +type PodVolumeRestoreList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []PodVolumeRestore `json:"items"` +} diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/register.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/register.go new file mode 100644 index 000000000..cfcff670c --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/register.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Resource gets a Velero GroupResource for a specified resource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +type typeInfo struct { + PluralName string + ItemType runtime.Object + ItemListType runtime.Object +} + +func newTypeInfo(pluralName string, itemType, itemListType runtime.Object) typeInfo { + return typeInfo{ + PluralName: pluralName, + ItemType: itemType, + ItemListType: itemListType, + } +} + +// CustomResources returns a map of all custom resources within the Velero +// API group, keyed on Kind. +func CustomResources() map[string]typeInfo { + return map[string]typeInfo{ + "Backup": newTypeInfo("backups", &Backup{}, &BackupList{}), + "Restore": newTypeInfo("restores", &Restore{}, &RestoreList{}), + "Schedule": newTypeInfo("schedules", &Schedule{}, &ScheduleList{}), + "DownloadRequest": newTypeInfo("downloadrequests", &DownloadRequest{}, &DownloadRequestList{}), + "DeleteBackupRequest": newTypeInfo("deletebackuprequests", &DeleteBackupRequest{}, &DeleteBackupRequestList{}), + "PodVolumeBackup": newTypeInfo("podvolumebackups", &PodVolumeBackup{}, &PodVolumeBackupList{}), + "PodVolumeRestore": newTypeInfo("podvolumerestores", &PodVolumeRestore{}, &PodVolumeRestoreList{}), + "BackupRepository": newTypeInfo("backuprepositories", &BackupRepository{}, &BackupRepositoryList{}), + "BackupStorageLocation": newTypeInfo("backupstoragelocations", &BackupStorageLocation{}, &BackupStorageLocationList{}), + "VolumeSnapshotLocation": newTypeInfo("volumesnapshotlocations", &VolumeSnapshotLocation{}, &VolumeSnapshotLocationList{}), + "ServerStatusRequest": newTypeInfo("serverstatusrequests", &ServerStatusRequest{}, &ServerStatusRequestList{}), + } +} + +// CustomResourceKinds returns a list of 
all custom resources kinds within the Velero +func CustomResourceKinds() sets.Set[string] { + kinds := sets.New[string]() + + resources := CustomResources() + for kind := range resources { + kinds.Insert(kind) + } + + return kinds +} + +func addKnownTypes(scheme *runtime.Scheme) error { + for _, typeInfo := range CustomResources() { + scheme.AddKnownTypes(SchemeGroupVersion, typeInfo.ItemType, typeInfo.ItemListType) + } + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/restore_types.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/restore_types.go new file mode 100644 index 000000000..377a92737 --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/restore_types.go @@ -0,0 +1,433 @@ +/* +Copyright 2017, 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// RestoreSpec defines the specification for a Velero restore. +type RestoreSpec struct { + // BackupName is the unique name of the Velero backup to restore + // from. + // +optional + BackupName string `json:"backupName,omitempty"` + + // ScheduleName is the unique name of the Velero schedule to restore + // from. 
If specified, and BackupName is empty, Velero will restore + // from the most recent successful backup created from this schedule. + // +optional + ScheduleName string `json:"scheduleName,omitempty"` + + // IncludedNamespaces is a slice of namespace names to include objects + // from. If empty, all namespaces are included. + // +optional + // +nullable + IncludedNamespaces []string `json:"includedNamespaces,omitempty"` + + // ExcludedNamespaces contains a list of namespaces that are not + // included in the restore. + // +optional + // +nullable + ExcludedNamespaces []string `json:"excludedNamespaces,omitempty"` + + // IncludedResources is a slice of resource names to include + // in the restore. If empty, all resources in the backup are included. + // +optional + // +nullable + IncludedResources []string `json:"includedResources,omitempty"` + + // ExcludedResources is a slice of resource names that are not + // included in the restore. + // +optional + // +nullable + ExcludedResources []string `json:"excludedResources,omitempty"` + + // NamespaceMapping is a map of source namespace names + // to target namespace names to restore into. Any source + // namespaces not included in the map will be restored into + // namespaces of the same name. + // +optional + NamespaceMapping map[string]string `json:"namespaceMapping,omitempty"` + + // LabelSelector is a metav1.LabelSelector to filter with + // when restoring individual objects from the backup. If empty + // or nil, all objects are included. Optional. + // +optional + // +nullable + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + + // OrLabelSelectors is list of metav1.LabelSelector to filter with + // when restoring individual objects from the backup. If multiple provided + // they will be joined by the OR operator. 
LabelSelector as well as + // OrLabelSelectors cannot co-exist in restore request, only one of them + // can be used + // +optional + // +nullable + OrLabelSelectors []*metav1.LabelSelector `json:"orLabelSelectors,omitempty"` + + // RestorePVs specifies whether to restore all included + // PVs from snapshot + // +optional + // +nullable + RestorePVs *bool `json:"restorePVs,omitempty"` + + // RestoreStatus specifies which resources we should restore the status + // field. If nil, no objects are included. Optional. + // +optional + // +nullable + RestoreStatus *RestoreStatusSpec `json:"restoreStatus,omitempty"` + + // PreserveNodePorts specifies whether to restore old nodePorts from backup. + // +optional + // +nullable + PreserveNodePorts *bool `json:"preserveNodePorts,omitempty"` + + // IncludeClusterResources specifies whether cluster-scoped resources + // should be included for consideration in the restore. If null, defaults + // to true. + // +optional + // +nullable + IncludeClusterResources *bool `json:"includeClusterResources,omitempty"` + + // Hooks represent custom behaviors that should be executed during or post restore. + // +optional + Hooks RestoreHooks `json:"hooks,omitempty"` + + // ExistingResourcePolicy specifies the restore behavior for the Kubernetes resource to be restored + // +optional + // +nullable + ExistingResourcePolicy PolicyType `json:"existingResourcePolicy,omitempty"` + + // ItemOperationTimeout specifies the time used to wait for RestoreItemAction operations + // The default value is 4 hour. + // +optional + ItemOperationTimeout metav1.Duration `json:"itemOperationTimeout,omitempty"` + + // ResourceModifier specifies the reference to JSON resource patches that should be applied to resources before restoration. + // +optional + // +nullable + ResourceModifier *v1.TypedLocalObjectReference `json:"resourceModifier,omitempty"` + + // UploaderConfig specifies the configuration for the restore. 
+ // +optional + // +nullable + UploaderConfig *UploaderConfigForRestore `json:"uploaderConfig,omitempty"` +} + +// UploaderConfigForRestore defines the configuration for the restore. +type UploaderConfigForRestore struct { + // WriteSparseFiles is a flag to indicate whether write files sparsely or not. + // +optional + // +nullable + WriteSparseFiles *bool `json:"writeSparseFiles,omitempty"` + // ParallelFilesDownload is the concurrency number setting for restore. + // +optional + ParallelFilesDownload int `json:"parallelFilesDownload,omitempty"` +} + +// RestoreHooks contains custom behaviors that should be executed during or post restore. +type RestoreHooks struct { + Resources []RestoreResourceHookSpec `json:"resources,omitempty"` +} + +type RestoreStatusSpec struct { + // IncludedResources specifies the resources to which will restore the status. + // If empty, it applies to all resources. + // +optional + // +nullable + IncludedResources []string `json:"includedResources,omitempty"` + + // ExcludedResources specifies the resources to which will not restore the status. + // +optional + // +nullable + ExcludedResources []string `json:"excludedResources,omitempty"` +} + +// RestoreResourceHookSpec defines one or more RestoreResrouceHooks that should be executed based on +// the rules defined for namespaces, resources, and label selector. +type RestoreResourceHookSpec struct { + // Name is the name of this hook. + Name string `json:"name"` + + // IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies + // to all namespaces. + // +optional + // +nullable + IncludedNamespaces []string `json:"includedNamespaces,omitempty"` + + // ExcludedNamespaces specifies the namespaces to which this hook spec does not apply. + // +optional + // +nullable + ExcludedNamespaces []string `json:"excludedNamespaces,omitempty"` + + // IncludedResources specifies the resources to which this hook spec applies. 
If empty, it applies + // to all resources. + // +optional + // +nullable + IncludedResources []string `json:"includedResources,omitempty"` + + // ExcludedResources specifies the resources to which this hook spec does not apply. + // +optional + // +nullable + ExcludedResources []string `json:"excludedResources,omitempty"` + + // LabelSelector, if specified, filters the resources to which this hook spec applies. + // +optional + // +nullable + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + + // PostHooks is a list of RestoreResourceHooks to execute during and after restoring a resource. + // +optional + PostHooks []RestoreResourceHook `json:"postHooks,omitempty"` +} + +// RestoreResourceHook defines a restore hook for a resource. +type RestoreResourceHook struct { + // Exec defines an exec restore hook. + Exec *ExecRestoreHook `json:"exec,omitempty"` + + // Init defines an init restore hook. + Init *InitRestoreHook `json:"init,omitempty"` +} + +// ExecRestoreHook is a hook that uses pod exec API to execute a command inside a container in a pod +type ExecRestoreHook struct { + // Container is the container in the pod where the command should be executed. If not specified, + // the pod's first container is used. + // +optional + Container string `json:"container,omitempty"` + + // Command is the command and arguments to execute from within a container after a pod has been restored. + // +kubebuilder:validation:MinItems=1 + Command []string `json:"command"` + + // OnError specifies how Velero should behave if it encounters an error executing this hook. + // +optional + OnError HookErrorMode `json:"onError,omitempty"` + + // ExecTimeout defines the maximum amount of time Velero should wait for the hook to complete before + // considering the execution a failure. 
+ // +optional + ExecTimeout metav1.Duration `json:"execTimeout,omitempty"` + + // WaitTimeout defines the maximum amount of time Velero should wait for the container to be Ready + // before attempting to run the command. + // +optional + WaitTimeout metav1.Duration `json:"waitTimeout,omitempty"` + + // WaitForReady ensures command will be launched when container is Ready instead of Running. + // +optional + // +nullable + WaitForReady *bool `json:"waitForReady,omitempty"` +} + +// InitRestoreHook is a hook that adds an init container to a PodSpec to run commands before the +// workload pod is able to start. +type InitRestoreHook struct { + // +kubebuilder:pruning:PreserveUnknownFields + // InitContainers is list of init containers to be added to a pod during its restore. + // +optional + InitContainers []runtime.RawExtension `json:"initContainers"` + + // Timeout defines the maximum amount of time Velero should wait for the initContainers to complete. + // +optional + Timeout metav1.Duration `json:"timeout,omitempty"` +} + +// RestorePhase is a string representation of the lifecycle phase +// of a Velero restore +// +kubebuilder:validation:Enum=New;FailedValidation;InProgress;WaitingForPluginOperations;WaitingForPluginOperationsPartiallyFailed;Completed;PartiallyFailed;Failed;Finalizing;FinalizingPartiallyFailed +type RestorePhase string + +const ( + // RestorePhaseNew means the restore has been created but not + // yet processed by the RestoreController + RestorePhaseNew RestorePhase = "New" + + // RestorePhaseFailedValidation means the restore has failed + // the controller's validations and therefore will not run. + RestorePhaseFailedValidation RestorePhase = "FailedValidation" + + // RestorePhaseInProgress means the restore is currently executing. 
+ RestorePhaseInProgress RestorePhase = "InProgress" + + // RestorePhaseWaitingForPluginOperations means the restore of + // Kubernetes resources and other async plugin operations was + // successful and plugin operations are still ongoing. The + // restore is not complete yet. + RestorePhaseWaitingForPluginOperations RestorePhase = "WaitingForPluginOperations" + + // RestorePhaseWaitingForPluginOperationsPartiallyFailed means + // the restore of Kubernetes resources and other async plugin + // operations partially failed (final phase will be + // PartiallyFailed) and other plugin operations are still + // ongoing. The restore is not complete yet. + RestorePhaseWaitingForPluginOperationsPartiallyFailed RestorePhase = "WaitingForPluginOperationsPartiallyFailed" + + // RestorePhaseFinalizing means the restore of + // Kubernetes resources and other async plugin operations were successful and + // other plugin operations are now complete, but the restore is awaiting + // the completion of wrap-up tasks before the restore process enters terminal phase. + RestorePhaseFinalizing RestorePhase = "Finalizing" + + // RestorePhaseFinalizingPartiallyFailed means the restore of + // Kubernetes resources and other async plugin operations were successful and + // other plugin operations are now complete, but one or more errors + // occurred during restore or async operation processing. The restore is awaiting + // the completion of wrap-up tasks before the restore process enters terminal phase. + RestorePhaseFinalizingPartiallyFailed RestorePhase = "FinalizingPartiallyFailed" + + // RestorePhaseCompleted means the restore has run successfully + // without errors. + RestorePhaseCompleted RestorePhase = "Completed" + + // RestorePhasePartiallyFailed means the restore has run to completion + // but encountered 1+ errors restoring individual items. + RestorePhasePartiallyFailed RestorePhase = "PartiallyFailed" + + // RestorePhaseFailed means the restore was unable to execute. 
+ // The failing error is recorded in status.FailureReason. + RestorePhaseFailed RestorePhase = "Failed" + + // PolicyTypeNone means velero will not overwrite the resource + // in cluster with the one in backup whether changed/unchanged. + PolicyTypeNone PolicyType = "none" + + // PolicyTypeUpdate means velero will try to attempt a patch on + // the changed resources. + PolicyTypeUpdate PolicyType = "update" +) + +// RestoreStatus captures the current status of a Velero restore +type RestoreStatus struct { + // Phase is the current state of the Restore + // +optional + Phase RestorePhase `json:"phase,omitempty"` + + // ValidationErrors is a slice of all validation errors (if + // applicable) + // +optional + // +nullable + ValidationErrors []string `json:"validationErrors,omitempty"` + + // Warnings is a count of all warning messages that were generated during + // execution of the restore. The actual warnings are stored in object storage. + // +optional + Warnings int `json:"warnings,omitempty"` + + // Errors is a count of all error messages that were generated during + // execution of the restore. The actual errors are stored in object storage. + // +optional + Errors int `json:"errors,omitempty"` + + // FailureReason is an error that caused the entire restore to fail. + // +optional + FailureReason string `json:"failureReason,omitempty"` + + // StartTimestamp records the time the restore operation was started. + // The server's time is used for StartTimestamps + // +optional + // +nullable + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + + // CompletionTimestamp records the time the restore operation was completed. + // Completion time is recorded even on failed restore. + // The server's time is used for StartTimestamps + // +optional + // +nullable + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` + + // Progress contains information about the restore's execution progress. 
Note + // that this information is best-effort only -- if Velero fails to update it + // during a restore for any reason, it may be inaccurate/stale. + // +optional + // +nullable + Progress *RestoreProgress `json:"progress,omitempty"` + + // RestoreItemOperationsAttempted is the total number of attempted + // async RestoreItemAction operations for this restore. + // +optional + RestoreItemOperationsAttempted int `json:"restoreItemOperationsAttempted,omitempty"` + + // RestoreItemOperationsCompleted is the total number of successfully completed + // async RestoreItemAction operations for this restore. + // +optional + RestoreItemOperationsCompleted int `json:"restoreItemOperationsCompleted,omitempty"` + + // RestoreItemOperationsFailed is the total number of async + // RestoreItemAction operations for this restore which ended with an error. + // +optional + RestoreItemOperationsFailed int `json:"restoreItemOperationsFailed,omitempty"` + + // HookStatus contains information about the status of the hooks. + // +optional + // +nullable + HookStatus *HookStatus `json:"hookStatus,omitempty"` +} + +// RestoreProgress stores information about the restore's execution progress +type RestoreProgress struct { + // TotalItems is the total number of items to be restored. 
This number may change + // throughout the execution of the restore due to plugins that return additional related + // items to restore + // +optional + TotalItems int `json:"totalItems,omitempty"` + // ItemsRestored is the number of items that have actually been restored so far + // +optional + ItemsRestored int `json:"itemsRestored,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion +// +kubebuilder:rbac:groups=velero.io,resources=restores,verbs=create;delete;get;list;patch;update;watch +// +kubebuilder:rbac:groups=velero.io,resources=restores/status,verbs=get;update;patch + +// Restore is a Velero resource that represents the application of +// resources from a Velero backup to a target Kubernetes cluster. +type Restore struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec RestoreSpec `json:"spec,omitempty"` + + // +optional + Status RestoreStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RestoreList is a list of Restores. +type RestoreList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata"` + + Items []Restore `json:"items"` +} + +// PolicyType helps specify the ExistingResourcePolicy +type PolicyType string diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/schedule_types.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/schedule_types.go new file mode 100644 index 000000000..6a5f885ab --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/schedule_types.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ScheduleSpec defines the specification for a Velero schedule +type ScheduleSpec struct { + // Template is the definition of the Backup to be run + // on the provided schedule + Template BackupSpec `json:"template"` + + // Schedule is a Cron expression defining when to run + // the Backup. + Schedule string `json:"schedule"` + + // UseOwnerReferencesBackup specifies whether to use + // OwnerReferences on backups created by this Schedule. + // +optional + // +nullable + UseOwnerReferencesInBackup *bool `json:"useOwnerReferencesInBackup,omitempty"` + + // Paused specifies whether the schedule is paused or not + // +optional + Paused bool `json:"paused,omitempty"` + + // SkipImmediately specifies whether to skip backup if schedule is due immediately from `schedule.status.lastBackup` timestamp when schedule is unpaused or if schedule is new. + // If true, backup will be skipped immediately when schedule is unpaused if it is due based on .Status.LastBackupTimestamp or schedule is new, and will run at next schedule time. + // If false, backup will not be skipped immediately when schedule is unpaused, but will run at next schedule time. + // If empty, will follow server configuration (default: false). 
+ // +optional + SkipImmediately *bool `json:"skipImmediately,omitempty"` +} + +// SchedulePhase is a string representation of the lifecycle phase +// of a Velero schedule +// +kubebuilder:validation:Enum=New;Enabled;FailedValidation +type SchedulePhase string + +const ( + // SchedulePhaseNew means the schedule has been created but not + // yet processed by the ScheduleController + SchedulePhaseNew SchedulePhase = "New" + + // SchedulePhaseEnabled means the schedule has been validated and + // will now be triggering backups according to the schedule spec. + SchedulePhaseEnabled SchedulePhase = "Enabled" + + // SchedulePhaseFailedValidation means the schedule has failed + // the controller's validations and therefore will not trigger backups. + SchedulePhaseFailedValidation SchedulePhase = "FailedValidation" +) + +// ScheduleStatus captures the current state of a Velero schedule +type ScheduleStatus struct { + // Phase is the current phase of the Schedule + // +optional + Phase SchedulePhase `json:"phase,omitempty"` + + // LastBackup is the last time a Backup was run for this + // Schedule schedule + // +optional + // +nullable + LastBackup *metav1.Time `json:"lastBackup,omitempty"` + + // LastSkipped is the last time a Schedule was skipped + // +optional + // +nullable + LastSkipped *metav1.Time `json:"lastSkipped,omitempty"` + + // ValidationErrors is a slice of all validation errors (if + // applicable) + // +optional + ValidationErrors []string `json:"validationErrors,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, the genclient and k8s:deepcopy markers will no longer be needed and should be removed. 
+// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:generate=true +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Status of the schedule" +// +kubebuilder:printcolumn:name="Schedule",type="string",JSONPath=".spec.schedule",description="A Cron expression defining when to run the Backup" +// +kubebuilder:printcolumn:name="LastBackup",type="date",JSONPath=".status.lastBackup",description="The last time a Backup was run for this schedule" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".spec.paused" + +// Schedule is a Velero resource that represents a pre-scheduled or +// periodic Backup that should be run. +type Schedule struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata"` + + // +optional + Spec ScheduleSpec `json:"spec,omitempty"` + + // +optional + Status ScheduleStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:generate=true +// +kubebuilder:object:root=true + +// ScheduleList is a list of Schedules. 
+type ScheduleList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []Schedule `json:"items"` +} + +// TimestampedName returns the default backup name format based on the schedule +func (s *Schedule) TimestampedName(timestamp time.Time) string { + return fmt.Sprintf("%s-%s", s.Name, timestamp.Format("20060102150405")) +} diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/server_status_request_types.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/server_status_request_types.go new file mode 100644 index 000000000..98e15a0b5 --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/server_status_request_types.go @@ -0,0 +1,105 @@ +/* +Copyright 2020 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the genclient and k8s:deepcopy markers will no longer be needed and should be removed. +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=ssr +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion + +// ServerStatusRequest is a request to access current status information about +// the Velero server. 
+type ServerStatusRequest struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec ServerStatusRequestSpec `json:"spec,omitempty"` + + // +optional + Status ServerStatusRequestStatus `json:"status,omitempty"` +} + +// ServerStatusRequestSpec is the specification for a ServerStatusRequest. +type ServerStatusRequestSpec struct { +} + +// ServerStatusRequestPhase represents the lifecycle phase of a ServerStatusRequest. +// +kubebuilder:validation:Enum=New;Processed +type ServerStatusRequestPhase string + +const ( + // ServerStatusRequestPhaseNew means the ServerStatusRequest has not been processed yet. + ServerStatusRequestPhaseNew ServerStatusRequestPhase = "New" + // ServerStatusRequestPhaseProcessed means the ServerStatusRequest has been processed. + ServerStatusRequestPhaseProcessed ServerStatusRequestPhase = "Processed" +) + +// PluginInfo contains attributes of a Velero plugin +type PluginInfo struct { + Name string `json:"name"` + Kind string `json:"kind"` +} + +// ServerStatusRequestStatus is the current status of a ServerStatusRequest. +type ServerStatusRequestStatus struct { + // Phase is the current lifecycle phase of the ServerStatusRequest. + // +optional + Phase ServerStatusRequestPhase `json:"phase,omitempty"` + + // ProcessedTimestamp is when the ServerStatusRequest was processed + // by the ServerStatusRequestController. + // +optional + // +nullable + ProcessedTimestamp *metav1.Time `json:"processedTimestamp,omitempty"` + + // ServerVersion is the Velero server version. + // +optional + ServerVersion string `json:"serverVersion,omitempty"` + + // Plugins list information about the plugins running on the Velero server + // +optional + // +nullable + Plugins []PluginInfo `json:"plugins,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the k8s:deepcopy marker will no longer be needed and should be removed. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=serverstatusrequests,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=serverstatusrequests/status,verbs=get;update;patch + +// ServerStatusRequestList is a list of ServerStatusRequests. +type ServerStatusRequestList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []ServerStatusRequest `json:"items"` +} diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/volume_snapshot_location_type.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/volume_snapshot_location_type.go new file mode 100644 index 000000000..836701b77 --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/volume_snapshot_location_type.go @@ -0,0 +1,89 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=vsl +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion + +// VolumeSnapshotLocation is a location where Velero stores volume snapshots. 
+type VolumeSnapshotLocation struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec VolumeSnapshotLocationSpec `json:"spec,omitempty"` + + // +optional + Status VolumeSnapshotLocationStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=volumesnapshotlocations,verbs=get;list;watch;create;update;patch;delete + +// VolumeSnapshotLocationList is a list of VolumeSnapshotLocations. +type VolumeSnapshotLocationList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []VolumeSnapshotLocation `json:"items"` +} + +// VolumeSnapshotLocationSpec defines the specification for a Velero VolumeSnapshotLocation. +type VolumeSnapshotLocationSpec struct { + // Provider is the provider of the volume storage. + Provider string `json:"provider"` + + // Config is for provider-specific configuration fields. + // +optional + Config map[string]string `json:"config,omitempty"` + + // Credential contains the credential information intended to be used with this location + // +optional + Credential *corev1api.SecretKeySelector `json:"credential,omitempty"` +} + +// VolumeSnapshotLocationPhase is the lifecycle phase of a Velero VolumeSnapshotLocation. +// +kubebuilder:validation:Enum=Available;Unavailable +type VolumeSnapshotLocationPhase string + +const ( + // VolumeSnapshotLocationPhaseAvailable means the location is available to read and write from. + VolumeSnapshotLocationPhaseAvailable VolumeSnapshotLocationPhase = "Available" + + // VolumeSnapshotLocationPhaseUnavailable means the location is unavailable to read and write from. + VolumeSnapshotLocationPhaseUnavailable VolumeSnapshotLocationPhase = "Unavailable" +) + +// VolumeSnapshotLocationStatus describes the current status of a Velero VolumeSnapshotLocation. 
+type VolumeSnapshotLocationStatus struct { + // +optional + Phase VolumeSnapshotLocationPhase `json:"phase,omitempty"` +} diff --git a/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/zz_generated.deepcopy.go b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..522e15105 --- /dev/null +++ b/common-lib/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/zz_generated.deepcopy.go @@ -0,0 +1,1813 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Backup) DeepCopyInto(out *Backup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backup. +func (in *Backup) DeepCopy() *Backup { + if in == nil { + return nil + } + out := new(Backup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Backup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupHooks) DeepCopyInto(out *BackupHooks) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]BackupResourceHookSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupHooks. +func (in *BackupHooks) DeepCopy() *BackupHooks { + if in == nil { + return nil + } + out := new(BackupHooks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupList) DeepCopyInto(out *BackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Backup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupList. +func (in *BackupList) DeepCopy() *BackupList { + if in == nil { + return nil + } + out := new(BackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupProgress) DeepCopyInto(out *BackupProgress) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupProgress. +func (in *BackupProgress) DeepCopy() *BackupProgress { + if in == nil { + return nil + } + out := new(BackupProgress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupRepository) DeepCopyInto(out *BackupRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepository. +func (in *BackupRepository) DeepCopy() *BackupRepository { + if in == nil { + return nil + } + out := new(BackupRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupRepositoryList) DeepCopyInto(out *BackupRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositoryList. +func (in *BackupRepositoryList) DeepCopy() *BackupRepositoryList { + if in == nil { + return nil + } + out := new(BackupRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupRepositorySpec) DeepCopyInto(out *BackupRepositorySpec) { + *out = *in + out.MaintenanceFrequency = in.MaintenanceFrequency +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositorySpec. +func (in *BackupRepositorySpec) DeepCopy() *BackupRepositorySpec { + if in == nil { + return nil + } + out := new(BackupRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupRepositoryStatus) DeepCopyInto(out *BackupRepositoryStatus) { + *out = *in + if in.LastMaintenanceTime != nil { + in, out := &in.LastMaintenanceTime, &out.LastMaintenanceTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositoryStatus. +func (in *BackupRepositoryStatus) DeepCopy() *BackupRepositoryStatus { + if in == nil { + return nil + } + out := new(BackupRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupResourceHook) DeepCopyInto(out *BackupResourceHook) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecHook) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupResourceHook. +func (in *BackupResourceHook) DeepCopy() *BackupResourceHook { + if in == nil { + return nil + } + out := new(BackupResourceHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupResourceHookSpec) DeepCopyInto(out *BackupResourceHookSpec) { + *out = *in + if in.IncludedNamespaces != nil { + in, out := &in.IncludedNamespaces, &out.IncludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaces != nil { + in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedResources != nil { + in, out := &in.IncludedResources, &out.IncludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedResources != nil { + in, out := &in.ExcludedResources, &out.ExcludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.PreHooks != nil { + in, out := &in.PreHooks, &out.PreHooks + *out = make([]BackupResourceHook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PostHooks != nil { + in, out := &in.PostHooks, &out.PostHooks + *out = make([]BackupResourceHook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupResourceHookSpec. +func (in *BackupResourceHookSpec) DeepCopy() *BackupResourceHookSpec { + if in == nil { + return nil + } + out := new(BackupResourceHookSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { + *out = *in + in.Metadata.DeepCopyInto(&out.Metadata) + if in.IncludedNamespaces != nil { + in, out := &in.IncludedNamespaces, &out.IncludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaces != nil { + in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedResources != nil { + in, out := &in.IncludedResources, &out.IncludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedResources != nil { + in, out := &in.ExcludedResources, &out.ExcludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedClusterScopedResources != nil { + in, out := &in.IncludedClusterScopedResources, &out.IncludedClusterScopedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedClusterScopedResources != nil { + in, out := &in.ExcludedClusterScopedResources, &out.ExcludedClusterScopedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedNamespaceScopedResources != nil { + in, out := &in.IncludedNamespaceScopedResources, &out.IncludedNamespaceScopedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaceScopedResources != nil { + in, out := &in.ExcludedNamespaceScopedResources, &out.ExcludedNamespaceScopedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.OrLabelSelectors != nil { + in, out := &in.OrLabelSelectors, &out.OrLabelSelectors + *out = make([]*metav1.LabelSelector, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + } + if in.SnapshotVolumes != nil { + in, out := &in.SnapshotVolumes, 
&out.SnapshotVolumes + *out = new(bool) + **out = **in + } + out.TTL = in.TTL + if in.IncludeClusterResources != nil { + in, out := &in.IncludeClusterResources, &out.IncludeClusterResources + *out = new(bool) + **out = **in + } + in.Hooks.DeepCopyInto(&out.Hooks) + if in.VolumeSnapshotLocations != nil { + in, out := &in.VolumeSnapshotLocations, &out.VolumeSnapshotLocations + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultVolumesToRestic != nil { + in, out := &in.DefaultVolumesToRestic, &out.DefaultVolumesToRestic + *out = new(bool) + **out = **in + } + if in.DefaultVolumesToFsBackup != nil { + in, out := &in.DefaultVolumesToFsBackup, &out.DefaultVolumesToFsBackup + *out = new(bool) + **out = **in + } + if in.OrderedResources != nil { + in, out := &in.OrderedResources, &out.OrderedResources + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.CSISnapshotTimeout = in.CSISnapshotTimeout + out.ItemOperationTimeout = in.ItemOperationTimeout + if in.ResourcePolicy != nil { + in, out := &in.ResourcePolicy, &out.ResourcePolicy + *out = new(corev1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + if in.SnapshotMoveData != nil { + in, out := &in.SnapshotMoveData, &out.SnapshotMoveData + *out = new(bool) + **out = **in + } + if in.UploaderConfig != nil { + in, out := &in.UploaderConfig, &out.UploaderConfig + *out = new(UploaderConfigForBackup) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec. +func (in *BackupSpec) DeepCopy() *BackupSpec { + if in == nil { + return nil + } + out := new(BackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { + *out = *in + if in.Expiration != nil { + in, out := &in.Expiration, &out.Expiration + *out = (*in).DeepCopy() + } + if in.ValidationErrors != nil { + in, out := &in.ValidationErrors, &out.ValidationErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + if in.Progress != nil { + in, out := &in.Progress, &out.Progress + *out = new(BackupProgress) + **out = **in + } + if in.HookStatus != nil { + in, out := &in.HookStatus, &out.HookStatus + *out = new(HookStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus. +func (in *BackupStatus) DeepCopy() *BackupStatus { + if in == nil { + return nil + } + out := new(BackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStorageLocation) DeepCopyInto(out *BackupStorageLocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocation. +func (in *BackupStorageLocation) DeepCopy() *BackupStorageLocation { + if in == nil { + return nil + } + out := new(BackupStorageLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BackupStorageLocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStorageLocationList) DeepCopyInto(out *BackupStorageLocationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupStorageLocation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationList. +func (in *BackupStorageLocationList) DeepCopy() *BackupStorageLocationList { + if in == nil { + return nil + } + out := new(BackupStorageLocationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupStorageLocationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupStorageLocationSpec) DeepCopyInto(out *BackupStorageLocationSpec) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Credential != nil { + in, out := &in.Credential, &out.Credential + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + in.StorageType.DeepCopyInto(&out.StorageType) + if in.BackupSyncPeriod != nil { + in, out := &in.BackupSyncPeriod, &out.BackupSyncPeriod + *out = new(metav1.Duration) + **out = **in + } + if in.ValidationFrequency != nil { + in, out := &in.ValidationFrequency, &out.ValidationFrequency + *out = new(metav1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationSpec. +func (in *BackupStorageLocationSpec) DeepCopy() *BackupStorageLocationSpec { + if in == nil { + return nil + } + out := new(BackupStorageLocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStorageLocationStatus) DeepCopyInto(out *BackupStorageLocationStatus) { + *out = *in + if in.LastSyncedTime != nil { + in, out := &in.LastSyncedTime, &out.LastSyncedTime + *out = (*in).DeepCopy() + } + if in.LastValidationTime != nil { + in, out := &in.LastValidationTime, &out.LastValidationTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationStatus. +func (in *BackupStorageLocationStatus) DeepCopy() *BackupStorageLocationStatus { + if in == nil { + return nil + } + out := new(BackupStorageLocationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeleteBackupRequest) DeepCopyInto(out *DeleteBackupRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequest. +func (in *DeleteBackupRequest) DeepCopy() *DeleteBackupRequest { + if in == nil { + return nil + } + out := new(DeleteBackupRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeleteBackupRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteBackupRequestList) DeepCopyInto(out *DeleteBackupRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeleteBackupRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequestList. +func (in *DeleteBackupRequestList) DeepCopy() *DeleteBackupRequestList { + if in == nil { + return nil + } + out := new(DeleteBackupRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeleteBackupRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeleteBackupRequestSpec) DeepCopyInto(out *DeleteBackupRequestSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequestSpec. +func (in *DeleteBackupRequestSpec) DeepCopy() *DeleteBackupRequestSpec { + if in == nil { + return nil + } + out := new(DeleteBackupRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteBackupRequestStatus) DeepCopyInto(out *DeleteBackupRequestStatus) { + *out = *in + if in.Errors != nil { + in, out := &in.Errors, &out.Errors + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequestStatus. +func (in *DeleteBackupRequestStatus) DeepCopy() *DeleteBackupRequestStatus { + if in == nil { + return nil + } + out := new(DeleteBackupRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownloadRequest) DeepCopyInto(out *DownloadRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequest. +func (in *DownloadRequest) DeepCopy() *DownloadRequest { + if in == nil { + return nil + } + out := new(DownloadRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DownloadRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DownloadRequestList) DeepCopyInto(out *DownloadRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownloadRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestList. +func (in *DownloadRequestList) DeepCopy() *DownloadRequestList { + if in == nil { + return nil + } + out := new(DownloadRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DownloadRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownloadRequestSpec) DeepCopyInto(out *DownloadRequestSpec) { + *out = *in + out.Target = in.Target +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestSpec. +func (in *DownloadRequestSpec) DeepCopy() *DownloadRequestSpec { + if in == nil { + return nil + } + out := new(DownloadRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownloadRequestStatus) DeepCopyInto(out *DownloadRequestStatus) { + *out = *in + if in.Expiration != nil { + in, out := &in.Expiration, &out.Expiration + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestStatus. 
+func (in *DownloadRequestStatus) DeepCopy() *DownloadRequestStatus { + if in == nil { + return nil + } + out := new(DownloadRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownloadTarget) DeepCopyInto(out *DownloadTarget) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadTarget. +func (in *DownloadTarget) DeepCopy() *DownloadTarget { + if in == nil { + return nil + } + out := new(DownloadTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecHook) DeepCopyInto(out *ExecHook) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Timeout = in.Timeout +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecHook. +func (in *ExecHook) DeepCopy() *ExecHook { + if in == nil { + return nil + } + out := new(ExecHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecRestoreHook) DeepCopyInto(out *ExecRestoreHook) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.ExecTimeout = in.ExecTimeout + out.WaitTimeout = in.WaitTimeout + if in.WaitForReady != nil { + in, out := &in.WaitForReady, &out.WaitForReady + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecRestoreHook. 
+func (in *ExecRestoreHook) DeepCopy() *ExecRestoreHook { + if in == nil { + return nil + } + out := new(ExecRestoreHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HookStatus) DeepCopyInto(out *HookStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HookStatus. +func (in *HookStatus) DeepCopy() *HookStatus { + if in == nil { + return nil + } + out := new(HookStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitRestoreHook) DeepCopyInto(out *InitRestoreHook) { + *out = *in + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.Timeout = in.Timeout +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitRestoreHook. +func (in *InitRestoreHook) DeepCopy() *InitRestoreHook { + if in == nil { + return nil + } + out := new(InitRestoreHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectStorageLocation) DeepCopyInto(out *ObjectStorageLocation) { + *out = *in + if in.CACert != nil { + in, out := &in.CACert, &out.CACert + *out = make([]byte, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageLocation. +func (in *ObjectStorageLocation) DeepCopy() *ObjectStorageLocation { + if in == nil { + return nil + } + out := new(ObjectStorageLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginInfo) DeepCopyInto(out *PluginInfo) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginInfo. +func (in *PluginInfo) DeepCopy() *PluginInfo { + if in == nil { + return nil + } + out := new(PluginInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeBackup) DeepCopyInto(out *PodVolumeBackup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackup. +func (in *PodVolumeBackup) DeepCopy() *PodVolumeBackup { + if in == nil { + return nil + } + out := new(PodVolumeBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodVolumeBackup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodVolumeBackupList) DeepCopyInto(out *PodVolumeBackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodVolumeBackup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupList. +func (in *PodVolumeBackupList) DeepCopy() *PodVolumeBackupList { + if in == nil { + return nil + } + out := new(PodVolumeBackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodVolumeBackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeBackupSpec) DeepCopyInto(out *PodVolumeBackupSpec) { + *out = *in + out.Pod = in.Pod + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.UploaderSettings != nil { + in, out := &in.UploaderSettings, &out.UploaderSettings + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupSpec. +func (in *PodVolumeBackupSpec) DeepCopy() *PodVolumeBackupSpec { + if in == nil { + return nil + } + out := new(PodVolumeBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodVolumeBackupStatus) DeepCopyInto(out *PodVolumeBackupStatus) { + *out = *in + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + out.Progress = in.Progress +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupStatus. +func (in *PodVolumeBackupStatus) DeepCopy() *PodVolumeBackupStatus { + if in == nil { + return nil + } + out := new(PodVolumeBackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeRestore) DeepCopyInto(out *PodVolumeRestore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestore. +func (in *PodVolumeRestore) DeepCopy() *PodVolumeRestore { + if in == nil { + return nil + } + out := new(PodVolumeRestore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodVolumeRestore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodVolumeRestoreList) DeepCopyInto(out *PodVolumeRestoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodVolumeRestore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreList. +func (in *PodVolumeRestoreList) DeepCopy() *PodVolumeRestoreList { + if in == nil { + return nil + } + out := new(PodVolumeRestoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodVolumeRestoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeRestoreSpec) DeepCopyInto(out *PodVolumeRestoreSpec) { + *out = *in + out.Pod = in.Pod + if in.UploaderSettings != nil { + in, out := &in.UploaderSettings, &out.UploaderSettings + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreSpec. +func (in *PodVolumeRestoreSpec) DeepCopy() *PodVolumeRestoreSpec { + if in == nil { + return nil + } + out := new(PodVolumeRestoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodVolumeRestoreStatus) DeepCopyInto(out *PodVolumeRestoreStatus) { + *out = *in + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + out.Progress = in.Progress +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreStatus. +func (in *PodVolumeRestoreStatus) DeepCopy() *PodVolumeRestoreStatus { + if in == nil { + return nil + } + out := new(PodVolumeRestoreStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Restore) DeepCopyInto(out *Restore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Restore. +func (in *Restore) DeepCopy() *Restore { + if in == nil { + return nil + } + out := new(Restore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Restore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreHooks) DeepCopyInto(out *RestoreHooks) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]RestoreResourceHookSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreHooks. 
+func (in *RestoreHooks) DeepCopy() *RestoreHooks { + if in == nil { + return nil + } + out := new(RestoreHooks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreList) DeepCopyInto(out *RestoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Restore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreList. +func (in *RestoreList) DeepCopy() *RestoreList { + if in == nil { + return nil + } + out := new(RestoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RestoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreProgress) DeepCopyInto(out *RestoreProgress) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreProgress. +func (in *RestoreProgress) DeepCopy() *RestoreProgress { + if in == nil { + return nil + } + out := new(RestoreProgress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestoreResourceHook) DeepCopyInto(out *RestoreResourceHook) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecRestoreHook) + (*in).DeepCopyInto(*out) + } + if in.Init != nil { + in, out := &in.Init, &out.Init + *out = new(InitRestoreHook) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreResourceHook. +func (in *RestoreResourceHook) DeepCopy() *RestoreResourceHook { + if in == nil { + return nil + } + out := new(RestoreResourceHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreResourceHookSpec) DeepCopyInto(out *RestoreResourceHookSpec) { + *out = *in + if in.IncludedNamespaces != nil { + in, out := &in.IncludedNamespaces, &out.IncludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaces != nil { + in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedResources != nil { + in, out := &in.IncludedResources, &out.IncludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedResources != nil { + in, out := &in.ExcludedResources, &out.ExcludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.PostHooks != nil { + in, out := &in.PostHooks, &out.PostHooks + *out = make([]RestoreResourceHook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreResourceHookSpec. 
+func (in *RestoreResourceHookSpec) DeepCopy() *RestoreResourceHookSpec { + if in == nil { + return nil + } + out := new(RestoreResourceHookSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreSpec) DeepCopyInto(out *RestoreSpec) { + *out = *in + if in.IncludedNamespaces != nil { + in, out := &in.IncludedNamespaces, &out.IncludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaces != nil { + in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedResources != nil { + in, out := &in.IncludedResources, &out.IncludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedResources != nil { + in, out := &in.ExcludedResources, &out.ExcludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NamespaceMapping != nil { + in, out := &in.NamespaceMapping, &out.NamespaceMapping + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.OrLabelSelectors != nil { + in, out := &in.OrLabelSelectors, &out.OrLabelSelectors + *out = make([]*metav1.LabelSelector, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + } + if in.RestorePVs != nil { + in, out := &in.RestorePVs, &out.RestorePVs + *out = new(bool) + **out = **in + } + if in.RestoreStatus != nil { + in, out := &in.RestoreStatus, &out.RestoreStatus + *out = new(RestoreStatusSpec) + (*in).DeepCopyInto(*out) + } + if in.PreserveNodePorts != nil { + in, out := &in.PreserveNodePorts, &out.PreserveNodePorts + *out = new(bool) + **out = **in + 
} + if in.IncludeClusterResources != nil { + in, out := &in.IncludeClusterResources, &out.IncludeClusterResources + *out = new(bool) + **out = **in + } + in.Hooks.DeepCopyInto(&out.Hooks) + out.ItemOperationTimeout = in.ItemOperationTimeout + if in.ResourceModifier != nil { + in, out := &in.ResourceModifier, &out.ResourceModifier + *out = new(corev1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + if in.UploaderConfig != nil { + in, out := &in.UploaderConfig, &out.UploaderConfig + *out = new(UploaderConfigForRestore) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSpec. +func (in *RestoreSpec) DeepCopy() *RestoreSpec { + if in == nil { + return nil + } + out := new(RestoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreStatus) DeepCopyInto(out *RestoreStatus) { + *out = *in + if in.ValidationErrors != nil { + in, out := &in.ValidationErrors, &out.ValidationErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + if in.Progress != nil { + in, out := &in.Progress, &out.Progress + *out = new(RestoreProgress) + **out = **in + } + if in.HookStatus != nil { + in, out := &in.HookStatus, &out.HookStatus + *out = new(HookStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreStatus. +func (in *RestoreStatus) DeepCopy() *RestoreStatus { + if in == nil { + return nil + } + out := new(RestoreStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RestoreStatusSpec) DeepCopyInto(out *RestoreStatusSpec) { + *out = *in + if in.IncludedResources != nil { + in, out := &in.IncludedResources, &out.IncludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedResources != nil { + in, out := &in.ExcludedResources, &out.ExcludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreStatusSpec. +func (in *RestoreStatusSpec) DeepCopy() *RestoreStatusSpec { + if in == nil { + return nil + } + out := new(RestoreStatusSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Schedule) DeepCopyInto(out *Schedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Schedule. +func (in *Schedule) DeepCopy() *Schedule { + if in == nil { + return nil + } + out := new(Schedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Schedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleList) DeepCopyInto(out *ScheduleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Schedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleList. +func (in *ScheduleList) DeepCopy() *ScheduleList { + if in == nil { + return nil + } + out := new(ScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleSpec) DeepCopyInto(out *ScheduleSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.UseOwnerReferencesInBackup != nil { + in, out := &in.UseOwnerReferencesInBackup, &out.UseOwnerReferencesInBackup + *out = new(bool) + **out = **in + } + if in.SkipImmediately != nil { + in, out := &in.SkipImmediately, &out.SkipImmediately + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleSpec. +func (in *ScheduleSpec) DeepCopy() *ScheduleSpec { + if in == nil { + return nil + } + out := new(ScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleStatus) DeepCopyInto(out *ScheduleStatus) { + *out = *in + if in.LastBackup != nil { + in, out := &in.LastBackup, &out.LastBackup + *out = (*in).DeepCopy() + } + if in.LastSkipped != nil { + in, out := &in.LastSkipped, &out.LastSkipped + *out = (*in).DeepCopy() + } + if in.ValidationErrors != nil { + in, out := &in.ValidationErrors, &out.ValidationErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleStatus. +func (in *ScheduleStatus) DeepCopy() *ScheduleStatus { + if in == nil { + return nil + } + out := new(ScheduleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStatusRequest) DeepCopyInto(out *ServerStatusRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequest. +func (in *ServerStatusRequest) DeepCopy() *ServerStatusRequest { + if in == nil { + return nil + } + out := new(ServerStatusRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerStatusRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerStatusRequestList) DeepCopyInto(out *ServerStatusRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServerStatusRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequestList. +func (in *ServerStatusRequestList) DeepCopy() *ServerStatusRequestList { + if in == nil { + return nil + } + out := new(ServerStatusRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerStatusRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStatusRequestSpec) DeepCopyInto(out *ServerStatusRequestSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequestSpec. +func (in *ServerStatusRequestSpec) DeepCopy() *ServerStatusRequestSpec { + if in == nil { + return nil + } + out := new(ServerStatusRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStatusRequestStatus) DeepCopyInto(out *ServerStatusRequestStatus) { + *out = *in + if in.ProcessedTimestamp != nil { + in, out := &in.ProcessedTimestamp, &out.ProcessedTimestamp + *out = (*in).DeepCopy() + } + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]PluginInfo, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequestStatus. 
+func (in *ServerStatusRequestStatus) DeepCopy() *ServerStatusRequestStatus { + if in == nil { + return nil + } + out := new(ServerStatusRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageType) DeepCopyInto(out *StorageType) { + *out = *in + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = new(ObjectStorageLocation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageType. +func (in *StorageType) DeepCopy() *StorageType { + if in == nil { + return nil + } + out := new(StorageType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UploaderConfigForBackup) DeepCopyInto(out *UploaderConfigForBackup) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UploaderConfigForBackup. +func (in *UploaderConfigForBackup) DeepCopy() *UploaderConfigForBackup { + if in == nil { + return nil + } + out := new(UploaderConfigForBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UploaderConfigForRestore) DeepCopyInto(out *UploaderConfigForRestore) { + *out = *in + if in.WriteSparseFiles != nil { + in, out := &in.WriteSparseFiles, &out.WriteSparseFiles + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UploaderConfigForRestore. 
+func (in *UploaderConfigForRestore) DeepCopy() *UploaderConfigForRestore { + if in == nil { + return nil + } + out := new(UploaderConfigForRestore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotLocation) DeepCopyInto(out *VolumeSnapshotLocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocation. +func (in *VolumeSnapshotLocation) DeepCopy() *VolumeSnapshotLocation { + if in == nil { + return nil + } + out := new(VolumeSnapshotLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeSnapshotLocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotLocationList) DeepCopyInto(out *VolumeSnapshotLocationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeSnapshotLocation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocationList. +func (in *VolumeSnapshotLocationList) DeepCopy() *VolumeSnapshotLocationList { + if in == nil { + return nil + } + out := new(VolumeSnapshotLocationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VolumeSnapshotLocationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotLocationSpec) DeepCopyInto(out *VolumeSnapshotLocationSpec) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Credential != nil { + in, out := &in.Credential, &out.Credential + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocationSpec. +func (in *VolumeSnapshotLocationSpec) DeepCopy() *VolumeSnapshotLocationSpec { + if in == nil { + return nil + } + out := new(VolumeSnapshotLocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotLocationStatus) DeepCopyInto(out *VolumeSnapshotLocationStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocationStatus. 
+func (in *VolumeSnapshotLocationStatus) DeepCopy() *VolumeSnapshotLocationStatus { + if in == nil { + return nil + } + out := new(VolumeSnapshotLocationStatus) + in.DeepCopyInto(out) + return out +} diff --git a/common-lib/vendor/modules.txt b/common-lib/vendor/modules.txt index b5a35f1d9..5d992b0f8 100644 --- a/common-lib/vendor/modules.txt +++ b/common-lib/vendor/modules.txt @@ -624,8 +624,6 @@ github.com/nats-io/nkeys # github.com/nats-io/nuid v1.0.1 ## explicit github.com/nats-io/nuid -# github.com/nxadm/tail v1.4.8 -## explicit; go 1.13 # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest @@ -699,6 +697,10 @@ github.com/stretchr/testify/assert github.com/stretchr/testify/assert/yaml github.com/stretchr/testify/mock github.com/stretchr/testify/require +# github.com/vmware-tanzu/velero v1.14.1 +## explicit; go 1.22.6 +github.com/vmware-tanzu/velero/pkg/apis/velero/shared +github.com/vmware-tanzu/velero/pkg/apis/velero/v1 # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 github.com/x448/float16 diff --git a/git-sensor/go.mod b/git-sensor/go.mod index 3c392dba7..2eef0c73e 100644 --- a/git-sensor/go.mod +++ b/git-sensor/go.mod @@ -4,7 +4,7 @@ go 1.24.0 toolchain go1.24.3 -replace github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +replace github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 require ( github.com/caarlos0/env v3.5.0+incompatible diff --git a/git-sensor/go.sum b/git-sensor/go.sum index c32f27b10..8d18ae58e 100644 --- a/git-sensor/go.sum +++ b/git-sensor/go.sum @@ -26,8 +26,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be h1:ZufKGk1RMcJsTDgpCfdWcrca90K3s1+88KlVL/4dEmU= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be/go.mod h1:+CUhxuWB8uMYIoiXwofuLIXPyiNnwmoZlH90KWAE5Ew= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 h1:xDbz5etX5h6foQDgpT9ukTo2I65b6q32Nu9do5nBPk8= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713/go.mod h1:CqHnvltrng2O40hNGVl56qcAhv7tiXK3SKx47LKyE/A= github.com/devtron-labs/protos v0.0.3-0.20250323220609-ecf8a0f7305e h1:U6UdYbW8a7xn5IzFPd8cywjVVPfutGJCudjePAfL/Hs= github.com/devtron-labs/protos v0.0.3-0.20250323220609-ecf8a0f7305e/go.mod h1:1TqULGlTey+VNhAu/ag7NJuUvByJemkqodsc9L5PHJk= github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k= diff --git a/git-sensor/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go b/git-sensor/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go index b7434bb27..cfd7f98e4 100644 --- a/git-sensor/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go +++ b/git-sensor/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go @@ -124,6 +124,21 @@ const ( INFRA_HELM_RELEASE_ACTION_TOPIC string = "INFRA_HELM_RELEASE_ACTION_TOPIC" INFRA_HELM_RELEASE_ACTION_GROUP string = "INFRA_HELM_RELEASE_ACTION_GROUP" INFRA_HELM_RELEASE_ACTION_DURABLE string = "INFRA_HELM_RELEASE_ACTION_DURABLE" + COST_MODULE_INSTALLATION_TOPIC string = "COST_MODULE_INSTALLATION_TOPIC" + COST_MODULE_INSTALLATION_GROUP string = "COST_MODULE_INSTALLATION_GROUP" + COST_MODULE_INSTALLATION_DURABLE string = "COST_MODULE_INSTALLATION_DURABLE" + COST_MODULE_GPU_INSTALLATION_TOPIC string = "COST_MODULE_GPU_INSTALLATION_TOPIC" + COST_MODULE_GPU_INSTALLATION_GROUP 
string = "COST_MODULE_GPU_INSTALLATION_GROUP" + COST_MODULE_GPU_INSTALLATION_DURABLE string = "COST_MODULE_GPU_INSTALLATION_DURABLE" + STORAGE_MODULE_TOPIC string = "STORAGE_MODULE_TOPIC" + STORAGE_MODULE_GROUP string = "STORAGE_MODULE_GROUP" + STORAGE_MODULE_DURABLE string = "STORAGE_MODULE_DURABLE" + STORAGE_VELERO_INSTALL_TOPIC string = "STORAGE_VELERO_INSTALL_TOPIC" + STORAGE_VELERO_INSTALL_GROUP string = "STORAGE_VELERO_INSTALL_GROUP" + STORAGE_VELERO_INSTALL_DURABLE string = "STORAGE_VELERO_INSTALL_DURABLE" + STORAGE_VELERO_POST_INSTALLATION_TOPIC string = "STORAGE_VELERO_POST_INSTALLATION_TOPIC" + STORAGE_VELERO_POST_INSTALLATION_GROUP string = "STORAGE_VELERO_POST_INSTALLATION_GROUP" + STORAGE_VELERO_POST_INSTALLATION_DURABLE string = "STORAGE_VELERO_POST_INSTALLATION_DURABLE" ) type NatsTopic struct { @@ -179,6 +194,11 @@ var natsTopicMapping = map[string]NatsTopic{ INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE}, INFRA_HELM_RELEASE_ACTION_TOPIC: {topicName: INFRA_HELM_RELEASE_ACTION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRA_HELM_RELEASE_ACTION_GROUP, consumerName: INFRA_HELM_RELEASE_ACTION_DURABLE}, + COST_MODULE_INSTALLATION_TOPIC: {topicName: COST_MODULE_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_INSTALLATION_GROUP, consumerName: COST_MODULE_INSTALLATION_DURABLE}, + COST_MODULE_GPU_INSTALLATION_TOPIC: {topicName: COST_MODULE_GPU_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_GPU_INSTALLATION_GROUP, consumerName: 
COST_MODULE_GPU_INSTALLATION_DURABLE}, + STORAGE_MODULE_TOPIC: {topicName: STORAGE_MODULE_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_MODULE_GROUP, consumerName: STORAGE_MODULE_DURABLE}, + STORAGE_VELERO_INSTALL_TOPIC: {topicName: STORAGE_VELERO_INSTALL_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_VELERO_INSTALL_GROUP, consumerName: STORAGE_VELERO_INSTALL_DURABLE}, + STORAGE_VELERO_POST_INSTALLATION_TOPIC: {topicName: STORAGE_VELERO_POST_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_VELERO_POST_INSTALLATION_GROUP, consumerName: STORAGE_VELERO_POST_INSTALLATION_DURABLE}, } var NatsStreamWiseConfigMapping = map[string]NatsStreamConfig{ @@ -221,6 +241,11 @@ var NatsConsumerWiseConfigMapping = map[string]NatsConsumerConfig{ INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE: {}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE: {}, INFRA_HELM_RELEASE_ACTION_DURABLE: {}, + COST_MODULE_INSTALLATION_DURABLE: {}, + COST_MODULE_GPU_INSTALLATION_DURABLE: {}, + STORAGE_MODULE_DURABLE: {}, + STORAGE_VELERO_INSTALL_DURABLE: {}, + STORAGE_VELERO_POST_INSTALLATION_DURABLE: {}, } // getConsumerConfigMap will fetch the consumer wise config from the json string diff --git a/git-sensor/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go b/git-sensor/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go new file mode 100644 index 000000000..372765014 --- /dev/null +++ b/git-sensor/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "fmt" + "strings" + "time" +) + +type TimeRangeRequest struct { + From *time.Time `json:"from" schema:"from"` + To *time.Time `json:"to" schema:"to"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=today yesterday week month quarter lastWeek lastMonth lastQuarter last24Hours last7Days last30Days last90Days"` +} + +func NewTimeRangeRequest(from *time.Time, to *time.Time) *TimeRangeRequest { + return &TimeRangeRequest{ + From: from, + To: to, + } +} + +func NewTimeWindowRequest(timeWindow TimeWindows) *TimeRangeRequest { + return &TimeRangeRequest{ + TimeWindow: &timeWindow, + } +} + +// TimeWindows is a string type that represents different time windows +type TimeWindows string + +func (timeRange TimeWindows) String() string { + return string(timeRange) +} + +// Define constants for different time windows +const ( + Today TimeWindows = "today" + Yesterday TimeWindows = "yesterday" + Week TimeWindows = "week" + Month TimeWindows = "month" + Quarter TimeWindows = "quarter" + LastWeek TimeWindows = "lastWeek" + LastMonth TimeWindows = "lastMonth" + Year TimeWindows = "year" + LastQuarter TimeWindows = "lastQuarter" + Last24Hours TimeWindows = "last24Hours" + Last7Days TimeWindows = "last7Days" + Last30Days TimeWindows = "last30Days" + Last90Days TimeWindows = "last90Days" +) + +func (timeRange *TimeRangeRequest) ParseAndValidateTimeRange() (*TimeRangeRequest, error) { + if timeRange == nil { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("invalid time 
range request. either from/to or timeWindow must be provided") + } + now := time.Now() + // If timeWindow is provided, it takes preference over from/to + if timeRange.TimeWindow != nil { + switch *timeRange.TimeWindow { + case Today: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Yesterday: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()).Add(-24 * time.Hour) + end := start.Add(24 * time.Hour) + return NewTimeRangeRequest(&start, &end), nil + case Week: + // Current week (Monday to Sunday) + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + start := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Month: + start := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Quarter: + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterStart := time.Month((quarter-1)*3 + 1) + start := time.Date(now.Year(), quarterStart, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case LastWeek: + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + thisWeekStart := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + lastWeekStart := thisWeekStart.AddDate(0, 0, -7) + lastWeekEnd := thisWeekStart.Add(-time.Second) + return NewTimeRangeRequest(&lastWeekStart, &lastWeekEnd), nil + case LastMonth: + thisMonthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + lastMonthStart := thisMonthStart.AddDate(0, -1, 0) + lastMonthEnd := thisMonthStart.Add(-time.Second) + return NewTimeRangeRequest(&lastMonthStart, &lastMonthEnd), nil + case LastQuarter: + // Calculate current quarter + currentQuarter := ((int(now.Month()) - 1) / 3) + 1 + + // Calculate previous quarter + var prevQuarter int + var prevYear int + if 
currentQuarter == 1 { + // If current quarter is Q1, previous quarter is Q4 of previous year + prevQuarter = 4 + prevYear = now.Year() - 1 + } else { + // Otherwise, previous quarter is in the same year + prevQuarter = currentQuarter - 1 + prevYear = now.Year() + } + + // Calculate start and end of previous quarter + prevQuarterStartMonth := time.Month((prevQuarter-1)*3 + 1) + prevQuarterStart := time.Date(prevYear, prevQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + + // End of previous quarter is the start of current quarter minus 1 second + currentQuarterStartMonth := time.Month((currentQuarter-1)*3 + 1) + currentQuarterStart := time.Date(now.Year(), currentQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + if currentQuarter == 1 { + // If current quarter is Q1, we need to calculate Q4 end of previous year + currentQuarterStart = time.Date(now.Year(), time.January, 1, 0, 0, 0, 0, now.Location()) + } + prevQuarterEnd := currentQuarterStart.Add(-time.Second) + + return NewTimeRangeRequest(&prevQuarterStart, &prevQuarterEnd), nil + case Year: + start := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Last24Hours: + start := now.Add(-24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Last7Days: + start := now.AddDate(0, 0, -7) + return NewTimeRangeRequest(&start, &now), nil + case Last30Days: + start := now.AddDate(0, 0, -30) + return NewTimeRangeRequest(&start, &now), nil + case Last90Days: + start := now.AddDate(0, 0, -90) + return NewTimeRangeRequest(&start, &now), nil + default: + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("unsupported time window: %q", *timeRange.TimeWindow) + } + } + + // Use from/to dates if provided + if timeRange.From != nil && timeRange.To != nil { + if timeRange.From.After(*timeRange.To) { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from date cannot be after to date") + } + return 
NewTimeRangeRequest(timeRange.From, timeRange.To), nil + } else { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from and to dates are required if time window is not provided") + } +} + +// TimeBoundariesRequest represents the request for time boundary frames +type TimeBoundariesRequest struct { + TimeWindowBoundaries []string `json:"timeWindowBoundaries" schema:"timeWindowBoundaries" validate:"omitempty,min=1"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=week month quarter year"` // week, month, quarter, year + Iterations int `json:"iterations" schema:"iterations" validate:"omitempty,min=1"` +} + +// TimeWindowBoundaries represents the start and end times for a time window +type TimeWindowBoundaries struct { + StartTime time.Time + EndTime time.Time +} + +func (timeBoundaries *TimeBoundariesRequest) ParseAndValidateTimeBoundaries() ([]TimeWindowBoundaries, error) { + if timeBoundaries == nil { + return []TimeWindowBoundaries{}, fmt.Errorf("invalid time boundaries request") + } + // If timeWindow is provided, it takes preference over timeWindowBoundaries + if timeBoundaries.TimeWindow != nil { + switch *timeBoundaries.TimeWindow { + case Week: + return GetWeeklyTimeBoundaries(timeBoundaries.Iterations), nil + case Month: + return GetMonthlyTimeBoundaries(timeBoundaries.Iterations), nil + case Quarter: + return GetQuarterlyTimeBoundaries(timeBoundaries.Iterations), nil + case Year: + return GetYearlyTimeBoundaries(timeBoundaries.Iterations), nil + default: + return []TimeWindowBoundaries{}, fmt.Errorf("unsupported time window: %q", *timeBoundaries.TimeWindow) + } + } else if len(timeBoundaries.TimeWindowBoundaries) != 0 { + // Validate time window + return DecodeAndValidateTimeWindowBoundaries(timeBoundaries.TimeWindowBoundaries) + } else { + return []TimeWindowBoundaries{}, fmt.Errorf("time window boundaries are required if time window is not provided") + } +} + +func 
GetWeeklyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + weekday := int(now.Weekday()) + if weekday == 0 { + weekday = 7 + } + // Get start of this week (Monday) + weekStart := now.AddDate(0, 0, -(weekday - 1)) + // Set time to midnight + weekStart = time.Date(weekStart.Year(), weekStart.Month(), weekStart.Day(), 0, 0, 0, 0, weekStart.Location()) + + for i := 0; i < iterations; i++ { + start := weekStart.AddDate(0, 0, -7*i) + end := start.AddDate(0, 0, 7) + // For the current week, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetMonthlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this month (1st) + monthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := monthStart.AddDate(0, -i, 0) + end := start.AddDate(0, 1, 0) + // For the current month, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetQuarterlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterMonth := time.Month((quarter-1)*3 + 1) + // Get start of this quarter (1st of the month) + quarterStart := time.Date(now.Year(), quarterMonth, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := quarterStart.AddDate(0, -3*i, 0) + end := 
start.AddDate(0, 3, 0) + // For the current quarter, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetYearlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this year (1st of January) + yearStart := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := yearStart.AddDate(-i, 0, 0) + end := start.AddDate(1, 0, 0) + // For the current year, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func DecodeAndValidateTimeWindowBoundaries(timeWindowBoundaries []string) ([]TimeWindowBoundaries, error) { + boundaries := make([]TimeWindowBoundaries, 0, len(timeWindowBoundaries)) + for _, boundary := range timeWindowBoundaries { + parts := strings.Split(boundary, "|") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid time window boundary format: %q", boundary) + } + startTime, err := time.Parse(time.RFC3339, parts[0]) + if err != nil { + return nil, fmt.Errorf("invalid start time format: %q. expected format: %q", parts[0], time.RFC3339) + } + endTime, err := time.Parse(time.RFC3339, parts[1]) + if err != nil { + return nil, fmt.Errorf("invalid end time format: %q. 
expected format: %q", parts[1], time.RFC3339) + } + if startTime.After(endTime) { + return nil, fmt.Errorf("start time cannot be after end time: %q", boundary) + } + boundaries = append(boundaries, TimeWindowBoundaries{ + StartTime: startTime, + EndTime: endTime, + }) + } + return boundaries, nil +} diff --git a/git-sensor/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go b/git-sensor/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go index ea16a2f72..2bbfa1dc1 100644 --- a/git-sensor/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go +++ b/git-sensor/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go @@ -74,7 +74,9 @@ type PgQueryMonitoringConfig struct { } func GetPgQueryMonitoringConfig(serviceName string) (PgQueryMonitoringConfig, error) { - cfg := &PgQueryMonitoringConfig{} + cfg := &PgQueryMonitoringConfig{ + ServiceName: serviceName, + } err := env.Parse(cfg) return *cfg, err } diff --git a/git-sensor/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go b/git-sensor/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go index fa6858e5d..5c9cb23bf 100644 --- a/git-sensor/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go +++ b/git-sensor/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go @@ -34,7 +34,7 @@ type Config struct { User string `env:"PG_USER" envDefault:"" description:"user for postgres" example:"postgres"` Password string `env:"PG_PASSWORD" envDefault:"" secretData:"-" description:"password for postgres, associated with PG_USER" example:"confidential ;)"` Database string `env:"PG_DATABASE" envDefault:"orchestrator" description:"postgres database to be made connection with" example:"orchestrator, casbin, git_sensor, lens"` - CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin""` + CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin"` ApplicationName string `env:"APP" envDefault:"orchestrator" description:"Application name"` 
ReadTimeout int64 `env:"PG_READ_TIMEOUT" envDefault:"30"` WriteTimeout int64 `env:"PG_WRITE_TIMEOUT" envDefault:"30"` @@ -71,10 +71,10 @@ func NewDbConnection(cfg *Config, logger *zap.SugaredLogger) (*pg.DB, error) { _, err := dbConnection.QueryOne(&test, `SELECT 1`) if err != nil { - logger.Errorw("error in connecting db ", "db", obfuscateSecretTags(cfg), "err", err) + logger.Errorw("error in connecting db ", "db", ObfuscateSecretTags(cfg), "err", err) return nil, err } else { - logger.Infow("connected with db", "db", obfuscateSecretTags(cfg)) + logger.Infow("connected with db", "db", ObfuscateSecretTags(cfg)) } // -------------- @@ -82,7 +82,7 @@ func NewDbConnection(cfg *Config, logger *zap.SugaredLogger) (*pg.DB, error) { return dbConnection, err } -func obfuscateSecretTags(cfg interface{}) interface{} { +func ObfuscateSecretTags(cfg interface{}) interface{} { cfgDpl := reflect.New(reflect.ValueOf(cfg).Elem().Type()).Interface() cfgDplElm := reflect.ValueOf(cfgDpl).Elem() diff --git a/git-sensor/vendor/modules.txt b/git-sensor/vendor/modules.txt index 01d7f11ed..8e62d0efd 100644 --- a/git-sensor/vendor/modules.txt +++ b/git-sensor/vendor/modules.txt @@ -66,7 +66,7 @@ github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew -# github.com/devtron-labs/common-lib v0.0.0 => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +# github.com/devtron-labs/common-lib v0.0.0 => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 ## explicit; go 1.24.0 github.com/devtron-labs/common-lib/constants github.com/devtron-labs/common-lib/fetchAllEnv @@ -469,4 +469,4 @@ gopkg.in/yaml.v3 # mellium.im/sasl v0.3.2 ## explicit; go 1.20 mellium.im/sasl -# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +# github.com/devtron-labs/common-lib => 
github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 diff --git a/image-scanner/go.mod b/image-scanner/go.mod index 799907f33..bedd9df5e 100644 --- a/image-scanner/go.mod +++ b/image-scanner/go.mod @@ -69,4 +69,4 @@ require ( mellium.im/sasl v0.3.2 // indirect ) -replace github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +replace github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 diff --git a/image-scanner/go.sum b/image-scanner/go.sum index 35754fa3f..152ccda1f 100644 --- a/image-scanner/go.sum +++ b/image-scanner/go.sum @@ -279,8 +279,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be h1:ZufKGk1RMcJsTDgpCfdWcrca90K3s1+88KlVL/4dEmU= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be/go.mod h1:+CUhxuWB8uMYIoiXwofuLIXPyiNnwmoZlH90KWAE5Ew= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 h1:xDbz5etX5h6foQDgpT9ukTo2I65b6q32Nu9do5nBPk8= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713/go.mod h1:CqHnvltrng2O40hNGVl56qcAhv7tiXK3SKx47LKyE/A= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 
v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= @@ -721,9 +721,8 @@ github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= diff --git a/image-scanner/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go b/image-scanner/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go index b7434bb27..cfd7f98e4 100644 --- a/image-scanner/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go +++ b/image-scanner/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go @@ -124,6 +124,21 @@ const ( INFRA_HELM_RELEASE_ACTION_TOPIC string = "INFRA_HELM_RELEASE_ACTION_TOPIC" INFRA_HELM_RELEASE_ACTION_GROUP string = "INFRA_HELM_RELEASE_ACTION_GROUP" INFRA_HELM_RELEASE_ACTION_DURABLE string = "INFRA_HELM_RELEASE_ACTION_DURABLE" + COST_MODULE_INSTALLATION_TOPIC string = "COST_MODULE_INSTALLATION_TOPIC" + COST_MODULE_INSTALLATION_GROUP string = "COST_MODULE_INSTALLATION_GROUP" + COST_MODULE_INSTALLATION_DURABLE string = "COST_MODULE_INSTALLATION_DURABLE" + COST_MODULE_GPU_INSTALLATION_TOPIC string = 
"COST_MODULE_GPU_INSTALLATION_TOPIC" + COST_MODULE_GPU_INSTALLATION_GROUP string = "COST_MODULE_GPU_INSTALLATION_GROUP" + COST_MODULE_GPU_INSTALLATION_DURABLE string = "COST_MODULE_GPU_INSTALLATION_DURABLE" + STORAGE_MODULE_TOPIC string = "STORAGE_MODULE_TOPIC" + STORAGE_MODULE_GROUP string = "STORAGE_MODULE_GROUP" + STORAGE_MODULE_DURABLE string = "STORAGE_MODULE_DURABLE" + STORAGE_VELERO_INSTALL_TOPIC string = "STORAGE_VELERO_INSTALL_TOPIC" + STORAGE_VELERO_INSTALL_GROUP string = "STORAGE_VELERO_INSTALL_GROUP" + STORAGE_VELERO_INSTALL_DURABLE string = "STORAGE_VELERO_INSTALL_DURABLE" + STORAGE_VELERO_POST_INSTALLATION_TOPIC string = "STORAGE_VELERO_POST_INSTALLATION_TOPIC" + STORAGE_VELERO_POST_INSTALLATION_GROUP string = "STORAGE_VELERO_POST_INSTALLATION_GROUP" + STORAGE_VELERO_POST_INSTALLATION_DURABLE string = "STORAGE_VELERO_POST_INSTALLATION_DURABLE" ) type NatsTopic struct { @@ -179,6 +194,11 @@ var natsTopicMapping = map[string]NatsTopic{ INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE}, INFRA_HELM_RELEASE_ACTION_TOPIC: {topicName: INFRA_HELM_RELEASE_ACTION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRA_HELM_RELEASE_ACTION_GROUP, consumerName: INFRA_HELM_RELEASE_ACTION_DURABLE}, + COST_MODULE_INSTALLATION_TOPIC: {topicName: COST_MODULE_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_INSTALLATION_GROUP, consumerName: COST_MODULE_INSTALLATION_DURABLE}, + COST_MODULE_GPU_INSTALLATION_TOPIC: {topicName: COST_MODULE_GPU_INSTALLATION_TOPIC, streamName: 
ORCHESTRATOR_STREAM, queueName: COST_MODULE_GPU_INSTALLATION_GROUP, consumerName: COST_MODULE_GPU_INSTALLATION_DURABLE}, + STORAGE_MODULE_TOPIC: {topicName: STORAGE_MODULE_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_MODULE_GROUP, consumerName: STORAGE_MODULE_DURABLE}, + STORAGE_VELERO_INSTALL_TOPIC: {topicName: STORAGE_VELERO_INSTALL_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_VELERO_INSTALL_GROUP, consumerName: STORAGE_VELERO_INSTALL_DURABLE}, + STORAGE_VELERO_POST_INSTALLATION_TOPIC: {topicName: STORAGE_VELERO_POST_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_VELERO_POST_INSTALLATION_GROUP, consumerName: STORAGE_VELERO_POST_INSTALLATION_DURABLE}, } var NatsStreamWiseConfigMapping = map[string]NatsStreamConfig{ @@ -221,6 +241,11 @@ var NatsConsumerWiseConfigMapping = map[string]NatsConsumerConfig{ INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE: {}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE: {}, INFRA_HELM_RELEASE_ACTION_DURABLE: {}, + COST_MODULE_INSTALLATION_DURABLE: {}, + COST_MODULE_GPU_INSTALLATION_DURABLE: {}, + STORAGE_MODULE_DURABLE: {}, + STORAGE_VELERO_INSTALL_DURABLE: {}, + STORAGE_VELERO_POST_INSTALLATION_DURABLE: {}, } // getConsumerConfigMap will fetch the consumer wise config from the json string diff --git a/image-scanner/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go b/image-scanner/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go new file mode 100644 index 000000000..372765014 --- /dev/null +++ b/image-scanner/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "fmt" + "strings" + "time" +) + +type TimeRangeRequest struct { + From *time.Time `json:"from" schema:"from"` + To *time.Time `json:"to" schema:"to"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=today yesterday week month quarter lastWeek lastMonth lastQuarter last24Hours last7Days last30Days last90Days"` +} + +func NewTimeRangeRequest(from *time.Time, to *time.Time) *TimeRangeRequest { + return &TimeRangeRequest{ + From: from, + To: to, + } +} + +func NewTimeWindowRequest(timeWindow TimeWindows) *TimeRangeRequest { + return &TimeRangeRequest{ + TimeWindow: &timeWindow, + } +} + +// TimeWindows is a string type that represents different time windows +type TimeWindows string + +func (timeRange TimeWindows) String() string { + return string(timeRange) +} + +// Define constants for different time windows +const ( + Today TimeWindows = "today" + Yesterday TimeWindows = "yesterday" + Week TimeWindows = "week" + Month TimeWindows = "month" + Quarter TimeWindows = "quarter" + LastWeek TimeWindows = "lastWeek" + LastMonth TimeWindows = "lastMonth" + Year TimeWindows = "year" + LastQuarter TimeWindows = "lastQuarter" + Last24Hours TimeWindows = "last24Hours" + Last7Days TimeWindows = "last7Days" + Last30Days TimeWindows = "last30Days" + Last90Days TimeWindows = "last90Days" +) + +func (timeRange *TimeRangeRequest) ParseAndValidateTimeRange() (*TimeRangeRequest, error) { + if timeRange == nil { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("invalid time 
range request. either from/to or timeWindow must be provided") + } + now := time.Now() + // If timeWindow is provided, it takes preference over from/to + if timeRange.TimeWindow != nil { + switch *timeRange.TimeWindow { + case Today: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Yesterday: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()).Add(-24 * time.Hour) + end := start.Add(24 * time.Hour) + return NewTimeRangeRequest(&start, &end), nil + case Week: + // Current week (Monday to Sunday) + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + start := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Month: + start := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Quarter: + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterStart := time.Month((quarter-1)*3 + 1) + start := time.Date(now.Year(), quarterStart, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case LastWeek: + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + thisWeekStart := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + lastWeekStart := thisWeekStart.AddDate(0, 0, -7) + lastWeekEnd := thisWeekStart.Add(-time.Second) + return NewTimeRangeRequest(&lastWeekStart, &lastWeekEnd), nil + case LastMonth: + thisMonthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + lastMonthStart := thisMonthStart.AddDate(0, -1, 0) + lastMonthEnd := thisMonthStart.Add(-time.Second) + return NewTimeRangeRequest(&lastMonthStart, &lastMonthEnd), nil + case LastQuarter: + // Calculate current quarter + currentQuarter := ((int(now.Month()) - 1) / 3) + 1 + + // Calculate previous quarter + var prevQuarter int + var prevYear int + if 
currentQuarter == 1 { + // If current quarter is Q1, previous quarter is Q4 of previous year + prevQuarter = 4 + prevYear = now.Year() - 1 + } else { + // Otherwise, previous quarter is in the same year + prevQuarter = currentQuarter - 1 + prevYear = now.Year() + } + + // Calculate start and end of previous quarter + prevQuarterStartMonth := time.Month((prevQuarter-1)*3 + 1) + prevQuarterStart := time.Date(prevYear, prevQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + + // End of previous quarter is the start of current quarter minus 1 second + currentQuarterStartMonth := time.Month((currentQuarter-1)*3 + 1) + currentQuarterStart := time.Date(now.Year(), currentQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + if currentQuarter == 1 { + // If current quarter is Q1, we need to calculate Q4 end of previous year + currentQuarterStart = time.Date(now.Year(), time.January, 1, 0, 0, 0, 0, now.Location()) + } + prevQuarterEnd := currentQuarterStart.Add(-time.Second) + + return NewTimeRangeRequest(&prevQuarterStart, &prevQuarterEnd), nil + case Year: + start := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Last24Hours: + start := now.Add(-24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Last7Days: + start := now.AddDate(0, 0, -7) + return NewTimeRangeRequest(&start, &now), nil + case Last30Days: + start := now.AddDate(0, 0, -30) + return NewTimeRangeRequest(&start, &now), nil + case Last90Days: + start := now.AddDate(0, 0, -90) + return NewTimeRangeRequest(&start, &now), nil + default: + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("unsupported time window: %q", *timeRange.TimeWindow) + } + } + + // Use from/to dates if provided + if timeRange.From != nil && timeRange.To != nil { + if timeRange.From.After(*timeRange.To) { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from date cannot be after to date") + } + return 
NewTimeRangeRequest(timeRange.From, timeRange.To), nil + } else { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from and to dates are required if time window is not provided") + } +} + +// TimeBoundariesRequest represents the request for time boundary frames +type TimeBoundariesRequest struct { + TimeWindowBoundaries []string `json:"timeWindowBoundaries" schema:"timeWindowBoundaries" validate:"omitempty,min=1"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=week month quarter year"` // week, month, quarter, year + Iterations int `json:"iterations" schema:"iterations" validate:"omitempty,min=1"` +} + +// TimeWindowBoundaries represents the start and end times for a time window +type TimeWindowBoundaries struct { + StartTime time.Time + EndTime time.Time +} + +func (timeBoundaries *TimeBoundariesRequest) ParseAndValidateTimeBoundaries() ([]TimeWindowBoundaries, error) { + if timeBoundaries == nil { + return []TimeWindowBoundaries{}, fmt.Errorf("invalid time boundaries request") + } + // If timeWindow is provided, it takes preference over timeWindowBoundaries + if timeBoundaries.TimeWindow != nil { + switch *timeBoundaries.TimeWindow { + case Week: + return GetWeeklyTimeBoundaries(timeBoundaries.Iterations), nil + case Month: + return GetMonthlyTimeBoundaries(timeBoundaries.Iterations), nil + case Quarter: + return GetQuarterlyTimeBoundaries(timeBoundaries.Iterations), nil + case Year: + return GetYearlyTimeBoundaries(timeBoundaries.Iterations), nil + default: + return []TimeWindowBoundaries{}, fmt.Errorf("unsupported time window: %q", *timeBoundaries.TimeWindow) + } + } else if len(timeBoundaries.TimeWindowBoundaries) != 0 { + // Validate time window + return DecodeAndValidateTimeWindowBoundaries(timeBoundaries.TimeWindowBoundaries) + } else { + return []TimeWindowBoundaries{}, fmt.Errorf("time window boundaries are required if time window is not provided") + } +} + +func 
GetWeeklyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + weekday := int(now.Weekday()) + if weekday == 0 { + weekday = 7 + } + // Get start of this week (Monday) + weekStart := now.AddDate(0, 0, -(weekday - 1)) + // Set time to midnight + weekStart = time.Date(weekStart.Year(), weekStart.Month(), weekStart.Day(), 0, 0, 0, 0, weekStart.Location()) + + for i := 0; i < iterations; i++ { + start := weekStart.AddDate(0, 0, -7*i) + end := start.AddDate(0, 0, 7) + // For the current week, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetMonthlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this month (1st) + monthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := monthStart.AddDate(0, -i, 0) + end := start.AddDate(0, 1, 0) + // For the current month, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetQuarterlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterMonth := time.Month((quarter-1)*3 + 1) + // Get start of this quarter (1st of the month) + quarterStart := time.Date(now.Year(), quarterMonth, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := quarterStart.AddDate(0, -3*i, 0) + end := 
start.AddDate(0, 3, 0) + // For the current quarter, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetYearlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this year (1st of January) + yearStart := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := yearStart.AddDate(-i, 0, 0) + end := start.AddDate(1, 0, 0) + // For the current year, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func DecodeAndValidateTimeWindowBoundaries(timeWindowBoundaries []string) ([]TimeWindowBoundaries, error) { + boundaries := make([]TimeWindowBoundaries, 0, len(timeWindowBoundaries)) + for _, boundary := range timeWindowBoundaries { + parts := strings.Split(boundary, "|") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid time window boundary format: %q", boundary) + } + startTime, err := time.Parse(time.RFC3339, parts[0]) + if err != nil { + return nil, fmt.Errorf("invalid start time format: %q. expected format: %q", parts[0], time.RFC3339) + } + endTime, err := time.Parse(time.RFC3339, parts[1]) + if err != nil { + return nil, fmt.Errorf("invalid end time format: %q. 
expected format: %q", parts[1], time.RFC3339) + } + if startTime.After(endTime) { + return nil, fmt.Errorf("start time cannot be after end time: %q", boundary) + } + boundaries = append(boundaries, TimeWindowBoundaries{ + StartTime: startTime, + EndTime: endTime, + }) + } + return boundaries, nil +} diff --git a/image-scanner/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go b/image-scanner/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go index ea16a2f72..2bbfa1dc1 100644 --- a/image-scanner/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go +++ b/image-scanner/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go @@ -74,7 +74,9 @@ type PgQueryMonitoringConfig struct { } func GetPgQueryMonitoringConfig(serviceName string) (PgQueryMonitoringConfig, error) { - cfg := &PgQueryMonitoringConfig{} + cfg := &PgQueryMonitoringConfig{ + ServiceName: serviceName, + } err := env.Parse(cfg) return *cfg, err } diff --git a/image-scanner/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go b/image-scanner/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go index fa6858e5d..5c9cb23bf 100644 --- a/image-scanner/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go +++ b/image-scanner/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go @@ -34,7 +34,7 @@ type Config struct { User string `env:"PG_USER" envDefault:"" description:"user for postgres" example:"postgres"` Password string `env:"PG_PASSWORD" envDefault:"" secretData:"-" description:"password for postgres, associated with PG_USER" example:"confidential ;)"` Database string `env:"PG_DATABASE" envDefault:"orchestrator" description:"postgres database to be made connection with" example:"orchestrator, casbin, git_sensor, lens"` - CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin""` + CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin"` ApplicationName string `env:"APP" envDefault:"orchestrator" 
description:"Application name"` ReadTimeout int64 `env:"PG_READ_TIMEOUT" envDefault:"30"` WriteTimeout int64 `env:"PG_WRITE_TIMEOUT" envDefault:"30"` @@ -71,10 +71,10 @@ func NewDbConnection(cfg *Config, logger *zap.SugaredLogger) (*pg.DB, error) { _, err := dbConnection.QueryOne(&test, `SELECT 1`) if err != nil { - logger.Errorw("error in connecting db ", "db", obfuscateSecretTags(cfg), "err", err) + logger.Errorw("error in connecting db ", "db", ObfuscateSecretTags(cfg), "err", err) return nil, err } else { - logger.Infow("connected with db", "db", obfuscateSecretTags(cfg)) + logger.Infow("connected with db", "db", ObfuscateSecretTags(cfg)) } // -------------- @@ -82,7 +82,7 @@ func NewDbConnection(cfg *Config, logger *zap.SugaredLogger) (*pg.DB, error) { return dbConnection, err } -func obfuscateSecretTags(cfg interface{}) interface{} { +func ObfuscateSecretTags(cfg interface{}) interface{} { cfgDpl := reflect.New(reflect.ValueOf(cfg).Elem().Type()).Interface() cfgDplElm := reflect.ValueOf(cfgDpl).Elem() diff --git a/image-scanner/vendor/modules.txt b/image-scanner/vendor/modules.txt index 639d25431..d77f716e7 100644 --- a/image-scanner/vendor/modules.txt +++ b/image-scanner/vendor/modules.txt @@ -74,7 +74,7 @@ github.com/cespare/xxhash/v2 github.com/coreos/clair/api/v3/clairpb github.com/coreos/clair/database github.com/coreos/clair/ext/versionfmt -# github.com/devtron-labs/common-lib v0.19.0 => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +# github.com/devtron-labs/common-lib v0.19.0 => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 ## explicit; go 1.24.0 github.com/devtron-labs/common-lib/async github.com/devtron-labs/common-lib/constants @@ -457,4 +457,4 @@ google.golang.org/protobuf/types/known/wrapperspb # mellium.im/sasl v0.3.2 ## explicit; go 1.20 mellium.im/sasl -# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib 
v0.0.0-20251104082051-1f7627ecb6be +# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 diff --git a/kubelink/go.mod b/kubelink/go.mod index f1866acde..101ce36c0 100644 --- a/kubelink/go.mod +++ b/kubelink/go.mod @@ -168,4 +168,4 @@ require ( sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect ) -replace github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +replace github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 diff --git a/kubelink/go.sum b/kubelink/go.sum index be8a143d2..61473a878 100644 --- a/kubelink/go.sum +++ b/kubelink/go.sum @@ -65,8 +65,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be h1:ZufKGk1RMcJsTDgpCfdWcrca90K3s1+88KlVL/4dEmU= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be/go.mod h1:+CUhxuWB8uMYIoiXwofuLIXPyiNnwmoZlH90KWAE5Ew= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 h1:xDbz5etX5h6foQDgpT9ukTo2I65b6q32Nu9do5nBPk8= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713/go.mod h1:CqHnvltrng2O40hNGVl56qcAhv7tiXK3SKx47LKyE/A= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 
github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= diff --git a/kubelink/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go b/kubelink/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go index b7434bb27..cfd7f98e4 100644 --- a/kubelink/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go +++ b/kubelink/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go @@ -124,6 +124,21 @@ const ( INFRA_HELM_RELEASE_ACTION_TOPIC string = "INFRA_HELM_RELEASE_ACTION_TOPIC" INFRA_HELM_RELEASE_ACTION_GROUP string = "INFRA_HELM_RELEASE_ACTION_GROUP" INFRA_HELM_RELEASE_ACTION_DURABLE string = "INFRA_HELM_RELEASE_ACTION_DURABLE" + COST_MODULE_INSTALLATION_TOPIC string = "COST_MODULE_INSTALLATION_TOPIC" + COST_MODULE_INSTALLATION_GROUP string = "COST_MODULE_INSTALLATION_GROUP" + COST_MODULE_INSTALLATION_DURABLE string = "COST_MODULE_INSTALLATION_DURABLE" + COST_MODULE_GPU_INSTALLATION_TOPIC string = "COST_MODULE_GPU_INSTALLATION_TOPIC" + COST_MODULE_GPU_INSTALLATION_GROUP string = "COST_MODULE_GPU_INSTALLATION_GROUP" + COST_MODULE_GPU_INSTALLATION_DURABLE string = "COST_MODULE_GPU_INSTALLATION_DURABLE" + STORAGE_MODULE_TOPIC string = "STORAGE_MODULE_TOPIC" + STORAGE_MODULE_GROUP string = "STORAGE_MODULE_GROUP" + STORAGE_MODULE_DURABLE string = "STORAGE_MODULE_DURABLE" + STORAGE_VELERO_INSTALL_TOPIC string = "STORAGE_VELERO_INSTALL_TOPIC" + STORAGE_VELERO_INSTALL_GROUP string = "STORAGE_VELERO_INSTALL_GROUP" + STORAGE_VELERO_INSTALL_DURABLE string = "STORAGE_VELERO_INSTALL_DURABLE" + STORAGE_VELERO_POST_INSTALLATION_TOPIC string = "STORAGE_VELERO_POST_INSTALLATION_TOPIC" + STORAGE_VELERO_POST_INSTALLATION_GROUP string = "STORAGE_VELERO_POST_INSTALLATION_GROUP" + STORAGE_VELERO_POST_INSTALLATION_DURABLE string = "STORAGE_VELERO_POST_INSTALLATION_DURABLE" ) type NatsTopic struct { @@ -179,6 +194,11 @@ var natsTopicMapping = map[string]NatsTopic{ 
INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE}, INFRA_HELM_RELEASE_ACTION_TOPIC: {topicName: INFRA_HELM_RELEASE_ACTION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRA_HELM_RELEASE_ACTION_GROUP, consumerName: INFRA_HELM_RELEASE_ACTION_DURABLE}, + COST_MODULE_INSTALLATION_TOPIC: {topicName: COST_MODULE_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_INSTALLATION_GROUP, consumerName: COST_MODULE_INSTALLATION_DURABLE}, + COST_MODULE_GPU_INSTALLATION_TOPIC: {topicName: COST_MODULE_GPU_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_GPU_INSTALLATION_GROUP, consumerName: COST_MODULE_GPU_INSTALLATION_DURABLE}, + STORAGE_MODULE_TOPIC: {topicName: STORAGE_MODULE_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_MODULE_GROUP, consumerName: STORAGE_MODULE_DURABLE}, + STORAGE_VELERO_INSTALL_TOPIC: {topicName: STORAGE_VELERO_INSTALL_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_VELERO_INSTALL_GROUP, consumerName: STORAGE_VELERO_INSTALL_DURABLE}, + STORAGE_VELERO_POST_INSTALLATION_TOPIC: {topicName: STORAGE_VELERO_POST_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_VELERO_POST_INSTALLATION_GROUP, consumerName: STORAGE_VELERO_POST_INSTALLATION_DURABLE}, } var NatsStreamWiseConfigMapping = map[string]NatsStreamConfig{ @@ -221,6 +241,11 @@ var NatsConsumerWiseConfigMapping = map[string]NatsConsumerConfig{ INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE: {}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE: {}, 
INFRA_HELM_RELEASE_ACTION_DURABLE: {}, + COST_MODULE_INSTALLATION_DURABLE: {}, + COST_MODULE_GPU_INSTALLATION_DURABLE: {}, + STORAGE_MODULE_DURABLE: {}, + STORAGE_VELERO_INSTALL_DURABLE: {}, + STORAGE_VELERO_POST_INSTALLATION_DURABLE: {}, } // getConsumerConfigMap will fetch the consumer wise config from the json string diff --git a/kubelink/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go b/kubelink/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go new file mode 100644 index 000000000..372765014 --- /dev/null +++ b/kubelink/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils + +import ( + "fmt" + "strings" + "time" +) + +type TimeRangeRequest struct { + From *time.Time `json:"from" schema:"from"` + To *time.Time `json:"to" schema:"to"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=today yesterday week month quarter lastWeek lastMonth lastQuarter last24Hours last7Days last30Days last90Days"` +} + +func NewTimeRangeRequest(from *time.Time, to *time.Time) *TimeRangeRequest { + return &TimeRangeRequest{ + From: from, + To: to, + } +} + +func NewTimeWindowRequest(timeWindow TimeWindows) *TimeRangeRequest { + return &TimeRangeRequest{ + TimeWindow: &timeWindow, + } +} + +// TimeWindows is a string type that represents different time windows +type TimeWindows string + +func (timeRange TimeWindows) String() string { + return string(timeRange) +} + +// Define constants for different time windows +const ( + Today TimeWindows = "today" + Yesterday TimeWindows = "yesterday" + Week TimeWindows = "week" + Month TimeWindows = "month" + Quarter TimeWindows = "quarter" + LastWeek TimeWindows = "lastWeek" + LastMonth TimeWindows = "lastMonth" + Year TimeWindows = "year" + LastQuarter TimeWindows = "lastQuarter" + Last24Hours TimeWindows = "last24Hours" + Last7Days TimeWindows = "last7Days" + Last30Days TimeWindows = "last30Days" + Last90Days TimeWindows = "last90Days" +) + +func (timeRange *TimeRangeRequest) ParseAndValidateTimeRange() (*TimeRangeRequest, error) { + if timeRange == nil { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("invalid time range request. 
either from/to or timeWindow must be provided") + } + now := time.Now() + // If timeWindow is provided, it takes preference over from/to + if timeRange.TimeWindow != nil { + switch *timeRange.TimeWindow { + case Today: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Yesterday: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()).Add(-24 * time.Hour) + end := start.Add(24 * time.Hour) + return NewTimeRangeRequest(&start, &end), nil + case Week: + // Current week (Monday to Sunday) + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + start := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Month: + start := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Quarter: + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterStart := time.Month((quarter-1)*3 + 1) + start := time.Date(now.Year(), quarterStart, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case LastWeek: + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + thisWeekStart := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + lastWeekStart := thisWeekStart.AddDate(0, 0, -7) + lastWeekEnd := thisWeekStart.Add(-time.Second) + return NewTimeRangeRequest(&lastWeekStart, &lastWeekEnd), nil + case LastMonth: + thisMonthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + lastMonthStart := thisMonthStart.AddDate(0, -1, 0) + lastMonthEnd := thisMonthStart.Add(-time.Second) + return NewTimeRangeRequest(&lastMonthStart, &lastMonthEnd), nil + case LastQuarter: + // Calculate current quarter + currentQuarter := ((int(now.Month()) - 1) / 3) + 1 + + // Calculate previous quarter + var prevQuarter int + var prevYear int + if currentQuarter == 1 
{ + // If current quarter is Q1, previous quarter is Q4 of previous year + prevQuarter = 4 + prevYear = now.Year() - 1 + } else { + // Otherwise, previous quarter is in the same year + prevQuarter = currentQuarter - 1 + prevYear = now.Year() + } + + // Calculate start and end of previous quarter + prevQuarterStartMonth := time.Month((prevQuarter-1)*3 + 1) + prevQuarterStart := time.Date(prevYear, prevQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + + // End of previous quarter is the start of current quarter minus 1 second + currentQuarterStartMonth := time.Month((currentQuarter-1)*3 + 1) + currentQuarterStart := time.Date(now.Year(), currentQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + if currentQuarter == 1 { + // If current quarter is Q1, we need to calculate Q4 end of previous year + currentQuarterStart = time.Date(now.Year(), time.January, 1, 0, 0, 0, 0, now.Location()) + } + prevQuarterEnd := currentQuarterStart.Add(-time.Second) + + return NewTimeRangeRequest(&prevQuarterStart, &prevQuarterEnd), nil + case Year: + start := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Last24Hours: + start := now.Add(-24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Last7Days: + start := now.AddDate(0, 0, -7) + return NewTimeRangeRequest(&start, &now), nil + case Last30Days: + start := now.AddDate(0, 0, -30) + return NewTimeRangeRequest(&start, &now), nil + case Last90Days: + start := now.AddDate(0, 0, -90) + return NewTimeRangeRequest(&start, &now), nil + default: + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("unsupported time window: %q", *timeRange.TimeWindow) + } + } + + // Use from/to dates if provided + if timeRange.From != nil && timeRange.To != nil { + if timeRange.From.After(*timeRange.To) { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from date cannot be after to date") + } + return NewTimeRangeRequest(timeRange.From, 
timeRange.To), nil + } else { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from and to dates are required if time window is not provided") + } +} + +// TimeBoundariesRequest represents the request for time boundary frames +type TimeBoundariesRequest struct { + TimeWindowBoundaries []string `json:"timeWindowBoundaries" schema:"timeWindowBoundaries" validate:"omitempty,min=1"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=week month quarter year"` // week, month, quarter, year + Iterations int `json:"iterations" schema:"iterations" validate:"omitempty,min=1"` +} + +// TimeWindowBoundaries represents the start and end times for a time window +type TimeWindowBoundaries struct { + StartTime time.Time + EndTime time.Time +} + +func (timeBoundaries *TimeBoundariesRequest) ParseAndValidateTimeBoundaries() ([]TimeWindowBoundaries, error) { + if timeBoundaries == nil { + return []TimeWindowBoundaries{}, fmt.Errorf("invalid time boundaries request") + } + // If timeWindow is provided, it takes preference over timeWindowBoundaries + if timeBoundaries.TimeWindow != nil { + switch *timeBoundaries.TimeWindow { + case Week: + return GetWeeklyTimeBoundaries(timeBoundaries.Iterations), nil + case Month: + return GetMonthlyTimeBoundaries(timeBoundaries.Iterations), nil + case Quarter: + return GetQuarterlyTimeBoundaries(timeBoundaries.Iterations), nil + case Year: + return GetYearlyTimeBoundaries(timeBoundaries.Iterations), nil + default: + return []TimeWindowBoundaries{}, fmt.Errorf("unsupported time window: %q", *timeBoundaries.TimeWindow) + } + } else if len(timeBoundaries.TimeWindowBoundaries) != 0 { + // Validate time window + return DecodeAndValidateTimeWindowBoundaries(timeBoundaries.TimeWindowBoundaries) + } else { + return []TimeWindowBoundaries{}, fmt.Errorf("time window boundaries are required if time window is not provided") + } +} + +func GetWeeklyTimeBoundaries(iterations int) []TimeWindowBoundaries { 
+ if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + weekday := int(now.Weekday()) + if weekday == 0 { + weekday = 7 + } + // Get start of this week (Monday) + weekStart := now.AddDate(0, 0, -(weekday - 1)) + // Set time to midnight + weekStart = time.Date(weekStart.Year(), weekStart.Month(), weekStart.Day(), 0, 0, 0, 0, weekStart.Location()) + + for i := 0; i < iterations; i++ { + start := weekStart.AddDate(0, 0, -7*i) + end := start.AddDate(0, 0, 7) + // For the current week, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetMonthlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this month (1st) + monthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := monthStart.AddDate(0, -i, 0) + end := start.AddDate(0, 1, 0) + // For the current month, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetQuarterlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterMonth := time.Month((quarter-1)*3 + 1) + // Get start of this quarter (1st of the month) + quarterStart := time.Date(now.Year(), quarterMonth, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := quarterStart.AddDate(0, -3*i, 0) + end := start.AddDate(0, 3, 0) + // For the current quarter, if now < 
end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetYearlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this year (1st of January) + yearStart := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := yearStart.AddDate(-i, 0, 0) + end := start.AddDate(1, 0, 0) + // For the current year, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func DecodeAndValidateTimeWindowBoundaries(timeWindowBoundaries []string) ([]TimeWindowBoundaries, error) { + boundaries := make([]TimeWindowBoundaries, 0, len(timeWindowBoundaries)) + for _, boundary := range timeWindowBoundaries { + parts := strings.Split(boundary, "|") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid time window boundary format: %q", boundary) + } + startTime, err := time.Parse(time.RFC3339, parts[0]) + if err != nil { + return nil, fmt.Errorf("invalid start time format: %q. expected format: %q", parts[0], time.RFC3339) + } + endTime, err := time.Parse(time.RFC3339, parts[1]) + if err != nil { + return nil, fmt.Errorf("invalid end time format: %q. 
expected format: %q", parts[1], time.RFC3339) + } + if startTime.After(endTime) { + return nil, fmt.Errorf("start time cannot be after end time: %q", boundary) + } + boundaries = append(boundaries, TimeWindowBoundaries{ + StartTime: startTime, + EndTime: endTime, + }) + } + return boundaries, nil +} diff --git a/kubelink/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go b/kubelink/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go index ea16a2f72..2bbfa1dc1 100644 --- a/kubelink/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go +++ b/kubelink/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go @@ -74,7 +74,9 @@ type PgQueryMonitoringConfig struct { } func GetPgQueryMonitoringConfig(serviceName string) (PgQueryMonitoringConfig, error) { - cfg := &PgQueryMonitoringConfig{} + cfg := &PgQueryMonitoringConfig{ + ServiceName: serviceName, + } err := env.Parse(cfg) return *cfg, err } diff --git a/kubelink/vendor/github.com/devtron-labs/common-lib/utils/k8s/K8sUtil.go b/kubelink/vendor/github.com/devtron-labs/common-lib/utils/k8s/K8sUtil.go index f621257bd..434fb4874 100644 --- a/kubelink/vendor/github.com/devtron-labs/common-lib/utils/k8s/K8sUtil.go +++ b/kubelink/vendor/github.com/devtron-labs/common-lib/utils/k8s/K8sUtil.go @@ -1351,7 +1351,7 @@ func (impl *K8sServiceImpl) GetPodListByLabel(namespace, label string, clientSet return podList.Items, nil } -func (impl *K8sServiceImpl) CreateOrUpdateSecretByName(client *v12.CoreV1Client, namespace, uniqueSecretName string, secretLabel map[string]string, secretData map[string]string) error { +func (impl *K8sServiceImpl) CreateOrUpdateSecretByName(client *v12.CoreV1Client, namespace, uniqueSecretName string, secretLabel map[string]string, dataString map[string]string, data map[string][]byte) error { secret, err := impl.GetSecret(namespace, uniqueSecretName, client) statusError, ok := err.(*errors.StatusError) @@ -1361,13 +1361,18 @@ func (impl *K8sServiceImpl) 
CreateOrUpdateSecretByName(client *v12.CoreV1Client, } if ok && statusError != nil && statusError.Status().Code == http.StatusNotFound { - _, err = impl.CreateSecret(namespace, nil, uniqueSecretName, "", client, secretLabel, secretData) + _, err = impl.CreateSecret(namespace, data, uniqueSecretName, "", client, secretLabel, dataString) if err != nil { impl.logger.Errorw("Error in creating secret for chart repo", "uniqueSecretName", uniqueSecretName, "err", err) return err } } else { - secret.StringData = secretData + if len(data) > 0 { + secret.Data = data + } + if len(dataString) > 0 { + secret.StringData = dataString + } _, err = impl.UpdateSecret(namespace, secret, client) if err != nil { impl.logger.Errorw("Error in creating secret for chart repo", "uniqueSecretName", uniqueSecretName, "err", err) diff --git a/kubelink/vendor/github.com/devtron-labs/common-lib/utils/k8s/k8sService.go b/kubelink/vendor/github.com/devtron-labs/common-lib/utils/k8s/k8sService.go index c8f583ff3..681e96ad2 100644 --- a/kubelink/vendor/github.com/devtron-labs/common-lib/utils/k8s/k8sService.go +++ b/kubelink/vendor/github.com/devtron-labs/common-lib/utils/k8s/k8sService.go @@ -105,8 +105,7 @@ type K8sService interface { GetResourceIf(restConfig *rest.Config, groupVersionKind schema.GroupVersionKind) (resourceIf dynamic.NamespaceableResourceInterface, namespaced bool, err error) FetchConnectionStatusForCluster(k8sClientSet *kubernetes.Clientset) error CreateK8sClientSet(restConfig *rest.Config) (*kubernetes.Clientset, error) - CreateOrUpdateSecretByName(client *v12.CoreV1Client, namespace, uniqueSecretName string, secretLabel map[string]string, secretData map[string]string) error - + CreateOrUpdateSecretByName(client *v12.CoreV1Client, namespace, uniqueSecretName string, secretLabel map[string]string, dataString map[string]string, data map[string][]byte) error // below functions are exposed for K8sUtilExtended CreateNsWithLabels(namespace string, labels map[string]string, client 
*v12.CoreV1Client) (ns *v1.Namespace, err error) diff --git a/kubelink/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go b/kubelink/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go index fa6858e5d..5c9cb23bf 100644 --- a/kubelink/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go +++ b/kubelink/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go @@ -34,7 +34,7 @@ type Config struct { User string `env:"PG_USER" envDefault:"" description:"user for postgres" example:"postgres"` Password string `env:"PG_PASSWORD" envDefault:"" secretData:"-" description:"password for postgres, associated with PG_USER" example:"confidential ;)"` Database string `env:"PG_DATABASE" envDefault:"orchestrator" description:"postgres database to be made connection with" example:"orchestrator, casbin, git_sensor, lens"` - CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin""` + CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin"` ApplicationName string `env:"APP" envDefault:"orchestrator" description:"Application name"` ReadTimeout int64 `env:"PG_READ_TIMEOUT" envDefault:"30"` WriteTimeout int64 `env:"PG_WRITE_TIMEOUT" envDefault:"30"` @@ -71,10 +71,10 @@ func NewDbConnection(cfg *Config, logger *zap.SugaredLogger) (*pg.DB, error) { _, err := dbConnection.QueryOne(&test, `SELECT 1`) if err != nil { - logger.Errorw("error in connecting db ", "db", obfuscateSecretTags(cfg), "err", err) + logger.Errorw("error in connecting db ", "db", ObfuscateSecretTags(cfg), "err", err) return nil, err } else { - logger.Infow("connected with db", "db", obfuscateSecretTags(cfg)) + logger.Infow("connected with db", "db", ObfuscateSecretTags(cfg)) } // -------------- @@ -82,7 +82,7 @@ func NewDbConnection(cfg *Config, logger *zap.SugaredLogger) (*pg.DB, error) { return dbConnection, err } -func obfuscateSecretTags(cfg interface{}) interface{} { +func ObfuscateSecretTags(cfg interface{}) interface{} { cfgDpl := 
reflect.New(reflect.ValueOf(cfg).Elem().Type()).Interface() cfgDplElm := reflect.ValueOf(cfgDpl).Elem() diff --git a/kubelink/vendor/modules.txt b/kubelink/vendor/modules.txt index 68d270652..889defd28 100644 --- a/kubelink/vendor/modules.txt +++ b/kubelink/vendor/modules.txt @@ -125,7 +125,7 @@ github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew -# github.com/devtron-labs/common-lib v0.0.0 => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +# github.com/devtron-labs/common-lib v0.0.0 => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 ## explicit; go 1.24.0 github.com/devtron-labs/common-lib/async github.com/devtron-labs/common-lib/constants @@ -1403,4 +1403,4 @@ sigs.k8s.io/structured-merge-diff/v4/value sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 -# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 diff --git a/kubewatch/env_gen.json b/kubewatch/env_gen.json index 045a01b12..bfd4405af 100644 --- a/kubewatch/env_gen.json +++ b/kubewatch/env_gen.json @@ -1 +1 @@ -[{"Category":"ARGOCD_INFORMER","Fields":[{"Env":"ACD_INFORMER","EnvType":"bool","EnvValue":"true","EnvDescription":"Used to determine whether ArgoCD informer is enabled or not","Example":"","Deprecated":"false"},{"Env":"ACD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Namespace where all the ArgoCD application objects are published. 
For multi-cluster mode, it will be set to v1.NamespaceAll","Example":"","Deprecated":"false"}]},{"Category":"CD_ARGO_WORKFLOW","Fields":[{"Env":"CD_DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtron-cd","EnvDescription":"Namespace where all CD workflows objects are scheduled. For multi-cluster mode, it will be set to v1.NamespaceAll","Example":"","Deprecated":"false"},{"Env":"CD_INFORMER","EnvType":"bool","EnvValue":"true","EnvDescription":"Used to determine whether CD informer is enabled or not","Example":"","Deprecated":"false"}]},{"Category":"CI_ARGO_WORKFLOW","Fields":[{"Env":"CI_INFORMER","EnvType":"bool","EnvValue":"true","EnvDescription":"Used to determine whether CI informer is enabled or not","Example":"","Deprecated":"false"},{"Env":"DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtron-ci","EnvDescription":"Namespace where all CI workflows objects are scheduled. For multi-cluster mode, it will be set to v1.NamespaceAll","Example":"","Deprecated":"false"}]},{"Category":"CLUSTER_MODE","Fields":[{"Env":"CLUSTER_ARGO_CD_TYPE","EnvType":"string","EnvValue":"IN_CLUSTER","EnvDescription":"Determines cluster mode for ArgoCD informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER","Example":"","Deprecated":"false"},{"Env":"CLUSTER_CD_ARGO_WF_TYPE","EnvType":"string","EnvValue":"IN_CLUSTER","EnvDescription":"Determines cluster mode for CD ArgoWorkflow informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER","Example":"","Deprecated":"false"},{"Env":"CLUSTER_CI_ARGO_WF_TYPE","EnvType":"string","EnvValue":"IN_CLUSTER","EnvDescription":"Determines cluster mode for CI ArgoWorkflow informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER","Example":"","Deprecated":"false"},{"Env":"CLUSTER_TYPE","EnvType":"string","EnvValue":"IN_CLUSTER","EnvDescription":"Determines 
cluster mode for System Executor informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER","Example":"","Deprecated":"false"}]},{"Category":"DEVTRON","Fields":[{"Env":"APP","EnvType":"string","EnvValue":"kubewatch","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CONSUMER_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_LOG_TIME_LIMIT","EnvType":"int64","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ENABLE_STATSVIZ","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_CLIENT_MAX_IDLE_CONNS_PER_HOST","EnvType":"int","EnvValue":"25","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_IDLE_CONN_TIMEOUT","EnvType":"int","EnvValue":"300","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_KEEPALIVE","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_TIMEOUT","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TLS_HANDSHAKE_TIMEOUT","EnvType":"int","EnvValue":"10","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LOG_LEVEL","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_ACK_WAIT_IN_SECS","EnvType":"int","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_BUFFER_SIZE","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_MAX_AGE","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_PROCESSING_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_REPLICAS","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"
NATS_SERVER_HOST","EnvType":"string","EnvValue":"nats://devtron-nats.devtroncd:4222","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_ADDR","EnvType":"string","EnvValue":"127.0.0.1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_DATABASE","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_EXPORT_PROM_METRICS","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_FAILURE_QUERIES","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_QUERY","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_SLOW_QUERY","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_PASSWORD","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_PORT","EnvType":"string","EnvValue":"5432","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_QUERY_DUR_THRESHOLD","EnvType":"int64","EnvValue":"5000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_USER","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RUNTIME_CONFIG_LOCAL_DEV","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"STREAM_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_CUSTOM_HTTP_TRANSPORT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"EXTERNAL_KUBEWATCH","Fields":[{"Env":"CD_EXTERNAL_LISTENER_URL","EnvType":"string","EnvValue":"http://devtroncd-orchestrator-service-prod.devtroncd:80","EnvDescription":"URL of the 
orchestrator","Example":"","Deprecated":"false"},{"Env":"CD_EXTERNAL_NAMESPACE","EnvType":"string","EnvValue":"","EnvDescription":"Namespace where the external kubewatch is set up","Example":"","Deprecated":"false"},{"Env":"CD_EXTERNAL_ORCHESTRATOR_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"Token used to authenticate with the orchestrator","Example":"","Deprecated":"false"},{"Env":"CD_EXTERNAL_REST_LISTENER","EnvType":"bool","EnvValue":"false","EnvDescription":"Used to determine whether it's an external kubewatch or internal kubewatch","Example":"","Deprecated":"false"}]},{"Category":"GRACEFUL_SHUTDOWN","Fields":[{"Env":"SLEEP_TIMEOUT","EnvType":"int","EnvValue":"5","EnvDescription":"Graceful shutdown timeout in seconds","Example":"","Deprecated":"false"}]},{"Category":"POSTGRES","Fields":[{"Env":"CASBIN_DATABASE","EnvType":"string","EnvValue":"casbin","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_READ_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_WRITE_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"}]}] \ No newline at end of file +[{"Category":"ARGOCD_INFORMER","Fields":[{"Env":"ACD_INFORMER","EnvType":"bool","EnvValue":"true","EnvDescription":"Used to determine whether ArgoCD informer is enabled or not","Example":"","Deprecated":"false"},{"Env":"ACD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Namespace where all the ArgoCD application objects are published. For multi-cluster mode, it will be set to v1.NamespaceAll","Example":"","Deprecated":"false"}]},{"Category":"CD_ARGO_WORKFLOW","Fields":[{"Env":"CD_DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtron-cd","EnvDescription":"Namespace where all CD workflows objects are scheduled. 
For multi-cluster mode, it will be set to v1.NamespaceAll","Example":"","Deprecated":"false"},{"Env":"CD_INFORMER","EnvType":"bool","EnvValue":"true","EnvDescription":"Used to determine whether CD informer is enabled or not","Example":"","Deprecated":"false"}]},{"Category":"CI_ARGO_WORKFLOW","Fields":[{"Env":"CI_INFORMER","EnvType":"bool","EnvValue":"true","EnvDescription":"Used to determine whether CI informer is enabled or not","Example":"","Deprecated":"false"},{"Env":"DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtron-ci","EnvDescription":"Namespace where all CI workflows objects are scheduled. For multi-cluster mode, it will be set to v1.NamespaceAll","Example":"","Deprecated":"false"}]},{"Category":"CLUSTER_MODE","Fields":[{"Env":"CLUSTER_ARGO_CD_TYPE","EnvType":"string","EnvValue":"IN_CLUSTER","EnvDescription":"Determines cluster mode for ArgoCD informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER","Example":"","Deprecated":"false"},{"Env":"CLUSTER_CD_ARGO_WF_TYPE","EnvType":"string","EnvValue":"IN_CLUSTER","EnvDescription":"Determines cluster mode for CD ArgoWorkflow informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER","Example":"","Deprecated":"false"},{"Env":"CLUSTER_CI_ARGO_WF_TYPE","EnvType":"string","EnvValue":"IN_CLUSTER","EnvDescription":"Determines cluster mode for CI ArgoWorkflow informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER","Example":"","Deprecated":"false"},{"Env":"CLUSTER_STORAGE_MODULE_TYPE","EnvType":"string","EnvValue":"ALL_CLUSTER","EnvDescription":"Determines cluster mode for Velero BSL informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to 
IN_CLUSTER","Example":"","Deprecated":"false"},{"Env":"CLUSTER_TYPE","EnvType":"string","EnvValue":"IN_CLUSTER","EnvDescription":"Determines cluster mode for System Executor informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER","Example":"","Deprecated":"false"}]},{"Category":"DEVTRON","Fields":[{"Env":"APP","EnvType":"string","EnvValue":"kubewatch","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CONSUMER_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_LOG_TIME_LIMIT","EnvType":"int64","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ENABLE_STATSVIZ","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_CLIENT_MAX_IDLE_CONNS_PER_HOST","EnvType":"int","EnvValue":"25","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_IDLE_CONN_TIMEOUT","EnvType":"int","EnvValue":"300","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_KEEPALIVE","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_TIMEOUT","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TLS_HANDSHAKE_TIMEOUT","EnvType":"int","EnvValue":"10","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LOG_LEVEL","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_ACK_WAIT_IN_SECS","EnvType":"int","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_BUFFER_SIZE","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_MAX_AGE","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_PROCESSING_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","D
eprecated":"false"},{"Env":"NATS_MSG_REPLICAS","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_SERVER_HOST","EnvType":"string","EnvValue":"nats://devtron-nats.devtroncd:4222","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_ADDR","EnvType":"string","EnvValue":"127.0.0.1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_DATABASE","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_EXPORT_PROM_METRICS","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_FAILURE_QUERIES","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_QUERY","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_SLOW_QUERY","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_PASSWORD","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_PORT","EnvType":"string","EnvValue":"5432","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_QUERY_DUR_THRESHOLD","EnvType":"int64","EnvValue":"5000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_USER","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RUNTIME_CONFIG_LOCAL_DEV","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"STREAM_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_CUSTOM_HTTP_TRANSPORT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"EXTERNAL_KUBEWATCH","Fields":[{"Env":"CD_EXTERNAL_LISTENER_URL","EnvType":"string","EnvValue":"http://devtroncd-orchestrator-service-prod.devtroncd:80","EnvDescription":"URL 
of the orchestrator","Example":"","Deprecated":"false"},{"Env":"CD_EXTERNAL_NAMESPACE","EnvType":"string","EnvValue":"","EnvDescription":"Namespace where the external kubewatch is set up","Example":"","Deprecated":"false"},{"Env":"CD_EXTERNAL_ORCHESTRATOR_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"Token used to authenticate with the orchestrator","Example":"","Deprecated":"false"},{"Env":"CD_EXTERNAL_REST_LISTENER","EnvType":"bool","EnvValue":"false","EnvDescription":"Used to determine whether it's an external kubewatch or internal kubewatch","Example":"","Deprecated":"false"}]},{"Category":"GRACEFUL_SHUTDOWN","Fields":[{"Env":"SLEEP_TIMEOUT","EnvType":"int","EnvValue":"5","EnvDescription":"Graceful shutdown timeout in seconds","Example":"","Deprecated":"false"}]},{"Category":"POSTGRES","Fields":[{"Env":"CASBIN_DATABASE","EnvType":"string","EnvValue":"casbin","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_READ_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_WRITE_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"VELERO_INFORMER","Fields":[{"Env":"VELERO_INFORMER","EnvType":"bool","EnvValue":"false","EnvDescription":"Used to determine whether Velero informer is enabled or not","Example":"","Deprecated":"false"},{"Env":"VELERO_NAMESPACE","EnvType":"string","EnvValue":"velero","EnvDescription":"Namespace where all the Velero backup objects are published","Example":"","Deprecated":"false"}]}] \ No newline at end of file diff --git a/kubewatch/env_gen.md b/kubewatch/env_gen.md index babb43804..b65711fb9 100644 --- a/kubewatch/env_gen.md +++ b/kubewatch/env_gen.md @@ -27,6 +27,7 @@ | CLUSTER_ARGO_CD_TYPE | string |IN_CLUSTER | Determines cluster mode for ArgoCD informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER | | false | | 
CLUSTER_CD_ARGO_WF_TYPE | string |IN_CLUSTER | Determines cluster mode for CD ArgoWorkflow informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER | | false | | CLUSTER_CI_ARGO_WF_TYPE | string |IN_CLUSTER | Determines cluster mode for CI ArgoWorkflow informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER | | false | + | CLUSTER_STORAGE_MODULE_TYPE | string |ALL_CLUSTER | Determines cluster mode for Velero BSL informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER | | false | | CLUSTER_TYPE | string |IN_CLUSTER | Determines cluster mode for System Executor informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER | | false | @@ -86,3 +87,10 @@ | PG_READ_TIMEOUT | int64 |30 | | | false | | PG_WRITE_TIMEOUT | int64 |30 | | | false | + +## VELERO_INFORMER Related Environment Variables +| Key | Type | Default Value | Description | Example | Deprecated | +|-------|----------|-------------------|-------------------|-----------------------|------------------| + | VELERO_INFORMER | bool |false | Used to determine whether Velero informer is enabled or not | | false | + | VELERO_NAMESPACE | string |velero | Namespace where all the Velero backup objects are published | | false | + diff --git a/kubewatch/go.mod b/kubewatch/go.mod index 23a051923..c3353d215 100644 --- a/kubewatch/go.mod +++ b/kubewatch/go.mod @@ -13,11 +13,13 @@ require ( github.com/go-resty/resty/v2 v2.16.5 github.com/gorilla/mux v1.8.1 github.com/nlopes/slack v0.1.0 + github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.22.0 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.9.1 github.com/spf13/viper v1.20.0 github.com/tbruyelle/hipchat-go v0.0.0-20160921153256-749fb9e14beb + github.com/vmware-tanzu/velero v1.14.1 
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.33.3 @@ -163,7 +165,6 @@ require ( github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pjbgf/sha1cd v0.3.2 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.64.0 // indirect @@ -240,5 +241,5 @@ require ( replace ( github.com/cyphar/filepath-securejoin v0.4.1 => github.com/cyphar/filepath-securejoin v0.3.6 // indirect - github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be + github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 ) diff --git a/kubewatch/go.sum b/kubewatch/go.sum index 6263280b4..153d0a67b 100644 --- a/kubewatch/go.sum +++ b/kubewatch/go.sum @@ -129,8 +129,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.12.3/go.mod h1:k0mtMFOnU+AihqFxPMiF05rtiDrorD1Vrm1KEz5hxDo= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be h1:ZufKGk1RMcJsTDgpCfdWcrca90K3s1+88KlVL/4dEmU= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be/go.mod h1:+CUhxuWB8uMYIoiXwofuLIXPyiNnwmoZlH90KWAE5Ew= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 h1:xDbz5etX5h6foQDgpT9ukTo2I65b6q32Nu9do5nBPk8= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713/go.mod 
h1:CqHnvltrng2O40hNGVl56qcAhv7tiXK3SKx47LKyE/A= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= @@ -660,6 +660,8 @@ github.com/vmihailenco/msgpack/v5 v5.3.4 h1:qMKAwOV+meBw2Y8k9cVwAy7qErtYCwBzZ2el github.com/vmihailenco/msgpack/v5 v5.3.4/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/vmware-tanzu/velero v1.14.1 h1:HYj73scn7ZqtfTanjW/X4W0Hn3w/qcfoRbrHCWM52iI= +github.com/vmware-tanzu/velero v1.14.1/go.mod h1:/OzHzTvbevkkX+bK/BS4AgYMv6nKuOgsybuuvLWkSS0= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= diff --git a/kubewatch/pkg/config/appConfig.go b/kubewatch/pkg/config/appConfig.go index 09817dba6..ad5fe1e06 100644 --- a/kubewatch/pkg/config/appConfig.go +++ b/kubewatch/pkg/config/appConfig.go @@ -25,6 +25,7 @@ type AppConfig struct { CdConfig *CdConfig AcdConfig *AcdConfig Timeout *Timeout + VeleroConfig *VeleroConfig } func (app *AppConfig) GetClusterConfig() *ClusterConfig { @@ -67,6 +68,10 @@ func (app *AppConfig) GetAcdConfig() *AcdConfig { return app.AcdConfig } +func (app *AppConfig) GetVeleroConfig() *VeleroConfig { + return app.VeleroConfig +} + func (app *AppConfig) GetACDNamespace() string { if app.IsMultiClusterArgoCD() { return metav1.NamespaceAll @@ -99,6 +104,10 @@ func (app *AppConfig) IsMultiClusterSystemExec() bool { return 
app.GetClusterConfig().SystemExecClusterType == AllClusterType && !app.GetExternalConfig().External } +func (app *AppConfig) IsMultiClusterVeleroType() bool { + return app.GetClusterConfig().ClusterStorageModuleType == AllClusterType && !app.GetExternalConfig().External +} + func GetAppConfig() (*AppConfig, error) { clusterConfig, err := getClusterConfig() if err != nil { @@ -124,6 +133,10 @@ func GetAppConfig() (*AppConfig, error) { if err != nil { return nil, err } + veleroConfig, err := getVeleroConfig() + if err != nil { + return nil, err + } return &AppConfig{ ClusterCfg: clusterConfig, ExternalConfig: externalConfig, @@ -131,5 +144,6 @@ func GetAppConfig() (*AppConfig, error) { CdConfig: cdConfig, AcdConfig: acdConfig, Timeout: timeout, + VeleroConfig: veleroConfig, }, nil } diff --git a/kubewatch/pkg/config/clusterConfig.go b/kubewatch/pkg/config/clusterConfig.go index cb7f2165c..57be7616f 100644 --- a/kubewatch/pkg/config/clusterConfig.go +++ b/kubewatch/pkg/config/clusterConfig.go @@ -39,6 +39,10 @@ type ClusterConfig struct { // - AllClusterType: All clusters are enabled for CD ArgoWorkflow informer // - InClusterType: Only default cluster is enabled for CD ArgoWorkflow informer ClusterCdArgoWfType string `env:"CLUSTER_CD_ARGO_WF_TYPE" envDefault:"IN_CLUSTER" description:"Determines cluster mode for CD ArgoWorkflow informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER" deprecated:"false"` + // ClusterStorageModuleType defines whether all clusters are enabled for Velero informer + // - AllClusterType: All clusters are enabled for Velero informer + // - InClusterType: Only default cluster is enabled for Velero informer + ClusterStorageModuleType string `env:"CLUSTER_STORAGE_MODULE_TYPE" envDefault:"ALL_CLUSTER" description:"Determines cluster mode for Velero BSL informer; for multiple cluster mode, it will be set to ALL_CLUSTER; for single cluster mode, it will be set to IN_CLUSTER" 
deprecated:"false"` } func getClusterConfig() (*ClusterConfig, error) { diff --git a/kubewatch/pkg/config/veleroConfig.go b/kubewatch/pkg/config/veleroConfig.go new file mode 100644 index 000000000..2b818889e --- /dev/null +++ b/kubewatch/pkg/config/veleroConfig.go @@ -0,0 +1,22 @@ +package config + +import "github.com/caarlos0/env" + +// CATEGORY=VELERO_INFORMER +type VeleroConfig struct { + // VeleroInformer is used to determine whether Velero informer is enabled or not + VeleroInformer bool `env:"VELERO_INFORMER" envDefault:"false" description:"Used to determine whether Velero informer is enabled or not" deprecated:"false"` + + // VeleroNamespace is the namespace where all the Velero backup objects are published + VeleroNamespace string `env:"VELERO_NAMESPACE" envDefault:"velero" description:"Namespace where all the Velero backup objects are published" deprecated:"false"` +} + +func getVeleroConfig() (*VeleroConfig, error) { + veleroConfig := &VeleroConfig{} + err := env.Parse(veleroConfig) + return veleroConfig, err +} + +func (v *VeleroConfig) GetVeleroNamespace() string { + return v.VeleroNamespace +} diff --git a/kubewatch/pkg/informer/bean/client.go b/kubewatch/pkg/informer/bean/client.go index 4248abdfb..e8da54e2d 100644 --- a/kubewatch/pkg/informer/bean/client.go +++ b/kubewatch/pkg/informer/bean/client.go @@ -19,15 +19,25 @@ package bean type ClientType string const ( - ArgoCDClientType ClientType = "ArgoCD" - CiArgoWorkflowClientType ClientType = "CiArgoWorkflow" - CdArgoWorkflowClientType ClientType = "CdArgoWorkflow" - SystemExecutorClientType ClientType = "SystemExecutor" + ArgoCDClientType ClientType = "ArgoCD" + CiArgoWorkflowClientType ClientType = "CiArgoWorkflow" + CdArgoWorkflowClientType ClientType = "CdArgoWorkflow" + SystemExecutorClientType ClientType = "SystemExecutor" + VeleroBslClientType ClientType = "VeleroBsl" + VeleroVslClientType ClientType = "VeleroVsl" + VeleroBackupScheduleClientType ClientType = "VeleroBackupSchedule" + 
VeleroBackupClientType ClientType = "VeleroBackup" + VeleroRestoreClientType ClientType = "VeleroRestore" ) var SupportedClientMap = map[ClientType]bool{ - ArgoCDClientType: true, - CiArgoWorkflowClientType: true, - CdArgoWorkflowClientType: true, - SystemExecutorClientType: true, + ArgoCDClientType: true, + CiArgoWorkflowClientType: true, + CdArgoWorkflowClientType: true, + SystemExecutorClientType: true, + VeleroBslClientType: true, + VeleroVslClientType: true, + VeleroBackupScheduleClientType: true, + VeleroBackupClientType: true, + VeleroRestoreClientType: true, } diff --git a/kubewatch/pkg/informer/cluster/advisor.go b/kubewatch/pkg/informer/cluster/advisor.go index 0d42fe626..d4842279f 100644 --- a/kubewatch/pkg/informer/cluster/advisor.go +++ b/kubewatch/pkg/informer/cluster/advisor.go @@ -54,6 +54,16 @@ func (impl *InformerImpl) GetClientAdvisor(clientType bean.ClientType) (ClientAd return impl.cdWfInformer, nil case bean.SystemExecutorClientType: return impl.systemExecInformer, nil + case bean.VeleroBslClientType: + return impl.veleroBslInformer, nil + case bean.VeleroVslClientType: + return impl.veleroVslInformer, nil + case bean.VeleroBackupClientType: + return impl.veleroBackupInformer, nil + case bean.VeleroRestoreClientType: + return impl.veleroRestoreInformer, nil + case bean.VeleroBackupScheduleClientType: + return impl.veleroBackupScheduleInformer, nil default: return NewUnimplementedAdvisor(), fmt.Errorf("client type %q not supported", clientType) } @@ -69,6 +79,16 @@ func (impl *InformerImpl) IsMultiClusterMode(clientType bean.ClientType) bool { return impl.appConfig.IsMultiClusterCdArgoWfType() case bean.SystemExecutorClientType: return impl.appConfig.IsMultiClusterSystemExec() + case bean.VeleroBslClientType: + return impl.appConfig.IsMultiClusterVeleroType() + case bean.VeleroVslClientType: + return impl.appConfig.IsMultiClusterVeleroType() + case bean.VeleroBackupClientType: + return impl.appConfig.IsMultiClusterVeleroType() + case 
bean.VeleroRestoreClientType: + return impl.appConfig.IsMultiClusterVeleroType() + case bean.VeleroBackupScheduleClientType: + return impl.appConfig.IsMultiClusterVeleroType() default: return false } diff --git a/kubewatch/pkg/informer/cluster/argoCD/informer.go b/kubewatch/pkg/informer/cluster/argoCD/informer.go index eadb26ddc..3988becf4 100644 --- a/kubewatch/pkg/informer/cluster/argoCD/informer.go +++ b/kubewatch/pkg/informer/cluster/argoCD/informer.go @@ -54,7 +54,6 @@ func NewInformerImpl(logger *zap.SugaredLogger, argoCdInformerStopper: make(map[int]*informerBean.SharedStopper), } } - func (impl *InformerImpl) StartInformerForCluster(clusterInfo *repository.Cluster) error { if !impl.appConfig.GetAcdConfig().ACDInformer || impl.appConfig.GetExternalConfig().External { impl.logger.Warnw("argo cd informer is not enabled for cluster, skipping...", "clusterId", clusterInfo.Id, "clusterName", clusterInfo.ClusterName, "appConfig", impl.appConfig) diff --git a/kubewatch/pkg/informer/cluster/informer.go b/kubewatch/pkg/informer/cluster/informer.go index 6d6e64b4d..ca0bd3202 100644 --- a/kubewatch/pkg/informer/cluster/informer.go +++ b/kubewatch/pkg/informer/cluster/informer.go @@ -27,6 +27,11 @@ import ( cdWf "github.com/devtron-labs/kubewatch/pkg/informer/cluster/argoWf/cd" ciWf "github.com/devtron-labs/kubewatch/pkg/informer/cluster/argoWf/ci" "github.com/devtron-labs/kubewatch/pkg/informer/cluster/systemExec" + veleroBackupInformer "github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/backup" + veleroBackupScheduleInformer "github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/backupSchedule" + veleroBslInformer "github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/backupStorageLocation" + veleroRestoreInformer "github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/restore" + veleroVslInformer "github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/volumeSnapshotLocation" "github.com/devtron-labs/kubewatch/pkg/middleware" 
"github.com/devtron-labs/kubewatch/pkg/resource" resourceBean "github.com/devtron-labs/kubewatch/pkg/resource/bean" @@ -47,16 +52,21 @@ type Informer interface { } type InformerImpl struct { - logger *zap.SugaredLogger - appConfig *config.AppConfig - k8sUtil utils.K8sUtil - informerClient resource.InformerClient - clusterRepository repository.ClusterRepository - clusterInformerStopper *informerBean.FactoryStopper - argoCdInformer *argoCD.InformerImpl - ciWfInformer *ciWf.InformerImpl - cdWfInformer *cdWf.InformerImpl - systemExecInformer *systemExec.InformerImpl + logger *zap.SugaredLogger + appConfig *config.AppConfig + k8sUtil utils.K8sUtil + informerClient resource.InformerClient + clusterRepository repository.ClusterRepository + clusterInformerStopper *informerBean.FactoryStopper + argoCdInformer *argoCD.InformerImpl + ciWfInformer *ciWf.InformerImpl + cdWfInformer *cdWf.InformerImpl + systemExecInformer *systemExec.InformerImpl + veleroBslInformer *veleroBslInformer.InformerImpl + veleroVslInformer *veleroVslInformer.InformerImpl + veleroBackupInformer *veleroBackupInformer.InformerImpl + veleroRestoreInformer *veleroRestoreInformer.InformerImpl + veleroBackupScheduleInformer *veleroBackupScheduleInformer.InformerImpl } func NewInformerImpl(logger *zap.SugaredLogger, @@ -67,17 +77,28 @@ func NewInformerImpl(logger *zap.SugaredLogger, argoCdInformer *argoCD.InformerImpl, ciWfInformer *ciWf.InformerImpl, cdWfInformer *cdWf.InformerImpl, - systemExecInformer *systemExec.InformerImpl) *InformerImpl { + systemExecInformer *systemExec.InformerImpl, + veleroBslInformer *veleroBslInformer.InformerImpl, + veleroVslInformer *veleroVslInformer.InformerImpl, + veleroBackupInformer *veleroBackupInformer.InformerImpl, + veleroRestoreInformer *veleroRestoreInformer.InformerImpl, + veleroBackupScheduleInformer *veleroBackupScheduleInformer.InformerImpl, +) *InformerImpl { return &InformerImpl{ - logger: logger, - appConfig: appConfig, - k8sUtil: k8sUtil, - informerClient: 
informerClient, - clusterRepository: clusterRepository, - argoCdInformer: argoCdInformer, - ciWfInformer: ciWfInformer, - cdWfInformer: cdWfInformer, - systemExecInformer: systemExecInformer, + logger: logger, + appConfig: appConfig, + k8sUtil: k8sUtil, + informerClient: informerClient, + clusterRepository: clusterRepository, + argoCdInformer: argoCdInformer, + ciWfInformer: ciWfInformer, + cdWfInformer: cdWfInformer, + systemExecInformer: systemExecInformer, + veleroBslInformer: veleroBslInformer, + veleroVslInformer: veleroVslInformer, + veleroBackupInformer: veleroBackupInformer, + veleroRestoreInformer: veleroRestoreInformer, + veleroBackupScheduleInformer: veleroBackupScheduleInformer, } } diff --git a/kubewatch/pkg/informer/cluster/velero/backup/helper.go b/kubewatch/pkg/informer/cluster/velero/backup/helper.go new file mode 100644 index 000000000..6e473ada0 --- /dev/null +++ b/kubewatch/pkg/informer/cluster/velero/backup/helper.go @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package veleroBackupInformer + +import ( + "fmt" + informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" + informerErr "github.com/devtron-labs/kubewatch/pkg/informer/errors" + "golang.org/x/exp/maps" +) + +func (impl *InformerImpl) getVeleroBackupStopper(clusterId int) (*informerBean.SharedStopper, bool) { + stopper, ok := impl.veleroBackupInformerStopper[clusterId] + if ok { + return stopper, stopper.HasInformer() + } + return stopper, false +} + +func (impl *InformerImpl) checkAndGetStopChannel(clusterLabels *informerBean.ClusterLabels) (chan struct{}, error) { + stopChannel := make(chan struct{}) + stopper, ok := impl.getVeleroBackupStopper(clusterLabels.ClusterId) + if ok && stopper.HasInformer() { + impl.logger.Debug(fmt.Sprintf("velero backup informer for %s already exists", clusterLabels.ClusterName)) + // TODO: should we return the stop channel here? + return nil, informerErr.AlreadyExists + } + stopper = stopper.GetStopper(stopChannel) + impl.veleroBackupInformerStopper[clusterLabels.ClusterId] = stopper + return stopChannel, nil +} + +func (impl *InformerImpl) getStoppableClusterIds() []int { + return maps.Keys(impl.veleroBackupInformerStopper) +} diff --git a/kubewatch/pkg/informer/cluster/velero/backup/informer.go b/kubewatch/pkg/informer/cluster/velero/backup/informer.go new file mode 100644 index 000000000..9c5b1cb64 --- /dev/null +++ b/kubewatch/pkg/informer/cluster/velero/backup/informer.go @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package veleroBackupInformer + +import ( + "github.com/devtron-labs/common-lib/async" + repository "github.com/devtron-labs/kubewatch/pkg/cluster" + "github.com/devtron-labs/kubewatch/pkg/config" + informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" + resourceBean "github.com/devtron-labs/kubewatch/pkg/resource/bean" + "time" + + "github.com/devtron-labs/kubewatch/pkg/resource" + "github.com/devtron-labs/kubewatch/pkg/utils" + "go.uber.org/zap" +) + +type InformerImpl struct { + logger *zap.SugaredLogger + k8sUtil utils.K8sUtil + appConfig *config.AppConfig + informerClient resource.InformerClient + asyncRunnable *async.Runnable + veleroBackupInformerStopper map[int]*informerBean.SharedStopper +} + +func NewInformerImpl(logger *zap.SugaredLogger, + k8sUtil utils.K8sUtil, + appConfig *config.AppConfig, + informerClient resource.InformerClient, + asyncRunnable *async.Runnable) *InformerImpl { + return &InformerImpl{ + logger: logger, + k8sUtil: k8sUtil, + appConfig: appConfig, + informerClient: informerClient, + asyncRunnable: asyncRunnable, + veleroBackupInformerStopper: make(map[int]*informerBean.SharedStopper), + } +} + +func (impl *InformerImpl) StartInformerForCluster(clusterInfo *repository.Cluster) error { + if !impl.appConfig.GetVeleroConfig().VeleroInformer || impl.appConfig.GetExternalConfig().External { + impl.logger.Warnw("velero informer is not enabled, skipping...", "clusterId", clusterInfo.Id, "clusterName", clusterInfo.ClusterName, "appConfig", impl.appConfig) + return nil + } + startTime := time.Now() + defer func() { + impl.logger.Debugw("time taken to start informer for velero backup", "time", time.Since(startTime)) + }() + clusterLabels := informerBean.NewClusterLabels(clusterInfo.ClusterName, clusterInfo.Id) + stopChannel, err := impl.checkAndGetStopChannel(clusterLabels) + if err != nil { + impl.logger.Errorw("error 
in getting stop channel, velero informer already exists ", "clusterId", clusterInfo.Id, "err", err) + return err + } + impl.logger.Infow("starting velero informer for cluster", "clusterId", clusterInfo.Id) + restConfig := impl.k8sUtil.GetK8sConfigForCluster(clusterInfo) + backupInformerClient := impl.informerClient.GetSharedInformerClient(resourceBean.VeleroBackupResourceType) + backupInformer, err := backupInformerClient.GetSharedInformer(clusterLabels, impl.appConfig.GetVeleroConfig().GetVeleroNamespace(), restConfig) + if err != nil { + impl.logger.Errorw("error in getting velero backup informer", "clusterId", clusterInfo.Id, "err", err) + return err + } + runnable := func() { + backupInformer.Run(stopChannel) + impl.logger.Infow("informer started for velero backup", "clusterId", clusterInfo.Id, "clusterName", clusterInfo.ClusterName) + } + impl.asyncRunnable.Execute(runnable) + return nil +} + +func (impl *InformerImpl) StopInformerForCluster(clusterId int) error { + stopper, found := impl.getVeleroBackupStopper(clusterId) + if found { + stopper.Stop() + delete(impl.veleroBackupInformerStopper, clusterId) + impl.logger.Infow("velero backup informer stopped for cluster", "clusterId", clusterId) + } + return nil +} + +func (impl *InformerImpl) StopAll() { + for _, stopper := range impl.veleroBackupInformerStopper { + stopper.Stop() + } +} diff --git a/kubewatch/pkg/informer/cluster/velero/backupSchedule/helper.go b/kubewatch/pkg/informer/cluster/velero/backupSchedule/helper.go new file mode 100644 index 000000000..c3aec2401 --- /dev/null +++ b/kubewatch/pkg/informer/cluster/velero/backupSchedule/helper.go @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package veleroBackupScheduleInformer + +import ( + "fmt" + informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" + informerErr "github.com/devtron-labs/kubewatch/pkg/informer/errors" + "golang.org/x/exp/maps" +) + +func (impl *InformerImpl) getVeleroBackupScheduleStopper(clusterId int) (*informerBean.SharedStopper, bool) { + stopper, ok := impl.veleroBackupScheduleInformerStopper[clusterId] + if ok { + return stopper, stopper.HasInformer() + } + return stopper, false +} +func (impl *InformerImpl) checkAndGetStopChannel(clusterLabels *informerBean.ClusterLabels) (chan struct{}, error) { + stopChannel := make(chan struct{}) + stopper, ok := impl.getVeleroBackupScheduleStopper(clusterLabels.ClusterId) + if ok && stopper.HasInformer() { + impl.logger.Debug(fmt.Sprintf("velero backup schedule informer for %s already exists", clusterLabels.ClusterName)) + // TODO: should we return the stop channel here? + return nil, informerErr.AlreadyExists + } + stopper = stopper.GetStopper(stopChannel) + impl.veleroBackupScheduleInformerStopper[clusterLabels.ClusterId] = stopper + return stopChannel, nil +} +func (impl *InformerImpl) getStoppableClusterIds() []int { + return maps.Keys(impl.veleroBackupScheduleInformerStopper) +} diff --git a/kubewatch/pkg/informer/cluster/velero/backupSchedule/informer.go b/kubewatch/pkg/informer/cluster/velero/backupSchedule/informer.go new file mode 100644 index 000000000..9b881d6d3 --- /dev/null +++ b/kubewatch/pkg/informer/cluster/velero/backupSchedule/informer.go @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2024. Devtron Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package veleroBackupScheduleInformer + +import ( + "github.com/devtron-labs/common-lib/async" + repository "github.com/devtron-labs/kubewatch/pkg/cluster" + "github.com/devtron-labs/kubewatch/pkg/config" + informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" + "github.com/devtron-labs/kubewatch/pkg/resource" + resourceBean "github.com/devtron-labs/kubewatch/pkg/resource/bean" + "github.com/devtron-labs/kubewatch/pkg/utils" + "go.uber.org/zap" + "time" +) + +type InformerImpl struct { + logger *zap.SugaredLogger + k8sUtil utils.K8sUtil + appConfig *config.AppConfig + informerClient resource.InformerClient + asyncRunnable *async.Runnable + veleroBackupScheduleInformerStopper map[int]*informerBean.SharedStopper +} + +func NewInformerImpl(logger *zap.SugaredLogger, + k8sUtil utils.K8sUtil, + appConfig *config.AppConfig, + informerClient resource.InformerClient, + asyncRunnable *async.Runnable) *InformerImpl { + return &InformerImpl{ + logger: logger, + k8sUtil: k8sUtil, + appConfig: appConfig, + informerClient: informerClient, + asyncRunnable: asyncRunnable, + veleroBackupScheduleInformerStopper: make(map[int]*informerBean.SharedStopper), + } +} + +func (impl *InformerImpl) StartInformerForCluster(clusterInfo *repository.Cluster) error { + if !impl.appConfig.GetVeleroConfig().VeleroInformer || impl.appConfig.GetExternalConfig().External { + impl.logger.Warnw("velero informer is not enabled, 
skipping...", "clusterId", clusterInfo.Id, "clusterName", clusterInfo.ClusterName, "appConfig", impl.appConfig) + return nil + } + startTime := time.Now() + defer func() { + impl.logger.Debugw("time taken to start informer for velero backup schedule", "time", time.Since(startTime)) + }() + clusterLabels := informerBean.NewClusterLabels(clusterInfo.ClusterName, clusterInfo.Id) + stopChannel, err := impl.checkAndGetStopChannel(clusterLabels) + if err != nil { + impl.logger.Errorw("error in getting stop channel, velero informer already exists ", "clusterId", clusterInfo.Id, "err", err) + return err + } + impl.logger.Infow("starting velero informer for cluster", "clusterId", clusterInfo.Id) + restConfig := impl.k8sUtil.GetK8sConfigForCluster(clusterInfo) + backupScheduleInformerClient := impl.informerClient.GetSharedInformerClient(resourceBean.VeleroBackupScheduleResourceType) + backupScheduleInformer, err := backupScheduleInformerClient.GetSharedInformer(clusterLabels, impl.appConfig.GetVeleroConfig().GetVeleroNamespace(), restConfig) + if err != nil { + impl.logger.Errorw("error in getting velero backup schedule informer", "clusterId", clusterInfo.Id, "err", err) + return err + } + runnable := func() { + backupScheduleInformer.Run(stopChannel) + impl.logger.Infow("informer started for velero backup schedule", "clusterId", clusterInfo.Id, "clusterName", clusterInfo.ClusterName) + } + impl.asyncRunnable.Execute(runnable) + return nil +} + +func (impl *InformerImpl) StopInformerForCluster(clusterId int) error { + stopper, found := impl.getVeleroBackupScheduleStopper(clusterId) + if found { + stopper.Stop() + delete(impl.veleroBackupScheduleInformerStopper, clusterId) + impl.logger.Infow("velero backup schedule informer stopped for cluster", "clusterId", clusterId) + } + return nil +} + +func (impl *InformerImpl) StopAll() { + for _, stopper := range impl.veleroBackupScheduleInformerStopper { + stopper.Stop() + } +} diff --git
a/kubewatch/pkg/informer/cluster/velero/backupStorageLocation/helper.go b/kubewatch/pkg/informer/cluster/velero/backupStorageLocation/helper.go new file mode 100644 index 000000000..7b45d84e7 --- /dev/null +++ b/kubewatch/pkg/informer/cluster/velero/backupStorageLocation/helper.go @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package veleroBslInformer + +import ( + "fmt" + informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" + informerErr "github.com/devtron-labs/kubewatch/pkg/informer/errors" + "golang.org/x/exp/maps" +) + +func (impl *InformerImpl) getVeleroBslStopper(clusterId int) (*informerBean.SharedStopper, bool) { + stopper, ok := impl.veleroBslInformerStopper[clusterId] + if ok { + return stopper, stopper.HasInformer() + } + return stopper, false +} +func (impl *InformerImpl) checkAndGetStopChannel(clusterLabels *informerBean.ClusterLabels) (chan struct{}, error) { + stopChannel := make(chan struct{}) + stopper, ok := impl.getVeleroBslStopper(clusterLabels.ClusterId) + if ok && stopper.HasInformer() { + impl.logger.Debug(fmt.Sprintf("velero bsl informer for %s already exist", clusterLabels.ClusterName)) + // TODO: should we return the stop channel here? 
+ return nil, informerErr.AlreadyExists + } + stopper = stopper.GetStopper(stopChannel) + impl.veleroBslInformerStopper[clusterLabels.ClusterId] = stopper + return stopChannel, nil +} +func (impl *InformerImpl) getStoppableClusterIds() []int { + return maps.Keys(impl.veleroBslInformerStopper) +} diff --git a/kubewatch/pkg/informer/cluster/velero/backupStorageLocation/informer.go b/kubewatch/pkg/informer/cluster/velero/backupStorageLocation/informer.go new file mode 100644 index 000000000..6735dc589 --- /dev/null +++ b/kubewatch/pkg/informer/cluster/velero/backupStorageLocation/informer.go @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package veleroBslInformer + +import ( + "github.com/devtron-labs/common-lib/async" + repository "github.com/devtron-labs/kubewatch/pkg/cluster" + "github.com/devtron-labs/kubewatch/pkg/config" + informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" + "github.com/devtron-labs/kubewatch/pkg/resource" + resourceBean "github.com/devtron-labs/kubewatch/pkg/resource/bean" + "github.com/devtron-labs/kubewatch/pkg/utils" + "go.uber.org/zap" + "time" +) + +type InformerImpl struct { + logger *zap.SugaredLogger + k8sUtil utils.K8sUtil + appConfig *config.AppConfig + informerClient resource.InformerClient + asyncRunnable *async.Runnable + veleroBslInformerStopper map[int]*informerBean.SharedStopper +} + +func NewInformerImpl(logger *zap.SugaredLogger, + appConfig *config.AppConfig, + k8sUtil utils.K8sUtil, + informerClient resource.InformerClient, + asyncRunnable *async.Runnable) *InformerImpl { + return &InformerImpl{ + logger: logger, + appConfig: appConfig, + k8sUtil: k8sUtil, + informerClient: informerClient, + asyncRunnable: asyncRunnable, + veleroBslInformerStopper: make(map[int]*informerBean.SharedStopper), + } +} + +func (impl *InformerImpl) StartInformerForCluster(clusterInfo *repository.Cluster) error { + if !impl.appConfig.GetVeleroConfig().VeleroInformer || impl.appConfig.GetExternalConfig().External { + impl.logger.Warnw("velero informer is not enabled, skipping...", "clusterId", clusterInfo.Id, "clusterName", clusterInfo.ClusterName, "appConfig", impl.appConfig) + return nil + } + startTime := time.Now() + defer func() { + impl.logger.Infow("time taken to start velero informer", "clusterId", clusterInfo.Id, "time", time.Since(startTime)) + }() + clusterLabels := informerBean.NewClusterLabels(clusterInfo.ClusterName, clusterInfo.Id) + stopChannel, err := impl.checkAndGetStopChannel(clusterLabels) + if err != nil { + impl.logger.Errorw("error in getting stop channel, velero informer already exists ", "clusterId", clusterInfo.Id, "err", err) + 
return err + } + impl.logger.Infow("starting velero informer for cluster", "clusterId", clusterInfo.Id, "clusterName", clusterInfo.ClusterName) + restConfig := impl.k8sUtil.GetK8sConfigForCluster(clusterInfo) + bslInformerClient := impl.informerClient.GetSharedInformerClient(resourceBean.VeleroBslResourceType) + bslInformer, err := bslInformerClient.GetSharedInformer(clusterLabels, impl.appConfig.GetVeleroConfig().GetVeleroNamespace(), restConfig) + if err != nil { + impl.logger.Errorw("error in registering velero bsl informer", "err", err, "clusterId", clusterInfo.Id) + return err + } + runnable := func() { + bslInformer.Run(stopChannel) + impl.logger.Infow("informer started for velero bsl", "clusterId", clusterInfo.Id, "clusterName", clusterInfo.ClusterName) + } + impl.asyncRunnable.Execute(runnable) + return nil +} + +func (impl *InformerImpl) StopInformerForCluster(clusterId int) error { + stopper, found := impl.getVeleroBslStopper(clusterId) + if found { + stopper.Stop() + delete(impl.veleroBslInformerStopper, clusterId) + impl.logger.Infow("velero bsl informer stopped for cluster", "clusterId", clusterId) + } + return nil +} + +func (impl *InformerImpl) StopAll() { + for _, stopper := range impl.veleroBslInformerStopper { + stopper.Stop() + } +} diff --git a/kubewatch/pkg/informer/cluster/velero/restore/helper.go b/kubewatch/pkg/informer/cluster/velero/restore/helper.go new file mode 100644 index 000000000..f28ef1036 --- /dev/null +++ b/kubewatch/pkg/informer/cluster/velero/restore/helper.go @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package veleroRestoreInformer + +import ( + "fmt" + informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" + informerErr "github.com/devtron-labs/kubewatch/pkg/informer/errors" + "golang.org/x/exp/maps" +) + +func (impl *InformerImpl) getVeleroRestoreStopper(clusterId int) (*informerBean.SharedStopper, bool) { + stopper, ok := impl.veleroRestoreInformerStopper[clusterId] + if ok { + return stopper, stopper.HasInformer() + } + return stopper, false +} + +func (impl *InformerImpl) checkAndGetStopChannel(clusterLabels *informerBean.ClusterLabels) (chan struct{}, error) { + stopChannel := make(chan struct{}) + stopper, ok := impl.getVeleroRestoreStopper(clusterLabels.ClusterId) + if ok && stopper.HasInformer() { + impl.logger.Debug(fmt.Sprintf("velero restore informer for %s already exist", clusterLabels.ClusterName)) + return nil, informerErr.AlreadyExists + } + stopper = stopper.GetStopper(stopChannel) + impl.veleroRestoreInformerStopper[clusterLabels.ClusterId] = stopper + return stopChannel, nil +} + +func (impl *InformerImpl) getStoppableClusterIds() []int { + return maps.Keys(impl.veleroRestoreInformerStopper) +} diff --git a/kubewatch/pkg/informer/cluster/velero/restore/informer.go b/kubewatch/pkg/informer/cluster/velero/restore/informer.go new file mode 100644 index 000000000..99563739a --- /dev/null +++ b/kubewatch/pkg/informer/cluster/velero/restore/informer.go @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package veleroRestoreInformer + +import ( + "github.com/devtron-labs/common-lib/async" + repository "github.com/devtron-labs/kubewatch/pkg/cluster" + "github.com/devtron-labs/kubewatch/pkg/config" + informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" + "github.com/devtron-labs/kubewatch/pkg/resource" + resourceBean "github.com/devtron-labs/kubewatch/pkg/resource/bean" + "github.com/devtron-labs/kubewatch/pkg/utils" + "go.uber.org/zap" + "time" +) + +type InformerImpl struct { + logger *zap.SugaredLogger + k8sUtil utils.K8sUtil + appConfig *config.AppConfig + informerClient resource.InformerClient + asyncRunnable *async.Runnable + veleroRestoreInformerStopper map[int]*informerBean.SharedStopper +} + +func NewInformerImpl(logger *zap.SugaredLogger, + k8sUtil utils.K8sUtil, + appConfig *config.AppConfig, + informerClient resource.InformerClient, + asyncRunnable *async.Runnable) *InformerImpl { + return &InformerImpl{ + logger: logger, + k8sUtil: k8sUtil, + appConfig: appConfig, + informerClient: informerClient, + asyncRunnable: asyncRunnable, + veleroRestoreInformerStopper: make(map[int]*informerBean.SharedStopper), + } +} +func (impl *InformerImpl) StartInformerForCluster(clusterInfo *repository.Cluster) error { + if !impl.appConfig.GetVeleroConfig().VeleroInformer || impl.appConfig.GetExternalConfig().External { + impl.logger.Warnw("velero informer is not enabled, skipping...", 
"clusterId", clusterInfo.Id, "clusterName", clusterInfo.ClusterName, "appConfig", impl.appConfig) + return nil + } + startTime := time.Now() + defer func() { + impl.logger.Debugw("time taken to start informer for velero restore", "time", time.Since(startTime)) + }() + clusterLabels := informerBean.NewClusterLabels(clusterInfo.ClusterName, clusterInfo.Id) + stopChannel, err := impl.checkAndGetStopChannel(clusterLabels) + if err != nil { + impl.logger.Errorw("error in getting stop channel, velero informer already exists ", "clusterId", clusterInfo.Id, "err", err) + return err + } + impl.logger.Infow("starting velero informer for cluster", "clusterId", clusterInfo.Id) + restConfig := impl.k8sUtil.GetK8sConfigForCluster(clusterInfo) + restoreInformerClient := impl.informerClient.GetSharedInformerClient(resourceBean.VeleroRestoreResourceType) + restoreInformer, err := restoreInformerClient.GetSharedInformer(clusterLabels, impl.appConfig.GetVeleroConfig().GetVeleroNamespace(), restConfig) + if err != nil { + impl.logger.Errorw("error in getting velero restore informer", "clusterId", clusterInfo.Id, "err", err) + return err + } + runnable := func() { + restoreInformer.Run(stopChannel) + impl.logger.Infow("informer started for velero restore", "clusterId", clusterInfo.Id, "clusterName", clusterInfo.ClusterName) + } + impl.asyncRunnable.Execute(runnable) + return nil +} + +func (impl *InformerImpl) StopInformerForCluster(clusterId int) error { + stopper, found := impl.getVeleroRestoreStopper(clusterId) + if found { + stopper.Stop() + delete(impl.veleroRestoreInformerStopper, clusterId) + impl.logger.Infow("velero restore informer stopped for cluster", "clusterId", clusterId) + } + return nil +} + +func (impl *InformerImpl) StopAll() { + for _, stopper := range impl.veleroRestoreInformerStopper { + stopper.Stop() + } +} diff --git a/kubewatch/pkg/informer/cluster/velero/volumeSnapshotLocation/helper.go b/kubewatch/pkg/informer/cluster/velero/volumeSnapshotLocation/helper.go new
file mode 100644 index 000000000..1b2ab67b1 --- /dev/null +++ b/kubewatch/pkg/informer/cluster/velero/volumeSnapshotLocation/helper.go @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package veleroVslInformer + +import ( + "fmt" + informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" + informerErr "github.com/devtron-labs/kubewatch/pkg/informer/errors" + "golang.org/x/exp/maps" +) + +func (impl *InformerImpl) getVeleroVslStopper(clusterId int) (*informerBean.SharedStopper, bool) { + stopper, ok := impl.veleroVslInformerStopper[clusterId] + if ok { + return stopper, stopper.HasInformer() + } + return stopper, false +} +func (impl *InformerImpl) checkAndGetStopChannel(clusterLabels *informerBean.ClusterLabels) (chan struct{}, error) { + stopChannel := make(chan struct{}) + stopper, ok := impl.getVeleroVslStopper(clusterLabels.ClusterId) + if ok && stopper.HasInformer() { + impl.logger.Debug(fmt.Sprintf("velero vsl informer for %s already exist", clusterLabels.ClusterName)) + // TODO: should we return the stop channel here? 
+ return nil, informerErr.AlreadyExists + } + stopper = stopper.GetStopper(stopChannel) + impl.veleroVslInformerStopper[clusterLabels.ClusterId] = stopper + return stopChannel, nil +} +func (impl *InformerImpl) getStoppableClusterIds() []int { + return maps.Keys(impl.veleroVslInformerStopper) +} diff --git a/kubewatch/pkg/informer/cluster/velero/volumeSnapshotLocation/informer.go b/kubewatch/pkg/informer/cluster/velero/volumeSnapshotLocation/informer.go new file mode 100644 index 000000000..7aa811dde --- /dev/null +++ b/kubewatch/pkg/informer/cluster/velero/volumeSnapshotLocation/informer.go @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package veleroVslInformer + +import ( + "github.com/devtron-labs/common-lib/async" + repository "github.com/devtron-labs/kubewatch/pkg/cluster" + "github.com/devtron-labs/kubewatch/pkg/config" + informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" + "github.com/devtron-labs/kubewatch/pkg/resource" + resourceBean "github.com/devtron-labs/kubewatch/pkg/resource/bean" + "github.com/devtron-labs/kubewatch/pkg/utils" + "go.uber.org/zap" + "time" +) + +type InformerImpl struct { + logger *zap.SugaredLogger + k8sUtil utils.K8sUtil + appConfig *config.AppConfig + informerClient resource.InformerClient + asyncRunnable *async.Runnable + veleroVslInformerStopper map[int]*informerBean.SharedStopper +} + +func NewInformerImpl(logger *zap.SugaredLogger, + appConfig *config.AppConfig, + k8sUtil utils.K8sUtil, + informerClient resource.InformerClient, + asyncRunnable *async.Runnable) *InformerImpl { + return &InformerImpl{ + logger: logger, + appConfig: appConfig, + k8sUtil: k8sUtil, + informerClient: informerClient, + asyncRunnable: asyncRunnable, + veleroVslInformerStopper: make(map[int]*informerBean.SharedStopper), + } +} + +func (impl *InformerImpl) StartInformerForCluster(clusterInfo *repository.Cluster) error { + if !impl.appConfig.GetVeleroConfig().VeleroInformer || impl.appConfig.GetExternalConfig().External { + impl.logger.Warnw("velero informer is not enabled, skipping...", "clusterId", clusterInfo.Id, "clusterName", clusterInfo.ClusterName, "appConfig", impl.appConfig) + return nil + } + startTime := time.Now() + defer func() { + impl.logger.Infow("time taken to start velero vsl informer", "clusterId", clusterInfo.Id, "time", time.Since(startTime)) + }() + clusterLabels := informerBean.NewClusterLabels(clusterInfo.ClusterName, clusterInfo.Id) + stopChannel, err := impl.checkAndGetStopChannel(clusterLabels) + if err != nil { + impl.logger.Errorw("error in getting stop channel, velero informer already exists ", "clusterId", clusterInfo.Id, "err", 
err) + return err + } + impl.logger.Infow("starting velero vsl informer for cluster", "clusterId", clusterInfo.Id, "clusterName", clusterInfo.ClusterName) + restConfig := impl.k8sUtil.GetK8sConfigForCluster(clusterInfo) + vslInformerClient := impl.informerClient.GetSharedInformerClient(resourceBean.VeleroVslResourceType) + vslInformer, err := vslInformerClient.GetSharedInformer(clusterLabels, impl.appConfig.GetVeleroConfig().GetVeleroNamespace(), restConfig) + if err != nil { + impl.logger.Errorw("error in registering velero vsl informer", "err", err, "clusterId", clusterInfo.Id) + return err + } + runnable := func() { + vslInformer.Run(stopChannel) + impl.logger.Infow("informer started for velero vsl", "clusterId", clusterInfo.Id, "clusterName", clusterInfo.ClusterName) + } + impl.asyncRunnable.Execute(runnable) + return nil +} + +func (impl *InformerImpl) StopInformerForCluster(clusterId int) error { + stopper, found := impl.getVeleroVslStopper(clusterId) + if found { + stopper.Stop() + delete(impl.veleroVslInformerStopper, clusterId) + impl.logger.Infow("velero vsl informer stopped for cluster", "clusterId", clusterId) + } + return nil +} + +func (impl *InformerImpl) StopAll() { + for _, stopper := range impl.veleroVslInformerStopper { + stopper.Stop() + } +} diff --git a/kubewatch/pkg/informer/cluster/wire_cluster.go b/kubewatch/pkg/informer/cluster/wire_cluster.go index d2f8c833b..2dfb5e5e1 100644 --- a/kubewatch/pkg/informer/cluster/wire_cluster.go +++ b/kubewatch/pkg/informer/cluster/wire_cluster.go @@ -5,6 +5,11 @@ import ( cdWf "github.com/devtron-labs/kubewatch/pkg/informer/cluster/argoWf/cd" ciWf "github.com/devtron-labs/kubewatch/pkg/informer/cluster/argoWf/ci" "github.com/devtron-labs/kubewatch/pkg/informer/cluster/systemExec" + veleroBackupInformer "github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/backup" + veleroBackupScheduleInformer "github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/backupSchedule" + 
"github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/backupStorageLocation" + veleroRestoreInformer "github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/restore" + "github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/volumeSnapshotLocation" "github.com/google/wire" ) @@ -13,6 +18,11 @@ var WireSet = wire.NewSet( cdWf.NewInformerImpl, ciWf.NewInformerImpl, systemExec.NewInformerImpl, + veleroBslInformer.NewInformerImpl, + veleroVslInformer.NewInformerImpl, + veleroBackupInformer.NewInformerImpl, + veleroRestoreInformer.NewInformerImpl, + veleroBackupScheduleInformer.NewInformerImpl, NewInformerImpl, wire.Bind(new(Informer), new(*InformerImpl)), diff --git a/kubewatch/pkg/resource/bean/bean.go b/kubewatch/pkg/resource/bean/bean.go index 8b4bd56c9..4d27edec1 100644 --- a/kubewatch/pkg/resource/bean/bean.go +++ b/kubewatch/pkg/resource/bean/bean.go @@ -19,9 +19,14 @@ package bean type SharedInformerType string const ( - ApplicationResourceType SharedInformerType = "application" - CiWorkflowResourceType SharedInformerType = "ci/workflow" - CdWorkflowResourceType SharedInformerType = "cd/workflow" + ApplicationResourceType SharedInformerType = "application" + CiWorkflowResourceType SharedInformerType = "ci/workflow" + CdWorkflowResourceType SharedInformerType = "cd/workflow" + VeleroBslResourceType SharedInformerType = "velero/bsl" + VeleroVslResourceType SharedInformerType = "velero/vsl" + VeleroBackupResourceType SharedInformerType = "velero/backup" + VeleroRestoreResourceType SharedInformerType = "velero/restore" + VeleroBackupScheduleResourceType SharedInformerType = "velero/backupSchedule" ) type InformerFactoryType string diff --git a/kubewatch/pkg/resource/sharedInformer.go b/kubewatch/pkg/resource/sharedInformer.go index a8b52254c..e714c3a76 100644 --- a/kubewatch/pkg/resource/sharedInformer.go +++ b/kubewatch/pkg/resource/sharedInformer.go @@ -20,6 +20,11 @@ import ( informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" 
"github.com/devtron-labs/kubewatch/pkg/resource/application" "github.com/devtron-labs/kubewatch/pkg/resource/bean" + veleroBackup "github.com/devtron-labs/kubewatch/pkg/resource/veleroResource/backup" + veleroBackupSchedule "github.com/devtron-labs/kubewatch/pkg/resource/veleroResource/backupSchedule" + veleroBSL "github.com/devtron-labs/kubewatch/pkg/resource/veleroResource/bsl" + veleroRestore "github.com/devtron-labs/kubewatch/pkg/resource/veleroResource/restore" + veleroVSL "github.com/devtron-labs/kubewatch/pkg/resource/veleroResource/vsl" "github.com/devtron-labs/kubewatch/pkg/resource/workflow" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" @@ -37,6 +42,17 @@ func (impl *InformerClientImpl) GetSharedInformerClient(sharedInformerType bean. return workflow.NewCiInformerImpl(impl.logger, impl.client, impl.appConfig) case bean.CdWorkflowResourceType: return workflow.NewCdInformerImpl(impl.logger, impl.client, impl.appConfig) + case bean.VeleroBslResourceType: + return veleroBSL.NewInformerImpl(impl.logger, impl.client) + case bean.VeleroVslResourceType: + return veleroVSL.NewInformerImpl(impl.logger, impl.client) + case bean.VeleroBackupResourceType: + return veleroBackup.NewInformerImpl(impl.logger, impl.client) + case bean.VeleroRestoreResourceType: + return veleroRestore.NewInformerImpl(impl.logger, impl.client) + case bean.VeleroBackupScheduleResourceType: + return veleroBackupSchedule.NewInformerImpl(impl.logger, impl.client) + default: return NewUnimplementedImpl() } diff --git a/kubewatch/pkg/resource/veleroResource/backup/handler.go b/kubewatch/pkg/resource/veleroResource/backup/handler.go new file mode 100644 index 000000000..fa6c9c442 --- /dev/null +++ b/kubewatch/pkg/resource/veleroResource/backup/handler.go @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package veleroBackup + +import ( + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/common-lib/utils/storage" + informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" + veleroBackupBean "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + veleroBackupInformer "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1" + "go.uber.org/zap" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "time" +) + +type InformerImpl struct { + logger *zap.SugaredLogger + client *pubsub.PubSubClientServiceImpl +} + +func NewInformerImpl(logger *zap.SugaredLogger, + client *pubsub.PubSubClientServiceImpl) *InformerImpl { + return &InformerImpl{ + logger: logger, + client: client, + } +} + +func (impl *InformerImpl) GetSharedInformer(clusterLabels *informerBean.ClusterLabels, namespace string, k8sConfig *rest.Config) (cache.SharedIndexInformer, error) { + startTime := time.Now() + defer func() { + impl.logger.Debugw("registered velero backup informer", "namespace", namespace, "time", time.Since(startTime)) + }() + clientset := versioned.NewForConfigOrDie(k8sConfig) + backupInformer := veleroBackupInformer.NewBackupInformer(clientset, namespace, 0, cache.Indexers{}) + _, err := backupInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + impl.logger.Debugw("velero backup add event received") + if backupObj, ok := obj.(*veleroBackupBean.Backup); ok { + 
impl.logger.Debugw("velero backup add event received", "backupObj", backupObj) + backupChangeObj := storage.NewVeleroResourceEvent(). + SetEventType(storage.EventTypeAdded). + SetResourceKind(storage.ResourceBackup). + SetClusterId(clusterLabels.ClusterId). + SetResourceName(backupObj.Name) + err := impl.sendBackupUpdate(backupChangeObj) + if err != nil { + impl.logger.Errorw("error in sending velero backup add event", "err", err) + } + } else { + impl.logger.Errorw("velero backup object add detected, but could not cast to velero backup object", "obj", obj) + } + }, + UpdateFunc: func(oldObj, newObj interface{}) { + impl.logger.Debugw("velero backup update event received") + if oldBackupObj, ok := oldObj.(*veleroBackupBean.Backup); ok { + if newBackupObj, ok := newObj.(*veleroBackupBean.Backup); ok { + if isChangeInBackupObject(oldBackupObj, newBackupObj) { + backupChangeObj := storage.NewVeleroResourceEvent(). + SetEventType(storage.EventTypeUpdated). + SetResourceKind(storage.ResourceBackup). + SetClusterId(clusterLabels.ClusterId). + SetResourceName(newBackupObj.Name) + err := impl.sendBackupUpdate(backupChangeObj) + if err != nil { + impl.logger.Errorw("error in sending velero backup update event", "err", err) + } + } else { + impl.logger.Debugw("no change in velero backup object", "oldObj", oldBackupObj, "newObj", newBackupObj) + } + } else { + impl.logger.Errorw("velero backup object update detected, but could not cast to velero backup object", "newObj", newObj) + } + } else { + impl.logger.Errorw("velero backup object update detected, but could not cast to velero backup object", "oldObj", oldObj) + } + }, + DeleteFunc: func(obj interface{}) { + impl.logger.Debugw("velero backup delete event received") + if backupObj, ok := obj.(*veleroBackupBean.Backup); ok { + backupChangeObj := storage.NewVeleroResourceEvent(). + SetEventType(storage.EventTypeDeleted). + SetResourceKind(storage.ResourceBackup). + SetClusterId(clusterLabels.ClusterId). 
+ SetResourceName(backupObj.Name) + err := impl.sendBackupUpdate(backupChangeObj) + if err != nil { + impl.logger.Errorw("error in sending velero backup delete event", "err", err) + } + } else { + impl.logger.Errorw("velero backup object delete detected, but could not cast to velero backup object", "obj", obj) + } + }, + }) + if err != nil { + impl.logger.Errorw("error in adding velero backup event handler", "err", err) + return nil, err + } + return backupInformer, nil +} diff --git a/kubewatch/pkg/resource/veleroResource/backup/util.go b/kubewatch/pkg/resource/veleroResource/backup/util.go new file mode 100644 index 000000000..5a625bf1c --- /dev/null +++ b/kubewatch/pkg/resource/veleroResource/backup/util.go @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package veleroBackup + +import ( + "encoding/json" + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/common-lib/utils/storage" + "github.com/pkg/errors" + veleroBackupBean "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" +) + +func (impl *InformerImpl) sendBackupUpdate(backupChangeObj *storage.VeleroResourceEvent) error { + if impl.client == nil { + impl.logger.Errorw("pubsub client is nil - STORAGE_MODULE_TOPIC, skipping the publish") + return errors.New("pubsub client is nil - STORAGE_MODULE_TOPIC, skipping the publish") + } + backupChangeObjByte, err := json.Marshal(backupChangeObj) + if err != nil { + impl.logger.Errorw("error in marshalling velero status update", "err", err) + return err + } + err = impl.client.Publish(pubsub.STORAGE_MODULE_TOPIC, string(backupChangeObjByte)) + if err != nil { + impl.logger.Errorw("error in publishing velero status update", "err", err) + return err + } else { + impl.logger.Info("velero status update sent", "veleroStatusUpdate:", string(backupChangeObjByte)) + return nil + } +} + +func isChangeInBackupObject(oldObj, newObj *veleroBackupBean.Backup) bool { + return oldObj.Status.Version != newObj.Status.Version || + oldObj.Status.FormatVersion != newObj.Status.FormatVersion || + !oldObj.Status.Expiration.Equal(newObj.Status.Expiration) || + oldObj.Status.Phase != newObj.Status.Phase || + len(oldObj.Status.ValidationErrors) != len(newObj.Status.ValidationErrors) || + !oldObj.Status.StartTimestamp.Equal(newObj.Status.StartTimestamp) || + !oldObj.Status.CompletionTimestamp.Equal(newObj.Status.CompletionTimestamp) || + oldObj.Status.VolumeSnapshotsAttempted != newObj.Status.VolumeSnapshotsAttempted || + oldObj.Status.VolumeSnapshotsCompleted != newObj.Status.VolumeSnapshotsCompleted || + oldObj.Status.FailureReason != newObj.Status.FailureReason || + oldObj.Status.Warnings != newObj.Status.Warnings || + oldObj.Status.Errors != newObj.Status.Errors || + (oldObj.Status.Progress == nil && 
newObj.Status.Progress != nil) || + (oldObj.Status.Progress != nil && newObj.Status.Progress != nil && + (oldObj.Status.Progress.ItemsBackedUp != newObj.Status.Progress.ItemsBackedUp || + oldObj.Status.Progress.TotalItems != newObj.Status.Progress.TotalItems)) || + oldObj.Status.CSIVolumeSnapshotsAttempted != newObj.Status.CSIVolumeSnapshotsAttempted || + oldObj.Status.CSIVolumeSnapshotsCompleted != newObj.Status.CSIVolumeSnapshotsCompleted || + oldObj.Status.BackupItemOperationsAttempted != newObj.Status.BackupItemOperationsAttempted || + oldObj.Status.BackupItemOperationsCompleted != newObj.Status.BackupItemOperationsCompleted || + oldObj.Status.BackupItemOperationsFailed != newObj.Status.BackupItemOperationsFailed || + (oldObj.Status.HookStatus == nil && newObj.Status.HookStatus != nil) || + (oldObj.Status.HookStatus != nil && newObj.Status.HookStatus != nil && + (oldObj.Status.HookStatus.HooksAttempted != newObj.Status.HookStatus.HooksAttempted || + oldObj.Status.HookStatus.HooksFailed != newObj.Status.HookStatus.HooksFailed)) +} diff --git a/kubewatch/pkg/resource/veleroResource/backupSchedule/handler.go b/kubewatch/pkg/resource/veleroResource/backupSchedule/handler.go new file mode 100644 index 000000000..b121c3aa9 --- /dev/null +++ b/kubewatch/pkg/resource/veleroResource/backupSchedule/handler.go @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package veleroBackupSchedule + +import ( + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/common-lib/utils/storage" + informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" + veleroBackupScheduleBean "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + veleroVslInformer "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1" + "go.uber.org/zap" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "time" +) + +type InformerImpl struct { + logger *zap.SugaredLogger + client *pubsub.PubSubClientServiceImpl +} + +func NewInformerImpl(logger *zap.SugaredLogger, + client *pubsub.PubSubClientServiceImpl) *InformerImpl { + return &InformerImpl{ + logger: logger, + client: client, + } +} + +func (impl *InformerImpl) GetSharedInformer(clusterLabels *informerBean.ClusterLabels, namespace string, k8sConfig *rest.Config) (cache.SharedIndexInformer, error) { + startTime := time.Now() + defer func() { + impl.logger.Debugw("registered velero backup schedule informer", "namespace", namespace, "time", time.Since(startTime)) + }() + clientSet := versioned.NewForConfigOrDie(k8sConfig) + backupScheduleInformer := veleroVslInformer.NewScheduleInformer(clientSet, namespace, 0, cache.Indexers{}) + _, err := backupScheduleInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + impl.logger.Debugw("velero backup schedule added", "obj", obj) + if backupSchedule, ok := obj.(*veleroBackupScheduleBean.Schedule); ok { + impl.logger.Debugw("velero backup schedule added", "backupSchedule", backupSchedule) + backupScheduleChangeObj := storage.NewVeleroResourceEvent(). + SetEventType(storage.EventTypeAdded). + SetResourceKind(storage.ResourceBackupSchedule). + SetClusterId(clusterLabels.ClusterId). 
+ SetResourceName(backupSchedule.Name) + err := impl.sendBackupScheduleUpdate(backupScheduleChangeObj) + if err != nil { + impl.logger.Errorw("error in sending velero backup schedule update", "err", err) + } + } else { + impl.logger.Errorw("error in casting velero backup schedule", "obj", obj) + } + }, + UpdateFunc: func(oldObj, newObj interface{}) { + impl.logger.Debugw("velero backup schedule updated", "oldObj", oldObj, "newObj", newObj) + if oldBackupSchedule, ok := oldObj.(*veleroBackupScheduleBean.Schedule); ok { + if newBackupSchedule, ok := newObj.(*veleroBackupScheduleBean.Schedule); ok { + if isChangeInBackupScheduleObject(oldBackupSchedule, newBackupSchedule) { + backupScheduleChangeObj := storage.NewVeleroResourceEvent(). + SetEventType(storage.EventTypeUpdated). + SetResourceKind(storage.ResourceBackupSchedule). + SetClusterId(clusterLabels.ClusterId). + SetResourceName(newBackupSchedule.Name) + err := impl.sendBackupScheduleUpdate(backupScheduleChangeObj) + if err != nil { + impl.logger.Errorw("error in sending velero backup schedule update", "err", err) + } + } else { + impl.logger.Debugw("no change in velero backup schedule, skipping the publish", "oldObj", oldObj, "newObj", newObj) + } + } else { + impl.logger.Errorw("error in casting velero backup schedule", "newObj", newObj) + } + } else { + impl.logger.Errorw("error in casting velero backup schedule", "oldObj", oldObj) + } + }, + DeleteFunc: func(obj interface{}) { + impl.logger.Debugw("velero backup schedule deleted", "obj", obj) + if backupSchedule, ok := obj.(*veleroBackupScheduleBean.Schedule); ok { + backupScheduleChangeObj := storage.NewVeleroResourceEvent(). + SetEventType(storage.EventTypeDeleted). + SetResourceKind(storage.ResourceBackupSchedule). + SetClusterId(clusterLabels.ClusterId). 
+ SetResourceName(backupSchedule.Name) + err := impl.sendBackupScheduleUpdate(backupScheduleChangeObj) + if err != nil { + impl.logger.Errorw("error in sending velero backup schedule update", "err", err) + } + } else { + impl.logger.Errorw("error in casting velero backup schedule", "obj", obj) + } + }, + }) + if err != nil { + impl.logger.Errorw("error in adding event handler for velero backup schedule", "err", err) + return nil, err + } + return backupScheduleInformer, nil +} diff --git a/kubewatch/pkg/resource/veleroResource/backupSchedule/util.go b/kubewatch/pkg/resource/veleroResource/backupSchedule/util.go new file mode 100644 index 000000000..fdec51cf1 --- /dev/null +++ b/kubewatch/pkg/resource/veleroResource/backupSchedule/util.go @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package veleroBackupSchedule + +import ( + "encoding/json" + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/common-lib/utils/storage" + "github.com/pkg/errors" + veleroBackupScheduleBean "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" +) + +func (impl *InformerImpl) sendBackupScheduleUpdate(backupScheduleChangeObj *storage.VeleroResourceEvent) error { + if impl.client == nil { + impl.logger.Errorw("pubsub client is nil - STORAGE_MODULE_TOPIC, skipping the publish") + return errors.New("pubsub client is nil - STORAGE_MODULE_TOPIC, skipping the publish") + } + backupScheduleChangeObjByte, err := json.Marshal(backupScheduleChangeObj) + if err != nil { + impl.logger.Errorw("error in marshalling velero backup schedule status update", "err", err) + return err + } + err = impl.client.Publish(pubsub.STORAGE_MODULE_TOPIC, string(backupScheduleChangeObjByte)) + if err != nil { + impl.logger.Errorw("error in publishing velero status update", "err", err) + return err + } else { + impl.logger.Info("velero status update sent", "veleroStatusUpdate:", string(backupScheduleChangeObjByte)) + return nil + } +} + +func isChangeInBackupScheduleObject(oldObj, newObj *veleroBackupScheduleBean.Schedule) bool { + return oldObj.Status.Phase != newObj.Status.Phase || + !oldObj.Status.LastBackup.Equal(newObj.Status.LastBackup) || + !oldObj.Status.LastSkipped.Equal(newObj.Status.LastSkipped) || + len(oldObj.Status.ValidationErrors) != len(newObj.Status.ValidationErrors) +} diff --git a/kubewatch/pkg/resource/veleroResource/bsl/handler.go b/kubewatch/pkg/resource/veleroResource/bsl/handler.go new file mode 100644 index 000000000..43cb7832d --- /dev/null +++ b/kubewatch/pkg/resource/veleroResource/bsl/handler.go @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package veleroBSL + +import ( + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/common-lib/utils/storage" + informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" + veleroBslBean "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + veleroBslInformer "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1" + + "go.uber.org/zap" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "time" +) + +type InformerImpl struct { + logger *zap.SugaredLogger + client *pubsub.PubSubClientServiceImpl +} + +func NewInformerImpl(logger *zap.SugaredLogger, + client *pubsub.PubSubClientServiceImpl) *InformerImpl { + return &InformerImpl{ + logger: logger, + client: client, + } +} + +func (impl *InformerImpl) GetSharedInformer(clusterLabels *informerBean.ClusterLabels, namespace string, k8sConfig *rest.Config) (cache.SharedIndexInformer, error) { + startTime := time.Now() + defer func() { + impl.logger.Debugw("registered velero bsl informer", "namespace", namespace, "time", time.Since(startTime)) + }() + clientSet := versioned.NewForConfigOrDie(k8sConfig) + bslInformer := veleroBslInformer.NewBackupStorageLocationInformer(clientSet, namespace, 0, cache.Indexers{}) + _, err := bslInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + impl.logger.Infow("backup storage location add detected") + if bslObj, ok := obj.(*veleroBslBean.BackupStorageLocation); ok { + bslChangeObj 
:= storage.NewVeleroResourceEvent(). + SetEventType(storage.EventTypeAdded). + SetResourceKind(storage.ResourceBackupStorageLocation). + SetClusterId(clusterLabels.ClusterId). + SetResourceName(bslObj.Name) + err := impl.sendBslUpdate(bslChangeObj) + if err != nil { + impl.logger.Errorw("error in sending backup storage location add event", "err", err) + return + } + } else { + impl.logger.Errorw("backup storage location object add detected, but could not cast to backup storage location object", "obj", obj) + return + } + }, + UpdateFunc: func(oldObj, newObj interface{}) { + impl.logger.Infow("backup storage location update detected") + if oldBslObj, ok := oldObj.(*veleroBslBean.BackupStorageLocation); ok { + if newBslObj, ok := newObj.(*veleroBslBean.BackupStorageLocation); ok { + if isChangeInBslObject(oldBslObj, newBslObj) { + bslChangeObj := storage.NewVeleroResourceEvent(). + SetEventType(storage.EventTypeUpdated). + SetResourceKind(storage.ResourceBackupStorageLocation). + SetClusterId(clusterLabels.ClusterId). 
+ SetResourceName(newBslObj.Name) + err := impl.sendBslUpdate(bslChangeObj) + if err != nil { + impl.logger.Errorw("error in sending backup storage location update event", "err", err) + } + } + } else { + impl.logger.Errorw("backup storage location object update detected, but could not cast to backup storage location object", "newObj", newObj) + } + } else { + impl.logger.Errorw("backup storage location object update detected, but could not cast to backup storage location object", "oldObj", oldObj) + } + }, + DeleteFunc: func(obj interface{}) { + impl.logger.Infow("backup storage location delete detected") + if bslObj, ok := obj.(*veleroBslBean.BackupStorageLocation); ok { + bslChangeObj := &storage.VeleroResourceEvent{ + EventType: storage.EventTypeDeleted, + ResourceKind: storage.ResourceBackupStorageLocation, + ClusterId: clusterLabels.ClusterId, + ResourceName: bslObj.Name, + } + err := impl.sendBslUpdate(bslChangeObj) + if err != nil { + impl.logger.Errorw("error in sending backup storage location delete event", "err", err) + } + } else { + impl.logger.Errorw("backup storage location object delete detected, but could not cast to backup storage location object", "obj", obj) + } + }, + }) + if err != nil { + impl.logger.Errorw("error in adding velero bsl event handler", "err", err) + return nil, err + } + return bslInformer, nil +} diff --git a/kubewatch/pkg/resource/veleroResource/bsl/util.go b/kubewatch/pkg/resource/veleroResource/bsl/util.go new file mode 100644 index 000000000..657169728 --- /dev/null +++ b/kubewatch/pkg/resource/veleroResource/bsl/util.go @@ -0,0 +1,38 @@ +package veleroBSL + +import ( + "encoding/json" + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/common-lib/utils/storage" + "github.com/pkg/errors" + veleroBslBean "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" +) + +func (impl *InformerImpl) sendBslUpdate(bslChangeObj *storage.VeleroResourceEvent) error { + if impl.client == nil { + 
impl.logger.Errorw("pubsub client is nil - STORAGE_MODULE_TOPIC, skipping the publish") + return errors.New("pubsub client is nil - STORAGE_MODULE_TOPIC, skipping the publish") + } + bslChangeObjByte, err := json.Marshal(bslChangeObj) + if err != nil { + impl.logger.Errorw("error in marshalling velero status update", "err", err) + return err + } + err = impl.client.Publish(pubsub.STORAGE_MODULE_TOPIC, string(bslChangeObjByte)) + if err != nil { + impl.logger.Errorw("error in publishing velero status update", "err", err) + return err + } else { + impl.logger.Info("velero status update sent", "veleroStatusUpdate:", string(bslChangeObjByte)) + return nil + } +} + +func isChangeInBslObject(oldObj, newObj *veleroBslBean.BackupStorageLocation) bool { + return oldObj.Status.Phase != newObj.Status.Phase || + !oldObj.Status.LastSyncedTime.Equal(newObj.Status.LastSyncedTime) || + !oldObj.Status.LastValidationTime.Equal(newObj.Status.LastValidationTime) || + oldObj.Status.Message != newObj.Status.Message || + oldObj.Status.LastSyncedRevision != newObj.Status.LastSyncedRevision || + oldObj.Status.AccessMode != newObj.Status.AccessMode +} diff --git a/kubewatch/pkg/resource/veleroResource/restore/handler.go b/kubewatch/pkg/resource/veleroResource/restore/handler.go new file mode 100644 index 000000000..6e7e82621 --- /dev/null +++ b/kubewatch/pkg/resource/veleroResource/restore/handler.go @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package veleroRestore + +import ( + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/common-lib/utils/storage" + informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean" + veleroRestoreBean "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + veleroRestoreInformer "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1" + "go.uber.org/zap" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "time" +) + +type InformerImpl struct { + logger *zap.SugaredLogger + client *pubsub.PubSubClientServiceImpl +} + +func NewInformerImpl(logger *zap.SugaredLogger, + client *pubsub.PubSubClientServiceImpl) *InformerImpl { + return &InformerImpl{ + logger: logger, + client: client, + } +} +func (impl *InformerImpl) GetSharedInformer(clusterLabels *informerBean.ClusterLabels, namespace string, k8sConfig *rest.Config) (cache.SharedIndexInformer, error) { + startTime := time.Now() + defer func() { + impl.logger.Debugw("registered velero restore informer", "namespace", namespace, "time", time.Since(startTime)) + }() + + clientset := versioned.NewForConfigOrDie(k8sConfig) + restoreInformer := veleroRestoreInformer.NewRestoreInformer(clientset, namespace, 0, cache.Indexers{}) + _, err := restoreInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + impl.logger.Debugw("velero restore add event received") + if restoreObj, ok := obj.(*veleroRestoreBean.Restore); ok { + impl.logger.Debugw("velero restore add event received", "restoreObj", restoreObj) + restoreChangeObj := storage.NewVeleroResourceEvent(). + SetEventType(storage.EventTypeAdded). + SetResourceKind(storage.ResourceRestore). + SetClusterId(clusterLabels.ClusterId). 
+ SetResourceName(restoreObj.Name) + err := impl.sendRestoreUpdate(restoreChangeObj) + if err != nil { + impl.logger.Errorw("error in sending velero restore add event", "err", err) + } + } else { + impl.logger.Errorw("velero restore object add detected, but could not cast to velero restore object", "obj", obj) + } + }, + UpdateFunc: func(oldObj, newObj interface{}) { + impl.logger.Debugw("velero restore update event received") + if oldRestoreObj, ok := oldObj.(*veleroRestoreBean.Restore); ok { + if newRestoreObj, ok := newObj.(*veleroRestoreBean.Restore); ok { + if isChangeInRestoreObject(oldRestoreObj, newRestoreObj) { + restoreChangeObj := storage.NewVeleroResourceEvent(). + SetEventType(storage.EventTypeUpdated). + SetResourceKind(storage.ResourceRestore). + SetClusterId(clusterLabels.ClusterId). + SetResourceName(newRestoreObj.Name) + err := impl.sendRestoreUpdate(restoreChangeObj) + if err != nil { + impl.logger.Errorw("error in sending velero restore update event", "err", err) + } + } else { + impl.logger.Debugw("no change in velero restore object", "oldObj", oldRestoreObj, "newObj", newRestoreObj) + } + } else { + impl.logger.Errorw("velero restore object update detected, but could not cast to velero restore object", "newObj", newObj) + } + } else { + impl.logger.Errorw("velero restore object update detected, but could not cast to velero restore object", "oldObj", oldObj) + } + }, + DeleteFunc: func(obj interface{}) { + impl.logger.Debugw("velero restore delete event received") + if restoreObj, ok := obj.(*veleroRestoreBean.Restore); ok { + restoreChangeObj := storage.NewVeleroResourceEvent(). + SetEventType(storage.EventTypeDeleted). + SetResourceKind(storage.ResourceRestore). + SetClusterId(clusterLabels.ClusterId). 
+ SetResourceName(restoreObj.Name) + err := impl.sendRestoreUpdate(restoreChangeObj) + if err != nil { + impl.logger.Errorw("error in sending velero restore delete event", "err", err) + } + } else { + impl.logger.Errorw("velero restore object delete detected, but could not cast to velero restore object", "obj", obj) + } + }, + }) + if err != nil { + impl.logger.Errorw("error in adding velero restore event handler", "err", err) + return nil, err + } + return restoreInformer, nil +} diff --git a/kubewatch/pkg/resource/veleroResource/restore/util.go b/kubewatch/pkg/resource/veleroResource/restore/util.go new file mode 100644 index 000000000..880e88c97 --- /dev/null +++ b/kubewatch/pkg/resource/veleroResource/restore/util.go @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package veleroRestore
+
+import (
+	"encoding/json"
+	pubsub "github.com/devtron-labs/common-lib/pubsub-lib"
+	"github.com/devtron-labs/common-lib/utils/storage"
+	"github.com/pkg/errors"
+	veleroRestoreBean "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
+)
+
+func (impl *InformerImpl) sendRestoreUpdate(restoreChangeObj *storage.VeleroResourceEvent) error {
+	if impl.client == nil {
+		impl.logger.Errorw("pubsub client is nil - STORAGE_MODULE_TOPIC, skipping the publish")
+		return errors.New("pubsub client is nil - STORAGE_MODULE_TOPIC, skipping the publish")
+	}
+	restoreChangeObjByte, err := json.Marshal(restoreChangeObj)
+	if err != nil {
+		impl.logger.Errorw("error in marshalling velero restore status update", "err", err)
+		return err
+	}
+	err = impl.client.Publish(pubsub.STORAGE_MODULE_TOPIC, string(restoreChangeObjByte))
+	if err != nil {
+		impl.logger.Errorw("error in publishing velero restore status update", "err", err)
+		return err
+	} else {
+		impl.logger.Infow("velero restore status update sent", "veleroRestoreStatusUpdate", string(restoreChangeObjByte))
+		return nil
+	}
+}
+
+func isChangeInRestoreObject(oldObj, newObj *veleroRestoreBean.Restore) bool {
+	return oldObj.Spec.BackupName != newObj.Spec.BackupName ||
+		oldObj.Status.Phase != newObj.Status.Phase ||
+		len(oldObj.Status.ValidationErrors) != len(newObj.Status.ValidationErrors) ||
+		oldObj.Status.Warnings != newObj.Status.Warnings ||
+		oldObj.Status.Errors != newObj.Status.Errors ||
+		oldObj.Status.FailureReason != newObj.Status.FailureReason ||
+		!oldObj.Status.StartTimestamp.Equal(newObj.Status.StartTimestamp) ||
+		!oldObj.Status.CompletionTimestamp.Equal(newObj.Status.CompletionTimestamp) ||
+		(oldObj.Status.Progress == nil && newObj.Status.Progress != nil) ||
+		(oldObj.Status.Progress != nil && newObj.Status.Progress != nil &&
+			(oldObj.Status.Progress.ItemsRestored != newObj.Status.Progress.ItemsRestored ||
+				oldObj.Status.Progress.TotalItems != 
newObj.Status.Progress.TotalItems)) ||
+		oldObj.Status.RestoreItemOperationsAttempted != newObj.Status.RestoreItemOperationsAttempted ||
+		oldObj.Status.RestoreItemOperationsCompleted != newObj.Status.RestoreItemOperationsCompleted ||
+		oldObj.Status.RestoreItemOperationsFailed != newObj.Status.RestoreItemOperationsFailed ||
+		(oldObj.Status.HookStatus == nil && newObj.Status.HookStatus != nil) ||
+		(oldObj.Status.HookStatus != nil && newObj.Status.HookStatus != nil &&
+			(oldObj.Status.HookStatus.HooksAttempted != newObj.Status.HookStatus.HooksAttempted ||
+				oldObj.Status.HookStatus.HooksFailed != newObj.Status.HookStatus.HooksFailed))
+}
diff --git a/kubewatch/pkg/resource/veleroResource/vsl/handler.go b/kubewatch/pkg/resource/veleroResource/vsl/handler.go
new file mode 100644
index 000000000..d23d43a23
--- /dev/null
+++ b/kubewatch/pkg/resource/veleroResource/vsl/handler.go
@@ -0,0 +1,77 @@
+package veleroVSL
+
+import (
+	pubsub "github.com/devtron-labs/common-lib/pubsub-lib"
+	"github.com/devtron-labs/common-lib/utils/storage"
+	informerBean "github.com/devtron-labs/kubewatch/pkg/informer/bean"
+	veleroVslBean "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
+	"github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned"
+	veleroVslInformer "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1"
+	"go.uber.org/zap"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/cache"
+	"time"
+)
+
+type InformerImpl struct {
+	logger *zap.SugaredLogger
+	client *pubsub.PubSubClientServiceImpl
+}
+
+func NewInformerImpl(logger *zap.SugaredLogger,
+	client *pubsub.PubSubClientServiceImpl) *InformerImpl {
+	return &InformerImpl{
+		logger: logger,
+		client: client,
+	}
+}
+
+func (impl *InformerImpl) GetSharedInformer(clusterLabels *informerBean.ClusterLabels, namespace string, k8sConfig *rest.Config) (cache.SharedIndexInformer, error) {
+	startTime := time.Now()
+	defer func() {
+		impl.logger.Debugw("registered velero vsl informer", 
"namespace", namespace, "time", time.Since(startTime)) + }() + clientSet := versioned.NewForConfigOrDie(k8sConfig) + vslInformer := veleroVslInformer.NewVolumeSnapshotLocationInformer(clientSet, namespace, 0, cache.Indexers{}) + _, err := vslInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + impl.logger.Debugw("velero vsl add event received") + if vslObj, ok := obj.(*veleroVslBean.VolumeSnapshotLocation); ok { + impl.logger.Infow("velero vsl add event received", "vslObj", vslObj) + vslChangeObj := storage.NewVeleroResourceEvent(). + SetEventType(storage.EventTypeAdded). + SetResourceKind(storage.ResourceVolumeSnapshotLocation). + SetClusterId(clusterLabels.ClusterId). + SetResourceName(vslObj.Name) + err := impl.sendVslUpdate(vslChangeObj) + if err != nil { + impl.logger.Errorw("error in sending velero vsl add event", "err", err) + } + } else { + impl.logger.Errorw("velero vsl object add detected, but could not cast to velero vsl object", "obj", obj) + } + }, + UpdateFunc: func(oldObj, newObj interface{}) {}, + DeleteFunc: func(obj interface{}) { + impl.logger.Debugw("velero vsl delete event received") + if vslObj, ok := obj.(*veleroVslBean.VolumeSnapshotLocation); ok { + vslChangeObj := storage.NewVeleroResourceEvent(). + SetEventType(storage.EventTypeDeleted). + SetResourceKind(storage.ResourceVolumeSnapshotLocation). + SetClusterId(clusterLabels.ClusterId). 
+				SetResourceName(vslObj.Name)
+				err := impl.sendVslUpdate(vslChangeObj)
+				if err != nil {
+					impl.logger.Errorw("error in sending velero vsl delete event", "err", err)
+				}
+			} else {
+				impl.logger.Errorw("velero vsl object delete detected, but could not cast to velero vsl object", "obj", obj)
+			}
+		},
+	})
+	if err != nil {
+		impl.logger.Errorw("error in adding velero vsl event handler", "err", err)
+		return nil, err
+	}
+	return vslInformer, nil
+}
diff --git a/kubewatch/pkg/resource/veleroResource/vsl/util.go b/kubewatch/pkg/resource/veleroResource/vsl/util.go
new file mode 100644
index 000000000..6cb5b0f84
--- /dev/null
+++ b/kubewatch/pkg/resource/veleroResource/vsl/util.go
@@ -0,0 +1,28 @@
+package veleroVSL
+
+import (
+	"encoding/json"
+	pubsub "github.com/devtron-labs/common-lib/pubsub-lib"
+	"github.com/devtron-labs/common-lib/utils/storage"
+	"github.com/pkg/errors"
+)
+
+func (impl *InformerImpl) sendVslUpdate(vslChangeObj *storage.VeleroResourceEvent) error {
+	if impl.client == nil {
+		impl.logger.Errorw("pubsub client is nil - STORAGE_MODULE_TOPIC, skipping the publish")
+		return errors.New("pubsub client is nil - STORAGE_MODULE_TOPIC, skipping the publish")
+	}
+	vslChangeObjByte, err := json.Marshal(vslChangeObj)
+	if err != nil {
+		impl.logger.Errorw("error in marshalling velero status update", "err", err)
+		return err
+	}
+	err = impl.client.Publish(pubsub.STORAGE_MODULE_TOPIC, string(vslChangeObjByte))
+	if err != nil {
+		impl.logger.Errorw("error in publishing velero status update", "err", err)
+		return err
+	} else {
+		impl.logger.Infow("velero status update sent", "veleroStatusUpdate", string(vslChangeObjByte))
+		return nil
+	}
+}
diff --git a/kubewatch/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go b/kubewatch/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go
index b7434bb27..cfd7f98e4 100644
--- a/kubewatch/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go
+++ 
b/kubewatch/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go @@ -124,6 +124,21 @@ const ( INFRA_HELM_RELEASE_ACTION_TOPIC string = "INFRA_HELM_RELEASE_ACTION_TOPIC" INFRA_HELM_RELEASE_ACTION_GROUP string = "INFRA_HELM_RELEASE_ACTION_GROUP" INFRA_HELM_RELEASE_ACTION_DURABLE string = "INFRA_HELM_RELEASE_ACTION_DURABLE" + COST_MODULE_INSTALLATION_TOPIC string = "COST_MODULE_INSTALLATION_TOPIC" + COST_MODULE_INSTALLATION_GROUP string = "COST_MODULE_INSTALLATION_GROUP" + COST_MODULE_INSTALLATION_DURABLE string = "COST_MODULE_INSTALLATION_DURABLE" + COST_MODULE_GPU_INSTALLATION_TOPIC string = "COST_MODULE_GPU_INSTALLATION_TOPIC" + COST_MODULE_GPU_INSTALLATION_GROUP string = "COST_MODULE_GPU_INSTALLATION_GROUP" + COST_MODULE_GPU_INSTALLATION_DURABLE string = "COST_MODULE_GPU_INSTALLATION_DURABLE" + STORAGE_MODULE_TOPIC string = "STORAGE_MODULE_TOPIC" + STORAGE_MODULE_GROUP string = "STORAGE_MODULE_GROUP" + STORAGE_MODULE_DURABLE string = "STORAGE_MODULE_DURABLE" + STORAGE_VELERO_INSTALL_TOPIC string = "STORAGE_VELERO_INSTALL_TOPIC" + STORAGE_VELERO_INSTALL_GROUP string = "STORAGE_VELERO_INSTALL_GROUP" + STORAGE_VELERO_INSTALL_DURABLE string = "STORAGE_VELERO_INSTALL_DURABLE" + STORAGE_VELERO_POST_INSTALLATION_TOPIC string = "STORAGE_VELERO_POST_INSTALLATION_TOPIC" + STORAGE_VELERO_POST_INSTALLATION_GROUP string = "STORAGE_VELERO_POST_INSTALLATION_GROUP" + STORAGE_VELERO_POST_INSTALLATION_DURABLE string = "STORAGE_VELERO_POST_INSTALLATION_DURABLE" ) type NatsTopic struct { @@ -179,6 +194,11 @@ var natsTopicMapping = map[string]NatsTopic{ INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: 
INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE}, INFRA_HELM_RELEASE_ACTION_TOPIC: {topicName: INFRA_HELM_RELEASE_ACTION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRA_HELM_RELEASE_ACTION_GROUP, consumerName: INFRA_HELM_RELEASE_ACTION_DURABLE}, + COST_MODULE_INSTALLATION_TOPIC: {topicName: COST_MODULE_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_INSTALLATION_GROUP, consumerName: COST_MODULE_INSTALLATION_DURABLE}, + COST_MODULE_GPU_INSTALLATION_TOPIC: {topicName: COST_MODULE_GPU_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_GPU_INSTALLATION_GROUP, consumerName: COST_MODULE_GPU_INSTALLATION_DURABLE}, + STORAGE_MODULE_TOPIC: {topicName: STORAGE_MODULE_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_MODULE_GROUP, consumerName: STORAGE_MODULE_DURABLE}, + STORAGE_VELERO_INSTALL_TOPIC: {topicName: STORAGE_VELERO_INSTALL_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_VELERO_INSTALL_GROUP, consumerName: STORAGE_VELERO_INSTALL_DURABLE}, + STORAGE_VELERO_POST_INSTALLATION_TOPIC: {topicName: STORAGE_VELERO_POST_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_VELERO_POST_INSTALLATION_GROUP, consumerName: STORAGE_VELERO_POST_INSTALLATION_DURABLE}, } var NatsStreamWiseConfigMapping = map[string]NatsStreamConfig{ @@ -221,6 +241,11 @@ var NatsConsumerWiseConfigMapping = map[string]NatsConsumerConfig{ INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE: {}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE: {}, INFRA_HELM_RELEASE_ACTION_DURABLE: {}, + COST_MODULE_INSTALLATION_DURABLE: {}, + COST_MODULE_GPU_INSTALLATION_DURABLE: {}, + STORAGE_MODULE_DURABLE: {}, + STORAGE_VELERO_INSTALL_DURABLE: {}, + STORAGE_VELERO_POST_INSTALLATION_DURABLE: {}, } // getConsumerConfigMap will fetch the consumer wise config from the json string diff --git 
a/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go b/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go new file mode 100644 index 000000000..372765014 --- /dev/null +++ b/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "fmt" + "strings" + "time" +) + +type TimeRangeRequest struct { + From *time.Time `json:"from" schema:"from"` + To *time.Time `json:"to" schema:"to"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=today yesterday week month quarter lastWeek lastMonth lastQuarter last24Hours last7Days last30Days last90Days"` +} + +func NewTimeRangeRequest(from *time.Time, to *time.Time) *TimeRangeRequest { + return &TimeRangeRequest{ + From: from, + To: to, + } +} + +func NewTimeWindowRequest(timeWindow TimeWindows) *TimeRangeRequest { + return &TimeRangeRequest{ + TimeWindow: &timeWindow, + } +} + +// TimeWindows is a string type that represents different time windows +type TimeWindows string + +func (timeRange TimeWindows) String() string { + return string(timeRange) +} + +// Define constants for different time windows +const ( + Today TimeWindows = "today" + Yesterday TimeWindows = "yesterday" + Week TimeWindows = "week" + Month TimeWindows = "month" + Quarter TimeWindows = "quarter" + LastWeek TimeWindows = 
"lastWeek" + LastMonth TimeWindows = "lastMonth" + Year TimeWindows = "year" + LastQuarter TimeWindows = "lastQuarter" + Last24Hours TimeWindows = "last24Hours" + Last7Days TimeWindows = "last7Days" + Last30Days TimeWindows = "last30Days" + Last90Days TimeWindows = "last90Days" +) + +func (timeRange *TimeRangeRequest) ParseAndValidateTimeRange() (*TimeRangeRequest, error) { + if timeRange == nil { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("invalid time range request. either from/to or timeWindow must be provided") + } + now := time.Now() + // If timeWindow is provided, it takes preference over from/to + if timeRange.TimeWindow != nil { + switch *timeRange.TimeWindow { + case Today: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Yesterday: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()).Add(-24 * time.Hour) + end := start.Add(24 * time.Hour) + return NewTimeRangeRequest(&start, &end), nil + case Week: + // Current week (Monday to Sunday) + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + start := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Month: + start := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Quarter: + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterStart := time.Month((quarter-1)*3 + 1) + start := time.Date(now.Year(), quarterStart, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case LastWeek: + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + thisWeekStart := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + lastWeekStart := thisWeekStart.AddDate(0, 0, -7) + lastWeekEnd := thisWeekStart.Add(-time.Second) + return 
NewTimeRangeRequest(&lastWeekStart, &lastWeekEnd), nil + case LastMonth: + thisMonthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + lastMonthStart := thisMonthStart.AddDate(0, -1, 0) + lastMonthEnd := thisMonthStart.Add(-time.Second) + return NewTimeRangeRequest(&lastMonthStart, &lastMonthEnd), nil + case LastQuarter: + // Calculate current quarter + currentQuarter := ((int(now.Month()) - 1) / 3) + 1 + + // Calculate previous quarter + var prevQuarter int + var prevYear int + if currentQuarter == 1 { + // If current quarter is Q1, previous quarter is Q4 of previous year + prevQuarter = 4 + prevYear = now.Year() - 1 + } else { + // Otherwise, previous quarter is in the same year + prevQuarter = currentQuarter - 1 + prevYear = now.Year() + } + + // Calculate start and end of previous quarter + prevQuarterStartMonth := time.Month((prevQuarter-1)*3 + 1) + prevQuarterStart := time.Date(prevYear, prevQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + + // End of previous quarter is the start of current quarter minus 1 second + currentQuarterStartMonth := time.Month((currentQuarter-1)*3 + 1) + currentQuarterStart := time.Date(now.Year(), currentQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + if currentQuarter == 1 { + // If current quarter is Q1, we need to calculate Q4 end of previous year + currentQuarterStart = time.Date(now.Year(), time.January, 1, 0, 0, 0, 0, now.Location()) + } + prevQuarterEnd := currentQuarterStart.Add(-time.Second) + + return NewTimeRangeRequest(&prevQuarterStart, &prevQuarterEnd), nil + case Year: + start := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Last24Hours: + start := now.Add(-24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Last7Days: + start := now.AddDate(0, 0, -7) + return NewTimeRangeRequest(&start, &now), nil + case Last30Days: + start := now.AddDate(0, 0, -30) + return NewTimeRangeRequest(&start, &now), nil + 
case Last90Days: + start := now.AddDate(0, 0, -90) + return NewTimeRangeRequest(&start, &now), nil + default: + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("unsupported time window: %q", *timeRange.TimeWindow) + } + } + + // Use from/to dates if provided + if timeRange.From != nil && timeRange.To != nil { + if timeRange.From.After(*timeRange.To) { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from date cannot be after to date") + } + return NewTimeRangeRequest(timeRange.From, timeRange.To), nil + } else { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from and to dates are required if time window is not provided") + } +} + +// TimeBoundariesRequest represents the request for time boundary frames +type TimeBoundariesRequest struct { + TimeWindowBoundaries []string `json:"timeWindowBoundaries" schema:"timeWindowBoundaries" validate:"omitempty,min=1"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=week month quarter year"` // week, month, quarter, year + Iterations int `json:"iterations" schema:"iterations" validate:"omitempty,min=1"` +} + +// TimeWindowBoundaries represents the start and end times for a time window +type TimeWindowBoundaries struct { + StartTime time.Time + EndTime time.Time +} + +func (timeBoundaries *TimeBoundariesRequest) ParseAndValidateTimeBoundaries() ([]TimeWindowBoundaries, error) { + if timeBoundaries == nil { + return []TimeWindowBoundaries{}, fmt.Errorf("invalid time boundaries request") + } + // If timeWindow is provided, it takes preference over timeWindowBoundaries + if timeBoundaries.TimeWindow != nil { + switch *timeBoundaries.TimeWindow { + case Week: + return GetWeeklyTimeBoundaries(timeBoundaries.Iterations), nil + case Month: + return GetMonthlyTimeBoundaries(timeBoundaries.Iterations), nil + case Quarter: + return GetQuarterlyTimeBoundaries(timeBoundaries.Iterations), nil + case Year: + return 
GetYearlyTimeBoundaries(timeBoundaries.Iterations), nil + default: + return []TimeWindowBoundaries{}, fmt.Errorf("unsupported time window: %q", *timeBoundaries.TimeWindow) + } + } else if len(timeBoundaries.TimeWindowBoundaries) != 0 { + // Validate time window + return DecodeAndValidateTimeWindowBoundaries(timeBoundaries.TimeWindowBoundaries) + } else { + return []TimeWindowBoundaries{}, fmt.Errorf("time window boundaries are required if time window is not provided") + } +} + +func GetWeeklyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + weekday := int(now.Weekday()) + if weekday == 0 { + weekday = 7 + } + // Get start of this week (Monday) + weekStart := now.AddDate(0, 0, -(weekday - 1)) + // Set time to midnight + weekStart = time.Date(weekStart.Year(), weekStart.Month(), weekStart.Day(), 0, 0, 0, 0, weekStart.Location()) + + for i := 0; i < iterations; i++ { + start := weekStart.AddDate(0, 0, -7*i) + end := start.AddDate(0, 0, 7) + // For the current week, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetMonthlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this month (1st) + monthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := monthStart.AddDate(0, -i, 0) + end := start.AddDate(0, 1, 0) + // For the current month, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func 
GetQuarterlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterMonth := time.Month((quarter-1)*3 + 1) + // Get start of this quarter (1st of the month) + quarterStart := time.Date(now.Year(), quarterMonth, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := quarterStart.AddDate(0, -3*i, 0) + end := start.AddDate(0, 3, 0) + // For the current quarter, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetYearlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this year (1st of January) + yearStart := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := yearStart.AddDate(-i, 0, 0) + end := start.AddDate(1, 0, 0) + // For the current year, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func DecodeAndValidateTimeWindowBoundaries(timeWindowBoundaries []string) ([]TimeWindowBoundaries, error) { + boundaries := make([]TimeWindowBoundaries, 0, len(timeWindowBoundaries)) + for _, boundary := range timeWindowBoundaries { + parts := strings.Split(boundary, "|") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid time window boundary format: %q", boundary) + } + startTime, err := time.Parse(time.RFC3339, parts[0]) + if err != nil { + return nil, fmt.Errorf("invalid start time format: %q. 
expected format: %q", parts[0], time.RFC3339) + } + endTime, err := time.Parse(time.RFC3339, parts[1]) + if err != nil { + return nil, fmt.Errorf("invalid end time format: %q. expected format: %q", parts[1], time.RFC3339) + } + if startTime.After(endTime) { + return nil, fmt.Errorf("start time cannot be after end time: %q", boundary) + } + boundaries = append(boundaries, TimeWindowBoundaries{ + StartTime: startTime, + EndTime: endTime, + }) + } + return boundaries, nil +} diff --git a/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go b/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go index ea16a2f72..2bbfa1dc1 100644 --- a/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go +++ b/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go @@ -74,7 +74,9 @@ type PgQueryMonitoringConfig struct { } func GetPgQueryMonitoringConfig(serviceName string) (PgQueryMonitoringConfig, error) { - cfg := &PgQueryMonitoringConfig{} + cfg := &PgQueryMonitoringConfig{ + ServiceName: serviceName, + } err := env.Parse(cfg) return *cfg, err } diff --git a/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/k8s/K8sUtil.go b/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/k8s/K8sUtil.go index f621257bd..434fb4874 100644 --- a/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/k8s/K8sUtil.go +++ b/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/k8s/K8sUtil.go @@ -1351,7 +1351,7 @@ func (impl *K8sServiceImpl) GetPodListByLabel(namespace, label string, clientSet return podList.Items, nil } -func (impl *K8sServiceImpl) CreateOrUpdateSecretByName(client *v12.CoreV1Client, namespace, uniqueSecretName string, secretLabel map[string]string, secretData map[string]string) error { +func (impl *K8sServiceImpl) CreateOrUpdateSecretByName(client *v12.CoreV1Client, namespace, uniqueSecretName string, secretLabel map[string]string, dataString map[string]string, data map[string][]byte) error { 
secret, err := impl.GetSecret(namespace, uniqueSecretName, client) statusError, ok := err.(*errors.StatusError) @@ -1361,13 +1361,18 @@ func (impl *K8sServiceImpl) CreateOrUpdateSecretByName(client *v12.CoreV1Client, } if ok && statusError != nil && statusError.Status().Code == http.StatusNotFound { - _, err = impl.CreateSecret(namespace, nil, uniqueSecretName, "", client, secretLabel, secretData) + _, err = impl.CreateSecret(namespace, data, uniqueSecretName, "", client, secretLabel, dataString) if err != nil { impl.logger.Errorw("Error in creating secret for chart repo", "uniqueSecretName", uniqueSecretName, "err", err) return err } } else { - secret.StringData = secretData + if len(data) > 0 { + secret.Data = data + } + if len(dataString) > 0 { + secret.StringData = dataString + } _, err = impl.UpdateSecret(namespace, secret, client) if err != nil { impl.logger.Errorw("Error in creating secret for chart repo", "uniqueSecretName", uniqueSecretName, "err", err) diff --git a/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/k8s/k8sService.go b/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/k8s/k8sService.go index c8f583ff3..681e96ad2 100644 --- a/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/k8s/k8sService.go +++ b/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/k8s/k8sService.go @@ -105,8 +105,7 @@ type K8sService interface { GetResourceIf(restConfig *rest.Config, groupVersionKind schema.GroupVersionKind) (resourceIf dynamic.NamespaceableResourceInterface, namespaced bool, err error) FetchConnectionStatusForCluster(k8sClientSet *kubernetes.Clientset) error CreateK8sClientSet(restConfig *rest.Config) (*kubernetes.Clientset, error) - CreateOrUpdateSecretByName(client *v12.CoreV1Client, namespace, uniqueSecretName string, secretLabel map[string]string, secretData map[string]string) error - + CreateOrUpdateSecretByName(client *v12.CoreV1Client, namespace, uniqueSecretName string, secretLabel map[string]string, dataString 
map[string]string, data map[string][]byte) error // below functions are exposed for K8sUtilExtended CreateNsWithLabels(namespace string, labels map[string]string, client *v12.CoreV1Client) (ns *v1.Namespace, err error) diff --git a/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go b/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go index fa6858e5d..5c9cb23bf 100644 --- a/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go +++ b/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/sql/connection.go @@ -34,7 +34,7 @@ type Config struct { User string `env:"PG_USER" envDefault:"" description:"user for postgres" example:"postgres"` Password string `env:"PG_PASSWORD" envDefault:"" secretData:"-" description:"password for postgres, associated with PG_USER" example:"confidential ;)"` Database string `env:"PG_DATABASE" envDefault:"orchestrator" description:"postgres database to be made connection with" example:"orchestrator, casbin, git_sensor, lens"` - CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin""` + CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin"` ApplicationName string `env:"APP" envDefault:"orchestrator" description:"Application name"` ReadTimeout int64 `env:"PG_READ_TIMEOUT" envDefault:"30"` WriteTimeout int64 `env:"PG_WRITE_TIMEOUT" envDefault:"30"` @@ -71,10 +71,10 @@ func NewDbConnection(cfg *Config, logger *zap.SugaredLogger) (*pg.DB, error) { _, err := dbConnection.QueryOne(&test, `SELECT 1`) if err != nil { - logger.Errorw("error in connecting db ", "db", obfuscateSecretTags(cfg), "err", err) + logger.Errorw("error in connecting db ", "db", ObfuscateSecretTags(cfg), "err", err) return nil, err } else { - logger.Infow("connected with db", "db", obfuscateSecretTags(cfg)) + logger.Infow("connected with db", "db", ObfuscateSecretTags(cfg)) } // -------------- @@ -82,7 +82,7 @@ func NewDbConnection(cfg *Config, logger *zap.SugaredLogger) (*pg.DB, 
error) { return dbConnection, err } -func obfuscateSecretTags(cfg interface{}) interface{} { +func ObfuscateSecretTags(cfg interface{}) interface{} { cfgDpl := reflect.New(reflect.ValueOf(cfg).Elem().Type()).Interface() cfgDplElm := reflect.ValueOf(cfgDpl).Elem() diff --git a/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/storage/bean.go b/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/storage/bean.go new file mode 100644 index 000000000..040504a29 --- /dev/null +++ b/kubewatch/vendor/github.com/devtron-labs/common-lib/utils/storage/bean.go @@ -0,0 +1,146 @@ +package storage + +import ( + veleroBean "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" +) + +// EventType represents the type of event +type EventType string + +func (e EventType) String() string { + return string(e) +} + +func (e EventType) IsCreated() bool { + return e == EventTypeAdded +} + +func (e EventType) IsUpdated() bool { + return e == EventTypeUpdated +} + +func (e EventType) IsDeleted() bool { + return e == EventTypeDeleted +} + +const ( + EventTypeAdded EventType = "ADDED" + EventTypeUpdated EventType = "UPDATED" + EventTypeDeleted EventType = "DELETED" +) + +// ResourceKind represents the kind of resource +type ResourceKind string + +func (r ResourceKind) String() string { + return string(r) +} + +func (r ResourceKind) IsBackup() bool { + return r == ResourceBackup +} + +func (r ResourceKind) IsRestore() bool { + return r == ResourceRestore +} + +func (r ResourceKind) IsBackupStorageLocation() bool { + return r == ResourceBackupStorageLocation +} + +func (r ResourceKind) IsVolumeSnapshotLocation() bool { + return r == ResourceVolumeSnapshotLocation +} + +func (r ResourceKind) IsBackupSchedule() bool { + return r == ResourceBackupSchedule +} + +const ( + ResourceBackup ResourceKind = "Backup" + ResourceRestore ResourceKind = "Restore" + ResourceBackupStorageLocation ResourceKind = "BackupStorageLocation" + ResourceVolumeSnapshotLocation ResourceKind = 
"VolumeSnapshotLocation" + ResourceBackupSchedule ResourceKind = "BackupSchedule" +) + +// LocationsStatus represents the status of a location +// NOTE: status is only available in case of BSL +type LocationsStatus struct { + *veleroBean.BackupStorageLocationStatus +} + +// BackupStatus represents the status of a backup +type BackupStatus struct { + *veleroBean.BackupStatus +} + +// RestoreStatus represents the status of a restore +type RestoreStatus struct { + *veleroBean.RestoreStatus +} + +// BackupScheduleStatus represents the status of a backup schedule +type BackupScheduleStatus struct { + *veleroBean.ScheduleStatus +} + +// VeleroResourceEvent represents the event sent by velero +type VeleroResourceEvent struct { + EventType EventType `json:"eventType"` + ResourceKind ResourceKind `json:"kind"` + ClusterId int `json:"clusterId"` + ResourceName string `json:"resourceName"` +} + +func NewVeleroResourceEvent() *VeleroResourceEvent { + return &VeleroResourceEvent{} +} + +// Getters + +// GetEventType returns the EventType +func (e *VeleroResourceEvent) GetEventType() any { + return e.EventType +} + +// GetResourceKind returns the ResourceKind +func (e *VeleroResourceEvent) GetResourceKind() ResourceKind { + return e.ResourceKind +} + +// GetClusterId returns the ClusterId +func (e *VeleroResourceEvent) GetClusterId() int { + return e.ClusterId +} + +// GetResourceName returns the ResourceName +func (e *VeleroResourceEvent) GetResourceName() string { + return e.ResourceName +} + +// Setters + +// SetEventType sets the EventType +func (e *VeleroResourceEvent) SetEventType(eventType EventType) *VeleroResourceEvent { + e.EventType = eventType + return e +} + +// SetClusterId sets the ClusterId +func (e *VeleroResourceEvent) SetClusterId(clusterId int) *VeleroResourceEvent { + e.ClusterId = clusterId + return e +} + +// SetResourceKind sets the ResourceKind +func (e *VeleroResourceEvent) SetResourceKind(resourceKind ResourceKind) *VeleroResourceEvent { + 
e.ResourceKind = resourceKind + return e +} + +// SetResourceName sets the ResourceName +func (e *VeleroResourceEvent) SetResourceName(resourceName string) *VeleroResourceEvent { + e.ResourceName = resourceName + return e +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/LICENSE b/kubewatch/vendor/github.com/vmware-tanzu/velero/LICENSE new file mode 100644 index 000000000..5e0fd33cb --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/LICENSE @@ -0,0 +1,201 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. 
+ +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. For the purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. 
+ +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "{}" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. 
+ +Copyright {yyyy} {name of copyright owner} + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/shared/data_move_operation_progress.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/shared/data_move_operation_progress.go new file mode 100644 index 000000000..f92b3e533 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/shared/data_move_operation_progress.go @@ -0,0 +1,29 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package shared + +// DataMoveOperationProgress represents the progress of a +// data movement operation + +// +k8s:deepcopy-gen=true +type DataMoveOperationProgress struct { + // +optional + TotalBytes int64 `json:"totalBytes,omitempty"` + + // +optional + BytesDone int64 `json:"bytesDone,omitempty"` +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_repository_types.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_repository_types.go new file mode 100644 index 000000000..6a062c4fe --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_repository_types.go @@ -0,0 +1,114 @@ +/* +Copyright 2018 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// BackupRepositorySpec is the specification for a BackupRepository. +type BackupRepositorySpec struct { + // VolumeNamespace is the namespace this backup repository contains + // pod volume backups for. + VolumeNamespace string `json:"volumeNamespace"` + + // BackupStorageLocation is the name of the BackupStorageLocation + // that should contain this repository. 
+ BackupStorageLocation string `json:"backupStorageLocation"` + + // RepositoryType indicates the type of the backend repository + // +kubebuilder:validation:Enum=kopia;restic;"" + // +optional + RepositoryType string `json:"repositoryType"` + + // ResticIdentifier is the full restic-compatible string for identifying + // this repository. + ResticIdentifier string `json:"resticIdentifier"` + + // MaintenanceFrequency is how often maintenance should be run. + MaintenanceFrequency metav1.Duration `json:"maintenanceFrequency"` +} + +// BackupRepositoryPhase represents the lifecycle phase of a BackupRepository. +// +kubebuilder:validation:Enum=New;Ready;NotReady +type BackupRepositoryPhase string + +const ( + BackupRepositoryPhaseNew BackupRepositoryPhase = "New" + BackupRepositoryPhaseReady BackupRepositoryPhase = "Ready" + BackupRepositoryPhaseNotReady BackupRepositoryPhase = "NotReady" + + BackupRepositoryTypeRestic string = "restic" + BackupRepositoryTypeKopia string = "kopia" +) + +// BackupRepositoryStatus is the current status of a BackupRepository. +type BackupRepositoryStatus struct { + // Phase is the current state of the BackupRepository. + // +optional + Phase BackupRepositoryPhase `json:"phase,omitempty"` + + // Message is a message about the current status of the BackupRepository. + // +optional + Message string `json:"message,omitempty"` + + // LastMaintenanceTime is the last time maintenance was run. + // +optional + // +nullable + LastMaintenanceTime *metav1.Time `json:"lastMaintenanceTime,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the genclient and k8s:deepcopy markers will no longer be needed and should be removed. 
+// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Repository Type",type="string",JSONPath=".spec.repositoryType" +// + +type BackupRepository struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec BackupRepositorySpec `json:"spec,omitempty"` + + // +optional + Status BackupRepositoryStatus `json:"status,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the k8s:deepcopy marker will no longer be needed and should be removed. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=backuprepositories,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=backuprepositories/status,verbs=get;update;patch + +// BackupRepositoryList is a list of BackupRepositories. +type BackupRepositoryList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []BackupRepository `json:"items"` +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_types.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_types.go new file mode 100644 index 000000000..858894dc7 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backup_types.go @@ -0,0 +1,517 @@ +/* +Copyright 2020 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Metadata struct { + Labels map[string]string `json:"labels,omitempty"` +} + +// BackupSpec defines the specification for a Velero backup. +type BackupSpec struct { + // +optional + Metadata `json:"metadata,omitempty"` + // IncludedNamespaces is a slice of namespace names to include objects + // from. If empty, all namespaces are included. + // +optional + // +nullable + IncludedNamespaces []string `json:"includedNamespaces,omitempty"` + + // ExcludedNamespaces contains a list of namespaces that are not + // included in the backup. + // +optional + // +nullable + ExcludedNamespaces []string `json:"excludedNamespaces,omitempty"` + + // IncludedResources is a slice of resource names to include + // in the backup. If empty, all resources are included. + // +optional + // +nullable + IncludedResources []string `json:"includedResources,omitempty"` + + // ExcludedResources is a slice of resource names that are not + // included in the backup. + // +optional + // +nullable + ExcludedResources []string `json:"excludedResources,omitempty"` + + // IncludedClusterScopedResources is a slice of cluster-scoped + // resource type names to include in the backup. + // If set to "*", all cluster-scoped resource types are included. + // The default value is empty, which means only related + // cluster-scoped resources are included. 
+ // +optional + // +nullable + IncludedClusterScopedResources []string `json:"includedClusterScopedResources,omitempty"` + + // ExcludedClusterScopedResources is a slice of cluster-scoped + // resource type names to exclude from the backup. + // If set to "*", all cluster-scoped resource types are excluded. + // The default value is empty. + // +optional + // +nullable + ExcludedClusterScopedResources []string `json:"excludedClusterScopedResources,omitempty"` + + // IncludedNamespaceScopedResources is a slice of namespace-scoped + // resource type names to include in the backup. + // The default value is "*". + // +optional + // +nullable + IncludedNamespaceScopedResources []string `json:"includedNamespaceScopedResources,omitempty"` + + // ExcludedNamespaceScopedResources is a slice of namespace-scoped + // resource type names to exclude from the backup. + // If set to "*", all namespace-scoped resource types are excluded. + // The default value is empty. + // +optional + // +nullable + ExcludedNamespaceScopedResources []string `json:"excludedNamespaceScopedResources,omitempty"` + + // LabelSelector is a metav1.LabelSelector to filter with + // when adding individual objects to the backup. If empty + // or nil, all objects are included. Optional. + // +optional + // +nullable + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + + // OrLabelSelectors is list of metav1.LabelSelector to filter with + // when adding individual objects to the backup. If multiple provided + // they will be joined by the OR operator. LabelSelector as well as + // OrLabelSelectors cannot co-exist in backup request, only one of them + // can be used. + // +optional + // +nullable + OrLabelSelectors []*metav1.LabelSelector `json:"orLabelSelectors,omitempty"` + + // SnapshotVolumes specifies whether to take snapshots + // of any PV's referenced in the set of objects included + // in the Backup. 
+ // +optional + // +nullable + SnapshotVolumes *bool `json:"snapshotVolumes,omitempty"` + + // TTL is a time.Duration-parseable string describing how long + // the Backup should be retained for. + // +optional + TTL metav1.Duration `json:"ttl,omitempty"` + + // IncludeClusterResources specifies whether cluster-scoped resources + // should be included for consideration in the backup. + // +optional + // +nullable + IncludeClusterResources *bool `json:"includeClusterResources,omitempty"` + + // Hooks represent custom behaviors that should be executed at different phases of the backup. + // +optional + Hooks BackupHooks `json:"hooks,omitempty"` + + // StorageLocation is a string containing the name of a BackupStorageLocation where the backup should be stored. + // +optional + StorageLocation string `json:"storageLocation,omitempty"` + + // VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup. + // +optional + VolumeSnapshotLocations []string `json:"volumeSnapshotLocations,omitempty"` + + // DefaultVolumesToRestic specifies whether restic should be used to take a + // backup of all pod volumes by default. + // + // Deprecated: this field is no longer used and will be removed entirely in future. Use DefaultVolumesToFsBackup instead. + // +optional + // +nullable + DefaultVolumesToRestic *bool `json:"defaultVolumesToRestic,omitempty"` + + // DefaultVolumesToFsBackup specifies whether pod volume file system backup should be used + // for all volumes by default. + // +optional + // +nullable + DefaultVolumesToFsBackup *bool `json:"defaultVolumesToFsBackup,omitempty"` + + // OrderedResources specifies the backup order of resources of specific Kind. + // The map key is the resource name and value is a list of object names separated by commas. + // Each resource name has format "namespace/objectname". For cluster resources, simply use "objectname". 
+ // +optional + // +nullable + OrderedResources map[string]string `json:"orderedResources,omitempty"` + + // CSISnapshotTimeout specifies the time used to wait for CSI VolumeSnapshot status turns to + // ReadyToUse during creation, before returning error as timeout. + // The default value is 10 minute. + // +optional + CSISnapshotTimeout metav1.Duration `json:"csiSnapshotTimeout,omitempty"` + + // ItemOperationTimeout specifies the time used to wait for asynchronous BackupItemAction operations + // The default value is 4 hour. + // +optional + ItemOperationTimeout metav1.Duration `json:"itemOperationTimeout,omitempty"` + // ResourcePolicy specifies the referenced resource policies that backup should follow + // +optional + ResourcePolicy *v1.TypedLocalObjectReference `json:"resourcePolicy,omitempty"` + + // SnapshotMoveData specifies whether snapshot data should be moved + // +optional + // +nullable + SnapshotMoveData *bool `json:"snapshotMoveData,omitempty"` + + // DataMover specifies the data mover to be used by the backup. + // If DataMover is "" or "velero", the built-in data mover will be used. + // +optional + DataMover string `json:"datamover,omitempty"` + + // UploaderConfig specifies the configuration for the uploader. + // +optional + // +nullable + UploaderConfig *UploaderConfigForBackup `json:"uploaderConfig,omitempty"` +} + +// UploaderConfigForBackup defines the configuration for the uploader when doing backup. +type UploaderConfigForBackup struct { + // ParallelFilesUpload is the number of files parallel uploads to perform when using the uploader. + // +optional + ParallelFilesUpload int `json:"parallelFilesUpload,omitempty"` +} + +// BackupHooks contains custom behaviors that should be executed at different phases of the backup. +type BackupHooks struct { + // Resources are hooks that should be executed when backing up individual instances of a resource. 
+ // +optional + // +nullable + Resources []BackupResourceHookSpec `json:"resources,omitempty"` +} + +// BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on +// the rules defined for namespaces, resources, and label selector. +type BackupResourceHookSpec struct { + // Name is the name of this hook. + Name string `json:"name"` + + // IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies + // to all namespaces. + // +optional + // +nullable + IncludedNamespaces []string `json:"includedNamespaces,omitempty"` + + // ExcludedNamespaces specifies the namespaces to which this hook spec does not apply. + // +optional + // +nullable + ExcludedNamespaces []string `json:"excludedNamespaces,omitempty"` + + // IncludedResources specifies the resources to which this hook spec applies. If empty, it applies + // to all resources. + // +optional + // +nullable + IncludedResources []string `json:"includedResources,omitempty"` + + // ExcludedResources specifies the resources to which this hook spec does not apply. + // +optional + // +nullable + ExcludedResources []string `json:"excludedResources,omitempty"` + + // LabelSelector, if specified, filters the resources to which this hook spec applies. + // +optional + // +nullable + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + + // PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup. + // These are executed before any "additional items" from item actions are processed. + // +optional + PreHooks []BackupResourceHook `json:"pre,omitempty"` + + // PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup. + // These are executed after all "additional items" from item actions are processed. + // +optional + PostHooks []BackupResourceHook `json:"post,omitempty"` +} + +// BackupResourceHook defines a hook for a resource. 
+type BackupResourceHook struct { + // Exec defines an exec hook. + Exec *ExecHook `json:"exec"` +} + +// ExecHook is a hook that uses the pod exec API to execute a command in a container in a pod. +type ExecHook struct { + // Container is the container in the pod where the command should be executed. If not specified, + // the pod's first container is used. + // +optional + Container string `json:"container,omitempty"` + + // Command is the command and arguments to execute. + // +kubebuilder:validation:MinItems=1 + Command []string `json:"command"` + + // OnError specifies how Velero should behave if it encounters an error executing this hook. + // +optional + OnError HookErrorMode `json:"onError,omitempty"` + + // Timeout defines the maximum amount of time Velero should wait for the hook to complete before + // considering the execution a failure. + // +optional + Timeout metav1.Duration `json:"timeout,omitempty"` +} + +// HookErrorMode defines how Velero should treat an error from a hook. +// +kubebuilder:validation:Enum=Continue;Fail +type HookErrorMode string + +const ( + // HookErrorModeContinue means that an error from a hook is acceptable and the backup/restore can + // proceed with the rest of hooks' execution. This backup/restore should be in `PartiallyFailed` status. + HookErrorModeContinue HookErrorMode = "Continue" + + // HookErrorModeFail means that an error from a hook is problematic and Velero should stop executing following hooks. + // This backup/restore should be in `PartiallyFailed` status. + HookErrorModeFail HookErrorMode = "Fail" +) + +// BackupPhase is a string representation of the lifecycle phase +// of a Velero backup. 
+// +kubebuilder:validation:Enum=New;FailedValidation;InProgress;WaitingForPluginOperations;WaitingForPluginOperationsPartiallyFailed;Finalizing;FinalizingPartiallyFailed;Completed;PartiallyFailed;Failed;Deleting +type BackupPhase string + +const ( + // BackupPhaseNew means the backup has been created but not + // yet processed by the BackupController. + BackupPhaseNew BackupPhase = "New" + + // BackupPhaseFailedValidation means the backup has failed + // the controller's validations and therefore will not run. + BackupPhaseFailedValidation BackupPhase = "FailedValidation" + + // BackupPhaseInProgress means the backup is currently executing. + BackupPhaseInProgress BackupPhase = "InProgress" + + // BackupPhaseWaitingForPluginOperations means the backup of + // Kubernetes resources, creation of snapshots, and other + // async plugin operations was successful and snapshot data is + // currently uploading or other plugin operations are still + // ongoing. The backup is not usable yet. + BackupPhaseWaitingForPluginOperations BackupPhase = "WaitingForPluginOperations" + + // BackupPhaseWaitingForPluginOperationsPartiallyFailed means + // the backup of Kubernetes resources, creation of snapshots, + // and other async plugin operations partially failed (final + // phase will be PartiallyFailed) and snapshot data is + // currently uploading or other plugin operations are still + // ongoing. The backup is not usable yet. + BackupPhaseWaitingForPluginOperationsPartiallyFailed BackupPhase = "WaitingForPluginOperationsPartiallyFailed" + + // BackupPhaseFinalizing means the backup of + // Kubernetes resources, creation of snapshots, and other + // async plugin operations were successful and snapshot upload and + // other plugin operations are now complete, but the Backup is awaiting + // final update of resources modified during async operations. + // The backup is not usable yet. 
+ BackupPhaseFinalizing BackupPhase = "Finalizing" + + // BackupPhaseFinalizingPartiallyFailed means the backup of + // Kubernetes resources, creation of snapshots, and other + // async plugin operations were successful and snapshot upload and + // other plugin operations are now complete, but one or more errors + // occurred during backup or async operation processing, and the + // Backup is awaiting final update of resources modified during async + // operations. The backup is not usable yet. + BackupPhaseFinalizingPartiallyFailed BackupPhase = "FinalizingPartiallyFailed" + + // BackupPhaseCompleted means the backup has run successfully without + // errors. + BackupPhaseCompleted BackupPhase = "Completed" + + // BackupPhasePartiallyFailed means the backup has run to completion + // but encountered 1+ errors backing up individual items. + BackupPhasePartiallyFailed BackupPhase = "PartiallyFailed" + + // BackupPhaseFailed means the backup ran but encountered an error that + // prevented it from completing successfully. + BackupPhaseFailed BackupPhase = "Failed" + + // BackupPhaseDeleting means the backup and all its associated data are being deleted. + BackupPhaseDeleting BackupPhase = "Deleting" +) + +// BackupStatus captures the current status of a Velero backup. +type BackupStatus struct { + // Version is the backup format major version. + // Deprecated: Please see FormatVersion + // +optional + Version int `json:"version,omitempty"` + + // FormatVersion is the backup format version, including major, minor, and patch version. + // +optional + FormatVersion string `json:"formatVersion,omitempty"` + + // Expiration is when this Backup is eligible for garbage-collection. + // +optional + // +nullable + Expiration *metav1.Time `json:"expiration,omitempty"` + + // Phase is the current state of the Backup. + // +optional + Phase BackupPhase `json:"phase,omitempty"` + + // ValidationErrors is a slice of all validation errors (if + // applicable). 
+ // +optional + // +nullable + ValidationErrors []string `json:"validationErrors,omitempty"` + + // StartTimestamp records the time a backup was started. + // Separate from CreationTimestamp, since that value changes + // on restores. + // The server's time is used for StartTimestamps + // +optional + // +nullable + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + + // CompletionTimestamp records the time a backup was completed. + // Completion time is recorded even on failed backups. + // Completion time is recorded before uploading the backup object. + // The server's time is used for CompletionTimestamps + // +optional + // +nullable + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` + + // VolumeSnapshotsAttempted is the total number of attempted + // volume snapshots for this backup. + // +optional + VolumeSnapshotsAttempted int `json:"volumeSnapshotsAttempted,omitempty"` + + // VolumeSnapshotsCompleted is the total number of successfully + // completed volume snapshots for this backup. + // +optional + VolumeSnapshotsCompleted int `json:"volumeSnapshotsCompleted,omitempty"` + + // FailureReason is an error that caused the entire backup to fail. + // +optional + FailureReason string `json:"failureReason,omitempty"` + + // Warnings is a count of all warning messages that were generated during + // execution of the backup. The actual warnings are in the backup's log + // file in object storage. + // +optional + Warnings int `json:"warnings,omitempty"` + + // Errors is a count of all error messages that were generated during + // execution of the backup. The actual errors are in the backup's log + // file in object storage. + // +optional + Errors int `json:"errors,omitempty"` + + // Progress contains information about the backup's execution progress. Note + // that this information is best-effort only -- if Velero fails to update it + // during a backup for any reason, it may be inaccurate/stale. 
+ // +optional + // +nullable + Progress *BackupProgress `json:"progress,omitempty"` + + // CSIVolumeSnapshotsAttempted is the total number of attempted + // CSI VolumeSnapshots for this backup. + // +optional + CSIVolumeSnapshotsAttempted int `json:"csiVolumeSnapshotsAttempted,omitempty"` + + // CSIVolumeSnapshotsCompleted is the total number of successfully + // completed CSI VolumeSnapshots for this backup. + // +optional + CSIVolumeSnapshotsCompleted int `json:"csiVolumeSnapshotsCompleted,omitempty"` + + // BackupItemOperationsAttempted is the total number of attempted + // async BackupItemAction operations for this backup. + // +optional + BackupItemOperationsAttempted int `json:"backupItemOperationsAttempted,omitempty"` + + // BackupItemOperationsCompleted is the total number of successfully completed + // async BackupItemAction operations for this backup. + // +optional + BackupItemOperationsCompleted int `json:"backupItemOperationsCompleted,omitempty"` + + // BackupItemOperationsFailed is the total number of async + // BackupItemAction operations for this backup which ended with an error. + // +optional + BackupItemOperationsFailed int `json:"backupItemOperationsFailed,omitempty"` + + // HookStatus contains information about the status of the hooks. + // +optional + // +nullable + HookStatus *HookStatus `json:"hookStatus,omitempty"` +} + +// BackupProgress stores information about the progress of a Backup's execution. +type BackupProgress struct { + // TotalItems is the total number of items to be backed up. This number may change + // throughout the execution of the backup due to plugins that return additional related + // items to back up, the velero.io/exclude-from-backup label, and various other + // filters that happen as items are processed. + // +optional + TotalItems int `json:"totalItems,omitempty"` + + // ItemsBackedUp is the number of items that have actually been written to the + // backup tarball so far. 
+ // +optional + ItemsBackedUp int `json:"itemsBackedUp,omitempty"` +} + +// HookStatus stores information about the status of the hooks. +type HookStatus struct { + // HooksAttempted is the total number of attempted hooks + // Specifically, HooksAttempted represents the number of hooks that failed to execute + // and the number of hooks that executed successfully. + // +optional + HooksAttempted int `json:"hooksAttempted,omitempty"` + + // HooksFailed is the total number of hooks which ended with an error + // +optional + HooksFailed int `json:"hooksFailed,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion +// +kubebuilder:rbac:groups=velero.io,resources=backups,verbs=create;delete;get;list;patch;update;watch +// +kubebuilder:rbac:groups=velero.io,resources=backups/status,verbs=get;update;patch + +// Backup is a Velero resource that represents the capture of Kubernetes +// cluster state at a point in time (API objects and associated volume state). +type Backup struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec BackupSpec `json:"spec,omitempty"` + + // +optional + Status BackupStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupList is a list of Backups. 
+type BackupList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []Backup `json:"items"` +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backupstoragelocation_types.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backupstoragelocation_types.go new file mode 100644 index 000000000..e44671222 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/backupstoragelocation_types.go @@ -0,0 +1,179 @@ +/* +Copyright 2017, 2020 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// BackupStorageLocationSpec defines the desired state of a Velero BackupStorageLocation +type BackupStorageLocationSpec struct { + // Provider is the provider of the backup storage. + Provider string `json:"provider"` + + // Config is for provider-specific configuration fields. + // +optional + Config map[string]string `json:"config,omitempty"` + + // Credential contains the credential information intended to be used with this location + // +optional + Credential *corev1api.SecretKeySelector `json:"credential,omitempty"` + + StorageType `json:",inline"` + + // Default indicates this location is the default backup storage location. 
+ // +optional + Default bool `json:"default,omitempty"` + + // AccessMode defines the permissions for the backup storage location. + // +optional + AccessMode BackupStorageLocationAccessMode `json:"accessMode,omitempty"` + + // BackupSyncPeriod defines how frequently to sync backup API objects from object storage. A value of 0 disables sync. + // +optional + // +nullable + BackupSyncPeriod *metav1.Duration `json:"backupSyncPeriod,omitempty"` + + // ValidationFrequency defines how frequently to validate the corresponding object storage. A value of 0 disables validation. + // +optional + // +nullable + ValidationFrequency *metav1.Duration `json:"validationFrequency,omitempty"` +} + +// BackupStorageLocationStatus defines the observed state of BackupStorageLocation +type BackupStorageLocationStatus struct { + // Phase is the current state of the BackupStorageLocation. + // +optional + Phase BackupStorageLocationPhase `json:"phase,omitempty"` + + // LastSyncedTime is the last time the contents of the location were synced into + // the cluster. + // +optional + // +nullable + LastSyncedTime *metav1.Time `json:"lastSyncedTime,omitempty"` + + // LastValidationTime is the last time the backup store location was validated + // the cluster. + // +optional + // +nullable + LastValidationTime *metav1.Time `json:"lastValidationTime,omitempty"` + + // Message is a message about the backup storage location's status. + // +optional + Message string `json:"message,omitempty"` + + // LastSyncedRevision is the value of the `metadata/revision` file in the backup + // storage location the last time the BSL's contents were synced into the cluster. + // + // Deprecated: this field is no longer updated or used for detecting changes to + // the location's contents and will be removed entirely in v2.0. + // +optional + LastSyncedRevision types.UID `json:"lastSyncedRevision,omitempty"` + + // AccessMode is an unused field. 
+ // + // Deprecated: there is now an AccessMode field on the Spec and this field + // will be removed entirely as of v2.0. + // +optional + AccessMode BackupStorageLocationAccessMode `json:"accessMode,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the genclient and k8s:deepcopy markers will no longer be needed and should be removed. +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=bsl +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Backup Storage Location status such as Available/Unavailable" +// +kubebuilder:printcolumn:name="Last Validated",type="date",JSONPath=".status.lastValidationTime",description="LastValidationTime is the last time the backup store location was validated" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Default",type="boolean",JSONPath=".spec.default",description="Default backup storage location" + +// BackupStorageLocation is a location where Velero stores backup objects +type BackupStorageLocation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BackupStorageLocationSpec `json:"spec,omitempty"` + Status BackupStorageLocationStatus `json:"status,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the k8s:deepcopy marker will no longer be needed and should be removed. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=backupstoragelocations,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=backupstoragelocations/status,verbs=get;update;patch + +// BackupStorageLocationList contains a list of BackupStorageLocation +type BackupStorageLocationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupStorageLocation `json:"items"` +} + +// StorageType represents the type of storage that a backup location uses. +// ObjectStorage must be non-nil, since it is currently the only supported StorageType. +type StorageType struct { + ObjectStorage *ObjectStorageLocation `json:"objectStorage"` +} + +// ObjectStorageLocation specifies the settings necessary to connect to a provider's object storage. +type ObjectStorageLocation struct { + // Bucket is the bucket to use for object storage. + Bucket string `json:"bucket"` + + // Prefix is the path inside a bucket to use for Velero storage. Optional. + // +optional + Prefix string `json:"prefix,omitempty"` + + // CACert defines a CA bundle to use when verifying TLS connections to the provider. + // +optional + CACert []byte `json:"caCert,omitempty"` +} + +// BackupStorageLocationPhase is the lifecycle phase of a Velero BackupStorageLocation. +// +kubebuilder:validation:Enum=Available;Unavailable +// +kubebuilder:default=Unavailable +type BackupStorageLocationPhase string + +const ( + // BackupStorageLocationPhaseAvailable means the location is available to read and write from. + BackupStorageLocationPhaseAvailable BackupStorageLocationPhase = "Available" + + // BackupStorageLocationPhaseUnavailable means the location is unavailable to read and write from. 
+ BackupStorageLocationPhaseUnavailable BackupStorageLocationPhase = "Unavailable" +) + +// BackupStorageLocationAccessMode represents the permissions for a BackupStorageLocation. +// +kubebuilder:validation:Enum=ReadOnly;ReadWrite +type BackupStorageLocationAccessMode string + +const ( + // BackupStorageLocationAccessModeReadOnly represents read-only access to a BackupStorageLocation. + BackupStorageLocationAccessModeReadOnly BackupStorageLocationAccessMode = "ReadOnly" + + // BackupStorageLocationAccessModeReadWrite represents read and write access to a BackupStorageLocation. + BackupStorageLocationAccessModeReadWrite BackupStorageLocationAccessMode = "ReadWrite" +) + +// TODO(2.0): remove the AccessMode field from BackupStorageLocationStatus. +// TODO(2.0): remove the LastSyncedRevision field from BackupStorageLocationStatus. diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/constants.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/constants.go new file mode 100644 index 000000000..a7292d568 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/constants.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // DefaultNamespace is the Kubernetes namespace that is used by default for + // the Velero server and API objects. 
+ DefaultNamespace = "velero" + + // ResourcesDir is a top-level directory expected in backups which contains sub-directories + // for each resource type in the backup. + ResourcesDir = "resources" + + // MetadataDir is a top-level directory expected in backups which contains + // files that store metadata about the backup, such as the backup version. + MetadataDir = "metadata" + + // ClusterScopedDir is the name of the directory containing cluster-scoped + // resources within a Velero backup. + ClusterScopedDir = "cluster" + + // NamespaceScopedDir is the name of the directory containing namespace-scoped + // resource within a Velero backup. + NamespaceScopedDir = "namespaces" + + // CSIFeatureFlag is the feature flag string that defines whether or not CSI features are being used. + CSIFeatureFlag = "EnableCSI" + + // PreferredVersionDir is the suffix name of the directory containing the preferred version of the API group + // resource within a Velero backup. + PreferredVersionDir = "-preferredversion" + + // APIGroupVersionsFeatureFlag is the feature flag string that defines whether or not to handle multiple API Group Versions + APIGroupVersionsFeatureFlag = "EnableAPIGroupVersions" +) diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/delete_backup_request_types.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/delete_backup_request_types.go new file mode 100644 index 000000000..8c7b1fa09 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/delete_backup_request_types.go @@ -0,0 +1,87 @@ +/* +Copyright 2018 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// DeleteBackupRequestSpec is the specification for which backups to delete. +type DeleteBackupRequestSpec struct { + BackupName string `json:"backupName"` +} + +// DeleteBackupRequestPhase represents the lifecycle phase of a DeleteBackupRequest. +// +kubebuilder:validation:Enum=New;InProgress;Processed +type DeleteBackupRequestPhase string + +const ( + // DeleteBackupRequestPhaseNew means the DeleteBackupRequest has not been processed yet. + DeleteBackupRequestPhaseNew DeleteBackupRequestPhase = "New" + + // DeleteBackupRequestPhaseInProgress means the DeleteBackupRequest is being processed. + DeleteBackupRequestPhaseInProgress DeleteBackupRequestPhase = "InProgress" + + // DeleteBackupRequestPhaseProcessed means the DeleteBackupRequest has been processed. + DeleteBackupRequestPhaseProcessed DeleteBackupRequestPhase = "Processed" +) + +// DeleteBackupRequestStatus is the current status of a DeleteBackupRequest. +type DeleteBackupRequestStatus struct { + // Phase is the current state of the DeleteBackupRequest. + // +optional + Phase DeleteBackupRequestPhase `json:"phase,omitempty"` + + // Errors contains any errors that were encountered during the deletion process. + // +optional + // +nullable + Errors []string `json:"errors,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, the genclient and k8s:deepcopy markers will no longer be needed and should be removed. 
+// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="BackupName",type="string",JSONPath=".spec.backupName",description="The name of the backup to be deleted" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="The status of the deletion request" + +// DeleteBackupRequest is a request to delete one or more backups. +type DeleteBackupRequest struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec DeleteBackupRequestSpec `json:"spec,omitempty"` + + // +optional + Status DeleteBackupRequestStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// DeleteBackupRequestList is a list of DeleteBackupRequests. +type DeleteBackupRequestList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []DeleteBackupRequest `json:"items"` +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/doc.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/doc.go new file mode 100644 index 000000000..4431315c2 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package v1 is the v1 version of the API. +// +groupName=velero.io +package v1 diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/download_request_types.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/download_request_types.go new file mode 100644 index 000000000..f23118fe5 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/download_request_types.go @@ -0,0 +1,126 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// DownloadRequestSpec is the specification for a download request. +type DownloadRequestSpec struct { + // Target is what to download (e.g. logs for a backup). + Target DownloadTarget `json:"target"` +} + +// DownloadTargetKind represents what type of file to download. 
+// +kubebuilder:validation:Enum=BackupLog;BackupContents;BackupVolumeSnapshots;BackupItemOperations;BackupResourceList;BackupResults;RestoreLog;RestoreResults;RestoreResourceList;RestoreItemOperations;CSIBackupVolumeSnapshots;CSIBackupVolumeSnapshotContents;BackupVolumeInfos;RestoreVolumeInfo +type DownloadTargetKind string + +const ( + DownloadTargetKindBackupLog DownloadTargetKind = "BackupLog" + DownloadTargetKindBackupContents DownloadTargetKind = "BackupContents" + DownloadTargetKindBackupVolumeSnapshots DownloadTargetKind = "BackupVolumeSnapshots" + DownloadTargetKindBackupItemOperations DownloadTargetKind = "BackupItemOperations" + DownloadTargetKindBackupResourceList DownloadTargetKind = "BackupResourceList" + DownloadTargetKindBackupResults DownloadTargetKind = "BackupResults" + DownloadTargetKindRestoreLog DownloadTargetKind = "RestoreLog" + DownloadTargetKindRestoreResults DownloadTargetKind = "RestoreResults" + DownloadTargetKindRestoreResourceList DownloadTargetKind = "RestoreResourceList" + DownloadTargetKindRestoreItemOperations DownloadTargetKind = "RestoreItemOperations" + DownloadTargetKindCSIBackupVolumeSnapshots DownloadTargetKind = "CSIBackupVolumeSnapshots" + DownloadTargetKindCSIBackupVolumeSnapshotContents DownloadTargetKind = "CSIBackupVolumeSnapshotContents" + DownloadTargetKindBackupVolumeInfos DownloadTargetKind = "BackupVolumeInfos" + DownloadTargetKindRestoreVolumeInfo DownloadTargetKind = "RestoreVolumeInfo" +) + +// DownloadTarget is the specification for what kind of file to download, and the name of the +// resource with which it's associated. +type DownloadTarget struct { + // Kind is the type of file to download. + Kind DownloadTargetKind `json:"kind"` + + // Name is the name of the Kubernetes resource with which the file is associated. + Name string `json:"name"` +} + +// DownloadRequestPhase represents the lifecycle phase of a DownloadRequest. 
+// +kubebuilder:validation:Enum=New;Processed +type DownloadRequestPhase string + +const ( + // DownloadRequestPhaseNew means the DownloadRequest has not been processed by the + // DownloadRequestController yet. + DownloadRequestPhaseNew DownloadRequestPhase = "New" + + // DownloadRequestPhaseProcessed means the DownloadRequest has been processed by the + // DownloadRequestController. + DownloadRequestPhaseProcessed DownloadRequestPhase = "Processed" +) + +// DownloadRequestStatus is the current status of a DownloadRequest. +type DownloadRequestStatus struct { + // Phase is the current state of the DownloadRequest. + // +optional + Phase DownloadRequestPhase `json:"phase,omitempty"` + + // DownloadURL contains the pre-signed URL for the target file. + // +optional + DownloadURL string `json:"downloadURL,omitempty"` + + // Expiration is when this DownloadRequest expires and can be deleted by the system. + // +optional + // +nullable + Expiration *metav1.Time `json:"expiration,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the k8s:deepcopy marker will no longer be needed and should be removed. +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion + +// DownloadRequest is a request to download an artifact from backup object storage, such as a backup +// log file. +type DownloadRequest struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec DownloadRequestSpec `json:"spec,omitempty"` + + // +optional + Status DownloadRequestStatus `json:"status,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the k8s:deepcopy marker will no longer be needed and should be removed. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=downloadrequests,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=downloadrequests/status,verbs=get;update;patch + +// DownloadRequestList is a list of DownloadRequests. +type DownloadRequestList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []DownloadRequest `json:"items"` +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/groupversion_info.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/groupversion_info.go new file mode 100644 index 000000000..ab5b20433 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2020 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1 contains API Schema definitions for the velero v1 API group +// +kubebuilder:object:generate=true +// +groupName=velero.io +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "velero.io", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/labels_annotations.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/labels_annotations.go new file mode 100644 index 000000000..c86b4e91b --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/labels_annotations.go @@ -0,0 +1,151 @@ +/* +Copyright 2018 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // BackupNameLabel is the label key used to identify a backup by name. + BackupNameLabel = "velero.io/backup-name" + + // BackupUIDLabel is the label key used to identify a backup by uid. + BackupUIDLabel = "velero.io/backup-uid" + + // RestoreNameLabel is the label key used to identify a restore by name. 
+ RestoreNameLabel = "velero.io/restore-name" + + // ScheduleNameLabel is the label key used to identify a schedule by name. + ScheduleNameLabel = "velero.io/schedule-name" + + // RestoreUIDLabel is the label key used to identify a restore by uid. + RestoreUIDLabel = "velero.io/restore-uid" + + // PodUIDLabel is the label key used to identify a pod by uid. + PodUIDLabel = "velero.io/pod-uid" + + // PVCUIDLabel is the label key used to identify a PVC by uid. + PVCUIDLabel = "velero.io/pvc-uid" + + // PodVolumeOperationTimeoutAnnotation is the annotation key used to apply + // a backup/restore-specific timeout value for pod volume operations (i.e. + // pod volume backups/restores). + PodVolumeOperationTimeoutAnnotation = "velero.io/pod-volume-timeout" + + // StorageLocationLabel is the label key used to identify the storage + // location of a backup. + StorageLocationLabel = "velero.io/storage-location" + + // VolumeNamespaceLabel is the label key used to identify which + // namespace a repository stores backups for. + VolumeNamespaceLabel = "velero.io/volume-namespace" + + // RepositoryTypeLabel is the label key used to identify the type of a repository + RepositoryTypeLabel = "velero.io/repository-type" + + // DataUploadLabel is the label key used to identify the dataupload for snapshot backup pod + DataUploadLabel = "velero.io/data-upload" + + // DataUploadSnapshotInfoLabel is used to identify the configmap that contains the snapshot info of a data upload + // normally the value of the label should the "true" or "false" + DataUploadSnapshotInfoLabel = "velero.io/data-upload-snapshot-info" + + // DataDownloadLabel is the label key used to identify the datadownload for snapshot restore pod + DataDownloadLabel = "velero.io/data-download" + + // SourceClusterK8sVersionAnnotation is the label key used to identify the k8s + // git version of the backup , i.e. 
v1.16.4 + SourceClusterK8sGitVersionAnnotation = "velero.io/source-cluster-k8s-gitversion" + + // SourceClusterK8sMajorVersionAnnotation is the label key used to identify the k8s + // major version of the backup , i.e. 1 + SourceClusterK8sMajorVersionAnnotation = "velero.io/source-cluster-k8s-major-version" + + // SourceClusterK8sMajorVersionAnnotation is the label key used to identify the k8s + // minor version of the backup , i.e. 16 + SourceClusterK8sMinorVersionAnnotation = "velero.io/source-cluster-k8s-minor-version" + + // ResourceTimeoutAnnotation is the annotation key used to carry the global resource + // timeout value for backup to plugins. + ResourceTimeoutAnnotation = "velero.io/resource-timeout" + + // AsyncOperationIDLabel is the label key used to identify the async operation ID + AsyncOperationIDLabel = "velero.io/async-operation-id" + + // PVCNameLabel is the label key used to identify the PVC's namespace and name. + // The format is /. + PVCNamespaceNameLabel = "velero.io/pvc-namespace-name" + + // ResourceUsageLabel is the label key to explain the Velero resource usage. + ResourceUsageLabel = "velero.io/resource-usage" + + // VolumesToBackupAnnotation is the annotation on a pod whose mounted volumes + // need to be backed up using pod volume backup. + VolumesToBackupAnnotation = "backup.velero.io/backup-volumes" + + // VolumesToExcludeAnnotation is the annotation on a pod whose mounted volumes + // should be excluded from pod volume backup. + VolumesToExcludeAnnotation = "backup.velero.io/backup-volumes-excludes" + + // ExcludeFromBackupLabel is the label to exclude k8s resource from backup, + // even if the resource contains a matching selector label. 
+ ExcludeFromBackupLabel = "velero.io/exclude-from-backup" +) + +type AsyncOperationIDPrefix string + +const ( + AsyncOperationIDPrefixDataDownload AsyncOperationIDPrefix = "dd-" + AsyncOperationIDPrefixDataUpload AsyncOperationIDPrefix = "du-" +) + +type VeleroResourceUsage string + +const ( + VeleroResourceUsageDataUploadResult VeleroResourceUsage = "DataUpload" +) + +// CSI related plugin actions' constant variable +const ( + VolumeSnapshotLabel = "velero.io/volume-snapshot-name" + VolumeSnapshotHandleAnnotation = "velero.io/csi-volumesnapshot-handle" + VolumeSnapshotRestoreSize = "velero.io/csi-volumesnapshot-restore-size" + DriverNameAnnotation = "velero.io/csi-driver-name" + VSCDeletionPolicyAnnotation = "velero.io/csi-vsc-deletion-policy" + VolumeSnapshotClassSelectorLabel = "velero.io/csi-volumesnapshot-class" + VolumeSnapshotClassDriverBackupAnnotationPrefix = "velero.io/csi-volumesnapshot-class" + VolumeSnapshotClassDriverPVCAnnotation = "velero.io/csi-volumesnapshot-class" + + // There is no release w/ these constants exported. Using the strings for now. + // CSI Annotation volumesnapshotclass + // https://github.com/kubernetes-csi/external-snapshotter/blob/master/pkg/utils/util.go#L59-L60 + PrefixedListSecretNameAnnotation = "csi.storage.k8s.io/snapshotter-list-secret-name" // #nosec G101 + PrefixedListSecretNamespaceAnnotation = "csi.storage.k8s.io/snapshotter-list-secret-namespace" // #nosec G101 + + // CSI Annotation volumesnapshotcontents + PrefixedSecretNameAnnotation = "csi.storage.k8s.io/snapshotter-secret-name" // #nosec G101 + PrefixedSecretNamespaceAnnotation = "csi.storage.k8s.io/snapshotter-secret-namespace" // #nosec G101 + + // Velero checks this annotation to determine whether to skip resource excluding check. 
+ MustIncludeAdditionalItemAnnotation = "backup.velero.io/must-include-additional-items" + // SkippedNoCSIPVAnnotation - Velero checks this annotation on processed PVC to + // find out if the snapshot was skipped b/c the PV is not provisioned via CSI + SkippedNoCSIPVAnnotation = "backup.velero.io/skipped-no-csi-pv" + + // DynamicPVRestoreLabel is the label key for dynamic PV restore + DynamicPVRestoreLabel = "velero.io/dynamic-pv-restore" + + // DataUploadNameAnnotation is the label key for the DataUpload name + DataUploadNameAnnotation = "velero.io/data-upload-name" +) diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_backup_types.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_backup_types.go new file mode 100644 index 000000000..b3070e3dd --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_backup_types.go @@ -0,0 +1,157 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/vmware-tanzu/velero/pkg/apis/velero/shared" +) + +// PodVolumeBackupSpec is the specification for a PodVolumeBackup. +type PodVolumeBackupSpec struct { + // Node is the name of the node that the Pod is running on. + Node string `json:"node"` + + // Pod is a reference to the pod containing the volume to be backed up. 
+ Pod corev1api.ObjectReference `json:"pod"` + + // Volume is the name of the volume within the Pod to be backed + // up. + Volume string `json:"volume"` + + // BackupStorageLocation is the name of the backup storage location + // where the backup repository is stored. + BackupStorageLocation string `json:"backupStorageLocation"` + + // RepoIdentifier is the backup repository identifier. + RepoIdentifier string `json:"repoIdentifier"` + + // UploaderType is the type of the uploader to handle the data transfer. + // +kubebuilder:validation:Enum=kopia;restic;"" + // +optional + UploaderType string `json:"uploaderType"` + + // Tags are a map of key-value pairs that should be applied to the + // volume backup as tags. + // +optional + Tags map[string]string `json:"tags,omitempty"` + + // UploaderSettings are a map of key-value pairs that should be applied to the + // uploader configuration. + // +optional + // +nullable + UploaderSettings map[string]string `json:"uploaderSettings,omitempty"` +} + +// PodVolumeBackupPhase represents the lifecycle phase of a PodVolumeBackup. +// +kubebuilder:validation:Enum=New;InProgress;Completed;Failed +type PodVolumeBackupPhase string + +const ( + PodVolumeBackupPhaseNew PodVolumeBackupPhase = "New" + PodVolumeBackupPhaseInProgress PodVolumeBackupPhase = "InProgress" + PodVolumeBackupPhaseCompleted PodVolumeBackupPhase = "Completed" + PodVolumeBackupPhaseFailed PodVolumeBackupPhase = "Failed" +) + +// PodVolumeBackupStatus is the current status of a PodVolumeBackup. +type PodVolumeBackupStatus struct { + // Phase is the current state of the PodVolumeBackup. + // +optional + Phase PodVolumeBackupPhase `json:"phase,omitempty"` + + // Path is the full path within the controller pod being backed up. + // +optional + Path string `json:"path,omitempty"` + + // SnapshotID is the identifier for the snapshot of the pod volume. 
+ // +optional + SnapshotID string `json:"snapshotID,omitempty"` + + // Message is a message about the pod volume backup's status. + // +optional + Message string `json:"message,omitempty"` + + // StartTimestamp records the time a backup was started. + // Separate from CreationTimestamp, since that value changes + // on restores. + // The server's time is used for StartTimestamps + // +optional + // +nullable + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + + // CompletionTimestamp records the time a backup was completed. + // Completion time is recorded even on failed backups. + // Completion time is recorded before uploading the backup object. + // The server's time is used for CompletionTimestamps + // +optional + // +nullable + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` + + // Progress holds the total number of bytes of the volume and the current + // number of backed up bytes. This can be used to display progress information + // about the backup operation. + // +optional + Progress shared.DataMoveOperationProgress `json:"progress,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runttime-controller client, +// the genclient and k8s:deepcopy markers will no longer be needed and should be removed. 
+// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Pod Volume Backup status such as New/InProgress" +// +kubebuilder:printcolumn:name="Created",type="date",JSONPath=".status.startTimestamp",description="Time when this backup was started" +// +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".spec.pod.namespace",description="Namespace of the pod containing the volume to be backed up" +// +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.pod.name",description="Name of the pod containing the volume to be backed up" +// +kubebuilder:printcolumn:name="Volume",type="string",JSONPath=".spec.volume",description="Name of the volume to be backed up" +// +kubebuilder:printcolumn:name="Uploader Type",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer" +// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:object:root=true +// +kubebuilder:object:generate=true + +type PodVolumeBackup struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec PodVolumeBackupSpec `json:"spec,omitempty"` + + // +optional + Status PodVolumeBackupStatus `json:"status,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the k8s:deepcopy marker will no longer be needed and should be removed. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=podvolumebackups/status,verbs=get;update;patch + +// PodVolumeBackupList is a list of PodVolumeBackups. +type PodVolumeBackupList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []PodVolumeBackup `json:"items"` +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_restore_type.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_restore_type.go new file mode 100644 index 000000000..34bc7e530 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/pod_volume_restore_type.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/vmware-tanzu/velero/pkg/apis/velero/shared" +) + +// PodVolumeRestoreSpec is the specification for a PodVolumeRestore. +type PodVolumeRestoreSpec struct { + // Pod is a reference to the pod containing the volume to be restored. + Pod corev1api.ObjectReference `json:"pod"` + + // Volume is the name of the volume within the Pod to be restored. 
+ Volume string `json:"volume"` + + // BackupStorageLocation is the name of the backup storage location + // where the backup repository is stored. + BackupStorageLocation string `json:"backupStorageLocation"` + + // RepoIdentifier is the backup repository identifier. + RepoIdentifier string `json:"repoIdentifier"` + + // UploaderType is the type of the uploader to handle the data transfer. + // +kubebuilder:validation:Enum=kopia;restic;"" + // +optional + UploaderType string `json:"uploaderType"` + + // SnapshotID is the ID of the volume snapshot to be restored. + SnapshotID string `json:"snapshotID"` + + // SourceNamespace is the original namespace for namaspace mapping. + SourceNamespace string `json:"sourceNamespace"` + + // UploaderSettings are a map of key-value pairs that should be applied to the + // uploader configuration. + // +optional + // +nullable + UploaderSettings map[string]string `json:"uploaderSettings,omitempty"` +} + +// PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore. +// +kubebuilder:validation:Enum=New;InProgress;Completed;Failed +type PodVolumeRestorePhase string + +const ( + PodVolumeRestorePhaseNew PodVolumeRestorePhase = "New" + PodVolumeRestorePhaseInProgress PodVolumeRestorePhase = "InProgress" + PodVolumeRestorePhaseCompleted PodVolumeRestorePhase = "Completed" + PodVolumeRestorePhaseFailed PodVolumeRestorePhase = "Failed" +) + +// PodVolumeRestoreStatus is the current status of a PodVolumeRestore. +type PodVolumeRestoreStatus struct { + // Phase is the current state of the PodVolumeRestore. + // +optional + Phase PodVolumeRestorePhase `json:"phase,omitempty"` + + // Message is a message about the pod volume restore's status. + // +optional + Message string `json:"message,omitempty"` + + // StartTimestamp records the time a restore was started. 
+ // The server's time is used for StartTimestamps + // +optional + // +nullable + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + + // CompletionTimestamp records the time a restore was completed. + // Completion time is recorded even on failed restores. + // The server's time is used for CompletionTimestamps + // +optional + // +nullable + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` + + // Progress holds the total number of bytes of the snapshot and the current + // number of restored bytes. This can be used to display progress information + // about the restore operation. + // +optional + Progress shared.DataMoveOperationProgress `json:"progress,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, the genclient and k8s:deepcopy markers will no longer be needed and should be removed. +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:generate=true +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".spec.pod.namespace",description="Namespace of the pod containing the volume to be restored" +// +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.pod.name",description="Name of the pod containing the volume to be restored" +// +kubebuilder:printcolumn:name="Uploader Type",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer" +// +kubebuilder:printcolumn:name="Volume",type="string",JSONPath=".spec.volume",description="Name of the volume to be restored" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Pod Volume Restore status such as New/InProgress" +// +kubebuilder:printcolumn:name="TotalBytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Pod Volume Restore status such as New/InProgress" 
+// +kubebuilder:printcolumn:name="BytesDone",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Pod Volume Restore status such as New/InProgress" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +type PodVolumeRestore struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec PodVolumeRestoreSpec `json:"spec,omitempty"` + + // +optional + Status PodVolumeRestoreStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:generate=true +// +kubebuilder:object:root=true + +// PodVolumeRestoreList is a list of PodVolumeRestores. +type PodVolumeRestoreList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []PodVolumeRestore `json:"items"` +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/register.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/register.go new file mode 100644 index 000000000..cfcff670c --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/register.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Resource gets a Velero GroupResource for a specified resource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +type typeInfo struct { + PluralName string + ItemType runtime.Object + ItemListType runtime.Object +} + +func newTypeInfo(pluralName string, itemType, itemListType runtime.Object) typeInfo { + return typeInfo{ + PluralName: pluralName, + ItemType: itemType, + ItemListType: itemListType, + } +} + +// CustomResources returns a map of all custom resources within the Velero +// API group, keyed on Kind. +func CustomResources() map[string]typeInfo { + return map[string]typeInfo{ + "Backup": newTypeInfo("backups", &Backup{}, &BackupList{}), + "Restore": newTypeInfo("restores", &Restore{}, &RestoreList{}), + "Schedule": newTypeInfo("schedules", &Schedule{}, &ScheduleList{}), + "DownloadRequest": newTypeInfo("downloadrequests", &DownloadRequest{}, &DownloadRequestList{}), + "DeleteBackupRequest": newTypeInfo("deletebackuprequests", &DeleteBackupRequest{}, &DeleteBackupRequestList{}), + "PodVolumeBackup": newTypeInfo("podvolumebackups", &PodVolumeBackup{}, &PodVolumeBackupList{}), + "PodVolumeRestore": newTypeInfo("podvolumerestores", &PodVolumeRestore{}, &PodVolumeRestoreList{}), + "BackupRepository": newTypeInfo("backuprepositories", &BackupRepository{}, &BackupRepositoryList{}), + "BackupStorageLocation": newTypeInfo("backupstoragelocations", &BackupStorageLocation{}, &BackupStorageLocationList{}), + "VolumeSnapshotLocation": newTypeInfo("volumesnapshotlocations", &VolumeSnapshotLocation{}, &VolumeSnapshotLocationList{}), + "ServerStatusRequest": newTypeInfo("serverstatusrequests", &ServerStatusRequest{}, &ServerStatusRequestList{}), + } +} + +// CustomResourceKinds returns a list of 
all custom resources kinds within the Velero +func CustomResourceKinds() sets.Set[string] { + kinds := sets.New[string]() + + resources := CustomResources() + for kind := range resources { + kinds.Insert(kind) + } + + return kinds +} + +func addKnownTypes(scheme *runtime.Scheme) error { + for _, typeInfo := range CustomResources() { + scheme.AddKnownTypes(SchemeGroupVersion, typeInfo.ItemType, typeInfo.ItemListType) + } + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/restore_types.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/restore_types.go new file mode 100644 index 000000000..377a92737 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/restore_types.go @@ -0,0 +1,433 @@ +/* +Copyright 2017, 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// RestoreSpec defines the specification for a Velero restore. +type RestoreSpec struct { + // BackupName is the unique name of the Velero backup to restore + // from. + // +optional + BackupName string `json:"backupName,omitempty"` + + // ScheduleName is the unique name of the Velero schedule to restore + // from. 
If specified, and BackupName is empty, Velero will restore + // from the most recent successful backup created from this schedule. + // +optional + ScheduleName string `json:"scheduleName,omitempty"` + + // IncludedNamespaces is a slice of namespace names to include objects + // from. If empty, all namespaces are included. + // +optional + // +nullable + IncludedNamespaces []string `json:"includedNamespaces,omitempty"` + + // ExcludedNamespaces contains a list of namespaces that are not + // included in the restore. + // +optional + // +nullable + ExcludedNamespaces []string `json:"excludedNamespaces,omitempty"` + + // IncludedResources is a slice of resource names to include + // in the restore. If empty, all resources in the backup are included. + // +optional + // +nullable + IncludedResources []string `json:"includedResources,omitempty"` + + // ExcludedResources is a slice of resource names that are not + // included in the restore. + // +optional + // +nullable + ExcludedResources []string `json:"excludedResources,omitempty"` + + // NamespaceMapping is a map of source namespace names + // to target namespace names to restore into. Any source + // namespaces not included in the map will be restored into + // namespaces of the same name. + // +optional + NamespaceMapping map[string]string `json:"namespaceMapping,omitempty"` + + // LabelSelector is a metav1.LabelSelector to filter with + // when restoring individual objects from the backup. If empty + // or nil, all objects are included. Optional. + // +optional + // +nullable + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + + // OrLabelSelectors is list of metav1.LabelSelector to filter with + // when restoring individual objects from the backup. If multiple provided + // they will be joined by the OR operator. 
LabelSelector as well as + // OrLabelSelectors cannot co-exist in restore request, only one of them + // can be used + // +optional + // +nullable + OrLabelSelectors []*metav1.LabelSelector `json:"orLabelSelectors,omitempty"` + + // RestorePVs specifies whether to restore all included + // PVs from snapshot + // +optional + // +nullable + RestorePVs *bool `json:"restorePVs,omitempty"` + + // RestoreStatus specifies which resources we should restore the status + // field. If nil, no objects are included. Optional. + // +optional + // +nullable + RestoreStatus *RestoreStatusSpec `json:"restoreStatus,omitempty"` + + // PreserveNodePorts specifies whether to restore old nodePorts from backup. + // +optional + // +nullable + PreserveNodePorts *bool `json:"preserveNodePorts,omitempty"` + + // IncludeClusterResources specifies whether cluster-scoped resources + // should be included for consideration in the restore. If null, defaults + // to true. + // +optional + // +nullable + IncludeClusterResources *bool `json:"includeClusterResources,omitempty"` + + // Hooks represent custom behaviors that should be executed during or post restore. + // +optional + Hooks RestoreHooks `json:"hooks,omitempty"` + + // ExistingResourcePolicy specifies the restore behavior for the Kubernetes resource to be restored + // +optional + // +nullable + ExistingResourcePolicy PolicyType `json:"existingResourcePolicy,omitempty"` + + // ItemOperationTimeout specifies the time used to wait for RestoreItemAction operations + // The default value is 4 hour. + // +optional + ItemOperationTimeout metav1.Duration `json:"itemOperationTimeout,omitempty"` + + // ResourceModifier specifies the reference to JSON resource patches that should be applied to resources before restoration. + // +optional + // +nullable + ResourceModifier *v1.TypedLocalObjectReference `json:"resourceModifier,omitempty"` + + // UploaderConfig specifies the configuration for the restore. 
+ // +optional + // +nullable + UploaderConfig *UploaderConfigForRestore `json:"uploaderConfig,omitempty"` +} + +// UploaderConfigForRestore defines the configuration for the restore. +type UploaderConfigForRestore struct { + // WriteSparseFiles is a flag to indicate whether write files sparsely or not. + // +optional + // +nullable + WriteSparseFiles *bool `json:"writeSparseFiles,omitempty"` + // ParallelFilesDownload is the concurrency number setting for restore. + // +optional + ParallelFilesDownload int `json:"parallelFilesDownload,omitempty"` +} + +// RestoreHooks contains custom behaviors that should be executed during or post restore. +type RestoreHooks struct { + Resources []RestoreResourceHookSpec `json:"resources,omitempty"` +} + +type RestoreStatusSpec struct { + // IncludedResources specifies the resources to which will restore the status. + // If empty, it applies to all resources. + // +optional + // +nullable + IncludedResources []string `json:"includedResources,omitempty"` + + // ExcludedResources specifies the resources to which will not restore the status. + // +optional + // +nullable + ExcludedResources []string `json:"excludedResources,omitempty"` +} + +// RestoreResourceHookSpec defines one or more RestoreResrouceHooks that should be executed based on +// the rules defined for namespaces, resources, and label selector. +type RestoreResourceHookSpec struct { + // Name is the name of this hook. + Name string `json:"name"` + + // IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies + // to all namespaces. + // +optional + // +nullable + IncludedNamespaces []string `json:"includedNamespaces,omitempty"` + + // ExcludedNamespaces specifies the namespaces to which this hook spec does not apply. + // +optional + // +nullable + ExcludedNamespaces []string `json:"excludedNamespaces,omitempty"` + + // IncludedResources specifies the resources to which this hook spec applies. 
If empty, it applies + // to all resources. + // +optional + // +nullable + IncludedResources []string `json:"includedResources,omitempty"` + + // ExcludedResources specifies the resources to which this hook spec does not apply. + // +optional + // +nullable + ExcludedResources []string `json:"excludedResources,omitempty"` + + // LabelSelector, if specified, filters the resources to which this hook spec applies. + // +optional + // +nullable + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + + // PostHooks is a list of RestoreResourceHooks to execute during and after restoring a resource. + // +optional + PostHooks []RestoreResourceHook `json:"postHooks,omitempty"` +} + +// RestoreResourceHook defines a restore hook for a resource. +type RestoreResourceHook struct { + // Exec defines an exec restore hook. + Exec *ExecRestoreHook `json:"exec,omitempty"` + + // Init defines an init restore hook. + Init *InitRestoreHook `json:"init,omitempty"` +} + +// ExecRestoreHook is a hook that uses pod exec API to execute a command inside a container in a pod +type ExecRestoreHook struct { + // Container is the container in the pod where the command should be executed. If not specified, + // the pod's first container is used. + // +optional + Container string `json:"container,omitempty"` + + // Command is the command and arguments to execute from within a container after a pod has been restored. + // +kubebuilder:validation:MinItems=1 + Command []string `json:"command"` + + // OnError specifies how Velero should behave if it encounters an error executing this hook. + // +optional + OnError HookErrorMode `json:"onError,omitempty"` + + // ExecTimeout defines the maximum amount of time Velero should wait for the hook to complete before + // considering the execution a failure. 
+ // +optional + ExecTimeout metav1.Duration `json:"execTimeout,omitempty"` + + // WaitTimeout defines the maximum amount of time Velero should wait for the container to be Ready + // before attempting to run the command. + // +optional + WaitTimeout metav1.Duration `json:"waitTimeout,omitempty"` + + // WaitForReady ensures command will be launched when container is Ready instead of Running. + // +optional + // +nullable + WaitForReady *bool `json:"waitForReady,omitempty"` +} + +// InitRestoreHook is a hook that adds an init container to a PodSpec to run commands before the +// workload pod is able to start. +type InitRestoreHook struct { + // +kubebuilder:pruning:PreserveUnknownFields + // InitContainers is list of init containers to be added to a pod during its restore. + // +optional + InitContainers []runtime.RawExtension `json:"initContainers"` + + // Timeout defines the maximum amount of time Velero should wait for the initContainers to complete. + // +optional + Timeout metav1.Duration `json:"timeout,omitempty"` +} + +// RestorePhase is a string representation of the lifecycle phase +// of a Velero restore +// +kubebuilder:validation:Enum=New;FailedValidation;InProgress;WaitingForPluginOperations;WaitingForPluginOperationsPartiallyFailed;Completed;PartiallyFailed;Failed;Finalizing;FinalizingPartiallyFailed +type RestorePhase string + +const ( + // RestorePhaseNew means the restore has been created but not + // yet processed by the RestoreController + RestorePhaseNew RestorePhase = "New" + + // RestorePhaseFailedValidation means the restore has failed + // the controller's validations and therefore will not run. + RestorePhaseFailedValidation RestorePhase = "FailedValidation" + + // RestorePhaseInProgress means the restore is currently executing. 
+ RestorePhaseInProgress RestorePhase = "InProgress" + + // RestorePhaseWaitingForPluginOperations means the restore of + // Kubernetes resources and other async plugin operations was + // successful and plugin operations are still ongoing. The + // restore is not complete yet. + RestorePhaseWaitingForPluginOperations RestorePhase = "WaitingForPluginOperations" + + // RestorePhaseWaitingForPluginOperationsPartiallyFailed means + // the restore of Kubernetes resources and other async plugin + // operations partially failed (final phase will be + // PartiallyFailed) and other plugin operations are still + // ongoing. The restore is not complete yet. + RestorePhaseWaitingForPluginOperationsPartiallyFailed RestorePhase = "WaitingForPluginOperationsPartiallyFailed" + + // RestorePhaseFinalizing means the restore of + // Kubernetes resources and other async plugin operations were successful and + // other plugin operations are now complete, but the restore is awaiting + // the completion of wrap-up tasks before the restore process enters terminal phase. + RestorePhaseFinalizing RestorePhase = "Finalizing" + + // RestorePhaseFinalizingPartiallyFailed means the restore of + // Kubernetes resources and other async plugin operations were successful and + // other plugin operations are now complete, but one or more errors + // occurred during restore or async operation processing. The restore is awaiting + // the completion of wrap-up tasks before the restore process enters terminal phase. + RestorePhaseFinalizingPartiallyFailed RestorePhase = "FinalizingPartiallyFailed" + + // RestorePhaseCompleted means the restore has run successfully + // without errors. + RestorePhaseCompleted RestorePhase = "Completed" + + // RestorePhasePartiallyFailed means the restore has run to completion + // but encountered 1+ errors restoring individual items. + RestorePhasePartiallyFailed RestorePhase = "PartiallyFailed" + + // RestorePhaseFailed means the restore was unable to execute. 
+ // The failing error is recorded in status.FailureReason. + RestorePhaseFailed RestorePhase = "Failed" + + // PolicyTypeNone means velero will not overwrite the resource + // in cluster with the one in backup whether changed/unchanged. + PolicyTypeNone PolicyType = "none" + + // PolicyTypeUpdate means velero will try to attempt a patch on + // the changed resources. + PolicyTypeUpdate PolicyType = "update" +) + +// RestoreStatus captures the current status of a Velero restore +type RestoreStatus struct { + // Phase is the current state of the Restore + // +optional + Phase RestorePhase `json:"phase,omitempty"` + + // ValidationErrors is a slice of all validation errors (if + // applicable) + // +optional + // +nullable + ValidationErrors []string `json:"validationErrors,omitempty"` + + // Warnings is a count of all warning messages that were generated during + // execution of the restore. The actual warnings are stored in object storage. + // +optional + Warnings int `json:"warnings,omitempty"` + + // Errors is a count of all error messages that were generated during + // execution of the restore. The actual errors are stored in object storage. + // +optional + Errors int `json:"errors,omitempty"` + + // FailureReason is an error that caused the entire restore to fail. + // +optional + FailureReason string `json:"failureReason,omitempty"` + + // StartTimestamp records the time the restore operation was started. + // The server's time is used for StartTimestamps + // +optional + // +nullable + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + + // CompletionTimestamp records the time the restore operation was completed. + // Completion time is recorded even on failed restore. + // The server's time is used for StartTimestamps + // +optional + // +nullable + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` + + // Progress contains information about the restore's execution progress. 
Note + // that this information is best-effort only -- if Velero fails to update it + // during a restore for any reason, it may be inaccurate/stale. + // +optional + // +nullable + Progress *RestoreProgress `json:"progress,omitempty"` + + // RestoreItemOperationsAttempted is the total number of attempted + // async RestoreItemAction operations for this restore. + // +optional + RestoreItemOperationsAttempted int `json:"restoreItemOperationsAttempted,omitempty"` + + // RestoreItemOperationsCompleted is the total number of successfully completed + // async RestoreItemAction operations for this restore. + // +optional + RestoreItemOperationsCompleted int `json:"restoreItemOperationsCompleted,omitempty"` + + // RestoreItemOperationsFailed is the total number of async + // RestoreItemAction operations for this restore which ended with an error. + // +optional + RestoreItemOperationsFailed int `json:"restoreItemOperationsFailed,omitempty"` + + // HookStatus contains information about the status of the hooks. + // +optional + // +nullable + HookStatus *HookStatus `json:"hookStatus,omitempty"` +} + +// RestoreProgress stores information about the restore's execution progress +type RestoreProgress struct { + // TotalItems is the total number of items to be restored. 
This number may change + // throughout the execution of the restore due to plugins that return additional related + // items to restore + // +optional + TotalItems int `json:"totalItems,omitempty"` + // ItemsRestored is the number of items that have actually been restored so far + // +optional + ItemsRestored int `json:"itemsRestored,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion +// +kubebuilder:rbac:groups=velero.io,resources=restores,verbs=create;delete;get;list;patch;update;watch +// +kubebuilder:rbac:groups=velero.io,resources=restores/status,verbs=get;update;patch + +// Restore is a Velero resource that represents the application of +// resources from a Velero backup to a target Kubernetes cluster. +type Restore struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec RestoreSpec `json:"spec,omitempty"` + + // +optional + Status RestoreStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RestoreList is a list of Restores. +type RestoreList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata"` + + Items []Restore `json:"items"` +} + +// PolicyType helps specify the ExistingResourcePolicy +type PolicyType string diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/schedule_types.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/schedule_types.go new file mode 100644 index 000000000..6a5f885ab --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/schedule_types.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ScheduleSpec defines the specification for a Velero schedule +type ScheduleSpec struct { + // Template is the definition of the Backup to be run + // on the provided schedule + Template BackupSpec `json:"template"` + + // Schedule is a Cron expression defining when to run + // the Backup. + Schedule string `json:"schedule"` + + // UseOwnerReferencesBackup specifies whether to use + // OwnerReferences on backups created by this Schedule. + // +optional + // +nullable + UseOwnerReferencesInBackup *bool `json:"useOwnerReferencesInBackup,omitempty"` + + // Paused specifies whether the schedule is paused or not + // +optional + Paused bool `json:"paused,omitempty"` + + // SkipImmediately specifies whether to skip backup if schedule is due immediately from `schedule.status.lastBackup` timestamp when schedule is unpaused or if schedule is new. + // If true, backup will be skipped immediately when schedule is unpaused if it is due based on .Status.LastBackupTimestamp or schedule is new, and will run at next schedule time. + // If false, backup will not be skipped immediately when schedule is unpaused, but will run at next schedule time. + // If empty, will follow server configuration (default: false). 
+ // +optional + SkipImmediately *bool `json:"skipImmediately,omitempty"` +} + +// SchedulePhase is a string representation of the lifecycle phase +// of a Velero schedule +// +kubebuilder:validation:Enum=New;Enabled;FailedValidation +type SchedulePhase string + +const ( + // SchedulePhaseNew means the schedule has been created but not + // yet processed by the ScheduleController + SchedulePhaseNew SchedulePhase = "New" + + // SchedulePhaseEnabled means the schedule has been validated and + // will now be triggering backups according to the schedule spec. + SchedulePhaseEnabled SchedulePhase = "Enabled" + + // SchedulePhaseFailedValidation means the schedule has failed + // the controller's validations and therefore will not trigger backups. + SchedulePhaseFailedValidation SchedulePhase = "FailedValidation" +) + +// ScheduleStatus captures the current state of a Velero schedule +type ScheduleStatus struct { + // Phase is the current phase of the Schedule + // +optional + Phase SchedulePhase `json:"phase,omitempty"` + + // LastBackup is the last time a Backup was run for this + // Schedule schedule + // +optional + // +nullable + LastBackup *metav1.Time `json:"lastBackup,omitempty"` + + // LastSkipped is the last time a Schedule was skipped + // +optional + // +nullable + LastSkipped *metav1.Time `json:"lastSkipped,omitempty"` + + // ValidationErrors is a slice of all validation errors (if + // applicable) + // +optional + ValidationErrors []string `json:"validationErrors,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, the genclient and k8s:deepcopy markers will no longer be needed and should be removed. 
+// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:generate=true +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Status of the schedule" +// +kubebuilder:printcolumn:name="Schedule",type="string",JSONPath=".spec.schedule",description="A Cron expression defining when to run the Backup" +// +kubebuilder:printcolumn:name="LastBackup",type="date",JSONPath=".status.lastBackup",description="The last time a Backup was run for this schedule" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".spec.paused" + +// Schedule is a Velero resource that represents a pre-scheduled or +// periodic Backup that should be run. +type Schedule struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata"` + + // +optional + Spec ScheduleSpec `json:"spec,omitempty"` + + // +optional + Status ScheduleStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:generate=true +// +kubebuilder:object:root=true + +// ScheduleList is a list of Schedules. 
+type ScheduleList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []Schedule `json:"items"` +} + +// TimestampedName returns the default backup name format based on the schedule +func (s *Schedule) TimestampedName(timestamp time.Time) string { + return fmt.Sprintf("%s-%s", s.Name, timestamp.Format("20060102150405")) +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/server_status_request_types.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/server_status_request_types.go new file mode 100644 index 000000000..98e15a0b5 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/server_status_request_types.go @@ -0,0 +1,105 @@ +/* +Copyright 2020 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the genclient and k8s:deepcopy markers will no longer be needed and should be removed. +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=ssr +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion + +// ServerStatusRequest is a request to access current status information about +// the Velero server. 
+type ServerStatusRequest struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec ServerStatusRequestSpec `json:"spec,omitempty"` + + // +optional + Status ServerStatusRequestStatus `json:"status,omitempty"` +} + +// ServerStatusRequestSpec is the specification for a ServerStatusRequest. +type ServerStatusRequestSpec struct { +} + +// ServerStatusRequestPhase represents the lifecycle phase of a ServerStatusRequest. +// +kubebuilder:validation:Enum=New;Processed +type ServerStatusRequestPhase string + +const ( + // ServerStatusRequestPhaseNew means the ServerStatusRequest has not been processed yet. + ServerStatusRequestPhaseNew ServerStatusRequestPhase = "New" + // ServerStatusRequestPhaseProcessed means the ServerStatusRequest has been processed. + ServerStatusRequestPhaseProcessed ServerStatusRequestPhase = "Processed" +) + +// PluginInfo contains attributes of a Velero plugin +type PluginInfo struct { + Name string `json:"name"` + Kind string `json:"kind"` +} + +// ServerStatusRequestStatus is the current status of a ServerStatusRequest. +type ServerStatusRequestStatus struct { + // Phase is the current lifecycle phase of the ServerStatusRequest. + // +optional + Phase ServerStatusRequestPhase `json:"phase,omitempty"` + + // ProcessedTimestamp is when the ServerStatusRequest was processed + // by the ServerStatusRequestController. + // +optional + // +nullable + ProcessedTimestamp *metav1.Time `json:"processedTimestamp,omitempty"` + + // ServerVersion is the Velero server version. + // +optional + ServerVersion string `json:"serverVersion,omitempty"` + + // Plugins list information about the plugins running on the Velero server + // +optional + // +nullable + Plugins []PluginInfo `json:"plugins,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the k8s:deepcopy marker will no longer be needed and should be removed. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=serverstatusrequests,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=serverstatusrequests/status,verbs=get;update;patch + +// ServerStatusRequestList is a list of ServerStatusRequests. +type ServerStatusRequestList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []ServerStatusRequest `json:"items"` +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/volume_snapshot_location_type.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/volume_snapshot_location_type.go new file mode 100644 index 000000000..836701b77 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/volume_snapshot_location_type.go @@ -0,0 +1,89 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=vsl +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion + +// VolumeSnapshotLocation is a location where Velero stores volume snapshots. 
+type VolumeSnapshotLocation struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec VolumeSnapshotLocationSpec `json:"spec,omitempty"` + + // +optional + Status VolumeSnapshotLocationStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=volumesnapshotlocations,verbs=get;list;watch;create;update;patch;delete + +// VolumeSnapshotLocationList is a list of VolumeSnapshotLocations. +type VolumeSnapshotLocationList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []VolumeSnapshotLocation `json:"items"` +} + +// VolumeSnapshotLocationSpec defines the specification for a Velero VolumeSnapshotLocation. +type VolumeSnapshotLocationSpec struct { + // Provider is the provider of the volume storage. + Provider string `json:"provider"` + + // Config is for provider-specific configuration fields. + // +optional + Config map[string]string `json:"config,omitempty"` + + // Credential contains the credential information intended to be used with this location + // +optional + Credential *corev1api.SecretKeySelector `json:"credential,omitempty"` +} + +// VolumeSnapshotLocationPhase is the lifecycle phase of a Velero VolumeSnapshotLocation. +// +kubebuilder:validation:Enum=Available;Unavailable +type VolumeSnapshotLocationPhase string + +const ( + // VolumeSnapshotLocationPhaseAvailable means the location is available to read and write from. + VolumeSnapshotLocationPhaseAvailable VolumeSnapshotLocationPhase = "Available" + + // VolumeSnapshotLocationPhaseUnavailable means the location is unavailable to read and write from. + VolumeSnapshotLocationPhaseUnavailable VolumeSnapshotLocationPhase = "Unavailable" +) + +// VolumeSnapshotLocationStatus describes the current status of a Velero VolumeSnapshotLocation. 
+type VolumeSnapshotLocationStatus struct { + // +optional + Phase VolumeSnapshotLocationPhase `json:"phase,omitempty"` +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/zz_generated.deepcopy.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..522e15105 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v1/zz_generated.deepcopy.go @@ -0,0 +1,1813 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Backup) DeepCopyInto(out *Backup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backup. +func (in *Backup) DeepCopy() *Backup { + if in == nil { + return nil + } + out := new(Backup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Backup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupHooks) DeepCopyInto(out *BackupHooks) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]BackupResourceHookSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupHooks. +func (in *BackupHooks) DeepCopy() *BackupHooks { + if in == nil { + return nil + } + out := new(BackupHooks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupList) DeepCopyInto(out *BackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Backup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupList. +func (in *BackupList) DeepCopy() *BackupList { + if in == nil { + return nil + } + out := new(BackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupProgress) DeepCopyInto(out *BackupProgress) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupProgress. +func (in *BackupProgress) DeepCopy() *BackupProgress { + if in == nil { + return nil + } + out := new(BackupProgress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupRepository) DeepCopyInto(out *BackupRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepository. +func (in *BackupRepository) DeepCopy() *BackupRepository { + if in == nil { + return nil + } + out := new(BackupRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupRepositoryList) DeepCopyInto(out *BackupRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositoryList. +func (in *BackupRepositoryList) DeepCopy() *BackupRepositoryList { + if in == nil { + return nil + } + out := new(BackupRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupRepositorySpec) DeepCopyInto(out *BackupRepositorySpec) { + *out = *in + out.MaintenanceFrequency = in.MaintenanceFrequency +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositorySpec. +func (in *BackupRepositorySpec) DeepCopy() *BackupRepositorySpec { + if in == nil { + return nil + } + out := new(BackupRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupRepositoryStatus) DeepCopyInto(out *BackupRepositoryStatus) { + *out = *in + if in.LastMaintenanceTime != nil { + in, out := &in.LastMaintenanceTime, &out.LastMaintenanceTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositoryStatus. +func (in *BackupRepositoryStatus) DeepCopy() *BackupRepositoryStatus { + if in == nil { + return nil + } + out := new(BackupRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupResourceHook) DeepCopyInto(out *BackupResourceHook) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecHook) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupResourceHook. +func (in *BackupResourceHook) DeepCopy() *BackupResourceHook { + if in == nil { + return nil + } + out := new(BackupResourceHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupResourceHookSpec) DeepCopyInto(out *BackupResourceHookSpec) { + *out = *in + if in.IncludedNamespaces != nil { + in, out := &in.IncludedNamespaces, &out.IncludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaces != nil { + in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedResources != nil { + in, out := &in.IncludedResources, &out.IncludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedResources != nil { + in, out := &in.ExcludedResources, &out.ExcludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.PreHooks != nil { + in, out := &in.PreHooks, &out.PreHooks + *out = make([]BackupResourceHook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PostHooks != nil { + in, out := &in.PostHooks, &out.PostHooks + *out = make([]BackupResourceHook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupResourceHookSpec. +func (in *BackupResourceHookSpec) DeepCopy() *BackupResourceHookSpec { + if in == nil { + return nil + } + out := new(BackupResourceHookSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { + *out = *in + in.Metadata.DeepCopyInto(&out.Metadata) + if in.IncludedNamespaces != nil { + in, out := &in.IncludedNamespaces, &out.IncludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaces != nil { + in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedResources != nil { + in, out := &in.IncludedResources, &out.IncludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedResources != nil { + in, out := &in.ExcludedResources, &out.ExcludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedClusterScopedResources != nil { + in, out := &in.IncludedClusterScopedResources, &out.IncludedClusterScopedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedClusterScopedResources != nil { + in, out := &in.ExcludedClusterScopedResources, &out.ExcludedClusterScopedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedNamespaceScopedResources != nil { + in, out := &in.IncludedNamespaceScopedResources, &out.IncludedNamespaceScopedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaceScopedResources != nil { + in, out := &in.ExcludedNamespaceScopedResources, &out.ExcludedNamespaceScopedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.OrLabelSelectors != nil { + in, out := &in.OrLabelSelectors, &out.OrLabelSelectors + *out = make([]*metav1.LabelSelector, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + } + if in.SnapshotVolumes != nil { + in, out := &in.SnapshotVolumes, 
&out.SnapshotVolumes + *out = new(bool) + **out = **in + } + out.TTL = in.TTL + if in.IncludeClusterResources != nil { + in, out := &in.IncludeClusterResources, &out.IncludeClusterResources + *out = new(bool) + **out = **in + } + in.Hooks.DeepCopyInto(&out.Hooks) + if in.VolumeSnapshotLocations != nil { + in, out := &in.VolumeSnapshotLocations, &out.VolumeSnapshotLocations + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultVolumesToRestic != nil { + in, out := &in.DefaultVolumesToRestic, &out.DefaultVolumesToRestic + *out = new(bool) + **out = **in + } + if in.DefaultVolumesToFsBackup != nil { + in, out := &in.DefaultVolumesToFsBackup, &out.DefaultVolumesToFsBackup + *out = new(bool) + **out = **in + } + if in.OrderedResources != nil { + in, out := &in.OrderedResources, &out.OrderedResources + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.CSISnapshotTimeout = in.CSISnapshotTimeout + out.ItemOperationTimeout = in.ItemOperationTimeout + if in.ResourcePolicy != nil { + in, out := &in.ResourcePolicy, &out.ResourcePolicy + *out = new(corev1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + if in.SnapshotMoveData != nil { + in, out := &in.SnapshotMoveData, &out.SnapshotMoveData + *out = new(bool) + **out = **in + } + if in.UploaderConfig != nil { + in, out := &in.UploaderConfig, &out.UploaderConfig + *out = new(UploaderConfigForBackup) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec. +func (in *BackupSpec) DeepCopy() *BackupSpec { + if in == nil { + return nil + } + out := new(BackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { + *out = *in + if in.Expiration != nil { + in, out := &in.Expiration, &out.Expiration + *out = (*in).DeepCopy() + } + if in.ValidationErrors != nil { + in, out := &in.ValidationErrors, &out.ValidationErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + if in.Progress != nil { + in, out := &in.Progress, &out.Progress + *out = new(BackupProgress) + **out = **in + } + if in.HookStatus != nil { + in, out := &in.HookStatus, &out.HookStatus + *out = new(HookStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus. +func (in *BackupStatus) DeepCopy() *BackupStatus { + if in == nil { + return nil + } + out := new(BackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStorageLocation) DeepCopyInto(out *BackupStorageLocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocation. +func (in *BackupStorageLocation) DeepCopy() *BackupStorageLocation { + if in == nil { + return nil + } + out := new(BackupStorageLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BackupStorageLocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStorageLocationList) DeepCopyInto(out *BackupStorageLocationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupStorageLocation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationList. +func (in *BackupStorageLocationList) DeepCopy() *BackupStorageLocationList { + if in == nil { + return nil + } + out := new(BackupStorageLocationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupStorageLocationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupStorageLocationSpec) DeepCopyInto(out *BackupStorageLocationSpec) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Credential != nil { + in, out := &in.Credential, &out.Credential + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + in.StorageType.DeepCopyInto(&out.StorageType) + if in.BackupSyncPeriod != nil { + in, out := &in.BackupSyncPeriod, &out.BackupSyncPeriod + *out = new(metav1.Duration) + **out = **in + } + if in.ValidationFrequency != nil { + in, out := &in.ValidationFrequency, &out.ValidationFrequency + *out = new(metav1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationSpec. +func (in *BackupStorageLocationSpec) DeepCopy() *BackupStorageLocationSpec { + if in == nil { + return nil + } + out := new(BackupStorageLocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStorageLocationStatus) DeepCopyInto(out *BackupStorageLocationStatus) { + *out = *in + if in.LastSyncedTime != nil { + in, out := &in.LastSyncedTime, &out.LastSyncedTime + *out = (*in).DeepCopy() + } + if in.LastValidationTime != nil { + in, out := &in.LastValidationTime, &out.LastValidationTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationStatus. +func (in *BackupStorageLocationStatus) DeepCopy() *BackupStorageLocationStatus { + if in == nil { + return nil + } + out := new(BackupStorageLocationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeleteBackupRequest) DeepCopyInto(out *DeleteBackupRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequest. +func (in *DeleteBackupRequest) DeepCopy() *DeleteBackupRequest { + if in == nil { + return nil + } + out := new(DeleteBackupRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeleteBackupRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteBackupRequestList) DeepCopyInto(out *DeleteBackupRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeleteBackupRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequestList. +func (in *DeleteBackupRequestList) DeepCopy() *DeleteBackupRequestList { + if in == nil { + return nil + } + out := new(DeleteBackupRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeleteBackupRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeleteBackupRequestSpec) DeepCopyInto(out *DeleteBackupRequestSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequestSpec. +func (in *DeleteBackupRequestSpec) DeepCopy() *DeleteBackupRequestSpec { + if in == nil { + return nil + } + out := new(DeleteBackupRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteBackupRequestStatus) DeepCopyInto(out *DeleteBackupRequestStatus) { + *out = *in + if in.Errors != nil { + in, out := &in.Errors, &out.Errors + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequestStatus. +func (in *DeleteBackupRequestStatus) DeepCopy() *DeleteBackupRequestStatus { + if in == nil { + return nil + } + out := new(DeleteBackupRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownloadRequest) DeepCopyInto(out *DownloadRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequest. +func (in *DownloadRequest) DeepCopy() *DownloadRequest { + if in == nil { + return nil + } + out := new(DownloadRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DownloadRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DownloadRequestList) DeepCopyInto(out *DownloadRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownloadRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestList. +func (in *DownloadRequestList) DeepCopy() *DownloadRequestList { + if in == nil { + return nil + } + out := new(DownloadRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DownloadRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownloadRequestSpec) DeepCopyInto(out *DownloadRequestSpec) { + *out = *in + out.Target = in.Target +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestSpec. +func (in *DownloadRequestSpec) DeepCopy() *DownloadRequestSpec { + if in == nil { + return nil + } + out := new(DownloadRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownloadRequestStatus) DeepCopyInto(out *DownloadRequestStatus) { + *out = *in + if in.Expiration != nil { + in, out := &in.Expiration, &out.Expiration + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestStatus. 
+func (in *DownloadRequestStatus) DeepCopy() *DownloadRequestStatus { + if in == nil { + return nil + } + out := new(DownloadRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownloadTarget) DeepCopyInto(out *DownloadTarget) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadTarget. +func (in *DownloadTarget) DeepCopy() *DownloadTarget { + if in == nil { + return nil + } + out := new(DownloadTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecHook) DeepCopyInto(out *ExecHook) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Timeout = in.Timeout +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecHook. +func (in *ExecHook) DeepCopy() *ExecHook { + if in == nil { + return nil + } + out := new(ExecHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecRestoreHook) DeepCopyInto(out *ExecRestoreHook) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.ExecTimeout = in.ExecTimeout + out.WaitTimeout = in.WaitTimeout + if in.WaitForReady != nil { + in, out := &in.WaitForReady, &out.WaitForReady + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecRestoreHook. 
+func (in *ExecRestoreHook) DeepCopy() *ExecRestoreHook { + if in == nil { + return nil + } + out := new(ExecRestoreHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HookStatus) DeepCopyInto(out *HookStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HookStatus. +func (in *HookStatus) DeepCopy() *HookStatus { + if in == nil { + return nil + } + out := new(HookStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitRestoreHook) DeepCopyInto(out *InitRestoreHook) { + *out = *in + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.Timeout = in.Timeout +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitRestoreHook. +func (in *InitRestoreHook) DeepCopy() *InitRestoreHook { + if in == nil { + return nil + } + out := new(InitRestoreHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectStorageLocation) DeepCopyInto(out *ObjectStorageLocation) { + *out = *in + if in.CACert != nil { + in, out := &in.CACert, &out.CACert + *out = make([]byte, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageLocation. +func (in *ObjectStorageLocation) DeepCopy() *ObjectStorageLocation { + if in == nil { + return nil + } + out := new(ObjectStorageLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginInfo) DeepCopyInto(out *PluginInfo) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginInfo. +func (in *PluginInfo) DeepCopy() *PluginInfo { + if in == nil { + return nil + } + out := new(PluginInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeBackup) DeepCopyInto(out *PodVolumeBackup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackup. +func (in *PodVolumeBackup) DeepCopy() *PodVolumeBackup { + if in == nil { + return nil + } + out := new(PodVolumeBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodVolumeBackup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodVolumeBackupList) DeepCopyInto(out *PodVolumeBackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodVolumeBackup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupList. +func (in *PodVolumeBackupList) DeepCopy() *PodVolumeBackupList { + if in == nil { + return nil + } + out := new(PodVolumeBackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodVolumeBackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeBackupSpec) DeepCopyInto(out *PodVolumeBackupSpec) { + *out = *in + out.Pod = in.Pod + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.UploaderSettings != nil { + in, out := &in.UploaderSettings, &out.UploaderSettings + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupSpec. +func (in *PodVolumeBackupSpec) DeepCopy() *PodVolumeBackupSpec { + if in == nil { + return nil + } + out := new(PodVolumeBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodVolumeBackupStatus) DeepCopyInto(out *PodVolumeBackupStatus) { + *out = *in + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + out.Progress = in.Progress +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupStatus. +func (in *PodVolumeBackupStatus) DeepCopy() *PodVolumeBackupStatus { + if in == nil { + return nil + } + out := new(PodVolumeBackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeRestore) DeepCopyInto(out *PodVolumeRestore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestore. +func (in *PodVolumeRestore) DeepCopy() *PodVolumeRestore { + if in == nil { + return nil + } + out := new(PodVolumeRestore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodVolumeRestore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodVolumeRestoreList) DeepCopyInto(out *PodVolumeRestoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodVolumeRestore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreList. +func (in *PodVolumeRestoreList) DeepCopy() *PodVolumeRestoreList { + if in == nil { + return nil + } + out := new(PodVolumeRestoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodVolumeRestoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeRestoreSpec) DeepCopyInto(out *PodVolumeRestoreSpec) { + *out = *in + out.Pod = in.Pod + if in.UploaderSettings != nil { + in, out := &in.UploaderSettings, &out.UploaderSettings + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreSpec. +func (in *PodVolumeRestoreSpec) DeepCopy() *PodVolumeRestoreSpec { + if in == nil { + return nil + } + out := new(PodVolumeRestoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodVolumeRestoreStatus) DeepCopyInto(out *PodVolumeRestoreStatus) { + *out = *in + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + out.Progress = in.Progress +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreStatus. +func (in *PodVolumeRestoreStatus) DeepCopy() *PodVolumeRestoreStatus { + if in == nil { + return nil + } + out := new(PodVolumeRestoreStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Restore) DeepCopyInto(out *Restore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Restore. +func (in *Restore) DeepCopy() *Restore { + if in == nil { + return nil + } + out := new(Restore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Restore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreHooks) DeepCopyInto(out *RestoreHooks) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]RestoreResourceHookSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreHooks. 
+func (in *RestoreHooks) DeepCopy() *RestoreHooks { + if in == nil { + return nil + } + out := new(RestoreHooks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreList) DeepCopyInto(out *RestoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Restore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreList. +func (in *RestoreList) DeepCopy() *RestoreList { + if in == nil { + return nil + } + out := new(RestoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RestoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreProgress) DeepCopyInto(out *RestoreProgress) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreProgress. +func (in *RestoreProgress) DeepCopy() *RestoreProgress { + if in == nil { + return nil + } + out := new(RestoreProgress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestoreResourceHook) DeepCopyInto(out *RestoreResourceHook) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecRestoreHook) + (*in).DeepCopyInto(*out) + } + if in.Init != nil { + in, out := &in.Init, &out.Init + *out = new(InitRestoreHook) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreResourceHook. +func (in *RestoreResourceHook) DeepCopy() *RestoreResourceHook { + if in == nil { + return nil + } + out := new(RestoreResourceHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreResourceHookSpec) DeepCopyInto(out *RestoreResourceHookSpec) { + *out = *in + if in.IncludedNamespaces != nil { + in, out := &in.IncludedNamespaces, &out.IncludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaces != nil { + in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedResources != nil { + in, out := &in.IncludedResources, &out.IncludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedResources != nil { + in, out := &in.ExcludedResources, &out.ExcludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.PostHooks != nil { + in, out := &in.PostHooks, &out.PostHooks + *out = make([]RestoreResourceHook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreResourceHookSpec. 
+func (in *RestoreResourceHookSpec) DeepCopy() *RestoreResourceHookSpec { + if in == nil { + return nil + } + out := new(RestoreResourceHookSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreSpec) DeepCopyInto(out *RestoreSpec) { + *out = *in + if in.IncludedNamespaces != nil { + in, out := &in.IncludedNamespaces, &out.IncludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaces != nil { + in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedResources != nil { + in, out := &in.IncludedResources, &out.IncludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedResources != nil { + in, out := &in.ExcludedResources, &out.ExcludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NamespaceMapping != nil { + in, out := &in.NamespaceMapping, &out.NamespaceMapping + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.OrLabelSelectors != nil { + in, out := &in.OrLabelSelectors, &out.OrLabelSelectors + *out = make([]*metav1.LabelSelector, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + } + if in.RestorePVs != nil { + in, out := &in.RestorePVs, &out.RestorePVs + *out = new(bool) + **out = **in + } + if in.RestoreStatus != nil { + in, out := &in.RestoreStatus, &out.RestoreStatus + *out = new(RestoreStatusSpec) + (*in).DeepCopyInto(*out) + } + if in.PreserveNodePorts != nil { + in, out := &in.PreserveNodePorts, &out.PreserveNodePorts + *out = new(bool) + **out = **in + 
} + if in.IncludeClusterResources != nil { + in, out := &in.IncludeClusterResources, &out.IncludeClusterResources + *out = new(bool) + **out = **in + } + in.Hooks.DeepCopyInto(&out.Hooks) + out.ItemOperationTimeout = in.ItemOperationTimeout + if in.ResourceModifier != nil { + in, out := &in.ResourceModifier, &out.ResourceModifier + *out = new(corev1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + if in.UploaderConfig != nil { + in, out := &in.UploaderConfig, &out.UploaderConfig + *out = new(UploaderConfigForRestore) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSpec. +func (in *RestoreSpec) DeepCopy() *RestoreSpec { + if in == nil { + return nil + } + out := new(RestoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreStatus) DeepCopyInto(out *RestoreStatus) { + *out = *in + if in.ValidationErrors != nil { + in, out := &in.ValidationErrors, &out.ValidationErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + if in.Progress != nil { + in, out := &in.Progress, &out.Progress + *out = new(RestoreProgress) + **out = **in + } + if in.HookStatus != nil { + in, out := &in.HookStatus, &out.HookStatus + *out = new(HookStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreStatus. +func (in *RestoreStatus) DeepCopy() *RestoreStatus { + if in == nil { + return nil + } + out := new(RestoreStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RestoreStatusSpec) DeepCopyInto(out *RestoreStatusSpec) { + *out = *in + if in.IncludedResources != nil { + in, out := &in.IncludedResources, &out.IncludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedResources != nil { + in, out := &in.ExcludedResources, &out.ExcludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreStatusSpec. +func (in *RestoreStatusSpec) DeepCopy() *RestoreStatusSpec { + if in == nil { + return nil + } + out := new(RestoreStatusSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Schedule) DeepCopyInto(out *Schedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Schedule. +func (in *Schedule) DeepCopy() *Schedule { + if in == nil { + return nil + } + out := new(Schedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Schedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleList) DeepCopyInto(out *ScheduleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Schedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleList. +func (in *ScheduleList) DeepCopy() *ScheduleList { + if in == nil { + return nil + } + out := new(ScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleSpec) DeepCopyInto(out *ScheduleSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.UseOwnerReferencesInBackup != nil { + in, out := &in.UseOwnerReferencesInBackup, &out.UseOwnerReferencesInBackup + *out = new(bool) + **out = **in + } + if in.SkipImmediately != nil { + in, out := &in.SkipImmediately, &out.SkipImmediately + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleSpec. +func (in *ScheduleSpec) DeepCopy() *ScheduleSpec { + if in == nil { + return nil + } + out := new(ScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleStatus) DeepCopyInto(out *ScheduleStatus) { + *out = *in + if in.LastBackup != nil { + in, out := &in.LastBackup, &out.LastBackup + *out = (*in).DeepCopy() + } + if in.LastSkipped != nil { + in, out := &in.LastSkipped, &out.LastSkipped + *out = (*in).DeepCopy() + } + if in.ValidationErrors != nil { + in, out := &in.ValidationErrors, &out.ValidationErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleStatus. +func (in *ScheduleStatus) DeepCopy() *ScheduleStatus { + if in == nil { + return nil + } + out := new(ScheduleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStatusRequest) DeepCopyInto(out *ServerStatusRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequest. +func (in *ServerStatusRequest) DeepCopy() *ServerStatusRequest { + if in == nil { + return nil + } + out := new(ServerStatusRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerStatusRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerStatusRequestList) DeepCopyInto(out *ServerStatusRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServerStatusRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequestList. +func (in *ServerStatusRequestList) DeepCopy() *ServerStatusRequestList { + if in == nil { + return nil + } + out := new(ServerStatusRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerStatusRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStatusRequestSpec) DeepCopyInto(out *ServerStatusRequestSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequestSpec. +func (in *ServerStatusRequestSpec) DeepCopy() *ServerStatusRequestSpec { + if in == nil { + return nil + } + out := new(ServerStatusRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStatusRequestStatus) DeepCopyInto(out *ServerStatusRequestStatus) { + *out = *in + if in.ProcessedTimestamp != nil { + in, out := &in.ProcessedTimestamp, &out.ProcessedTimestamp + *out = (*in).DeepCopy() + } + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]PluginInfo, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequestStatus. 
+func (in *ServerStatusRequestStatus) DeepCopy() *ServerStatusRequestStatus { + if in == nil { + return nil + } + out := new(ServerStatusRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageType) DeepCopyInto(out *StorageType) { + *out = *in + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = new(ObjectStorageLocation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageType. +func (in *StorageType) DeepCopy() *StorageType { + if in == nil { + return nil + } + out := new(StorageType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UploaderConfigForBackup) DeepCopyInto(out *UploaderConfigForBackup) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UploaderConfigForBackup. +func (in *UploaderConfigForBackup) DeepCopy() *UploaderConfigForBackup { + if in == nil { + return nil + } + out := new(UploaderConfigForBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UploaderConfigForRestore) DeepCopyInto(out *UploaderConfigForRestore) { + *out = *in + if in.WriteSparseFiles != nil { + in, out := &in.WriteSparseFiles, &out.WriteSparseFiles + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UploaderConfigForRestore. 
+func (in *UploaderConfigForRestore) DeepCopy() *UploaderConfigForRestore { + if in == nil { + return nil + } + out := new(UploaderConfigForRestore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotLocation) DeepCopyInto(out *VolumeSnapshotLocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocation. +func (in *VolumeSnapshotLocation) DeepCopy() *VolumeSnapshotLocation { + if in == nil { + return nil + } + out := new(VolumeSnapshotLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeSnapshotLocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotLocationList) DeepCopyInto(out *VolumeSnapshotLocationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeSnapshotLocation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocationList. +func (in *VolumeSnapshotLocationList) DeepCopy() *VolumeSnapshotLocationList { + if in == nil { + return nil + } + out := new(VolumeSnapshotLocationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VolumeSnapshotLocationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotLocationSpec) DeepCopyInto(out *VolumeSnapshotLocationSpec) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Credential != nil { + in, out := &in.Credential, &out.Credential + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocationSpec. +func (in *VolumeSnapshotLocationSpec) DeepCopy() *VolumeSnapshotLocationSpec { + if in == nil { + return nil + } + out := new(VolumeSnapshotLocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotLocationStatus) DeepCopyInto(out *VolumeSnapshotLocationStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocationStatus. +func (in *VolumeSnapshotLocationStatus) DeepCopy() *VolumeSnapshotLocationStatus { + if in == nil { + return nil + } + out := new(VolumeSnapshotLocationStatus) + in.DeepCopyInto(out) + return out +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/data_download_types.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/data_download_types.go new file mode 100644 index 000000000..17fe40a26 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/data_download_types.go @@ -0,0 +1,162 @@ +/* +Copyright the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/vmware-tanzu/velero/pkg/apis/velero/shared" +) + +// DataDownloadSpec is the specification for a DataDownload. +type DataDownloadSpec struct { + // TargetVolume is the information of the target PVC and PV. + TargetVolume TargetVolumeSpec `json:"targetVolume"` + + // BackupStorageLocation is the name of the backup storage location + // where the backup repository is stored. + BackupStorageLocation string `json:"backupStorageLocation"` + + // DataMover specifies the data mover to be used by the backup. + // If DataMover is "" or "velero", the built-in data mover will be used. + // +optional + DataMover string `json:"datamover,omitempty"` + + // SnapshotID is the ID of the Velero backup snapshot to be restored from. + SnapshotID string `json:"snapshotID"` + + // SourceNamespace is the original namespace where the volume is backed up from. + // It may be different from SourcePVC's namespace if namespace is remapped during restore. + SourceNamespace string `json:"sourceNamespace"` + + // DataMoverConfig is for data-mover-specific configuration fields. + // +optional + DataMoverConfig map[string]string `json:"dataMoverConfig,omitempty"` + + // Cancel indicates request to cancel the ongoing DataDownload. 
It can be set + // when the DataDownload is in InProgress phase + Cancel bool `json:"cancel,omitempty"` + + // OperationTimeout specifies the time used to wait internal operations, + // before returning error as timeout. + OperationTimeout metav1.Duration `json:"operationTimeout"` +} + +// TargetVolumeSpec is the specification for a target PVC. +type TargetVolumeSpec struct { + // PVC is the name of the target PVC that is created by Velero restore + PVC string `json:"pvc"` + + // PV is the name of the target PV that is created by Velero restore + PV string `json:"pv"` + + // Namespace is the target namespace + Namespace string `json:"namespace"` +} + +// DataDownloadPhase represents the lifecycle phase of a DataDownload. +// +kubebuilder:validation:Enum=New;Accepted;Prepared;InProgress;Canceling;Canceled;Completed;Failed +type DataDownloadPhase string + +const ( + DataDownloadPhaseNew DataDownloadPhase = "New" + DataDownloadPhaseAccepted DataDownloadPhase = "Accepted" + DataDownloadPhasePrepared DataDownloadPhase = "Prepared" + DataDownloadPhaseInProgress DataDownloadPhase = "InProgress" + DataDownloadPhaseCanceling DataDownloadPhase = "Canceling" + DataDownloadPhaseCanceled DataDownloadPhase = "Canceled" + DataDownloadPhaseCompleted DataDownloadPhase = "Completed" + DataDownloadPhaseFailed DataDownloadPhase = "Failed" +) + +// DataDownloadStatus is the current status of a DataDownload. +type DataDownloadStatus struct { + // Phase is the current state of the DataDownload. + // +optional + Phase DataDownloadPhase `json:"phase,omitempty"` + + // Message is a message about the DataDownload's status. + // +optional + Message string `json:"message,omitempty"` + + // StartTimestamp records the time a restore was started. + // The server's time is used for StartTimestamps + // +optional + // +nullable + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + + // CompletionTimestamp records the time a restore was completed. 
+ // Completion time is recorded even on failed restores. + // The server's time is used for CompletionTimestamps + // +optional + // +nullable + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` + + // Progress holds the total number of bytes of the snapshot and the current + // number of restored bytes. This can be used to display progress information + // about the restore operation. + // +optional + Progress shared.DataMoveOperationProgress `json:"progress,omitempty"` + + // Node is name of the node where the DataDownload is processed. + // +optional + Node string `json:"node,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, the genclient and k8s:deepcopy markers will no longer be needed and should be removed. +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:generate=true +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="DataDownload status such as New/InProgress" +// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this DataDownload was started" +// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes" +// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes" +// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where the backup data is stored" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this DataDownload was created" +// 
+kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the DataDownload is processed" + +// DataDownload acts as the protocol between data mover plugins and data mover controller for the datamover restore operation +type DataDownload struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec DataDownloadSpec `json:"spec,omitempty"` + + // +optional + Status DataDownloadStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:generate=true +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=datadownloads,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=datadownloads/status,verbs=get;update;patch + +// DataDownloadList is a list of DataDownloads. +type DataDownloadList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []DataDownload `json:"items"` +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/data_upload_types.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/data_upload_types.go new file mode 100644 index 000000000..347bf2dd1 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/data_upload_types.go @@ -0,0 +1,215 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/vmware-tanzu/velero/pkg/apis/velero/shared" +) + +// DataUploadSpec is the specification for a DataUpload. +type DataUploadSpec struct { + // SnapshotType is the type of the snapshot to be backed up. + SnapshotType SnapshotType `json:"snapshotType"` + + // If SnapshotType is CSI, CSISnapshot provides the information of the CSI snapshot. + // +optional + // +nullable + CSISnapshot *CSISnapshotSpec `json:"csiSnapshot"` + + // SourcePVC is the name of the PVC which the snapshot is taken for. + SourcePVC string `json:"sourcePVC"` + + // DataMover specifies the data mover to be used by the backup. + // If DataMover is "" or "velero", the built-in data mover will be used. + // +optional + DataMover string `json:"datamover,omitempty"` + + // BackupStorageLocation is the name of the backup storage location + // where the backup repository is stored. + BackupStorageLocation string `json:"backupStorageLocation"` + + // SourceNamespace is the original namespace where the volume is backed up from. + // It is the same namespace for SourcePVC and CSI namespaced objects. + SourceNamespace string `json:"sourceNamespace"` + + // DataMoverConfig is for data-mover-specific configuration fields. + // +optional + // +nullable + DataMoverConfig map[string]string `json:"dataMoverConfig,omitempty"` + + // Cancel indicates request to cancel the ongoing DataUpload. It can be set + // when the DataUpload is in InProgress phase + Cancel bool `json:"cancel,omitempty"` + + // OperationTimeout specifies the time used to wait internal operations, + // before returning error as timeout. + OperationTimeout metav1.Duration `json:"operationTimeout"` +} + +type SnapshotType string + +const ( + SnapshotTypeCSI SnapshotType = "CSI" +) + +// CSISnapshotSpec is the specification for a CSI snapshot. 
+type CSISnapshotSpec struct { + // VolumeSnapshot is the name of the volume snapshot to be backed up + VolumeSnapshot string `json:"volumeSnapshot"` + + // StorageClass is the name of the storage class of the PVC that the volume snapshot is created from + StorageClass string `json:"storageClass"` + + // SnapshotClass is the name of the snapshot class that the volume snapshot is created with + // +optional + SnapshotClass string `json:"snapshotClass"` +} + +// DataUploadPhase represents the lifecycle phase of a DataUpload. +// +kubebuilder:validation:Enum=New;Accepted;Prepared;InProgress;Canceling;Canceled;Completed;Failed +type DataUploadPhase string + +const ( + DataUploadPhaseNew DataUploadPhase = "New" + DataUploadPhaseAccepted DataUploadPhase = "Accepted" + DataUploadPhasePrepared DataUploadPhase = "Prepared" + DataUploadPhaseInProgress DataUploadPhase = "InProgress" + DataUploadPhaseCanceling DataUploadPhase = "Canceling" + DataUploadPhaseCanceled DataUploadPhase = "Canceled" + DataUploadPhaseCompleted DataUploadPhase = "Completed" + DataUploadPhaseFailed DataUploadPhase = "Failed" +) + +// DataUploadStatus is the current status of a DataUpload. +type DataUploadStatus struct { + // Phase is the current state of the DataUpload. + // +optional + Phase DataUploadPhase `json:"phase,omitempty"` + + // Path is the full path of the snapshot volume being backed up. + // +optional + Path string `json:"path,omitempty"` + + // SnapshotID is the identifier for the snapshot in the backup repository. + // +optional + SnapshotID string `json:"snapshotID,omitempty"` + + // DataMoverResult stores data-mover-specific information as a result of the DataUpload. + // +optional + // +nullable + DataMoverResult *map[string]string `json:"dataMoverResult,omitempty"` + + // Message is a message about the DataUpload's status. + // +optional + Message string `json:"message,omitempty"` + + // StartTimestamp records the time a backup was started. 
+ // Separate from CreationTimestamp, since that value changes + // on restores. + // The server's time is used for StartTimestamps + // +optional + // +nullable + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + + // CompletionTimestamp records the time a backup was completed. + // Completion time is recorded even on failed backups. + // Completion time is recorded before uploading the backup object. + // The server's time is used for CompletionTimestamps + // +optional + // +nullable + CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty"` + + // Progress holds the total number of bytes of the volume and the current + // number of backed up bytes. This can be used to display progress information + // about the backup operation. + // +optional + Progress shared.DataMoveOperationProgress `json:"progress,omitempty"` + + // Node is name of the node where the DataUpload is processed. + // +optional + Node string `json:"node,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runttime-controller client, +// the genclient and k8s:deepcopy markers will no longer be needed and should be removed. 
+// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="DataUpload status such as New/InProgress" +// +kubebuilder:printcolumn:name="Started",type="date",JSONPath=".status.startTimestamp",description="Time duration since this DataUpload was started" +// +kubebuilder:printcolumn:name="Bytes Done",type="integer",format="int64",JSONPath=".status.progress.bytesDone",description="Completed bytes" +// +kubebuilder:printcolumn:name="Total Bytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Total bytes" +// +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this DataUpload was created" +// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the DataUpload is processed" + +// DataUpload acts as the protocol between data mover plugins and data mover controller for the datamover backup operation +type DataUpload struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec DataUploadSpec `json:"spec,omitempty"` + + // +optional + Status DataUploadStatus `json:"status,omitempty"` +} + +// TODO(2.0) After converting all resources to use the runtime-controller client, +// the k8s:deepcopy marker will no longer be needed and should be removed. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=datauploads,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=datauploads/status,verbs=get;update;patch + +// DataUploadList is a list of DataUploads. +type DataUploadList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []DataUpload `json:"items"` +} + +// DataUploadResult represents the SnasphotBackup result to be used by DataDownload. +type DataUploadResult struct { + // BackupStorageLocation is the name of the backup storage location + // where the backup repository is stored. + BackupStorageLocation string `json:"backupStorageLocation"` + + // DataMover specifies the data mover used by the DataUpload + // +optional + DataMover string `json:"datamover,omitempty"` + + // SnapshotID is the identifier for the snapshot in the backup repository. + SnapshotID string `json:"snapshotID,omitempty"` + + // SourceNamespace is the original namespace where the volume is backed up from. + SourceNamespace string `json:"sourceNamespace"` + + // DataMoverResult stores data-mover-specific information as a result of the DataUpload. + // +optional + // +nullable + DataMoverResult *map[string]string `json:"dataMoverResult,omitempty"` +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/doc.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/doc.go new file mode 100644 index 000000000..9bab0a40c --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package v2alpha1 is the v2alpha1 version of the API. +// +groupName=velero.io +package v2alpha1 diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/groupversion_info.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/groupversion_info.go new file mode 100644 index 000000000..645c95653 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2020 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v2alpha1 contains API Schema definitions for the velero v2alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=velero.io +package v2alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "velero.io", Version: "v2alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/register.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/register.go new file mode 100644 index 000000000..ad605eb7b --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/register.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Resource gets a Velero GroupResource for a specified resource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +type typeInfo struct { + PluralName string + ItemType runtime.Object + ItemListType runtime.Object +} + +func newTypeInfo(pluralName string, itemType, itemListType runtime.Object) typeInfo { + return typeInfo{ + PluralName: pluralName, + ItemType: itemType, + ItemListType: itemListType, + } +} + +// CustomResources returns a map of all custom resources within the Velero +// API group, keyed on Kind. +func CustomResources() map[string]typeInfo { + return map[string]typeInfo{ + "DataUpload": newTypeInfo("datauploads", &DataUpload{}, &DataUploadList{}), + "DataDownload": newTypeInfo("datadownloads", &DataDownload{}, &DataDownloadList{}), + } +} + +// CustomResourceKinds returns a list of all custom resources kinds within the Velero +func CustomResourceKinds() sets.Set[string] { + kinds := sets.New[string]() + + resources := CustomResources() + for kind := range resources { + kinds.Insert(kind) + } + + return kinds +} + +func addKnownTypes(scheme *runtime.Scheme) error { + for _, typeInfo := range CustomResources() { + scheme.AddKnownTypes(SchemeGroupVersion, typeInfo.ItemType, typeInfo.ItemListType) + } + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/zz_generated.deepcopy.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..a543ac705 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1/zz_generated.deepcopy.go @@ -0,0 +1,294 @@ +//go:build 
!ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotSpec) DeepCopyInto(out *CSISnapshotSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotSpec. +func (in *CSISnapshotSpec) DeepCopy() *CSISnapshotSpec { + if in == nil { + return nil + } + out := new(CSISnapshotSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDownload) DeepCopyInto(out *DataDownload) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDownload. +func (in *DataDownload) DeepCopy() *DataDownload { + if in == nil { + return nil + } + out := new(DataDownload) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataDownload) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDownloadList) DeepCopyInto(out *DataDownloadList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataDownload, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDownloadList. 
+func (in *DataDownloadList) DeepCopy() *DataDownloadList { + if in == nil { + return nil + } + out := new(DataDownloadList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataDownloadList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDownloadSpec) DeepCopyInto(out *DataDownloadSpec) { + *out = *in + out.TargetVolume = in.TargetVolume + if in.DataMoverConfig != nil { + in, out := &in.DataMoverConfig, &out.DataMoverConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.OperationTimeout = in.OperationTimeout +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDownloadSpec. +func (in *DataDownloadSpec) DeepCopy() *DataDownloadSpec { + if in == nil { + return nil + } + out := new(DataDownloadSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDownloadStatus) DeepCopyInto(out *DataDownloadStatus) { + *out = *in + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + out.Progress = in.Progress +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDownloadStatus. 
+func (in *DataDownloadStatus) DeepCopy() *DataDownloadStatus { + if in == nil { + return nil + } + out := new(DataDownloadStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataUpload) DeepCopyInto(out *DataUpload) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataUpload. +func (in *DataUpload) DeepCopy() *DataUpload { + if in == nil { + return nil + } + out := new(DataUpload) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataUpload) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataUploadList) DeepCopyInto(out *DataUploadList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataUpload, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataUploadList. +func (in *DataUploadList) DeepCopy() *DataUploadList { + if in == nil { + return nil + } + out := new(DataUploadList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DataUploadList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataUploadResult) DeepCopyInto(out *DataUploadResult) { + *out = *in + if in.DataMoverResult != nil { + in, out := &in.DataMoverResult, &out.DataMoverResult + *out = new(map[string]string) + if **in != nil { + in, out := *in, *out + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataUploadResult. +func (in *DataUploadResult) DeepCopy() *DataUploadResult { + if in == nil { + return nil + } + out := new(DataUploadResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataUploadSpec) DeepCopyInto(out *DataUploadSpec) { + *out = *in + if in.CSISnapshot != nil { + in, out := &in.CSISnapshot, &out.CSISnapshot + *out = new(CSISnapshotSpec) + **out = **in + } + if in.DataMoverConfig != nil { + in, out := &in.DataMoverConfig, &out.DataMoverConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.OperationTimeout = in.OperationTimeout +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataUploadSpec. +func (in *DataUploadSpec) DeepCopy() *DataUploadSpec { + if in == nil { + return nil + } + out := new(DataUploadSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataUploadStatus) DeepCopyInto(out *DataUploadStatus) { + *out = *in + if in.DataMoverResult != nil { + in, out := &in.DataMoverResult, &out.DataMoverResult + *out = new(map[string]string) + if **in != nil { + in, out := *in, *out + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + } + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + out.Progress = in.Progress +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataUploadStatus. +func (in *DataUploadStatus) DeepCopy() *DataUploadStatus { + if in == nil { + return nil + } + out := new(DataUploadStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetVolumeSpec) DeepCopyInto(out *TargetVolumeSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetVolumeSpec. +func (in *TargetVolumeSpec) DeepCopy() *TargetVolumeSpec { + if in == nil { + return nil + } + out := new(TargetVolumeSpec) + in.DeepCopyInto(out) + return out +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/clientset.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/clientset.go new file mode 100644 index 000000000..881dee994 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/clientset.go @@ -0,0 +1,111 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + + velerov1 "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1" + velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + VeleroV1() velerov1.VeleroV1Interface + VeleroV2alpha1() velerov2alpha1.VeleroV2alpha1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + veleroV1 *velerov1.VeleroV1Client + veleroV2alpha1 *velerov2alpha1.VeleroV2alpha1Client +} + +// VeleroV1 retrieves the VeleroV1Client +func (c *Clientset) VeleroV1() velerov1.VeleroV1Interface { + return c.veleroV1 +} + +// VeleroV2alpha1 retrieves the VeleroV2alpha1Client +func (c *Clientset) VeleroV2alpha1() velerov2alpha1.VeleroV2alpha1Interface { + return c.veleroV2alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.veleroV1, err = velerov1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.veleroV2alpha1, err = velerov2alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.veleroV1 = velerov1.NewForConfigOrDie(c) + cs.veleroV2alpha1 = velerov2alpha1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.veleroV1 = velerov1.New(c) + cs.veleroV2alpha1 = velerov2alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/doc.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/doc.go new file mode 100644 index 000000000..95ffaaafa --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme/doc.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..927fc4f47 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. 
+package scheme diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme/register.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..12654733e --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme/register.go @@ -0,0 +1,58 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + velerov1.AddToScheme, + velerov2alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/backup.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/backup.go new file mode 100644 index 000000000..420bfc5c9 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/backup.go @@ -0,0 +1,195 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + scheme "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// BackupsGetter has a method to return a BackupInterface. +// A group's client should implement this interface. +type BackupsGetter interface { + Backups(namespace string) BackupInterface +} + +// BackupInterface has methods to work with Backup resources. +type BackupInterface interface { + Create(ctx context.Context, backup *v1.Backup, opts metav1.CreateOptions) (*v1.Backup, error) + Update(ctx context.Context, backup *v1.Backup, opts metav1.UpdateOptions) (*v1.Backup, error) + UpdateStatus(ctx context.Context, backup *v1.Backup, opts metav1.UpdateOptions) (*v1.Backup, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Backup, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.BackupList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Backup, err error) + BackupExpansion +} + +// backups implements BackupInterface +type backups struct { + client rest.Interface + ns string +} + +// newBackups returns a Backups +func newBackups(c *VeleroV1Client, namespace string) *backups { + return &backups{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the backup, and returns the corresponding backup object, and an error if there is any. 
+func (c *backups) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Backup, err error) { + result = &v1.Backup{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backups"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Backups that match those selectors. +func (c *backups) List(ctx context.Context, opts metav1.ListOptions) (result *v1.BackupList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.BackupList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested backups. +func (c *backups) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("backups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a backup and creates it. Returns the server's representation of the backup, and an error, if there is any. +func (c *backups) Create(ctx context.Context, backup *v1.Backup, opts metav1.CreateOptions) (result *v1.Backup, err error) { + result = &v1.Backup{} + err = c.client.Post(). + Namespace(c.ns). + Resource("backups"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backup). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a backup and updates it. Returns the server's representation of the backup, and an error, if there is any. 
+func (c *backups) Update(ctx context.Context, backup *v1.Backup, opts metav1.UpdateOptions) (result *v1.Backup, err error) { + result = &v1.Backup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backups"). + Name(backup.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backup). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *backups) UpdateStatus(ctx context.Context, backup *v1.Backup, opts metav1.UpdateOptions) (result *v1.Backup, err error) { + result = &v1.Backup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backups"). + Name(backup.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backup). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the backup and deletes it. Returns an error if one occurs. +func (c *backups) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("backups"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *backups) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("backups"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched backup. +func (c *backups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Backup, err error) { + result = &v1.Backup{} + err = c.client.Patch(pt). + Namespace(c.ns). 
+ Resource("backups"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/backuprepository.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/backuprepository.go new file mode 100644 index 000000000..7ecef6dcf --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/backuprepository.go @@ -0,0 +1,195 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + scheme "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// BackupRepositoriesGetter has a method to return a BackupRepositoryInterface. +// A group's client should implement this interface. +type BackupRepositoriesGetter interface { + BackupRepositories(namespace string) BackupRepositoryInterface +} + +// BackupRepositoryInterface has methods to work with BackupRepository resources. 
+type BackupRepositoryInterface interface { + Create(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.CreateOptions) (*v1.BackupRepository, error) + Update(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.UpdateOptions) (*v1.BackupRepository, error) + UpdateStatus(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.UpdateOptions) (*v1.BackupRepository, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.BackupRepository, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.BackupRepositoryList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.BackupRepository, err error) + BackupRepositoryExpansion +} + +// backupRepositories implements BackupRepositoryInterface +type backupRepositories struct { + client rest.Interface + ns string +} + +// newBackupRepositories returns a BackupRepositories +func newBackupRepositories(c *VeleroV1Client, namespace string) *backupRepositories { + return &backupRepositories{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the backupRepository, and returns the corresponding backupRepository object, and an error if there is any. +func (c *backupRepositories) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.BackupRepository, err error) { + result = &v1.BackupRepository{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backuprepositories"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of BackupRepositories that match those selectors. +func (c *backupRepositories) List(ctx context.Context, opts metav1.ListOptions) (result *v1.BackupRepositoryList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.BackupRepositoryList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backuprepositories"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested backupRepositories. +func (c *backupRepositories) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("backuprepositories"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a backupRepository and creates it. Returns the server's representation of the backupRepository, and an error, if there is any. +func (c *backupRepositories) Create(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.CreateOptions) (result *v1.BackupRepository, err error) { + result = &v1.BackupRepository{} + err = c.client.Post(). + Namespace(c.ns). + Resource("backuprepositories"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupRepository). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a backupRepository and updates it. Returns the server's representation of the backupRepository, and an error, if there is any. 
+func (c *backupRepositories) Update(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.UpdateOptions) (result *v1.BackupRepository, err error) { + result = &v1.BackupRepository{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backuprepositories"). + Name(backupRepository.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupRepository). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *backupRepositories) UpdateStatus(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.UpdateOptions) (result *v1.BackupRepository, err error) { + result = &v1.BackupRepository{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backuprepositories"). + Name(backupRepository.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupRepository). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the backupRepository and deletes it. Returns an error if one occurs. +func (c *backupRepositories) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("backuprepositories"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *backupRepositories) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("backuprepositories"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched backupRepository. 
+func (c *backupRepositories) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.BackupRepository, err error) { + result = &v1.BackupRepository{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("backuprepositories"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/backupstoragelocation.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/backupstoragelocation.go new file mode 100644 index 000000000..352c08ad2 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/backupstoragelocation.go @@ -0,0 +1,195 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + scheme "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// BackupStorageLocationsGetter has a method to return a BackupStorageLocationInterface. 
+// A group's client should implement this interface. +type BackupStorageLocationsGetter interface { + BackupStorageLocations(namespace string) BackupStorageLocationInterface +} + +// BackupStorageLocationInterface has methods to work with BackupStorageLocation resources. +type BackupStorageLocationInterface interface { + Create(ctx context.Context, backupStorageLocation *v1.BackupStorageLocation, opts metav1.CreateOptions) (*v1.BackupStorageLocation, error) + Update(ctx context.Context, backupStorageLocation *v1.BackupStorageLocation, opts metav1.UpdateOptions) (*v1.BackupStorageLocation, error) + UpdateStatus(ctx context.Context, backupStorageLocation *v1.BackupStorageLocation, opts metav1.UpdateOptions) (*v1.BackupStorageLocation, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.BackupStorageLocation, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.BackupStorageLocationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.BackupStorageLocation, err error) + BackupStorageLocationExpansion +} + +// backupStorageLocations implements BackupStorageLocationInterface +type backupStorageLocations struct { + client rest.Interface + ns string +} + +// newBackupStorageLocations returns a BackupStorageLocations +func newBackupStorageLocations(c *VeleroV1Client, namespace string) *backupStorageLocations { + return &backupStorageLocations{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the backupStorageLocation, and returns the corresponding backupStorageLocation object, and an error if there is any. 
+func (c *backupStorageLocations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.BackupStorageLocation, err error) { + result = &v1.BackupStorageLocation{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backupstoragelocations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of BackupStorageLocations that match those selectors. +func (c *backupStorageLocations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.BackupStorageLocationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.BackupStorageLocationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backupstoragelocations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested backupStorageLocations. +func (c *backupStorageLocations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("backupstoragelocations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a backupStorageLocation and creates it. Returns the server's representation of the backupStorageLocation, and an error, if there is any. +func (c *backupStorageLocations) Create(ctx context.Context, backupStorageLocation *v1.BackupStorageLocation, opts metav1.CreateOptions) (result *v1.BackupStorageLocation, err error) { + result = &v1.BackupStorageLocation{} + err = c.client.Post(). + Namespace(c.ns). + Resource("backupstoragelocations"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Body(backupStorageLocation). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a backupStorageLocation and updates it. Returns the server's representation of the backupStorageLocation, and an error, if there is any. +func (c *backupStorageLocations) Update(ctx context.Context, backupStorageLocation *v1.BackupStorageLocation, opts metav1.UpdateOptions) (result *v1.BackupStorageLocation, err error) { + result = &v1.BackupStorageLocation{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backupstoragelocations"). + Name(backupStorageLocation.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupStorageLocation). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *backupStorageLocations) UpdateStatus(ctx context.Context, backupStorageLocation *v1.BackupStorageLocation, opts metav1.UpdateOptions) (result *v1.BackupStorageLocation, err error) { + result = &v1.BackupStorageLocation{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backupstoragelocations"). + Name(backupStorageLocation.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupStorageLocation). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the backupStorageLocation and deletes it. Returns an error if one occurs. +func (c *backupStorageLocations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("backupstoragelocations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *backupStorageLocations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("backupstoragelocations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched backupStorageLocation. +func (c *backupStorageLocations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.BackupStorageLocation, err error) { + result = &v1.BackupStorageLocation{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("backupstoragelocations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/deletebackuprequest.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/deletebackuprequest.go new file mode 100644 index 000000000..e713e4df9 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/deletebackuprequest.go @@ -0,0 +1,195 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + scheme "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// DeleteBackupRequestsGetter has a method to return a DeleteBackupRequestInterface. +// A group's client should implement this interface. +type DeleteBackupRequestsGetter interface { + DeleteBackupRequests(namespace string) DeleteBackupRequestInterface +} + +// DeleteBackupRequestInterface has methods to work with DeleteBackupRequest resources. +type DeleteBackupRequestInterface interface { + Create(ctx context.Context, deleteBackupRequest *v1.DeleteBackupRequest, opts metav1.CreateOptions) (*v1.DeleteBackupRequest, error) + Update(ctx context.Context, deleteBackupRequest *v1.DeleteBackupRequest, opts metav1.UpdateOptions) (*v1.DeleteBackupRequest, error) + UpdateStatus(ctx context.Context, deleteBackupRequest *v1.DeleteBackupRequest, opts metav1.UpdateOptions) (*v1.DeleteBackupRequest, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.DeleteBackupRequest, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.DeleteBackupRequestList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DeleteBackupRequest, err error) + DeleteBackupRequestExpansion +} + +// deleteBackupRequests implements 
DeleteBackupRequestInterface +type deleteBackupRequests struct { + client rest.Interface + ns string +} + +// newDeleteBackupRequests returns a DeleteBackupRequests +func newDeleteBackupRequests(c *VeleroV1Client, namespace string) *deleteBackupRequests { + return &deleteBackupRequests{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the deleteBackupRequest, and returns the corresponding deleteBackupRequest object, and an error if there is any. +func (c *deleteBackupRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DeleteBackupRequest, err error) { + result = &v1.DeleteBackupRequest{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deletebackuprequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DeleteBackupRequests that match those selectors. +func (c *deleteBackupRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DeleteBackupRequestList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.DeleteBackupRequestList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deletebackuprequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deleteBackupRequests. +func (c *deleteBackupRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deletebackuprequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch(ctx) +} + +// Create takes the representation of a deleteBackupRequest and creates it. Returns the server's representation of the deleteBackupRequest, and an error, if there is any. +func (c *deleteBackupRequests) Create(ctx context.Context, deleteBackupRequest *v1.DeleteBackupRequest, opts metav1.CreateOptions) (result *v1.DeleteBackupRequest, err error) { + result = &v1.DeleteBackupRequest{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deletebackuprequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(deleteBackupRequest). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a deleteBackupRequest and updates it. Returns the server's representation of the deleteBackupRequest, and an error, if there is any. +func (c *deleteBackupRequests) Update(ctx context.Context, deleteBackupRequest *v1.DeleteBackupRequest, opts metav1.UpdateOptions) (result *v1.DeleteBackupRequest, err error) { + result = &v1.DeleteBackupRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deletebackuprequests"). + Name(deleteBackupRequest.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(deleteBackupRequest). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *deleteBackupRequests) UpdateStatus(ctx context.Context, deleteBackupRequest *v1.DeleteBackupRequest, opts metav1.UpdateOptions) (result *v1.DeleteBackupRequest, err error) { + result = &v1.DeleteBackupRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deletebackuprequests"). + Name(deleteBackupRequest.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(deleteBackupRequest). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the deleteBackupRequest and deletes it. Returns an error if one occurs. 
+func (c *deleteBackupRequests) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deletebackuprequests"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deleteBackupRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("deletebackuprequests"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched deleteBackupRequest. +func (c *deleteBackupRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DeleteBackupRequest, err error) { + result = &v1.DeleteBackupRequest{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deletebackuprequests"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/doc.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/doc.go new file mode 100644 index 000000000..d2243753c --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/downloadrequest.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/downloadrequest.go new file mode 100644 index 000000000..68e5011f7 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/downloadrequest.go @@ -0,0 +1,195 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + scheme "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// DownloadRequestsGetter has a method to return a DownloadRequestInterface. +// A group's client should implement this interface. +type DownloadRequestsGetter interface { + DownloadRequests(namespace string) DownloadRequestInterface +} + +// DownloadRequestInterface has methods to work with DownloadRequest resources. +type DownloadRequestInterface interface { + Create(ctx context.Context, downloadRequest *v1.DownloadRequest, opts metav1.CreateOptions) (*v1.DownloadRequest, error) + Update(ctx context.Context, downloadRequest *v1.DownloadRequest, opts metav1.UpdateOptions) (*v1.DownloadRequest, error) + UpdateStatus(ctx context.Context, downloadRequest *v1.DownloadRequest, opts metav1.UpdateOptions) (*v1.DownloadRequest, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.DownloadRequest, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.DownloadRequestList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DownloadRequest, err error) + DownloadRequestExpansion +} + +// downloadRequests implements DownloadRequestInterface +type downloadRequests struct { + client rest.Interface + ns string +} + +// newDownloadRequests returns a DownloadRequests +func newDownloadRequests(c *VeleroV1Client, namespace string) *downloadRequests { + return 
&downloadRequests{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the downloadRequest, and returns the corresponding downloadRequest object, and an error if there is any. +func (c *downloadRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DownloadRequest, err error) { + result = &v1.DownloadRequest{} + err = c.client.Get(). + Namespace(c.ns). + Resource("downloadrequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DownloadRequests that match those selectors. +func (c *downloadRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DownloadRequestList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.DownloadRequestList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("downloadrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested downloadRequests. +func (c *downloadRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("downloadrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a downloadRequest and creates it. Returns the server's representation of the downloadRequest, and an error, if there is any. 
+func (c *downloadRequests) Create(ctx context.Context, downloadRequest *v1.DownloadRequest, opts metav1.CreateOptions) (result *v1.DownloadRequest, err error) { + result = &v1.DownloadRequest{} + err = c.client.Post(). + Namespace(c.ns). + Resource("downloadrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(downloadRequest). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a downloadRequest and updates it. Returns the server's representation of the downloadRequest, and an error, if there is any. +func (c *downloadRequests) Update(ctx context.Context, downloadRequest *v1.DownloadRequest, opts metav1.UpdateOptions) (result *v1.DownloadRequest, err error) { + result = &v1.DownloadRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("downloadrequests"). + Name(downloadRequest.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(downloadRequest). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *downloadRequests) UpdateStatus(ctx context.Context, downloadRequest *v1.DownloadRequest, opts metav1.UpdateOptions) (result *v1.DownloadRequest, err error) { + result = &v1.DownloadRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("downloadrequests"). + Name(downloadRequest.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(downloadRequest). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the downloadRequest and deletes it. Returns an error if one occurs. +func (c *downloadRequests) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("downloadrequests"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *downloadRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("downloadrequests"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched downloadRequest. +func (c *downloadRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DownloadRequest, err error) { + result = &v1.DownloadRequest{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("downloadrequests"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/generated_expansion.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/generated_expansion.go new file mode 100644 index 000000000..5032fd6a4 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/generated_expansion.go @@ -0,0 +1,41 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type BackupExpansion interface{} + +type BackupRepositoryExpansion interface{} + +type BackupStorageLocationExpansion interface{} + +type DeleteBackupRequestExpansion interface{} + +type DownloadRequestExpansion interface{} + +type PodVolumeBackupExpansion interface{} + +type PodVolumeRestoreExpansion interface{} + +type RestoreExpansion interface{} + +type ScheduleExpansion interface{} + +type ServerStatusRequestExpansion interface{} + +type VolumeSnapshotLocationExpansion interface{} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/podvolumebackup.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/podvolumebackup.go new file mode 100644 index 000000000..836d78b58 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/podvolumebackup.go @@ -0,0 +1,195 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + scheme "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PodVolumeBackupsGetter has a method to return a PodVolumeBackupInterface. +// A group's client should implement this interface. +type PodVolumeBackupsGetter interface { + PodVolumeBackups(namespace string) PodVolumeBackupInterface +} + +// PodVolumeBackupInterface has methods to work with PodVolumeBackup resources. +type PodVolumeBackupInterface interface { + Create(ctx context.Context, podVolumeBackup *v1.PodVolumeBackup, opts metav1.CreateOptions) (*v1.PodVolumeBackup, error) + Update(ctx context.Context, podVolumeBackup *v1.PodVolumeBackup, opts metav1.UpdateOptions) (*v1.PodVolumeBackup, error) + UpdateStatus(ctx context.Context, podVolumeBackup *v1.PodVolumeBackup, opts metav1.UpdateOptions) (*v1.PodVolumeBackup, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PodVolumeBackup, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PodVolumeBackupList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodVolumeBackup, err error) + PodVolumeBackupExpansion +} + +// podVolumeBackups implements PodVolumeBackupInterface +type podVolumeBackups struct { + client rest.Interface + ns string +} + +// newPodVolumeBackups returns a PodVolumeBackups +func newPodVolumeBackups(c *VeleroV1Client, namespace string) *podVolumeBackups { + return 
&podVolumeBackups{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podVolumeBackup, and returns the corresponding podVolumeBackup object, and an error if there is any. +func (c *podVolumeBackups) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodVolumeBackup, err error) { + result = &v1.PodVolumeBackup{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podvolumebackups"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodVolumeBackups that match those selectors. +func (c *podVolumeBackups) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodVolumeBackupList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PodVolumeBackupList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podvolumebackups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podVolumeBackups. +func (c *podVolumeBackups) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podvolumebackups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a podVolumeBackup and creates it. Returns the server's representation of the podVolumeBackup, and an error, if there is any. 
+func (c *podVolumeBackups) Create(ctx context.Context, podVolumeBackup *v1.PodVolumeBackup, opts metav1.CreateOptions) (result *v1.PodVolumeBackup, err error) { + result = &v1.PodVolumeBackup{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podvolumebackups"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podVolumeBackup). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a podVolumeBackup and updates it. Returns the server's representation of the podVolumeBackup, and an error, if there is any. +func (c *podVolumeBackups) Update(ctx context.Context, podVolumeBackup *v1.PodVolumeBackup, opts metav1.UpdateOptions) (result *v1.PodVolumeBackup, err error) { + result = &v1.PodVolumeBackup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podvolumebackups"). + Name(podVolumeBackup.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podVolumeBackup). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *podVolumeBackups) UpdateStatus(ctx context.Context, podVolumeBackup *v1.PodVolumeBackup, opts metav1.UpdateOptions) (result *v1.PodVolumeBackup, err error) { + result = &v1.PodVolumeBackup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podvolumebackups"). + Name(podVolumeBackup.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podVolumeBackup). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the podVolumeBackup and deletes it. Returns an error if one occurs. +func (c *podVolumeBackups) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podvolumebackups"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *podVolumeBackups) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("podvolumebackups"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched podVolumeBackup. +func (c *podVolumeBackups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodVolumeBackup, err error) { + result = &v1.PodVolumeBackup{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podvolumebackups"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/podvolumerestore.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/podvolumerestore.go new file mode 100644 index 000000000..dffd51b1b --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/podvolumerestore.go @@ -0,0 +1,195 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + scheme "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PodVolumeRestoresGetter has a method to return a PodVolumeRestoreInterface. +// A group's client should implement this interface. +type PodVolumeRestoresGetter interface { + PodVolumeRestores(namespace string) PodVolumeRestoreInterface +} + +// PodVolumeRestoreInterface has methods to work with PodVolumeRestore resources. +type PodVolumeRestoreInterface interface { + Create(ctx context.Context, podVolumeRestore *v1.PodVolumeRestore, opts metav1.CreateOptions) (*v1.PodVolumeRestore, error) + Update(ctx context.Context, podVolumeRestore *v1.PodVolumeRestore, opts metav1.UpdateOptions) (*v1.PodVolumeRestore, error) + UpdateStatus(ctx context.Context, podVolumeRestore *v1.PodVolumeRestore, opts metav1.UpdateOptions) (*v1.PodVolumeRestore, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PodVolumeRestore, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PodVolumeRestoreList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodVolumeRestore, err error) + PodVolumeRestoreExpansion +} + +// podVolumeRestores implements PodVolumeRestoreInterface +type podVolumeRestores struct { + 
client rest.Interface + ns string +} + +// newPodVolumeRestores returns a PodVolumeRestores +func newPodVolumeRestores(c *VeleroV1Client, namespace string) *podVolumeRestores { + return &podVolumeRestores{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podVolumeRestore, and returns the corresponding podVolumeRestore object, and an error if there is any. +func (c *podVolumeRestores) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodVolumeRestore, err error) { + result = &v1.PodVolumeRestore{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podvolumerestores"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodVolumeRestores that match those selectors. +func (c *podVolumeRestores) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodVolumeRestoreList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PodVolumeRestoreList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podvolumerestores"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podVolumeRestores. +func (c *podVolumeRestores) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podvolumerestores"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a podVolumeRestore and creates it. Returns the server's representation of the podVolumeRestore, and an error, if there is any. 
+func (c *podVolumeRestores) Create(ctx context.Context, podVolumeRestore *v1.PodVolumeRestore, opts metav1.CreateOptions) (result *v1.PodVolumeRestore, err error) { + result = &v1.PodVolumeRestore{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podvolumerestores"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podVolumeRestore). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a podVolumeRestore and updates it. Returns the server's representation of the podVolumeRestore, and an error, if there is any. +func (c *podVolumeRestores) Update(ctx context.Context, podVolumeRestore *v1.PodVolumeRestore, opts metav1.UpdateOptions) (result *v1.PodVolumeRestore, err error) { + result = &v1.PodVolumeRestore{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podvolumerestores"). + Name(podVolumeRestore.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podVolumeRestore). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *podVolumeRestores) UpdateStatus(ctx context.Context, podVolumeRestore *v1.PodVolumeRestore, opts metav1.UpdateOptions) (result *v1.PodVolumeRestore, err error) { + result = &v1.PodVolumeRestore{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podvolumerestores"). + Name(podVolumeRestore.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podVolumeRestore). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the podVolumeRestore and deletes it. Returns an error if one occurs. +func (c *podVolumeRestores) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podvolumerestores"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *podVolumeRestores) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("podvolumerestores"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched podVolumeRestore. +func (c *podVolumeRestores) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodVolumeRestore, err error) { + result = &v1.PodVolumeRestore{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podvolumerestores"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/restore.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/restore.go new file mode 100644 index 000000000..a43b823a6 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/restore.go @@ -0,0 +1,195 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + scheme "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// RestoresGetter has a method to return a RestoreInterface. +// A group's client should implement this interface. +type RestoresGetter interface { + Restores(namespace string) RestoreInterface +} + +// RestoreInterface has methods to work with Restore resources. +type RestoreInterface interface { + Create(ctx context.Context, restore *v1.Restore, opts metav1.CreateOptions) (*v1.Restore, error) + Update(ctx context.Context, restore *v1.Restore, opts metav1.UpdateOptions) (*v1.Restore, error) + UpdateStatus(ctx context.Context, restore *v1.Restore, opts metav1.UpdateOptions) (*v1.Restore, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Restore, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.RestoreList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Restore, err error) + RestoreExpansion +} + +// restores implements RestoreInterface +type restores struct { + client rest.Interface + ns string +} + +// newRestores returns a Restores +func newRestores(c *VeleroV1Client, namespace string) *restores { + return &restores{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the restore, and returns the corresponding restore object, and an error if there is 
any. +func (c *restores) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Restore, err error) { + result = &v1.Restore{} + err = c.client.Get(). + Namespace(c.ns). + Resource("restores"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Restores that match those selectors. +func (c *restores) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RestoreList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.RestoreList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("restores"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested restores. +func (c *restores) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("restores"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a restore and creates it. Returns the server's representation of the restore, and an error, if there is any. +func (c *restores) Create(ctx context.Context, restore *v1.Restore, opts metav1.CreateOptions) (result *v1.Restore, err error) { + result = &v1.Restore{} + err = c.client.Post(). + Namespace(c.ns). + Resource("restores"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(restore). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a restore and updates it. Returns the server's representation of the restore, and an error, if there is any. 
+func (c *restores) Update(ctx context.Context, restore *v1.Restore, opts metav1.UpdateOptions) (result *v1.Restore, err error) { + result = &v1.Restore{} + err = c.client.Put(). + Namespace(c.ns). + Resource("restores"). + Name(restore.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(restore). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *restores) UpdateStatus(ctx context.Context, restore *v1.Restore, opts metav1.UpdateOptions) (result *v1.Restore, err error) { + result = &v1.Restore{} + err = c.client.Put(). + Namespace(c.ns). + Resource("restores"). + Name(restore.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(restore). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the restore and deletes it. Returns an error if one occurs. +func (c *restores) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("restores"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *restores) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("restores"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched restore. +func (c *restores) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Restore, err error) { + result = &v1.Restore{} + err = c.client.Patch(pt). 
+ Namespace(c.ns). + Resource("restores"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/schedule.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/schedule.go new file mode 100644 index 000000000..8a003b008 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/schedule.go @@ -0,0 +1,195 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + scheme "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// SchedulesGetter has a method to return a ScheduleInterface. +// A group's client should implement this interface. +type SchedulesGetter interface { + Schedules(namespace string) ScheduleInterface +} + +// ScheduleInterface has methods to work with Schedule resources. 
+type ScheduleInterface interface { + Create(ctx context.Context, schedule *v1.Schedule, opts metav1.CreateOptions) (*v1.Schedule, error) + Update(ctx context.Context, schedule *v1.Schedule, opts metav1.UpdateOptions) (*v1.Schedule, error) + UpdateStatus(ctx context.Context, schedule *v1.Schedule, opts metav1.UpdateOptions) (*v1.Schedule, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Schedule, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ScheduleList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Schedule, err error) + ScheduleExpansion +} + +// schedules implements ScheduleInterface +type schedules struct { + client rest.Interface + ns string +} + +// newSchedules returns a Schedules +func newSchedules(c *VeleroV1Client, namespace string) *schedules { + return &schedules{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the schedule, and returns the corresponding schedule object, and an error if there is any. +func (c *schedules) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Schedule, err error) { + result = &v1.Schedule{} + err = c.client.Get(). + Namespace(c.ns). + Resource("schedules"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Schedules that match those selectors. 
+func (c *schedules) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ScheduleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ScheduleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("schedules"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested schedules. +func (c *schedules) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("schedules"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a schedule and creates it. Returns the server's representation of the schedule, and an error, if there is any. +func (c *schedules) Create(ctx context.Context, schedule *v1.Schedule, opts metav1.CreateOptions) (result *v1.Schedule, err error) { + result = &v1.Schedule{} + err = c.client.Post(). + Namespace(c.ns). + Resource("schedules"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(schedule). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a schedule and updates it. Returns the server's representation of the schedule, and an error, if there is any. +func (c *schedules) Update(ctx context.Context, schedule *v1.Schedule, opts metav1.UpdateOptions) (result *v1.Schedule, err error) { + result = &v1.Schedule{} + err = c.client.Put(). + Namespace(c.ns). + Resource("schedules"). + Name(schedule.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(schedule). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *schedules) UpdateStatus(ctx context.Context, schedule *v1.Schedule, opts metav1.UpdateOptions) (result *v1.Schedule, err error) { + result = &v1.Schedule{} + err = c.client.Put(). + Namespace(c.ns). + Resource("schedules"). + Name(schedule.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(schedule). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the schedule and deletes it. Returns an error if one occurs. +func (c *schedules) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("schedules"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *schedules) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("schedules"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched schedule. +func (c *schedules) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Schedule, err error) { + result = &v1.Schedule{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("schedules"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/serverstatusrequest.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/serverstatusrequest.go new file mode 100644 index 000000000..c8a16d80f --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/serverstatusrequest.go @@ -0,0 +1,195 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + scheme "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ServerStatusRequestsGetter has a method to return a ServerStatusRequestInterface. +// A group's client should implement this interface. +type ServerStatusRequestsGetter interface { + ServerStatusRequests(namespace string) ServerStatusRequestInterface +} + +// ServerStatusRequestInterface has methods to work with ServerStatusRequest resources. 
+type ServerStatusRequestInterface interface { + Create(ctx context.Context, serverStatusRequest *v1.ServerStatusRequest, opts metav1.CreateOptions) (*v1.ServerStatusRequest, error) + Update(ctx context.Context, serverStatusRequest *v1.ServerStatusRequest, opts metav1.UpdateOptions) (*v1.ServerStatusRequest, error) + UpdateStatus(ctx context.Context, serverStatusRequest *v1.ServerStatusRequest, opts metav1.UpdateOptions) (*v1.ServerStatusRequest, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ServerStatusRequest, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ServerStatusRequestList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServerStatusRequest, err error) + ServerStatusRequestExpansion +} + +// serverStatusRequests implements ServerStatusRequestInterface +type serverStatusRequests struct { + client rest.Interface + ns string +} + +// newServerStatusRequests returns a ServerStatusRequests +func newServerStatusRequests(c *VeleroV1Client, namespace string) *serverStatusRequests { + return &serverStatusRequests{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the serverStatusRequest, and returns the corresponding serverStatusRequest object, and an error if there is any. +func (c *serverStatusRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServerStatusRequest, err error) { + result = &v1.ServerStatusRequest{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serverstatusrequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServerStatusRequests that match those selectors. +func (c *serverStatusRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServerStatusRequestList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ServerStatusRequestList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serverstatusrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serverStatusRequests. +func (c *serverStatusRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("serverstatusrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a serverStatusRequest and creates it. Returns the server's representation of the serverStatusRequest, and an error, if there is any. +func (c *serverStatusRequests) Create(ctx context.Context, serverStatusRequest *v1.ServerStatusRequest, opts metav1.CreateOptions) (result *v1.ServerStatusRequest, err error) { + result = &v1.ServerStatusRequest{} + err = c.client.Post(). + Namespace(c.ns). + Resource("serverstatusrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serverStatusRequest). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a serverStatusRequest and updates it. Returns the server's representation of the serverStatusRequest, and an error, if there is any. 
+func (c *serverStatusRequests) Update(ctx context.Context, serverStatusRequest *v1.ServerStatusRequest, opts metav1.UpdateOptions) (result *v1.ServerStatusRequest, err error) { + result = &v1.ServerStatusRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("serverstatusrequests"). + Name(serverStatusRequest.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serverStatusRequest). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *serverStatusRequests) UpdateStatus(ctx context.Context, serverStatusRequest *v1.ServerStatusRequest, opts metav1.UpdateOptions) (result *v1.ServerStatusRequest, err error) { + result = &v1.ServerStatusRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("serverstatusrequests"). + Name(serverStatusRequest.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serverStatusRequest). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the serverStatusRequest and deletes it. Returns an error if one occurs. +func (c *serverStatusRequests) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("serverstatusrequests"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *serverStatusRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("serverstatusrequests"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). 
+ Error() +} + +// Patch applies the patch and returns the patched serverStatusRequest. +func (c *serverStatusRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServerStatusRequest, err error) { + result = &v1.ServerStatusRequest{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("serverstatusrequests"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/velero_client.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/velero_client.go new file mode 100644 index 000000000..39f85628c --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/velero_client.go @@ -0,0 +1,139 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type VeleroV1Interface interface { + RESTClient() rest.Interface + BackupsGetter + BackupRepositoriesGetter + BackupStorageLocationsGetter + DeleteBackupRequestsGetter + DownloadRequestsGetter + PodVolumeBackupsGetter + PodVolumeRestoresGetter + RestoresGetter + SchedulesGetter + ServerStatusRequestsGetter + VolumeSnapshotLocationsGetter +} + +// VeleroV1Client is used to interact with features provided by the velero.io group. +type VeleroV1Client struct { + restClient rest.Interface +} + +func (c *VeleroV1Client) Backups(namespace string) BackupInterface { + return newBackups(c, namespace) +} + +func (c *VeleroV1Client) BackupRepositories(namespace string) BackupRepositoryInterface { + return newBackupRepositories(c, namespace) +} + +func (c *VeleroV1Client) BackupStorageLocations(namespace string) BackupStorageLocationInterface { + return newBackupStorageLocations(c, namespace) +} + +func (c *VeleroV1Client) DeleteBackupRequests(namespace string) DeleteBackupRequestInterface { + return newDeleteBackupRequests(c, namespace) +} + +func (c *VeleroV1Client) DownloadRequests(namespace string) DownloadRequestInterface { + return newDownloadRequests(c, namespace) +} + +func (c *VeleroV1Client) PodVolumeBackups(namespace string) PodVolumeBackupInterface { + return newPodVolumeBackups(c, namespace) +} + +func (c *VeleroV1Client) PodVolumeRestores(namespace string) PodVolumeRestoreInterface { + return newPodVolumeRestores(c, namespace) +} + +func (c *VeleroV1Client) Restores(namespace string) RestoreInterface { + return newRestores(c, namespace) +} + +func (c *VeleroV1Client) Schedules(namespace string) ScheduleInterface { + return newSchedules(c, namespace) +} + +func (c *VeleroV1Client) ServerStatusRequests(namespace string) ServerStatusRequestInterface { + return 
newServerStatusRequests(c, namespace) +} + +func (c *VeleroV1Client) VolumeSnapshotLocations(namespace string) VolumeSnapshotLocationInterface { + return newVolumeSnapshotLocations(c, namespace) +} + +// NewForConfig creates a new VeleroV1Client for the given config. +func NewForConfig(c *rest.Config) (*VeleroV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &VeleroV1Client{client}, nil +} + +// NewForConfigOrDie creates a new VeleroV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *VeleroV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new VeleroV1Client for the given RESTClient. +func New(c rest.Interface) *VeleroV1Client { + return &VeleroV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *VeleroV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/volumesnapshotlocation.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/volumesnapshotlocation.go new file mode 100644 index 000000000..a4c11e93a --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1/volumesnapshotlocation.go @@ -0,0 +1,195 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + scheme "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// VolumeSnapshotLocationsGetter has a method to return a VolumeSnapshotLocationInterface. +// A group's client should implement this interface. +type VolumeSnapshotLocationsGetter interface { + VolumeSnapshotLocations(namespace string) VolumeSnapshotLocationInterface +} + +// VolumeSnapshotLocationInterface has methods to work with VolumeSnapshotLocation resources. 
+type VolumeSnapshotLocationInterface interface { + Create(ctx context.Context, volumeSnapshotLocation *v1.VolumeSnapshotLocation, opts metav1.CreateOptions) (*v1.VolumeSnapshotLocation, error) + Update(ctx context.Context, volumeSnapshotLocation *v1.VolumeSnapshotLocation, opts metav1.UpdateOptions) (*v1.VolumeSnapshotLocation, error) + UpdateStatus(ctx context.Context, volumeSnapshotLocation *v1.VolumeSnapshotLocation, opts metav1.UpdateOptions) (*v1.VolumeSnapshotLocation, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.VolumeSnapshotLocation, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.VolumeSnapshotLocationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeSnapshotLocation, err error) + VolumeSnapshotLocationExpansion +} + +// volumeSnapshotLocations implements VolumeSnapshotLocationInterface +type volumeSnapshotLocations struct { + client rest.Interface + ns string +} + +// newVolumeSnapshotLocations returns a VolumeSnapshotLocations +func newVolumeSnapshotLocations(c *VeleroV1Client, namespace string) *volumeSnapshotLocations { + return &volumeSnapshotLocations{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the volumeSnapshotLocation, and returns the corresponding volumeSnapshotLocation object, and an error if there is any. +func (c *volumeSnapshotLocations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.VolumeSnapshotLocation, err error) { + result = &v1.VolumeSnapshotLocation{} + err = c.client.Get(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + Name(name). 
+ VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeSnapshotLocations that match those selectors. +func (c *volumeSnapshotLocations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.VolumeSnapshotLocationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.VolumeSnapshotLocationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshotLocations. +func (c *volumeSnapshotLocations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a volumeSnapshotLocation and creates it. Returns the server's representation of the volumeSnapshotLocation, and an error, if there is any. +func (c *volumeSnapshotLocations) Create(ctx context.Context, volumeSnapshotLocation *v1.VolumeSnapshotLocation, opts metav1.CreateOptions) (result *v1.VolumeSnapshotLocation, err error) { + result = &v1.VolumeSnapshotLocation{} + err = c.client.Post(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(volumeSnapshotLocation). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a volumeSnapshotLocation and updates it. 
Returns the server's representation of the volumeSnapshotLocation, and an error, if there is any. +func (c *volumeSnapshotLocations) Update(ctx context.Context, volumeSnapshotLocation *v1.VolumeSnapshotLocation, opts metav1.UpdateOptions) (result *v1.VolumeSnapshotLocation, err error) { + result = &v1.VolumeSnapshotLocation{} + err = c.client.Put(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + Name(volumeSnapshotLocation.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(volumeSnapshotLocation). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *volumeSnapshotLocations) UpdateStatus(ctx context.Context, volumeSnapshotLocation *v1.VolumeSnapshotLocation, opts metav1.UpdateOptions) (result *v1.VolumeSnapshotLocation, err error) { + result = &v1.VolumeSnapshotLocation{} + err = c.client.Put(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + Name(volumeSnapshotLocation.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(volumeSnapshotLocation). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the volumeSnapshotLocation and deletes it. Returns an error if one occurs. +func (c *volumeSnapshotLocations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeSnapshotLocations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). 
+ VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched volumeSnapshotLocation. +func (c *volumeSnapshotLocations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeSnapshotLocation, err error) { + result = &v1.VolumeSnapshotLocation{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/datadownload.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/datadownload.go new file mode 100644 index 000000000..511677675 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/datadownload.go @@ -0,0 +1,195 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v2alpha1 + +import ( + "context" + "time" + + v2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" + scheme "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// DataDownloadsGetter has a method to return a DataDownloadInterface. +// A group's client should implement this interface. +type DataDownloadsGetter interface { + DataDownloads(namespace string) DataDownloadInterface +} + +// DataDownloadInterface has methods to work with DataDownload resources. +type DataDownloadInterface interface { + Create(ctx context.Context, dataDownload *v2alpha1.DataDownload, opts v1.CreateOptions) (*v2alpha1.DataDownload, error) + Update(ctx context.Context, dataDownload *v2alpha1.DataDownload, opts v1.UpdateOptions) (*v2alpha1.DataDownload, error) + UpdateStatus(ctx context.Context, dataDownload *v2alpha1.DataDownload, opts v1.UpdateOptions) (*v2alpha1.DataDownload, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.DataDownload, error) + List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.DataDownloadList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.DataDownload, err error) + DataDownloadExpansion +} + +// dataDownloads implements DataDownloadInterface +type dataDownloads struct { + client rest.Interface + ns string +} + +// newDataDownloads returns a DataDownloads +func newDataDownloads(c *VeleroV2alpha1Client, namespace string) *dataDownloads { + return &dataDownloads{ + client: c.RESTClient(), + ns: 
namespace, + } +} + +// Get takes name of the dataDownload, and returns the corresponding dataDownload object, and an error if there is any. +func (c *dataDownloads) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.DataDownload, err error) { + result = &v2alpha1.DataDownload{} + err = c.client.Get(). + Namespace(c.ns). + Resource("datadownloads"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DataDownloads that match those selectors. +func (c *dataDownloads) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.DataDownloadList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v2alpha1.DataDownloadList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("datadownloads"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested dataDownloads. +func (c *dataDownloads) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("datadownloads"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a dataDownload and creates it. Returns the server's representation of the dataDownload, and an error, if there is any. +func (c *dataDownloads) Create(ctx context.Context, dataDownload *v2alpha1.DataDownload, opts v1.CreateOptions) (result *v2alpha1.DataDownload, err error) { + result = &v2alpha1.DataDownload{} + err = c.client.Post(). + Namespace(c.ns). + Resource("datadownloads"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Body(dataDownload). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a dataDownload and updates it. Returns the server's representation of the dataDownload, and an error, if there is any. +func (c *dataDownloads) Update(ctx context.Context, dataDownload *v2alpha1.DataDownload, opts v1.UpdateOptions) (result *v2alpha1.DataDownload, err error) { + result = &v2alpha1.DataDownload{} + err = c.client.Put(). + Namespace(c.ns). + Resource("datadownloads"). + Name(dataDownload.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(dataDownload). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *dataDownloads) UpdateStatus(ctx context.Context, dataDownload *v2alpha1.DataDownload, opts v1.UpdateOptions) (result *v2alpha1.DataDownload, err error) { + result = &v2alpha1.DataDownload{} + err = c.client.Put(). + Namespace(c.ns). + Resource("datadownloads"). + Name(dataDownload.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(dataDownload). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the dataDownload and deletes it. Returns an error if one occurs. +func (c *dataDownloads) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("datadownloads"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *dataDownloads) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("datadownloads"). 
+ VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched dataDownload. +func (c *dataDownloads) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.DataDownload, err error) { + result = &v2alpha1.DataDownload{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("datadownloads"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/dataupload.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/dataupload.go new file mode 100644 index 000000000..4da27d527 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/dataupload.go @@ -0,0 +1,195 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v2alpha1 + +import ( + "context" + "time" + + v2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" + scheme "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// DataUploadsGetter has a method to return a DataUploadInterface. +// A group's client should implement this interface. +type DataUploadsGetter interface { + DataUploads(namespace string) DataUploadInterface +} + +// DataUploadInterface has methods to work with DataUpload resources. +type DataUploadInterface interface { + Create(ctx context.Context, dataUpload *v2alpha1.DataUpload, opts v1.CreateOptions) (*v2alpha1.DataUpload, error) + Update(ctx context.Context, dataUpload *v2alpha1.DataUpload, opts v1.UpdateOptions) (*v2alpha1.DataUpload, error) + UpdateStatus(ctx context.Context, dataUpload *v2alpha1.DataUpload, opts v1.UpdateOptions) (*v2alpha1.DataUpload, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.DataUpload, error) + List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.DataUploadList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.DataUpload, err error) + DataUploadExpansion +} + +// dataUploads implements DataUploadInterface +type dataUploads struct { + client rest.Interface + ns string +} + +// newDataUploads returns a DataUploads +func newDataUploads(c *VeleroV2alpha1Client, namespace string) *dataUploads { + return &dataUploads{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the dataUpload, and 
returns the corresponding dataUpload object, and an error if there is any. +func (c *dataUploads) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.DataUpload, err error) { + result = &v2alpha1.DataUpload{} + err = c.client.Get(). + Namespace(c.ns). + Resource("datauploads"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DataUploads that match those selectors. +func (c *dataUploads) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.DataUploadList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v2alpha1.DataUploadList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("datauploads"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested dataUploads. +func (c *dataUploads) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("datauploads"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a dataUpload and creates it. Returns the server's representation of the dataUpload, and an error, if there is any. +func (c *dataUploads) Create(ctx context.Context, dataUpload *v2alpha1.DataUpload, opts v1.CreateOptions) (result *v2alpha1.DataUpload, err error) { + result = &v2alpha1.DataUpload{} + err = c.client.Post(). + Namespace(c.ns). + Resource("datauploads"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(dataUpload). + Do(ctx). 
+ Into(result) + return +} + +// Update takes the representation of a dataUpload and updates it. Returns the server's representation of the dataUpload, and an error, if there is any. +func (c *dataUploads) Update(ctx context.Context, dataUpload *v2alpha1.DataUpload, opts v1.UpdateOptions) (result *v2alpha1.DataUpload, err error) { + result = &v2alpha1.DataUpload{} + err = c.client.Put(). + Namespace(c.ns). + Resource("datauploads"). + Name(dataUpload.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(dataUpload). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *dataUploads) UpdateStatus(ctx context.Context, dataUpload *v2alpha1.DataUpload, opts v1.UpdateOptions) (result *v2alpha1.DataUpload, err error) { + result = &v2alpha1.DataUpload{} + err = c.client.Put(). + Namespace(c.ns). + Resource("datauploads"). + Name(dataUpload.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(dataUpload). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the dataUpload and deletes it. Returns an error if one occurs. +func (c *dataUploads) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("datauploads"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *dataUploads) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("datauploads"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). 
+ Error() +} + +// Patch applies the patch and returns the patched dataUpload. +func (c *dataUploads) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.DataUpload, err error) { + result = &v2alpha1.DataUpload{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("datauploads"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/doc.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/doc.go new file mode 100644 index 000000000..18b5cb4d4 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v2alpha1 diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/generated_expansion.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/generated_expansion.go new file mode 100644 index 000000000..1ea0b5ae2 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +type DataDownloadExpansion interface{} + +type DataUploadExpansion interface{} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/velero_client.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/velero_client.go new file mode 100644 index 000000000..6b2ea0980 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1/velero_client.go @@ -0,0 +1,94 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + v2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" + "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type VeleroV2alpha1Interface interface { + RESTClient() rest.Interface + DataDownloadsGetter + DataUploadsGetter +} + +// VeleroV2alpha1Client is used to interact with features provided by the velero.io group. +type VeleroV2alpha1Client struct { + restClient rest.Interface +} + +func (c *VeleroV2alpha1Client) DataDownloads(namespace string) DataDownloadInterface { + return newDataDownloads(c, namespace) +} + +func (c *VeleroV2alpha1Client) DataUploads(namespace string) DataUploadInterface { + return newDataUploads(c, namespace) +} + +// NewForConfig creates a new VeleroV2alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*VeleroV2alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &VeleroV2alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new VeleroV2alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *VeleroV2alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new VeleroV2alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *VeleroV2alpha1Client { + return &VeleroV2alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v2alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *VeleroV2alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 000000000..4e78062c9 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package internalinterfaces + +import ( + time "time" + + versioned "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/backup.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/backup.go new file mode 100644 index 000000000..f874a2090 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/backup.go @@ -0,0 +1,90 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + time "time" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + versioned "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BackupInformer provides access to a shared informer and lister for +// Backups. +type BackupInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.BackupLister +} + +type backupInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewBackupInformer constructs a new informer for Backup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewBackupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBackupInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredBackupInformer constructs a new informer for Backup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredBackupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().Backups(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().Backups(namespace).Watch(context.TODO(), options) + }, + }, + &velerov1.Backup{}, + resyncPeriod, + indexers, + ) +} + +func (f *backupInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBackupInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *backupInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velerov1.Backup{}, f.defaultInformer) +} + +func (f *backupInformer) Lister() v1.BackupLister { + return v1.NewBackupLister(f.Informer().GetIndexer()) +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/backuprepository.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/backuprepository.go new file mode 100644 index 000000000..59865c894 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/backuprepository.go @@ -0,0 +1,90 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + versioned "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BackupRepositoryInformer provides access to a shared informer and lister for +// BackupRepositories. +type BackupRepositoryInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.BackupRepositoryLister +} + +type backupRepositoryInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewBackupRepositoryInformer constructs a new informer for BackupRepository type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewBackupRepositoryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBackupRepositoryInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredBackupRepositoryInformer constructs a new informer for BackupRepository type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredBackupRepositoryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().BackupRepositories(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().BackupRepositories(namespace).Watch(context.TODO(), options) + }, + }, + &velerov1.BackupRepository{}, + resyncPeriod, + indexers, + ) +} + +func (f *backupRepositoryInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBackupRepositoryInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *backupRepositoryInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velerov1.BackupRepository{}, f.defaultInformer) +} + +func (f *backupRepositoryInformer) Lister() v1.BackupRepositoryLister { + return v1.NewBackupRepositoryLister(f.Informer().GetIndexer()) +} diff --git 
a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/backupstoragelocation.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/backupstoragelocation.go new file mode 100644 index 000000000..4c732c8e6 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/backupstoragelocation.go @@ -0,0 +1,90 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + versioned "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BackupStorageLocationInformer provides access to a shared informer and lister for +// BackupStorageLocations. 
+type BackupStorageLocationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.BackupStorageLocationLister +} + +type backupStorageLocationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewBackupStorageLocationInformer constructs a new informer for BackupStorageLocation type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewBackupStorageLocationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBackupStorageLocationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredBackupStorageLocationInformer constructs a new informer for BackupStorageLocation type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredBackupStorageLocationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().BackupStorageLocations(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().BackupStorageLocations(namespace).Watch(context.TODO(), options) + }, + }, + &velerov1.BackupStorageLocation{}, + resyncPeriod, + indexers, + ) +} + +func (f *backupStorageLocationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBackupStorageLocationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *backupStorageLocationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velerov1.BackupStorageLocation{}, f.defaultInformer) +} + +func (f *backupStorageLocationInformer) Lister() v1.BackupStorageLocationLister { + return v1.NewBackupStorageLocationLister(f.Informer().GetIndexer()) +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/deletebackuprequest.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/deletebackuprequest.go new file mode 100644 index 000000000..7019d3bff --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/deletebackuprequest.go @@ -0,0 +1,90 @@ +/* +Copyright the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + versioned "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// DeleteBackupRequestInformer provides access to a shared informer and lister for +// DeleteBackupRequests. +type DeleteBackupRequestInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.DeleteBackupRequestLister +} + +type deleteBackupRequestInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDeleteBackupRequestInformer constructs a new informer for DeleteBackupRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewDeleteBackupRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeleteBackupRequestInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeleteBackupRequestInformer constructs a new informer for DeleteBackupRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDeleteBackupRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().DeleteBackupRequests(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().DeleteBackupRequests(namespace).Watch(context.TODO(), options) + }, + }, + &velerov1.DeleteBackupRequest{}, + resyncPeriod, + indexers, + ) +} + +func (f *deleteBackupRequestInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeleteBackupRequestInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deleteBackupRequestInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velerov1.DeleteBackupRequest{}, f.defaultInformer) +} + +func (f *deleteBackupRequestInformer) Lister() v1.DeleteBackupRequestLister { + return 
v1.NewDeleteBackupRequestLister(f.Informer().GetIndexer()) +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/downloadrequest.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/downloadrequest.go new file mode 100644 index 000000000..23d91e399 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/downloadrequest.go @@ -0,0 +1,90 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + versioned "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// DownloadRequestInformer provides access to a shared informer and lister for +// DownloadRequests. 
+type DownloadRequestInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.DownloadRequestLister +} + +type downloadRequestInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDownloadRequestInformer constructs a new informer for DownloadRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDownloadRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDownloadRequestInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDownloadRequestInformer constructs a new informer for DownloadRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDownloadRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().DownloadRequests(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().DownloadRequests(namespace).Watch(context.TODO(), options) + }, + }, + &velerov1.DownloadRequest{}, + resyncPeriod, + indexers, + ) +} + +func (f *downloadRequestInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDownloadRequestInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *downloadRequestInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velerov1.DownloadRequest{}, f.defaultInformer) +} + +func (f *downloadRequestInformer) Lister() v1.DownloadRequestLister { + return v1.NewDownloadRequestLister(f.Informer().GetIndexer()) +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/interface.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/interface.go new file mode 100644 index 000000000..087dd3356 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/interface.go @@ -0,0 +1,115 @@ +/* +Copyright the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Backups returns a BackupInformer. + Backups() BackupInformer + // BackupRepositories returns a BackupRepositoryInformer. + BackupRepositories() BackupRepositoryInformer + // BackupStorageLocations returns a BackupStorageLocationInformer. + BackupStorageLocations() BackupStorageLocationInformer + // DeleteBackupRequests returns a DeleteBackupRequestInformer. + DeleteBackupRequests() DeleteBackupRequestInformer + // DownloadRequests returns a DownloadRequestInformer. + DownloadRequests() DownloadRequestInformer + // PodVolumeBackups returns a PodVolumeBackupInformer. + PodVolumeBackups() PodVolumeBackupInformer + // PodVolumeRestores returns a PodVolumeRestoreInformer. + PodVolumeRestores() PodVolumeRestoreInformer + // Restores returns a RestoreInformer. + Restores() RestoreInformer + // Schedules returns a ScheduleInformer. + Schedules() ScheduleInformer + // ServerStatusRequests returns a ServerStatusRequestInformer. + ServerStatusRequests() ServerStatusRequestInformer + // VolumeSnapshotLocations returns a VolumeSnapshotLocationInformer. 
+ VolumeSnapshotLocations() VolumeSnapshotLocationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Backups returns a BackupInformer. +func (v *version) Backups() BackupInformer { + return &backupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// BackupRepositories returns a BackupRepositoryInformer. +func (v *version) BackupRepositories() BackupRepositoryInformer { + return &backupRepositoryInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// BackupStorageLocations returns a BackupStorageLocationInformer. +func (v *version) BackupStorageLocations() BackupStorageLocationInformer { + return &backupStorageLocationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// DeleteBackupRequests returns a DeleteBackupRequestInformer. +func (v *version) DeleteBackupRequests() DeleteBackupRequestInformer { + return &deleteBackupRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// DownloadRequests returns a DownloadRequestInformer. +func (v *version) DownloadRequests() DownloadRequestInformer { + return &downloadRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// PodVolumeBackups returns a PodVolumeBackupInformer. 
+func (v *version) PodVolumeBackups() PodVolumeBackupInformer { + return &podVolumeBackupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// PodVolumeRestores returns a PodVolumeRestoreInformer. +func (v *version) PodVolumeRestores() PodVolumeRestoreInformer { + return &podVolumeRestoreInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Restores returns a RestoreInformer. +func (v *version) Restores() RestoreInformer { + return &restoreInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Schedules returns a ScheduleInformer. +func (v *version) Schedules() ScheduleInformer { + return &scheduleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ServerStatusRequests returns a ServerStatusRequestInformer. +func (v *version) ServerStatusRequests() ServerStatusRequestInformer { + return &serverStatusRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// VolumeSnapshotLocations returns a VolumeSnapshotLocationInformer. +func (v *version) VolumeSnapshotLocations() VolumeSnapshotLocationInformer { + return &volumeSnapshotLocationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/podvolumebackup.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/podvolumebackup.go new file mode 100644 index 000000000..d2835b2ea --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/podvolumebackup.go @@ -0,0 +1,90 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + versioned "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PodVolumeBackupInformer provides access to a shared informer and lister for +// PodVolumeBackups. +type PodVolumeBackupInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PodVolumeBackupLister +} + +type podVolumeBackupInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodVolumeBackupInformer constructs a new informer for PodVolumeBackup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewPodVolumeBackupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodVolumeBackupInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodVolumeBackupInformer constructs a new informer for PodVolumeBackup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodVolumeBackupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().PodVolumeBackups(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().PodVolumeBackups(namespace).Watch(context.TODO(), options) + }, + }, + &velerov1.PodVolumeBackup{}, + resyncPeriod, + indexers, + ) +} + +func (f *podVolumeBackupInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodVolumeBackupInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podVolumeBackupInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velerov1.PodVolumeBackup{}, f.defaultInformer) +} + +func (f *podVolumeBackupInformer) Lister() v1.PodVolumeBackupLister { + return v1.NewPodVolumeBackupLister(f.Informer().GetIndexer()) +} diff --git 
a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/podvolumerestore.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/podvolumerestore.go new file mode 100644 index 000000000..eccad43b2 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/podvolumerestore.go @@ -0,0 +1,90 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + versioned "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PodVolumeRestoreInformer provides access to a shared informer and lister for +// PodVolumeRestores. 
+type PodVolumeRestoreInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PodVolumeRestoreLister +} + +type podVolumeRestoreInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodVolumeRestoreInformer constructs a new informer for PodVolumeRestore type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodVolumeRestoreInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodVolumeRestoreInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodVolumeRestoreInformer constructs a new informer for PodVolumeRestore type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPodVolumeRestoreInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().PodVolumeRestores(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().PodVolumeRestores(namespace).Watch(context.TODO(), options) + }, + }, + &velerov1.PodVolumeRestore{}, + resyncPeriod, + indexers, + ) +} + +func (f *podVolumeRestoreInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodVolumeRestoreInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podVolumeRestoreInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velerov1.PodVolumeRestore{}, f.defaultInformer) +} + +func (f *podVolumeRestoreInformer) Lister() v1.PodVolumeRestoreLister { + return v1.NewPodVolumeRestoreLister(f.Informer().GetIndexer()) +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/restore.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/restore.go new file mode 100644 index 000000000..691d1b7e8 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/restore.go @@ -0,0 +1,90 @@ +/* +Copyright the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + versioned "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// RestoreInformer provides access to a shared informer and lister for +// Restores. +type RestoreInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.RestoreLister +} + +type restoreInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRestoreInformer constructs a new informer for Restore type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewRestoreInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRestoreInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRestoreInformer constructs a new informer for Restore type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRestoreInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().Restores(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().Restores(namespace).Watch(context.TODO(), options) + }, + }, + &velerov1.Restore{}, + resyncPeriod, + indexers, + ) +} + +func (f *restoreInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRestoreInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *restoreInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velerov1.Restore{}, f.defaultInformer) +} + +func (f *restoreInformer) Lister() v1.RestoreLister { + return v1.NewRestoreLister(f.Informer().GetIndexer()) +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/schedule.go 
b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/schedule.go new file mode 100644 index 000000000..31114d809 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/schedule.go @@ -0,0 +1,90 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + versioned "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ScheduleInformer provides access to a shared informer and lister for +// Schedules. +type ScheduleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ScheduleLister +} + +type scheduleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewScheduleInformer constructs a new informer for Schedule type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewScheduleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredScheduleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredScheduleInformer constructs a new informer for Schedule type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredScheduleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().Schedules(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().Schedules(namespace).Watch(context.TODO(), options) + }, + }, + &velerov1.Schedule{}, + resyncPeriod, + indexers, + ) +} + +func (f *scheduleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredScheduleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *scheduleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velerov1.Schedule{}, f.defaultInformer) +} + +func (f *scheduleInformer) Lister() v1.ScheduleLister { + return 
v1.NewScheduleLister(f.Informer().GetIndexer()) +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/serverstatusrequest.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/serverstatusrequest.go new file mode 100644 index 000000000..53290d408 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/serverstatusrequest.go @@ -0,0 +1,90 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + versioned "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ServerStatusRequestInformer provides access to a shared informer and lister for +// ServerStatusRequests. 
+type ServerStatusRequestInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ServerStatusRequestLister +} + +type serverStatusRequestInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewServerStatusRequestInformer constructs a new informer for ServerStatusRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewServerStatusRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServerStatusRequestInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServerStatusRequestInformer constructs a new informer for ServerStatusRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredServerStatusRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().ServerStatusRequests(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().ServerStatusRequests(namespace).Watch(context.TODO(), options) + }, + }, + &velerov1.ServerStatusRequest{}, + resyncPeriod, + indexers, + ) +} + +func (f *serverStatusRequestInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServerStatusRequestInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serverStatusRequestInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velerov1.ServerStatusRequest{}, f.defaultInformer) +} + +func (f *serverStatusRequestInformer) Lister() v1.ServerStatusRequestLister { + return v1.NewServerStatusRequestLister(f.Informer().GetIndexer()) +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/volumesnapshotlocation.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/volumesnapshotlocation.go new file mode 100644 index 000000000..3b6c1eca1 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1/volumesnapshotlocation.go @@ -0,0 +1,90 @@ +/* +Copyright the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + versioned "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// VolumeSnapshotLocationInformer provides access to a shared informer and lister for +// VolumeSnapshotLocations. +type VolumeSnapshotLocationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.VolumeSnapshotLocationLister +} + +type volumeSnapshotLocationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewVolumeSnapshotLocationInformer constructs a new informer for VolumeSnapshotLocation type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewVolumeSnapshotLocationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotLocationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeSnapshotLocationInformer constructs a new informer for VolumeSnapshotLocation type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredVolumeSnapshotLocationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().VolumeSnapshotLocations(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().VolumeSnapshotLocations(namespace).Watch(context.TODO(), options) + }, + }, + &velerov1.VolumeSnapshotLocation{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeSnapshotLocationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotLocationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeSnapshotLocationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velerov1.VolumeSnapshotLocation{}, f.defaultInformer) +} + +func (f *volumeSnapshotLocationInformer) Lister() v1.VolumeSnapshotLocationLister { + return 
v1.NewVolumeSnapshotLocationLister(f.Informer().GetIndexer()) +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/backup.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/backup.go new file mode 100644 index 000000000..fa3f5cb6f --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/backup.go @@ -0,0 +1,99 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// BackupLister helps list Backups. +// All objects returned here must be treated as read-only. +type BackupLister interface { + // List lists all Backups in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Backup, err error) + // Backups returns an object that can list and get Backups. + Backups(namespace string) BackupNamespaceLister + BackupListerExpansion +} + +// backupLister implements the BackupLister interface. +type backupLister struct { + indexer cache.Indexer +} + +// NewBackupLister returns a new BackupLister. +func NewBackupLister(indexer cache.Indexer) BackupLister { + return &backupLister{indexer: indexer} +} + +// List lists all Backups in the indexer. 
+func (s *backupLister) List(selector labels.Selector) (ret []*v1.Backup, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Backup)) + }) + return ret, err +} + +// Backups returns an object that can list and get Backups. +func (s *backupLister) Backups(namespace string) BackupNamespaceLister { + return backupNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// BackupNamespaceLister helps list and get Backups. +// All objects returned here must be treated as read-only. +type BackupNamespaceLister interface { + // List lists all Backups in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Backup, err error) + // Get retrieves the Backup from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Backup, error) + BackupNamespaceListerExpansion +} + +// backupNamespaceLister implements the BackupNamespaceLister +// interface. +type backupNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Backups in the indexer for a given namespace. +func (s backupNamespaceLister) List(selector labels.Selector) (ret []*v1.Backup, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Backup)) + }) + return ret, err +} + +// Get retrieves the Backup from the indexer for a given namespace and name. 
+func (s backupNamespaceLister) Get(name string) (*v1.Backup, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("backup"), name) + } + return obj.(*v1.Backup), nil +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/backuprepository.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/backuprepository.go new file mode 100644 index 000000000..ef619baf1 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/backuprepository.go @@ -0,0 +1,99 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// BackupRepositoryLister helps list BackupRepositories. +// All objects returned here must be treated as read-only. +type BackupRepositoryLister interface { + // List lists all BackupRepositories in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.BackupRepository, err error) + // BackupRepositories returns an object that can list and get BackupRepositories. 
+ BackupRepositories(namespace string) BackupRepositoryNamespaceLister + BackupRepositoryListerExpansion +} + +// backupRepositoryLister implements the BackupRepositoryLister interface. +type backupRepositoryLister struct { + indexer cache.Indexer +} + +// NewBackupRepositoryLister returns a new BackupRepositoryLister. +func NewBackupRepositoryLister(indexer cache.Indexer) BackupRepositoryLister { + return &backupRepositoryLister{indexer: indexer} +} + +// List lists all BackupRepositories in the indexer. +func (s *backupRepositoryLister) List(selector labels.Selector) (ret []*v1.BackupRepository, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.BackupRepository)) + }) + return ret, err +} + +// BackupRepositories returns an object that can list and get BackupRepositories. +func (s *backupRepositoryLister) BackupRepositories(namespace string) BackupRepositoryNamespaceLister { + return backupRepositoryNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// BackupRepositoryNamespaceLister helps list and get BackupRepositories. +// All objects returned here must be treated as read-only. +type BackupRepositoryNamespaceLister interface { + // List lists all BackupRepositories in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.BackupRepository, err error) + // Get retrieves the BackupRepository from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.BackupRepository, error) + BackupRepositoryNamespaceListerExpansion +} + +// backupRepositoryNamespaceLister implements the BackupRepositoryNamespaceLister +// interface. +type backupRepositoryNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all BackupRepositories in the indexer for a given namespace. 
+func (s backupRepositoryNamespaceLister) List(selector labels.Selector) (ret []*v1.BackupRepository, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.BackupRepository)) + }) + return ret, err +} + +// Get retrieves the BackupRepository from the indexer for a given namespace and name. +func (s backupRepositoryNamespaceLister) Get(name string) (*v1.BackupRepository, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("backuprepository"), name) + } + return obj.(*v1.BackupRepository), nil +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/backupstoragelocation.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/backupstoragelocation.go new file mode 100644 index 000000000..74daf16dc --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/backupstoragelocation.go @@ -0,0 +1,99 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// BackupStorageLocationLister helps list BackupStorageLocations. +// All objects returned here must be treated as read-only. +type BackupStorageLocationLister interface { + // List lists all BackupStorageLocations in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.BackupStorageLocation, err error) + // BackupStorageLocations returns an object that can list and get BackupStorageLocations. + BackupStorageLocations(namespace string) BackupStorageLocationNamespaceLister + BackupStorageLocationListerExpansion +} + +// backupStorageLocationLister implements the BackupStorageLocationLister interface. +type backupStorageLocationLister struct { + indexer cache.Indexer +} + +// NewBackupStorageLocationLister returns a new BackupStorageLocationLister. +func NewBackupStorageLocationLister(indexer cache.Indexer) BackupStorageLocationLister { + return &backupStorageLocationLister{indexer: indexer} +} + +// List lists all BackupStorageLocations in the indexer. +func (s *backupStorageLocationLister) List(selector labels.Selector) (ret []*v1.BackupStorageLocation, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.BackupStorageLocation)) + }) + return ret, err +} + +// BackupStorageLocations returns an object that can list and get BackupStorageLocations. +func (s *backupStorageLocationLister) BackupStorageLocations(namespace string) BackupStorageLocationNamespaceLister { + return backupStorageLocationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// BackupStorageLocationNamespaceLister helps list and get BackupStorageLocations. +// All objects returned here must be treated as read-only. 
+type BackupStorageLocationNamespaceLister interface { + // List lists all BackupStorageLocations in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.BackupStorageLocation, err error) + // Get retrieves the BackupStorageLocation from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.BackupStorageLocation, error) + BackupStorageLocationNamespaceListerExpansion +} + +// backupStorageLocationNamespaceLister implements the BackupStorageLocationNamespaceLister +// interface. +type backupStorageLocationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all BackupStorageLocations in the indexer for a given namespace. +func (s backupStorageLocationNamespaceLister) List(selector labels.Selector) (ret []*v1.BackupStorageLocation, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.BackupStorageLocation)) + }) + return ret, err +} + +// Get retrieves the BackupStorageLocation from the indexer for a given namespace and name. 
+func (s backupStorageLocationNamespaceLister) Get(name string) (*v1.BackupStorageLocation, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("backupstoragelocation"), name) + } + return obj.(*v1.BackupStorageLocation), nil +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/deletebackuprequest.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/deletebackuprequest.go new file mode 100644 index 000000000..954e9aaf8 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/deletebackuprequest.go @@ -0,0 +1,99 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DeleteBackupRequestLister helps list DeleteBackupRequests. +// All objects returned here must be treated as read-only. +type DeleteBackupRequestLister interface { + // List lists all DeleteBackupRequests in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1.DeleteBackupRequest, err error) + // DeleteBackupRequests returns an object that can list and get DeleteBackupRequests. + DeleteBackupRequests(namespace string) DeleteBackupRequestNamespaceLister + DeleteBackupRequestListerExpansion +} + +// deleteBackupRequestLister implements the DeleteBackupRequestLister interface. +type deleteBackupRequestLister struct { + indexer cache.Indexer +} + +// NewDeleteBackupRequestLister returns a new DeleteBackupRequestLister. +func NewDeleteBackupRequestLister(indexer cache.Indexer) DeleteBackupRequestLister { + return &deleteBackupRequestLister{indexer: indexer} +} + +// List lists all DeleteBackupRequests in the indexer. +func (s *deleteBackupRequestLister) List(selector labels.Selector) (ret []*v1.DeleteBackupRequest, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DeleteBackupRequest)) + }) + return ret, err +} + +// DeleteBackupRequests returns an object that can list and get DeleteBackupRequests. +func (s *deleteBackupRequestLister) DeleteBackupRequests(namespace string) DeleteBackupRequestNamespaceLister { + return deleteBackupRequestNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DeleteBackupRequestNamespaceLister helps list and get DeleteBackupRequests. +// All objects returned here must be treated as read-only. +type DeleteBackupRequestNamespaceLister interface { + // List lists all DeleteBackupRequests in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.DeleteBackupRequest, err error) + // Get retrieves the DeleteBackupRequest from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*v1.DeleteBackupRequest, error) + DeleteBackupRequestNamespaceListerExpansion +} + +// deleteBackupRequestNamespaceLister implements the DeleteBackupRequestNamespaceLister +// interface. +type deleteBackupRequestNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DeleteBackupRequests in the indexer for a given namespace. +func (s deleteBackupRequestNamespaceLister) List(selector labels.Selector) (ret []*v1.DeleteBackupRequest, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DeleteBackupRequest)) + }) + return ret, err +} + +// Get retrieves the DeleteBackupRequest from the indexer for a given namespace and name. +func (s deleteBackupRequestNamespaceLister) Get(name string) (*v1.DeleteBackupRequest, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("deletebackuprequest"), name) + } + return obj.(*v1.DeleteBackupRequest), nil +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/downloadrequest.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/downloadrequest.go new file mode 100644 index 000000000..6552cf02d --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/downloadrequest.go @@ -0,0 +1,99 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DownloadRequestLister helps list DownloadRequests. +// All objects returned here must be treated as read-only. +type DownloadRequestLister interface { + // List lists all DownloadRequests in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.DownloadRequest, err error) + // DownloadRequests returns an object that can list and get DownloadRequests. + DownloadRequests(namespace string) DownloadRequestNamespaceLister + DownloadRequestListerExpansion +} + +// downloadRequestLister implements the DownloadRequestLister interface. +type downloadRequestLister struct { + indexer cache.Indexer +} + +// NewDownloadRequestLister returns a new DownloadRequestLister. +func NewDownloadRequestLister(indexer cache.Indexer) DownloadRequestLister { + return &downloadRequestLister{indexer: indexer} +} + +// List lists all DownloadRequests in the indexer. +func (s *downloadRequestLister) List(selector labels.Selector) (ret []*v1.DownloadRequest, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DownloadRequest)) + }) + return ret, err +} + +// DownloadRequests returns an object that can list and get DownloadRequests. +func (s *downloadRequestLister) DownloadRequests(namespace string) DownloadRequestNamespaceLister { + return downloadRequestNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DownloadRequestNamespaceLister helps list and get DownloadRequests. +// All objects returned here must be treated as read-only. 
+type DownloadRequestNamespaceLister interface { + // List lists all DownloadRequests in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.DownloadRequest, err error) + // Get retrieves the DownloadRequest from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.DownloadRequest, error) + DownloadRequestNamespaceListerExpansion +} + +// downloadRequestNamespaceLister implements the DownloadRequestNamespaceLister +// interface. +type downloadRequestNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DownloadRequests in the indexer for a given namespace. +func (s downloadRequestNamespaceLister) List(selector labels.Selector) (ret []*v1.DownloadRequest, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DownloadRequest)) + }) + return ret, err +} + +// Get retrieves the DownloadRequest from the indexer for a given namespace and name. +func (s downloadRequestNamespaceLister) Get(name string) (*v1.DownloadRequest, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("downloadrequest"), name) + } + return obj.(*v1.DownloadRequest), nil +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/expansion_generated.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/expansion_generated.go new file mode 100644 index 000000000..c0cd57654 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/expansion_generated.go @@ -0,0 +1,107 @@ +/* +Copyright the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// BackupListerExpansion allows custom methods to be added to +// BackupLister. +type BackupListerExpansion interface{} + +// BackupNamespaceListerExpansion allows custom methods to be added to +// BackupNamespaceLister. +type BackupNamespaceListerExpansion interface{} + +// BackupRepositoryListerExpansion allows custom methods to be added to +// BackupRepositoryLister. +type BackupRepositoryListerExpansion interface{} + +// BackupRepositoryNamespaceListerExpansion allows custom methods to be added to +// BackupRepositoryNamespaceLister. +type BackupRepositoryNamespaceListerExpansion interface{} + +// BackupStorageLocationListerExpansion allows custom methods to be added to +// BackupStorageLocationLister. +type BackupStorageLocationListerExpansion interface{} + +// BackupStorageLocationNamespaceListerExpansion allows custom methods to be added to +// BackupStorageLocationNamespaceLister. +type BackupStorageLocationNamespaceListerExpansion interface{} + +// DeleteBackupRequestListerExpansion allows custom methods to be added to +// DeleteBackupRequestLister. +type DeleteBackupRequestListerExpansion interface{} + +// DeleteBackupRequestNamespaceListerExpansion allows custom methods to be added to +// DeleteBackupRequestNamespaceLister. 
+type DeleteBackupRequestNamespaceListerExpansion interface{} + +// DownloadRequestListerExpansion allows custom methods to be added to +// DownloadRequestLister. +type DownloadRequestListerExpansion interface{} + +// DownloadRequestNamespaceListerExpansion allows custom methods to be added to +// DownloadRequestNamespaceLister. +type DownloadRequestNamespaceListerExpansion interface{} + +// PodVolumeBackupListerExpansion allows custom methods to be added to +// PodVolumeBackupLister. +type PodVolumeBackupListerExpansion interface{} + +// PodVolumeBackupNamespaceListerExpansion allows custom methods to be added to +// PodVolumeBackupNamespaceLister. +type PodVolumeBackupNamespaceListerExpansion interface{} + +// PodVolumeRestoreListerExpansion allows custom methods to be added to +// PodVolumeRestoreLister. +type PodVolumeRestoreListerExpansion interface{} + +// PodVolumeRestoreNamespaceListerExpansion allows custom methods to be added to +// PodVolumeRestoreNamespaceLister. +type PodVolumeRestoreNamespaceListerExpansion interface{} + +// RestoreListerExpansion allows custom methods to be added to +// RestoreLister. +type RestoreListerExpansion interface{} + +// RestoreNamespaceListerExpansion allows custom methods to be added to +// RestoreNamespaceLister. +type RestoreNamespaceListerExpansion interface{} + +// ScheduleListerExpansion allows custom methods to be added to +// ScheduleLister. +type ScheduleListerExpansion interface{} + +// ScheduleNamespaceListerExpansion allows custom methods to be added to +// ScheduleNamespaceLister. +type ScheduleNamespaceListerExpansion interface{} + +// ServerStatusRequestListerExpansion allows custom methods to be added to +// ServerStatusRequestLister. +type ServerStatusRequestListerExpansion interface{} + +// ServerStatusRequestNamespaceListerExpansion allows custom methods to be added to +// ServerStatusRequestNamespaceLister. 
+type ServerStatusRequestNamespaceListerExpansion interface{} + +// VolumeSnapshotLocationListerExpansion allows custom methods to be added to +// VolumeSnapshotLocationLister. +type VolumeSnapshotLocationListerExpansion interface{} + +// VolumeSnapshotLocationNamespaceListerExpansion allows custom methods to be added to +// VolumeSnapshotLocationNamespaceLister. +type VolumeSnapshotLocationNamespaceListerExpansion interface{} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/podvolumebackup.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/podvolumebackup.go new file mode 100644 index 000000000..08ed20d6f --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/podvolumebackup.go @@ -0,0 +1,99 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodVolumeBackupLister helps list PodVolumeBackups. +// All objects returned here must be treated as read-only. +type PodVolumeBackupLister interface { + // List lists all PodVolumeBackups in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1.PodVolumeBackup, err error) + // PodVolumeBackups returns an object that can list and get PodVolumeBackups. + PodVolumeBackups(namespace string) PodVolumeBackupNamespaceLister + PodVolumeBackupListerExpansion +} + +// podVolumeBackupLister implements the PodVolumeBackupLister interface. +type podVolumeBackupLister struct { + indexer cache.Indexer +} + +// NewPodVolumeBackupLister returns a new PodVolumeBackupLister. +func NewPodVolumeBackupLister(indexer cache.Indexer) PodVolumeBackupLister { + return &podVolumeBackupLister{indexer: indexer} +} + +// List lists all PodVolumeBackups in the indexer. +func (s *podVolumeBackupLister) List(selector labels.Selector) (ret []*v1.PodVolumeBackup, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodVolumeBackup)) + }) + return ret, err +} + +// PodVolumeBackups returns an object that can list and get PodVolumeBackups. +func (s *podVolumeBackupLister) PodVolumeBackups(namespace string) PodVolumeBackupNamespaceLister { + return podVolumeBackupNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodVolumeBackupNamespaceLister helps list and get PodVolumeBackups. +// All objects returned here must be treated as read-only. +type PodVolumeBackupNamespaceLister interface { + // List lists all PodVolumeBackups in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PodVolumeBackup, err error) + // Get retrieves the PodVolumeBackup from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.PodVolumeBackup, error) + PodVolumeBackupNamespaceListerExpansion +} + +// podVolumeBackupNamespaceLister implements the PodVolumeBackupNamespaceLister +// interface. 
+type podVolumeBackupNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodVolumeBackups in the indexer for a given namespace. +func (s podVolumeBackupNamespaceLister) List(selector labels.Selector) (ret []*v1.PodVolumeBackup, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodVolumeBackup)) + }) + return ret, err +} + +// Get retrieves the PodVolumeBackup from the indexer for a given namespace and name. +func (s podVolumeBackupNamespaceLister) Get(name string) (*v1.PodVolumeBackup, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("podvolumebackup"), name) + } + return obj.(*v1.PodVolumeBackup), nil +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/podvolumerestore.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/podvolumerestore.go new file mode 100644 index 000000000..93f96b24b --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/podvolumerestore.go @@ -0,0 +1,99 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodVolumeRestoreLister helps list PodVolumeRestores. +// All objects returned here must be treated as read-only. +type PodVolumeRestoreLister interface { + // List lists all PodVolumeRestores in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PodVolumeRestore, err error) + // PodVolumeRestores returns an object that can list and get PodVolumeRestores. + PodVolumeRestores(namespace string) PodVolumeRestoreNamespaceLister + PodVolumeRestoreListerExpansion +} + +// podVolumeRestoreLister implements the PodVolumeRestoreLister interface. +type podVolumeRestoreLister struct { + indexer cache.Indexer +} + +// NewPodVolumeRestoreLister returns a new PodVolumeRestoreLister. +func NewPodVolumeRestoreLister(indexer cache.Indexer) PodVolumeRestoreLister { + return &podVolumeRestoreLister{indexer: indexer} +} + +// List lists all PodVolumeRestores in the indexer. +func (s *podVolumeRestoreLister) List(selector labels.Selector) (ret []*v1.PodVolumeRestore, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodVolumeRestore)) + }) + return ret, err +} + +// PodVolumeRestores returns an object that can list and get PodVolumeRestores. +func (s *podVolumeRestoreLister) PodVolumeRestores(namespace string) PodVolumeRestoreNamespaceLister { + return podVolumeRestoreNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodVolumeRestoreNamespaceLister helps list and get PodVolumeRestores. +// All objects returned here must be treated as read-only. +type PodVolumeRestoreNamespaceLister interface { + // List lists all PodVolumeRestores in the indexer for a given namespace. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1.PodVolumeRestore, err error) + // Get retrieves the PodVolumeRestore from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.PodVolumeRestore, error) + PodVolumeRestoreNamespaceListerExpansion +} + +// podVolumeRestoreNamespaceLister implements the PodVolumeRestoreNamespaceLister +// interface. +type podVolumeRestoreNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodVolumeRestores in the indexer for a given namespace. +func (s podVolumeRestoreNamespaceLister) List(selector labels.Selector) (ret []*v1.PodVolumeRestore, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodVolumeRestore)) + }) + return ret, err +} + +// Get retrieves the PodVolumeRestore from the indexer for a given namespace and name. +func (s podVolumeRestoreNamespaceLister) Get(name string) (*v1.PodVolumeRestore, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("podvolumerestore"), name) + } + return obj.(*v1.PodVolumeRestore), nil +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/restore.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/restore.go new file mode 100644 index 000000000..de0b89ce8 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/restore.go @@ -0,0 +1,99 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RestoreLister helps list Restores. +// All objects returned here must be treated as read-only. +type RestoreLister interface { + // List lists all Restores in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Restore, err error) + // Restores returns an object that can list and get Restores. + Restores(namespace string) RestoreNamespaceLister + RestoreListerExpansion +} + +// restoreLister implements the RestoreLister interface. +type restoreLister struct { + indexer cache.Indexer +} + +// NewRestoreLister returns a new RestoreLister. +func NewRestoreLister(indexer cache.Indexer) RestoreLister { + return &restoreLister{indexer: indexer} +} + +// List lists all Restores in the indexer. +func (s *restoreLister) List(selector labels.Selector) (ret []*v1.Restore, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Restore)) + }) + return ret, err +} + +// Restores returns an object that can list and get Restores. +func (s *restoreLister) Restores(namespace string) RestoreNamespaceLister { + return restoreNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RestoreNamespaceLister helps list and get Restores. +// All objects returned here must be treated as read-only. 
+type RestoreNamespaceLister interface { + // List lists all Restores in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Restore, err error) + // Get retrieves the Restore from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Restore, error) + RestoreNamespaceListerExpansion +} + +// restoreNamespaceLister implements the RestoreNamespaceLister +// interface. +type restoreNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Restores in the indexer for a given namespace. +func (s restoreNamespaceLister) List(selector labels.Selector) (ret []*v1.Restore, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Restore)) + }) + return ret, err +} + +// Get retrieves the Restore from the indexer for a given namespace and name. +func (s restoreNamespaceLister) Get(name string) (*v1.Restore, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("restore"), name) + } + return obj.(*v1.Restore), nil +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/schedule.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/schedule.go new file mode 100644 index 000000000..90a262a46 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/schedule.go @@ -0,0 +1,99 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ScheduleLister helps list Schedules. +// All objects returned here must be treated as read-only. +type ScheduleLister interface { + // List lists all Schedules in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Schedule, err error) + // Schedules returns an object that can list and get Schedules. + Schedules(namespace string) ScheduleNamespaceLister + ScheduleListerExpansion +} + +// scheduleLister implements the ScheduleLister interface. +type scheduleLister struct { + indexer cache.Indexer +} + +// NewScheduleLister returns a new ScheduleLister. +func NewScheduleLister(indexer cache.Indexer) ScheduleLister { + return &scheduleLister{indexer: indexer} +} + +// List lists all Schedules in the indexer. +func (s *scheduleLister) List(selector labels.Selector) (ret []*v1.Schedule, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Schedule)) + }) + return ret, err +} + +// Schedules returns an object that can list and get Schedules. +func (s *scheduleLister) Schedules(namespace string) ScheduleNamespaceLister { + return scheduleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ScheduleNamespaceLister helps list and get Schedules. +// All objects returned here must be treated as read-only. 
+type ScheduleNamespaceLister interface { + // List lists all Schedules in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Schedule, err error) + // Get retrieves the Schedule from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Schedule, error) + ScheduleNamespaceListerExpansion +} + +// scheduleNamespaceLister implements the ScheduleNamespaceLister +// interface. +type scheduleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Schedules in the indexer for a given namespace. +func (s scheduleNamespaceLister) List(selector labels.Selector) (ret []*v1.Schedule, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Schedule)) + }) + return ret, err +} + +// Get retrieves the Schedule from the indexer for a given namespace and name. +func (s scheduleNamespaceLister) Get(name string) (*v1.Schedule, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("schedule"), name) + } + return obj.(*v1.Schedule), nil +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/serverstatusrequest.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/serverstatusrequest.go new file mode 100644 index 000000000..c03b60c48 --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/serverstatusrequest.go @@ -0,0 +1,99 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ServerStatusRequestLister helps list ServerStatusRequests. +// All objects returned here must be treated as read-only. +type ServerStatusRequestLister interface { + // List lists all ServerStatusRequests in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ServerStatusRequest, err error) + // ServerStatusRequests returns an object that can list and get ServerStatusRequests. + ServerStatusRequests(namespace string) ServerStatusRequestNamespaceLister + ServerStatusRequestListerExpansion +} + +// serverStatusRequestLister implements the ServerStatusRequestLister interface. +type serverStatusRequestLister struct { + indexer cache.Indexer +} + +// NewServerStatusRequestLister returns a new ServerStatusRequestLister. +func NewServerStatusRequestLister(indexer cache.Indexer) ServerStatusRequestLister { + return &serverStatusRequestLister{indexer: indexer} +} + +// List lists all ServerStatusRequests in the indexer. +func (s *serverStatusRequestLister) List(selector labels.Selector) (ret []*v1.ServerStatusRequest, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ServerStatusRequest)) + }) + return ret, err +} + +// ServerStatusRequests returns an object that can list and get ServerStatusRequests. 
+func (s *serverStatusRequestLister) ServerStatusRequests(namespace string) ServerStatusRequestNamespaceLister { + return serverStatusRequestNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServerStatusRequestNamespaceLister helps list and get ServerStatusRequests. +// All objects returned here must be treated as read-only. +type ServerStatusRequestNamespaceLister interface { + // List lists all ServerStatusRequests in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ServerStatusRequest, err error) + // Get retrieves the ServerStatusRequest from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ServerStatusRequest, error) + ServerStatusRequestNamespaceListerExpansion +} + +// serverStatusRequestNamespaceLister implements the ServerStatusRequestNamespaceLister +// interface. +type serverStatusRequestNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ServerStatusRequests in the indexer for a given namespace. +func (s serverStatusRequestNamespaceLister) List(selector labels.Selector) (ret []*v1.ServerStatusRequest, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ServerStatusRequest)) + }) + return ret, err +} + +// Get retrieves the ServerStatusRequest from the indexer for a given namespace and name. 
+func (s serverStatusRequestNamespaceLister) Get(name string) (*v1.ServerStatusRequest, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("serverstatusrequest"), name) + } + return obj.(*v1.ServerStatusRequest), nil +} diff --git a/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/volumesnapshotlocation.go b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/volumesnapshotlocation.go new file mode 100644 index 000000000..8c8aa432f --- /dev/null +++ b/kubewatch/vendor/github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1/volumesnapshotlocation.go @@ -0,0 +1,99 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeSnapshotLocationLister helps list VolumeSnapshotLocations. +// All objects returned here must be treated as read-only. +type VolumeSnapshotLocationLister interface { + // List lists all VolumeSnapshotLocations in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1.VolumeSnapshotLocation, err error) + // VolumeSnapshotLocations returns an object that can list and get VolumeSnapshotLocations. + VolumeSnapshotLocations(namespace string) VolumeSnapshotLocationNamespaceLister + VolumeSnapshotLocationListerExpansion +} + +// volumeSnapshotLocationLister implements the VolumeSnapshotLocationLister interface. +type volumeSnapshotLocationLister struct { + indexer cache.Indexer +} + +// NewVolumeSnapshotLocationLister returns a new VolumeSnapshotLocationLister. +func NewVolumeSnapshotLocationLister(indexer cache.Indexer) VolumeSnapshotLocationLister { + return &volumeSnapshotLocationLister{indexer: indexer} +} + +// List lists all VolumeSnapshotLocations in the indexer. +func (s *volumeSnapshotLocationLister) List(selector labels.Selector) (ret []*v1.VolumeSnapshotLocation, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.VolumeSnapshotLocation)) + }) + return ret, err +} + +// VolumeSnapshotLocations returns an object that can list and get VolumeSnapshotLocations. +func (s *volumeSnapshotLocationLister) VolumeSnapshotLocations(namespace string) VolumeSnapshotLocationNamespaceLister { + return volumeSnapshotLocationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// VolumeSnapshotLocationNamespaceLister helps list and get VolumeSnapshotLocations. +// All objects returned here must be treated as read-only. +type VolumeSnapshotLocationNamespaceLister interface { + // List lists all VolumeSnapshotLocations in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.VolumeSnapshotLocation, err error) + // Get retrieves the VolumeSnapshotLocation from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*v1.VolumeSnapshotLocation, error) + VolumeSnapshotLocationNamespaceListerExpansion +} + +// volumeSnapshotLocationNamespaceLister implements the VolumeSnapshotLocationNamespaceLister +// interface. +type volumeSnapshotLocationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all VolumeSnapshotLocations in the indexer for a given namespace. +func (s volumeSnapshotLocationNamespaceLister) List(selector labels.Selector) (ret []*v1.VolumeSnapshotLocation, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.VolumeSnapshotLocation)) + }) + return ret, err +} + +// Get retrieves the VolumeSnapshotLocation from the indexer for a given namespace and name. +func (s volumeSnapshotLocationNamespaceLister) Get(name string) (*v1.VolumeSnapshotLocation, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("volumesnapshotlocation"), name) + } + return obj.(*v1.VolumeSnapshotLocation), nil +} diff --git a/kubewatch/vendor/modules.txt b/kubewatch/vendor/modules.txt index ca31a87a3..5adcdd6d5 100644 --- a/kubewatch/vendor/modules.txt +++ b/kubewatch/vendor/modules.txt @@ -251,7 +251,7 @@ github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew -# github.com/devtron-labs/common-lib v0.0.0 => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +# github.com/devtron-labs/common-lib v0.0.0 => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 ## explicit; go 1.24.0 github.com/devtron-labs/common-lib/async github.com/devtron-labs/common-lib/constants @@ -276,6 +276,7 @@ github.com/devtron-labs/common-lib/utils/k8sObjectsUtil 
github.com/devtron-labs/common-lib/utils/remoteConnection/bean github.com/devtron-labs/common-lib/utils/runTime github.com/devtron-labs/common-lib/utils/sql +github.com/devtron-labs/common-lib/utils/storage github.com/devtron-labs/common-lib/utils/yaml # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ## explicit @@ -885,6 +886,18 @@ github.com/vmihailenco/msgpack/v5/msgpcode github.com/vmihailenco/tagparser/v2 github.com/vmihailenco/tagparser/v2/internal github.com/vmihailenco/tagparser/v2/internal/parser +# github.com/vmware-tanzu/velero v1.14.1 +## explicit; go 1.22.6 +github.com/vmware-tanzu/velero/pkg/apis/velero/shared +github.com/vmware-tanzu/velero/pkg/apis/velero/v1 +github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1 +github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned +github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme +github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1 +github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v2alpha1 +github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/internalinterfaces +github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1 +github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1 # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 github.com/x448/float16 @@ -1989,4 +2002,4 @@ sigs.k8s.io/structured-merge-diff/v4/value sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 -# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 diff --git a/kubewatch/wire_gen.go b/kubewatch/wire_gen.go index 99b480da9..b4be8c0e8 100644 --- a/kubewatch/wire_gen.go +++ b/kubewatch/wire_gen.go @@ -1,6 +1,6 @@ // Code generated by Wire. DO NOT EDIT. 
-//go:generate go run github.com/google/wire/cmd/wire +//go:generate go run -mod=mod github.com/google/wire/cmd/wire //go:build !wireinject // +build !wireinject @@ -19,6 +19,11 @@ import ( argoWf2 "github.com/devtron-labs/kubewatch/pkg/informer/cluster/argoWf/cd" "github.com/devtron-labs/kubewatch/pkg/informer/cluster/argoWf/ci" "github.com/devtron-labs/kubewatch/pkg/informer/cluster/systemExec" + "github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/backup" + "github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/backupSchedule" + "github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/backupStorageLocation" + "github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/restore" + "github.com/devtron-labs/kubewatch/pkg/informer/cluster/velero/volumeSnapshotLocation" "github.com/devtron-labs/kubewatch/pkg/logger" "github.com/devtron-labs/kubewatch/pkg/pubsub" "github.com/devtron-labs/kubewatch/pkg/resource" @@ -61,7 +66,12 @@ func InitializeApp() (*App, error) { argoWfInformerImpl := argoWf.NewInformerImpl(sugaredLogger, appConfig, k8sUtilImpl, informerClientImpl, runnable) informerImpl2 := argoWf2.NewInformerImpl(sugaredLogger, appConfig, k8sUtilImpl, informerClientImpl, runnable) systemExecInformerImpl := systemExec.NewInformerImpl(sugaredLogger, appConfig, k8sUtilImpl, pubSubClientServiceImpl, informerClientImpl) - clusterInformerImpl := cluster.NewInformerImpl(sugaredLogger, appConfig, k8sUtilImpl, clusterRepositoryImpl, informerClientImpl, informerImpl, argoWfInformerImpl, informerImpl2, systemExecInformerImpl) + veleroBslInformerInformerImpl := veleroBslInformer.NewInformerImpl(sugaredLogger, appConfig, k8sUtilImpl, informerClientImpl, runnable) + veleroVslInformerInformerImpl := veleroVslInformer.NewInformerImpl(sugaredLogger, appConfig, k8sUtilImpl, informerClientImpl, runnable) + veleroBackupInformerInformerImpl := veleroBackupInformer.NewInformerImpl(sugaredLogger, k8sUtilImpl, appConfig, informerClientImpl, runnable) + 
veleroRestoreInformerInformerImpl := veleroRestoreInformer.NewInformerImpl(sugaredLogger, k8sUtilImpl, appConfig, informerClientImpl, runnable) + veleroBackupScheduleInformerInformerImpl := veleroBackupScheduleInformer.NewInformerImpl(sugaredLogger, k8sUtilImpl, appConfig, informerClientImpl, runnable) + clusterInformerImpl := cluster.NewInformerImpl(sugaredLogger, appConfig, k8sUtilImpl, clusterRepositoryImpl, informerClientImpl, informerImpl, argoWfInformerImpl, informerImpl2, systemExecInformerImpl, veleroBslInformerInformerImpl, veleroVslInformerInformerImpl, veleroBackupInformerInformerImpl, veleroRestoreInformerInformerImpl, veleroBackupScheduleInformerInformerImpl) runnerImpl := informer.NewRunnerImpl(sugaredLogger, appConfig, k8sUtilImpl, clusterInformerImpl) app := NewApp(routerImpl, sugaredLogger, appConfig, db, runnerImpl, runnable) return app, nil diff --git a/lens/api/RestHandler.go b/lens/api/RestHandler.go index 184b919bd..7a712d8d9 100644 --- a/lens/api/RestHandler.go +++ b/lens/api/RestHandler.go @@ -18,14 +18,18 @@ package api import ( "encoding/json" - "github.com/devtron-labs/lens/pkg" - "go.uber.org/zap" + "fmt" "net/http" "strconv" + + "github.com/devtron-labs/lens/internal/dto" + "github.com/devtron-labs/lens/pkg" + "go.uber.org/zap" ) type RestHandler interface { GetDeploymentMetrics(w http.ResponseWriter, r *http.Request) + GetBulkDeploymentMetrics(w http.ResponseWriter, r *http.Request) ProcessDeploymentEvent(w http.ResponseWriter, r *http.Request) ResetApplication(w http.ResponseWriter, r *http.Request) } @@ -104,7 +108,7 @@ func (impl *RestHandlerImpl) GetDeploymentMetrics(w http.ResponseWriter, r *http //decoder := json.NewDecoder(r.Body) v := r.URL.Query() impl.logger.Infow("metrics request", "req", v) - metricRequest := &pkg.MetricRequest{} + metricRequest := &dto.MetricRequest{} if v.Get("env_id") != "" { envId, err := strconv.Atoi(v.Get("env_id")) if err != nil { @@ -130,17 +134,40 @@ func (impl *RestHandlerImpl) 
GetDeploymentMetrics(w http.ResponseWriter, r *http metricRequest.To = to } - //err := decoder.Decode(metricRequest) - //if err != nil { - // impl.logger.Error(err) - // impl.writeJsonResp(w, err, nil, http.StatusBadRequest) - // return - //} - metrics, err := impl.deploymentMetricService.GetDeploymentMetrics(metricRequest) + metrics, err := impl.deploymentMetricService.ProcessSingleDoraMetrics(metricRequest) impl.logger.Infof("metrics %+v", metrics) impl.writeJsonResp(w, err, metrics, 200) } +func (impl *RestHandlerImpl) GetBulkDeploymentMetrics(w http.ResponseWriter, r *http.Request) { + decoder := json.NewDecoder(r.Body) + bulkRequest := &dto.BulkMetricRequest{} + + err := decoder.Decode(bulkRequest) + if err != nil { + impl.logger.Errorw("error decoding bulk request", "err", err) + impl.writeJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + impl.logger.Infow("bulk metrics request", "req", bulkRequest) + + // Validate request + if len(bulkRequest.AppEnvPairs) == 0 { + impl.writeJsonResp(w, fmt.Errorf("app_env_pairs cannot be empty"), nil, http.StatusBadRequest) + return + } + + if bulkRequest.From == nil || bulkRequest.To == nil { + impl.writeJsonResp(w, fmt.Errorf("from and to dates are required"), nil, http.StatusBadRequest) + return + } + + bulkMetrics, err := impl.deploymentMetricService.ProcessBulkDoraMetrics(bulkRequest) + impl.logger.Infof("bulk metrics response: %+v", bulkMetrics) + impl.writeJsonResp(w, err, bulkMetrics, 200) +} + func (impl *RestHandlerImpl) ProcessDeploymentEvent(w http.ResponseWriter, r *http.Request) { decoder := json.NewDecoder(r.Body) deploymentEvent := &pkg.DeploymentEvent{} diff --git a/lens/api/Router.go b/lens/api/Router.go index beda91472..684163bd7 100644 --- a/lens/api/Router.go +++ b/lens/api/Router.go @@ -18,10 +18,11 @@ package api import ( "encoding/json" + "net/http" + "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" - "net/http" ) type MuxRouter struct { 
@@ -54,6 +55,7 @@ func (r MuxRouter) Init() { r.Router.Path("/deployment-metrics").HandlerFunc(r.restHandler.GetDeploymentMetrics). Queries("app_id", "{app_id}", "env_id", "{env_id}", "from", "{from}", "to", "{to}"). Methods("GET", "OPTIONS") + r.Router.Path("/deployment-metrics/bulk").HandlerFunc(r.restHandler.GetBulkDeploymentMetrics).Methods("GET", "OPTIONS") r.Router.Path("/new-deployment-event").HandlerFunc(r.restHandler.ProcessDeploymentEvent).Methods("POST") r.Router.Path("/reset-app-environment").HandlerFunc(r.restHandler.ResetApplication).Methods("POST") diff --git a/lens/go.mod b/lens/go.mod index f334252ef..80d46a037 100644 --- a/lens/go.mod +++ b/lens/go.mod @@ -60,6 +60,6 @@ require ( ) replace ( - github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be + github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 ) diff --git a/lens/go.sum b/lens/go.sum index c74e5f1b7..afdac5395 100644 --- a/lens/go.sum +++ b/lens/go.sum @@ -17,8 +17,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be h1:ZufKGk1RMcJsTDgpCfdWcrca90K3s1+88KlVL/4dEmU= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be/go.mod h1:+CUhxuWB8uMYIoiXwofuLIXPyiNnwmoZlH90KWAE5Ew= 
+github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 h1:xDbz5etX5h6foQDgpT9ukTo2I65b6q32Nu9do5nBPk8= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713/go.mod h1:CqHnvltrng2O40hNGVl56qcAhv7tiXK3SKx47LKyE/A= github.com/devtron-labs/protos v0.0.3-0.20240912111807-605886d90b8d h1:IV6FWU6eWSfKq67Fs2DBx3LjkX/wtjMj9QB3ufZgga4= github.com/devtron-labs/protos v0.0.3-0.20240912111807-605886d90b8d/go.mod h1:1TqULGlTey+VNhAu/ag7NJuUvByJemkqodsc9L5PHJk= github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k= diff --git a/lens/internal/dto/deploymentMetricsDto.go b/lens/internal/dto/deploymentMetricsDto.go new file mode 100644 index 000000000..ba8197689 --- /dev/null +++ b/lens/internal/dto/deploymentMetricsDto.go @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package dto + +import ( + "time" +) + +type Metrics struct { + Series []*Metric `json:"series"` + AverageCycleTime float64 `json:"average_cycle_time"` + AverageLeadTime float64 `json:"average_lead_time"` + ChangeFailureRate float64 `json:"change_failure_rate"` + AverageRecoveryTime float64 `json:"average_recovery_time"` + AverageDeploymentSize float32 `json:"average_deployment_size"` + AverageLineAdded float32 `json:"average_line_added"` + AverageLineDeleted float32 `json:"average_line_deleted"` + LastFailedTime string `json:"last_failed_time"` + RecoveryTimeLastFailed float64 `json:"recovery_time_last_failed"` +} + +type Metric struct { + ReleaseType ReleaseType `json:"release_type"` + ReleaseStatus ReleaseStatus `json:"release_status"` + ReleaseTime time.Time `json:"release_time"` + ChangeSizeLineAdded int `json:"change_size_line_added"` + ChangeSizeLineDeleted int `json:"change_size_line_deleted"` + DeploymentSize int `json:"deployment_size"` + CommitHash string `json:"commit_hash"` + CommitTime time.Time `json:"commit_time"` + LeadTime float64 `json:"lead_time"` + CycleTime float64 `json:"cycle_time"` + RecoveryTime float64 `json:"recovery_time"` +} + +type MetricRequest struct { + AppId int `json:"app_id"` + EnvId int `json:"env_id"` + From string `json:"from"` + To string `json:"to"` +} + +type AppEnvPair struct { + AppId int `json:"appId"` + EnvId int `json:"envId"` +} + +type BulkMetricRequest struct { + AppEnvPairs []AppEnvPair `json:"appEnvPairs"` + From *time.Time `json:"from"` + To *time.Time `json:"to"` +} + +type AppEnvMetrics struct { + AppId int `json:"appId"` + EnvId int `json:"envId"` + Metrics *Metrics `json:"metrics"` + Error string `json:"error,omitempty"` +} + +type BulkMetricsResponse struct { + Results []AppEnvMetrics `json:"results"` +} + +// ---------------- +type ReleaseType int + +const ( + Unknown ReleaseType = iota + RollForward + RollBack + Patch +) + +func (releaseType ReleaseType) String() string { + return 
[...]string{"Unknown", "RollForward", "RollBack", "Patch"}[releaseType] +} + +// -------------- +type ReleaseStatus int + +const ( + Success ReleaseStatus = iota + Failure +) + +func (releaseStatus ReleaseStatus) String() string { + return [...]string{"Success", "Failure"}[releaseStatus] +} + +// ------ +type ProcessStage int + +const ( + Init ProcessStage = iota + ReleaseTypeDetermined + LeadTimeFetch +) + +func (ProcessStage ProcessStage) String() string { + return [...]string{"Init", "ReleaseTypeDetermined", "LeadTimeFetch"}[ProcessStage] +} diff --git a/lens/internal/sql/AppReleaseRepository.go b/lens/internal/sql/AppReleaseRepository.go index 09dccf116..5f1c884fd 100644 --- a/lens/internal/sql/AppReleaseRepository.go +++ b/lens/internal/sql/AppReleaseRepository.go @@ -17,74 +17,37 @@ package sql import ( - "time" - "context" + "fmt" + "strings" + "time" + "github.com/devtron-labs/lens/internal/dto" pg "github.com/go-pg/pg/v10" "go.uber.org/zap" ) type AppRelease struct { - tableName struct{} `pg:"app_release"` - Id int `pg:"id,pk"` - AppId int `pg:"app_id,notnull,use_zero"` //orchestrator appId - EnvironmentId int `pg:"environment_id,notnull,use_zero"` //orchestrator env id - CiArtifactId int `pg:"ci_artifact_id,notnull,use_zero"` //orchestrator ciAretefactId used for identifying rollback (appId,environmentId, ciArtifactId) - ReleaseId int `pg:"release_id,notnull,use_zero"` // orchestrator release counter - PipelineOverrideId int `pg:"pipeline_override_id,notnull,use_zero"` //pipeline override id orchestrator - ChangeSizeLineAdded int `pg:"change_size_line_added,notnull,use_zero"` //total lines added in this release - ChangeSizeLineDeleted int `pg:"change_size_line_deleted,notnull,use_zero"` //total lines deleted during this release - TriggerTime time.Time `pg:"trigger_time,notnull"` //deployment time - ReleaseType ReleaseType `pg:"release_type,notnull,use_zero"` - ReleaseStatus ReleaseStatus `pg:"release_status,notnull,use_zero"` - ProcessStage ProcessStage 
`pg:"process_status,notnull,use_zero"` - CreatedTime time.Time `pg:"created_time,notnull"` - UpdatedTime time.Time `pg:"updated_time,notnull"` + tableName struct{} `pg:"app_release"` + Id int `pg:"id,pk"` + AppId int `pg:"app_id,notnull,use_zero"` //orchestrator appId + EnvironmentId int `pg:"environment_id,notnull,use_zero"` //orchestrator env id + CiArtifactId int `pg:"ci_artifact_id,notnull,use_zero"` //orchestrator ciAretefactId used for identifying rollback (appId,environmentId, ciArtifactId) + ReleaseId int `pg:"release_id,notnull,use_zero"` // orchestrator release counter + PipelineOverrideId int `pg:"pipeline_override_id,notnull,use_zero"` //pipeline override id orchestrator + ChangeSizeLineAdded int `pg:"change_size_line_added,notnull,use_zero"` //total lines added in this release + ChangeSizeLineDeleted int `pg:"change_size_line_deleted,notnull,use_zero"` //total lines deleted during this release + TriggerTime time.Time `pg:"trigger_time,notnull"` //deployment time + ReleaseType dto.ReleaseType `pg:"release_type,notnull,use_zero"` + ReleaseStatus dto.ReleaseStatus `pg:"release_status,notnull,use_zero"` + ProcessStage dto.ProcessStage `pg:"process_status,notnull,use_zero"` + CreatedTime time.Time `pg:"created_time,notnull"` + UpdatedTime time.Time `pg:"updated_time,notnull"` LeadTime *LeadTime } -// -------------- -type ReleaseStatus int - -const ( - Success ReleaseStatus = iota - Failure -) - -func (releaseStatus ReleaseStatus) String() string { - return [...]string{"Success", "Failure"}[releaseStatus] -} - -// ---------------- -type ReleaseType int - -const ( - Unknown ReleaseType = iota - RollForward - RollBack - Patch -) - -func (releaseType ReleaseType) String() string { - return [...]string{"Unknown", "RollForward", "RollBack", "Patch"}[releaseType] -} - -// ------ -type ProcessStage int - -const ( - Init ProcessStage = iota - ReleaseTypeDetermined - LeadTimeFetch -) - var ctx = context.Background() -func (ProcessStage ProcessStage) String() string { 
- return [...]string{"Init", "ReleaseTypeDetermined", "LeadTimeFetch"}[ProcessStage] -} - type AppReleaseRepository interface { Save(appRelease *AppRelease) (*AppRelease, error) Update(appRelease *AppRelease) (*AppRelease, error) @@ -92,6 +55,8 @@ type AppReleaseRepository interface { GetPreviousReleaseWithinTime(appId, environmentId int, within time.Time, currentAppReleaseId int) (*AppRelease, error) GetPreviousRelease(appId, environmentId int, appReleaseId int) (*AppRelease, error) GetReleaseBetween(appId, environmentId int, from time.Time, to time.Time) ([]AppRelease, error) + GetReleaseBetweenBulk(appEnvPairs []dto.AppEnvPair, from time.Time, to time.Time) ([]AppRelease, error) + GetPreviousReleasesBulk(latestReleases []AppRelease) (map[string]*AppRelease, error) CleanAppDataForEnvironment(appId, environmentId int) error } type AppReleaseRepositoryImpl struct { @@ -176,6 +141,69 @@ func (impl *AppReleaseRepositoryImpl) GetReleaseBetween(appId, environmentId int return appReleases, err } +func (impl *AppReleaseRepositoryImpl) GetReleaseBetweenBulk(appEnvPairs []dto.AppEnvPair, from time.Time, to time.Time) ([]AppRelease, error) { + if len(appEnvPairs) == 0 { + return []AppRelease{}, nil + } + + var appReleases []AppRelease + + // Build the WHERE clause for multiple app-env pairs + query := impl.dbConnection.Model(&appReleases). + Where("trigger_time >= ?", from). + Where("trigger_time <= ?", to) + + // Create OR conditions for each app-env pair + var conditions []string + var args []interface{} + for _, pair := range appEnvPairs { + conditions = append(conditions, "(app_id = ? AND environment_id = ?)") + args = append(args, pair.AppId, pair.EnvId) + } + + // Combine all conditions with OR + whereClause := "(" + strings.Join(conditions, " OR ") + ")" + query = query.Where(whereClause, args...) 
+ + err := query.Order("app_id").Order("environment_id").Order("id desc").Select() + return appReleases, err +} + +func (impl *AppReleaseRepositoryImpl) GetPreviousReleasesBulk(latestReleases []AppRelease) (map[string]*AppRelease, error) { + appEnvToPreviousReleaseMap := make(map[string]*AppRelease) + + // Group by app-env pair and get the latest release for each + latestByAppEnv := make(map[string]*AppRelease) + for i := range latestReleases { + key := fmt.Sprintf("%d-%d", latestReleases[i].AppId, latestReleases[i].EnvironmentId) + if _, exists := latestByAppEnv[key]; !exists { + latestByAppEnv[key] = &latestReleases[i] + } + } + + // Now get previous releases for each latest release + for key, latest := range latestByAppEnv { + previous := &AppRelease{} + err := impl.dbConnection. + Model(previous). + Where("app_id = ?", latest.AppId). + Where("environment_id = ?", latest.EnvironmentId). + Where("id < ?", latest.Id). + Last() + + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error getting previous release", "appId", latest.AppId, "envId", latest.EnvironmentId, "err", err) + continue + } + + if err != pg.ErrNoRows { + appEnvToPreviousReleaseMap[key] = previous + } + } + + return appEnvToPreviousReleaseMap, nil +} + func (impl *AppReleaseRepositoryImpl) cleanAppDataForEnvironment(appId, environmentId int, tx *pg.Tx) error { r, err := tx.Model((*AppRelease)(nil)). Where("app_id =?", appId). 
diff --git a/lens/pkg/DeploymentMetricService.go b/lens/pkg/DeploymentMetricService.go index 9e2169ff8..93cb84ce2 100644 --- a/lens/pkg/DeploymentMetricService.go +++ b/lens/pkg/DeploymentMetricService.go @@ -19,51 +19,23 @@ package pkg import ( "time" + "github.com/devtron-labs/common-lib/utils" + "github.com/devtron-labs/lens/internal/dto" "github.com/devtron-labs/lens/internal/sql" - pg "github.com/go-pg/pg/v10" + "github.com/devtron-labs/lens/pkg/constants" + utils2 "github.com/devtron-labs/lens/pkg/utils" "go.uber.org/zap" ) -const ( - layout = "2006-01-02T15:04:05.000Z" -) - type DeploymentMetricService interface { - GetDeploymentMetrics(request *MetricRequest) (*Metrics, error) -} - -type Metrics struct { - Series []*Metric `json:"series"` - AverageCycleTime float64 `json:"average_cycle_time"` - AverageLeadTime float64 `json:"average_lead_time"` - ChangeFailureRate float64 `json:"change_failure_rate"` - AverageRecoveryTime float64 `json:"average_recovery_time"` - AverageDeploymentSize float32 `json:"average_deployment_size"` - AverageLineAdded float32 `json:"average_line_added"` - AverageLineDeleted float32 `json:"average_line_deleted"` - LastFailedTime string `json:"last_failed_time"` - RecoveryTimeLastFailed float64 `json:"recovery_time_last_failed"` -} - -type Metric struct { - ReleaseType sql.ReleaseType `json:"release_type"` - ReleaseStatus sql.ReleaseStatus `json:"release_status"` - ReleaseTime time.Time `json:"release_time"` - ChangeSizeLineAdded int `json:"change_size_line_added"` - ChangeSizeLineDeleted int `json:"change_size_line_deleted"` - DeploymentSize int `json:"deployment_size"` - CommitHash string `json:"commit_hash"` - CommitTime time.Time `json:"commit_time"` - LeadTime float64 `json:"lead_time"` - CycleTime float64 `json:"cycle_time"` - RecoveryTime float64 `json:"recovery_time"` -} - -type MetricRequest struct { - AppId int `json:"app_id"` - EnvId int `json:"env_id"` - From string `json:"from"` - To string `json:"to"` + 
GetDeploymentMetrics(request *dto.MetricRequest) (*dto.Metrics, error) + GetBulkDeploymentMetrics(request *dto.BulkMetricRequest) (*dto.BulkMetricsResponse, error) + + // New DORA metrics functions + ProcessSingleDoraMetrics(request *dto.MetricRequest) (*dto.Metrics, error) + ProcessBulkDoraMetrics(request *dto.BulkMetricRequest) ([]DoraMetrics, error) + CalculateDoraMetrics(appId, envId int, releases []sql.AppRelease, materials []*sql.PipelineMaterial, leadTimes []sql.LeadTime, fromTime, toTime time.Time) *DoraMetrics + GetDoraMetricsSummary(doraMetrics *DoraMetrics) *DoraMetricsSummary } type DeploymentMetricServiceImpl struct { @@ -86,53 +58,192 @@ func NewDeploymentMetricServiceImpl( } } -func (impl DeploymentMetricServiceImpl) GetDeploymentMetrics(request *MetricRequest) (*Metrics, error) { - from, err := time.Parse(layout, request.From) - if err != nil { - return nil, err - } - to, err := time.Parse(layout, request.To) +func (impl DeploymentMetricServiceImpl) GetDeploymentMetrics(request *dto.MetricRequest) (*dto.Metrics, error) { + from, to, err := utils2.ParseDateRange(request.From, request.To) if err != nil { return nil, err } + releases, err := impl.appReleaseRepository.GetReleaseBetween(request.AppId, request.EnvId, from, to) if err != nil { - impl.logger.Errorf("error getting data from db ", "err", err) + impl.logger.Errorw("error getting data from db ", "err", err) return nil, err } + if len(releases) == 0 { - return &Metrics{Series: []*Metric{}}, nil + return utils2.CreateEmptyMetrics(), nil } - var ids []int + var releaseIds []int for _, v := range releases { - ids = append(ids, v.Id) + releaseIds = append(releaseIds, v.Id) } - materials, err := impl.pipelineMaterialRepository.FindByAppReleaseIds(ids) + + materials, err := impl.pipelineMaterialRepository.FindByAppReleaseIds(releaseIds) if err != nil { - impl.logger.Errorf("error getting material from db ", "err", err) + impl.logger.Errorw("error getting material from db ", "err", err) return nil, err 
} - leadTimes, err := impl.leadTimeRepository.FindByIds(ids) + + leadTimes, err := impl.leadTimeRepository.FindByIds(releaseIds) if err != nil { - impl.logger.Errorf("error getting lead time from db ", "err", err) + impl.logger.Errorw("error getting lead time from db ", "err", err) return nil, err } - lastId := releases[len(releases)-1].Id - lastRelease, err := impl.appReleaseRepository.GetPreviousRelease(request.AppId, request.EnvId, lastId) + + // Get previous release with bounds checking + var lastRelease *sql.AppRelease + if len(releases) > 0 { + lastId := releases[len(releases)-1].Id + lastRelease, err = impl.appReleaseRepository.GetPreviousRelease(request.AppId, request.EnvId, lastId) + if err != nil && !utils.IsErrNoRows(err) { + impl.logger.Errorw("error getting previous release from db ", "err", err) + // Don't return error, just continue without previous release + } + if utils.IsErrNoRows(err) { + lastRelease = nil + } + } + + return impl.populateMetrics(releases, materials, leadTimes, lastRelease) +} + +func (impl DeploymentMetricServiceImpl) GetBulkDeploymentMetrics(request *dto.BulkMetricRequest) (*dto.BulkMetricsResponse, error) { + if len(request.AppEnvPairs) == 0 { + return &dto.BulkMetricsResponse{Results: []dto.AppEnvMetrics{}}, nil + } + + return impl.getBulkDeploymentMetricsWithBulkQueries(request) +} + +func (impl DeploymentMetricServiceImpl) getBulkDeploymentMetricsWithBulkQueries(request *dto.BulkMetricRequest) (*dto.BulkMetricsResponse, error) { + response := &dto.BulkMetricsResponse{ + Results: make([]dto.AppEnvMetrics, len(request.AppEnvPairs)), + } + // Step 1: Get all releases for all app-env pairs in one query + allReleases, err := impl.appReleaseRepository.GetReleaseBetweenBulk(request.AppEnvPairs, *request.From, *request.To) if err != nil { - if err != pg.ErrNoRows { - impl.logger.Errorf("error getting data from db ", "err", err) + impl.logger.Errorw("error getting bulk releases from db", "err", err) + return nil, err + } + + // Step 
2: Group releases by app-env pair + releasesByAppEnv := make(map[string][]sql.AppRelease) + var allReleaseIds []int + + for _, release := range allReleases { + key := utils2.GenerateAppEnvKey(release.AppId, release.EnvironmentId) + releasesByAppEnv[key] = append(releasesByAppEnv[key], release) + allReleaseIds = append(allReleaseIds, release.Id) + } + + // Step 3: Get all materials and lead times in bulk + var allMaterials []*sql.PipelineMaterial + var allLeadTimes []sql.LeadTime + + if len(allReleaseIds) > 0 { + allMaterials, err = impl.pipelineMaterialRepository.FindByAppReleaseIds(allReleaseIds) + if err != nil { + impl.logger.Errorw("error getting bulk materials from db", "err", err) + return nil, err + } + + allLeadTimes, err = impl.leadTimeRepository.FindByIds(allReleaseIds) + if err != nil { + impl.logger.Errorw("error getting bulk lead times from db", "err", err) + return nil, err } - lastRelease = nil } - metrics, err := impl.populateMetrics(releases, materials, leadTimes, lastRelease) + + // Step 4: Get previous releases for all app-env pairs + previousReleases, err := impl.appReleaseRepository.GetPreviousReleasesBulk(allReleases) if err != nil { + impl.logger.Errorw("error getting bulk previous releases from db", "err", err) return nil, err } - return metrics, nil + + // Step 5: Process each app-env pair + for i, pair := range request.AppEnvPairs { + key := utils2.GenerateAppEnvKey(pair.AppId, pair.EnvId) + releases := releasesByAppEnv[key] + + appEnvMetric := dto.AppEnvMetrics{ + AppId: pair.AppId, + EnvId: pair.EnvId, + } + + if len(releases) == 0 { + appEnvMetric.Metrics = utils2.CreateEmptyMetrics() + } else { + metrics, err := impl.processAppEnvMetrics(releases, allMaterials, allLeadTimes, previousReleases[key]) + if err != nil { + appEnvMetric.Error = err.Error() + impl.logger.Errorw("error populating metrics for app-env pair", "appId", pair.AppId, "envId", pair.EnvId, "err", err) + } else { + appEnvMetric.Metrics = metrics + } + } + + 
response.Results[i] = appEnvMetric + } + + return response, nil +} + +// processAppEnvMetrics processes metrics for a single app-env pair +func (impl DeploymentMetricServiceImpl) processAppEnvMetrics(releases []sql.AppRelease, allMaterials []*sql.PipelineMaterial, allLeadTimes []sql.LeadTime, previousRelease *sql.AppRelease) (*dto.Metrics, error) { + releaseIds := make([]int, len(releases)) + for i, release := range releases { + releaseIds[i] = release.Id + } + + // Filter materials and lead times for this app-env pair + materials := impl.filterMaterialsByReleaseIds(allMaterials, releaseIds) + leadTimes := impl.filterLeadTimesByReleaseIds(allLeadTimes, releaseIds) + + return impl.populateMetrics(releases, materials, leadTimes, previousRelease) } -func (impl DeploymentMetricServiceImpl) populateMetrics(appReleases []sql.AppRelease, materials []*sql.PipelineMaterial, leadTimes []sql.LeadTime, lastRelease *sql.AppRelease) (*Metrics, error) { +// filterMaterialsByReleaseIds filters materials for specific release IDs +func (impl DeploymentMetricServiceImpl) filterMaterialsByReleaseIds(allMaterials []*sql.PipelineMaterial, releaseIds []int) []*sql.PipelineMaterial { + if len(releaseIds) == 0 { + return []*sql.PipelineMaterial{} + } + + releaseIdSet := make(map[int]bool, len(releaseIds)) + for _, id := range releaseIds { + releaseIdSet[id] = true + } + + filtered := make([]*sql.PipelineMaterial, 0, len(releaseIds)) + for _, material := range allMaterials { + if releaseIdSet[material.AppReleaseId] { + filtered = append(filtered, material) + } + } + return filtered +} + +// filterLeadTimesByReleaseIds filters lead times for specific release IDs +func (impl DeploymentMetricServiceImpl) filterLeadTimesByReleaseIds(allLeadTimes []sql.LeadTime, releaseIds []int) []sql.LeadTime { + if len(releaseIds) == 0 { + return []sql.LeadTime{} + } + + releaseIdSet := make(map[int]bool, len(releaseIds)) + for _, id := range releaseIds { + releaseIdSet[id] = true + } + + filtered := 
make([]sql.LeadTime, 0, len(releaseIds)) // Pre-allocate with estimated capacity + for _, leadTime := range allLeadTimes { + if releaseIdSet[leadTime.AppReleaseId] { + filtered = append(filtered, leadTime) + } + } + return filtered +} + +func (impl DeploymentMetricServiceImpl) populateMetrics(appReleases []sql.AppRelease, materials []*sql.PipelineMaterial, leadTimes []sql.LeadTime, lastRelease *sql.AppRelease) (*dto.Metrics, error) { releases := impl.transform(appReleases, materials, leadTimes) leadTimesCount := 0 totalLeadTime := float64(0) @@ -161,7 +272,7 @@ func (impl DeploymentMetricServiceImpl) populateMetrics(appReleases []sql.AppRel averageCycleTime = totalCycleTime / float64(cycleTimeCount) } - metrics := &Metrics{ + metrics := &dto.Metrics{ Series: releases, //ChangeFailureRate: changeFailureRate, AverageCycleTime: averageCycleTime, @@ -178,16 +289,16 @@ func (impl DeploymentMetricServiceImpl) populateMetrics(appReleases []sql.AppRel return metrics, nil } -func (impl DeploymentMetricServiceImpl) calculateChangeFailureRateAndRecoveryTime(metrics *Metrics) { +func (impl DeploymentMetricServiceImpl) calculateChangeFailureRateAndRecoveryTime(metrics *dto.Metrics) { releases := metrics.Series failed := 0 success := 0 recoveryTime := float64(0) recovered := 0 for _, v := range releases { - if v.ReleaseStatus == sql.Failure { + if v.ReleaseStatus == dto.Failure { if metrics.LastFailedTime == "" { - metrics.LastFailedTime = v.ReleaseTime.Format(layout) + metrics.LastFailedTime = v.ReleaseTime.Format(constants.Layout) } //if i != 0 { // releases[i].RecoveryTime = releases[i].ReleaseTime.Sub(releases[i+1].ReleaseTime) @@ -195,17 +306,17 @@ func (impl DeploymentMetricServiceImpl) calculateChangeFailureRateAndRecoveryTim //} failed++ } - if v.ReleaseStatus == sql.Success { + if v.ReleaseStatus == dto.Success { success++ } } for i := 0; i < len(releases); i++ { - if releases[i].ReleaseStatus == sql.Failure { - if i < len(releases)-1 && releases[i+1].ReleaseStatus == 
sql.Failure { + if releases[i].ReleaseStatus == dto.Failure { + if i < len(releases)-1 && releases[i+1].ReleaseStatus == dto.Failure { continue } for j := i - 1; j >= 0; j-- { - if releases[j].ReleaseStatus == sql.Success { + if releases[j].ReleaseStatus == dto.Success { releases[i].RecoveryTime = releases[j].ReleaseTime.Sub(releases[i].ReleaseTime).Minutes() recoveryTime += releases[i].RecoveryTime recovered++ @@ -229,7 +340,7 @@ func (impl DeploymentMetricServiceImpl) calculateChangeFailureRateAndRecoveryTim metrics.AverageRecoveryTime = averageRecoveryTime } -func (impl DeploymentMetricServiceImpl) calculateChangeSize(metrics *Metrics) { +func (impl DeploymentMetricServiceImpl) calculateChangeSize(metrics *dto.Metrics) { releases := metrics.Series lineAdded := 0 lineDeleted := 0 @@ -244,7 +355,7 @@ func (impl DeploymentMetricServiceImpl) calculateChangeSize(metrics *Metrics) { metrics.AverageLineDeleted = float32(lineDeleted) / float32(len(releases)) } -func (impl DeploymentMetricServiceImpl) transform(releases []sql.AppRelease, materials []*sql.PipelineMaterial, leadTimes []sql.LeadTime) []*Metric { +func (impl DeploymentMetricServiceImpl) transform(releases []sql.AppRelease, materials []*sql.PipelineMaterial, leadTimes []sql.LeadTime) []*dto.Metric { pm := make(map[int]*sql.PipelineMaterial) for _, v := range materials { pm[v.AppReleaseId] = v @@ -256,9 +367,9 @@ func (impl DeploymentMetricServiceImpl) transform(releases []sql.AppRelease, mat impl.logger.Errorw("materials ", "mat", pm) - metrics := make([]*Metric, 0) + metrics := make([]*dto.Metric, 0) for _, v := range releases { - metric := &Metric{ + metric := &dto.Metric{ ReleaseType: v.ReleaseType, ReleaseStatus: v.ReleaseStatus, ReleaseTime: v.TriggerTime, @@ -283,3 +394,528 @@ func (impl DeploymentMetricServiceImpl) transform(releases []sql.AppRelease, mat } return metrics } + +// ============================================================================ +// NEW DORA METRICS CALCULATION FUNCTIONS +// 
============================================================================ + +// DoraMetrics represents the four key DORA metrics +type DoraMetrics struct { + AppId int `json:"app_id"` + EnvId int `json:"env_id"` + DeploymentFrequency float64 `json:"deployment_frequency"` // Deployments per day + ChangeFailureRate float64 `json:"change_failure_rate"` // Percentage + MeanLeadTimeForChanges float64 `json:"mean_lead_time_for_changes"` // Minutes + MeanTimeToRecovery float64 `json:"mean_time_to_recovery"` // Minutes +} + +// CalculateDoraMetrics calculates all four DORA metrics based on the provided formulas +func (impl DeploymentMetricServiceImpl) CalculateDoraMetrics(appId, envId int, releases []sql.AppRelease, materials []*sql.PipelineMaterial, leadTimes []sql.LeadTime, fromTime, toTime time.Time) *DoraMetrics { + if len(releases) == 0 { + return &DoraMetrics{ + AppId: appId, + EnvId: envId, + } + } + + // Transform releases to dto.Metric format for easier processing + metrics := impl.transform(releases, materials, leadTimes) + + return &DoraMetrics{ + AppId: appId, + EnvId: envId, + DeploymentFrequency: impl.calculateDeploymentFrequency(metrics, fromTime, toTime), + ChangeFailureRate: impl.calculateChangeFailureRateNew(metrics), + MeanLeadTimeForChanges: impl.calculateMeanLeadTimeForChanges(metrics), + MeanTimeToRecovery: impl.calculateMeanTimeToRecovery(metrics), + } +} + +// calculateDeploymentFrequency calculates deployment frequency +// Formula: Deployments to Production ÷ Time Period +func (impl DeploymentMetricServiceImpl) calculateDeploymentFrequency(metrics []*dto.Metric, fromTime, toTime time.Time) float64 { + if len(metrics) == 0 { + return 0.0 + } + + // calculating time period in days + timePeriodDays := toTime.Sub(fromTime).Hours() / 24.0 + if timePeriodDays <= 0 { + return 0.0 + } + + return float64(len(metrics)) / timePeriodDays +} + +// calculateChangeFailureRateNew calculates change failure rate +// Formula: (Failed Deployments ÷ Total 
Deployments) × 100 +func (impl DeploymentMetricServiceImpl) calculateChangeFailureRateNew(metrics []*dto.Metric) float64 { + if len(metrics) == 0 { + return 0.0 + } + + failedDeployments := 0 + totalDeployments := len(metrics) + + for _, metric := range metrics { + if metric.ReleaseStatus == dto.Failure { + failedDeployments++ + } + } + + if totalDeployments == 0 { + return 0.0 + } + + return (float64(failedDeployments) / float64(totalDeployments)) * 100.0 +} + +// calculateMeanLeadTimeForChanges calculates mean lead time for changes +// Formula: (Σ (Deployment Time – Commit Time)) ÷ Number of Changes +func (impl DeploymentMetricServiceImpl) calculateMeanLeadTimeForChanges(metrics []*dto.Metric) float64 { + if len(metrics) == 0 { + return 0.0 + } + + totalLeadTime := 0.0 + validChanges := 0 + + for _, metric := range metrics { + // Only consider successful deployments with valid lead time + if metric.ReleaseStatus == dto.Success && metric.LeadTime != float64(0) { + totalLeadTime += metric.LeadTime + validChanges++ + } + } + + if validChanges == 0 { + return 0.0 + } + + return totalLeadTime / float64(validChanges) +} + +// calculateMeanTimeToRecovery calculates mean time to recovery (MTTR) +// Formula: (Σ (Recovery Time – Failure Time)) ÷ Number of Incidents +func (impl DeploymentMetricServiceImpl) calculateMeanTimeToRecovery(metrics []*dto.Metric) float64 { + if len(metrics) == 0 { + return 0.0 + } + + totalRecoveryTime := 0.0 + incidents := 0 + + // Sort metrics by release time (assuming they are already sorted by ID desc) + // We need to find failed deployments and their recovery times + for i := 0; i < len(metrics); i++ { + if metrics[i].ReleaseStatus == dto.Failure { + // Look for the next successful deployment after this failure + recoveryTime := impl.findRecoveryTime(metrics, i) + if recoveryTime > 0 { + totalRecoveryTime += recoveryTime + incidents++ + } + } + } + + if incidents == 0 { + return 0.0 + } + + return totalRecoveryTime / float64(incidents) +} + 
+// findRecoveryTime finds the recovery time for a failed deployment +func (impl DeploymentMetricServiceImpl) findRecoveryTime(metrics []*dto.Metric, failureIndex int) float64 { + if failureIndex >= len(metrics) { + return 0.0 + } + + failureTime := metrics[failureIndex].ReleaseTime + + // Look for the next successful deployment (going backwards in time since metrics are sorted by ID desc) + for i := failureIndex - 1; i >= 0; i-- { + if metrics[i].ReleaseStatus == dto.Success { + recoveryTime := metrics[i].ReleaseTime + // Calculate recovery time in minutes + return recoveryTime.Sub(failureTime).Minutes() + } + } + + return 0.0 +} + +// CalculateDoraMetricsForBulk calculates DORA metrics for bulk processing (without materials data) +func (impl DeploymentMetricServiceImpl) CalculateDoraMetricsForBulk(appEnvPairs []dto.AppEnvPair, releasesByAppEnv map[string][]sql.AppRelease, allLeadTimes []sql.LeadTime, fromTime, toTime time.Time) []DoraMetrics { + result := make([]DoraMetrics, len(appEnvPairs)) + + for i, pair := range appEnvPairs { + key := utils2.GenerateAppEnvKey(pair.AppId, pair.EnvId) + releases := releasesByAppEnv[key] + + if len(releases) == 0 { + result[i] = DoraMetrics{ + AppId: pair.AppId, + EnvId: pair.EnvId, + } + continue + } + + // Get release IDs for filtering lead times + releaseIds := make([]int, len(releases)) + for j, release := range releases { + releaseIds[j] = release.Id + } + + // Filter lead times for this app-env pair + leadTimes := impl.filterLeadTimesByReleaseIds(allLeadTimes, releaseIds) + + // Calculate DORA metrics for this app-env pair (without materials) + doraMetrics := impl.CalculateDoraMetricsForBulkWithoutMaterials(pair.AppId, pair.EnvId, releases, leadTimes, fromTime, toTime) + result[i] = *doraMetrics + } + + return result +} + +// CalculateDoraMetricsForBulkWithoutMaterials calculates DORA metrics without materials data (for bulk processing) +func (impl DeploymentMetricServiceImpl) 
CalculateDoraMetricsForBulkWithoutMaterials(appId, envId int, releases []sql.AppRelease, leadTimes []sql.LeadTime, fromTime, toTime time.Time) *DoraMetrics { + if len(releases) == 0 { + return &DoraMetrics{ + AppId: appId, + EnvId: envId, + } + } + + // Transform releases to dto.Metric format without materials + metrics := impl.transformWithoutMaterials(releases, leadTimes) + + return &DoraMetrics{ + AppId: appId, + EnvId: envId, + DeploymentFrequency: impl.calculateDeploymentFrequency(metrics, fromTime, toTime), + ChangeFailureRate: impl.calculateChangeFailureRateNew(metrics), + MeanLeadTimeForChanges: impl.calculateMeanLeadTimeForChanges(metrics), + MeanTimeToRecovery: impl.calculateMeanTimeToRecovery(metrics), + } +} + +// transformWithoutMaterials transforms releases to metrics without materials data +func (impl DeploymentMetricServiceImpl) transformWithoutMaterials(releases []sql.AppRelease, leadTimes []sql.LeadTime) []*dto.Metric { + lt := make(map[int]sql.LeadTime) + for _, v := range leadTimes { + lt[v.AppReleaseId] = v + } + + metrics := make([]*dto.Metric, 0) + for _, v := range releases { + metric := &dto.Metric{ + ReleaseType: v.ReleaseType, + ReleaseStatus: v.ReleaseStatus, + ReleaseTime: v.TriggerTime, + } + + if l, ok := lt[v.Id]; ok { + metric.LeadTime = l.LeadTime.Minutes() + } + + metrics = append(metrics, metric) + } + return metrics +} + +// CalculateDoraMetricsWithProductionFilter calculates DORA metrics with production environment filtering +func (impl DeploymentMetricServiceImpl) CalculateDoraMetricsWithProductionFilter(appId, envId int, releases []sql.AppRelease, materials []*sql.PipelineMaterial, leadTimes []sql.LeadTime, fromTime, toTime time.Time, isProductionEnv bool) *DoraMetrics { + if len(releases) == 0 { + return &DoraMetrics{ + AppId: appId, + EnvId: envId, + } + } + + // Transform releases to dto.Metric format + metrics := impl.transform(releases, materials, leadTimes) + + return &DoraMetrics{ + AppId: appId, + EnvId: envId, + 
DeploymentFrequency: impl.calculateDeploymentFrequencyWithFilter(metrics, fromTime, toTime, isProductionEnv), + ChangeFailureRate: impl.calculateChangeFailureRateNew(metrics), + MeanLeadTimeForChanges: impl.calculateMeanLeadTimeForChanges(metrics), + MeanTimeToRecovery: impl.calculateMeanTimeToRecovery(metrics), + } +} + +// calculateDeploymentFrequencyWithFilter calculates deployment frequency with production filter +func (impl DeploymentMetricServiceImpl) calculateDeploymentFrequencyWithFilter(metrics []*dto.Metric, fromTime, toTime time.Time, isProductionEnv bool) float64 { + if len(metrics) == 0 { + return 0.0 + } + + // Count deployments based on environment type + deploymentCount := 0 + for _, metric := range metrics { + // For production environments, count all deployments + // For non-production, count successful deployments only + if isProductionEnv { + deploymentCount++ + } else if metric.ReleaseStatus == dto.Success { + deploymentCount++ + } + } + + // Calculate time period in days + timePeriodDays := toTime.Sub(fromTime).Hours() / 24.0 + if timePeriodDays <= 0 { + return 0.0 + } + + return float64(deploymentCount) / timePeriodDays +} + +// GetDoraMetricsSummary provides a summary of DORA metrics with performance classification +func (impl DeploymentMetricServiceImpl) GetDoraMetricsSummary(doraMetrics *DoraMetrics) *DoraMetricsSummary { + return &DoraMetricsSummary{ + Metrics: doraMetrics, + Performance: DoraPerformanceClassification{ + DeploymentFrequencyLevel: impl.classifyDeploymentFrequency(doraMetrics.DeploymentFrequency), + ChangeFailureRateLevel: impl.classifyChangeFailureRate(doraMetrics.ChangeFailureRate), + MeanLeadTimeLevel: impl.classifyMeanLeadTime(doraMetrics.MeanLeadTimeForChanges), + MeanTimeToRecoveryLevel: impl.classifyMeanTimeToRecovery(doraMetrics.MeanTimeToRecovery), + }, + } +} + +// DoraMetricsSummary contains DORA metrics with performance classification +type DoraMetricsSummary struct { + Metrics *DoraMetrics `json:"metrics"` + 
Performance DoraPerformanceClassification `json:"performance"` +} + +// DoraPerformanceClassification classifies DORA metrics performance levels +type DoraPerformanceClassification struct { + DeploymentFrequencyLevel string `json:"deployment_frequency_level"` + ChangeFailureRateLevel string `json:"change_failure_rate_level"` + MeanLeadTimeLevel string `json:"mean_lead_time_level"` + MeanTimeToRecoveryLevel string `json:"mean_time_to_recovery_level"` +} + +// Performance level constants +const ( + PerformanceElite = "Elite" + PerformanceHigh = "High" + PerformanceMedium = "Medium" + PerformanceLow = "Low" +) + +// classifyDeploymentFrequency classifies deployment frequency performance +func (impl DeploymentMetricServiceImpl) classifyDeploymentFrequency(frequency float64) string { + // Based on DORA research benchmarks (deployments per day) + if frequency >= 1.0 { + return PerformanceElite // Multiple deployments per day + } else if frequency >= 0.14 { // ~1 per week + return PerformanceHigh + } else if frequency >= 0.033 { // ~1 per month + return PerformanceMedium + } + return PerformanceLow +} + +// classifyChangeFailureRate classifies change failure rate performance +func (impl DeploymentMetricServiceImpl) classifyChangeFailureRate(rate float64) string { + // Based on DORA research benchmarks (percentage) + if rate <= 15.0 { + return PerformanceElite + } else if rate <= 20.0 { + return PerformanceHigh + } else if rate <= 30.0 { + return PerformanceMedium + } + return PerformanceLow +} + +// classifyMeanLeadTime classifies mean lead time performance +func (impl DeploymentMetricServiceImpl) classifyMeanLeadTime(leadTime float64) string { + // Convert minutes to hours for classification + leadTimeHours := leadTime / 60.0 + + // Based on DORA research benchmarks + if leadTimeHours <= 24.0 { // Less than one day + return PerformanceElite + } else if leadTimeHours <= 168.0 { // Less than one week + return PerformanceHigh + } else if leadTimeHours <= 720.0 { // Less 
than one month + return PerformanceMedium + } + return PerformanceLow +} + +// classifyMeanTimeToRecovery classifies MTTR performance +func (impl DeploymentMetricServiceImpl) classifyMeanTimeToRecovery(mttr float64) string { + // Convert minutes to hours for classification + mttrHours := mttr / 60.0 + + // Based on DORA research benchmarks + if mttrHours <= 1.0 { // Less than one hour + return PerformanceElite + } else if mttrHours <= 24.0 { // Less than one day + return PerformanceHigh + } else if mttrHours <= 168.0 { // Less than one week + return PerformanceMedium + } + return PerformanceLow +} + +// ProcessBulkDoraMetrics processes DORA metrics for bulk requests (without materials data) +// This function can be used alongside getBulkDeploymentMetricsWithBulkQueries +func (impl DeploymentMetricServiceImpl) ProcessBulkDoraMetrics(request *dto.BulkMetricRequest) ([]DoraMetrics, error) { + if len(request.AppEnvPairs) == 0 { + return []DoraMetrics{}, nil + } + + // Step 1: Get all releases for all app-env pairs in one query + allReleases, err := impl.appReleaseRepository.GetReleaseBetweenBulk(request.AppEnvPairs, *request.From, *request.To) + if err != nil { + impl.logger.Errorw("error getting bulk releases for DORA metrics", "err", err) + return nil, err + } + + // Step 2: Group releases by app-env pair + releasesByAppEnv := make(map[string][]sql.AppRelease) + var allReleaseIds []int + + for _, release := range allReleases { + key := utils2.GenerateAppEnvKey(release.AppId, release.EnvironmentId) + releasesByAppEnv[key] = append(releasesByAppEnv[key], release) + allReleaseIds = append(allReleaseIds, release.Id) + } + + // Step 3: Get only lead times in bulk (materials not needed for bulk processing) + var allLeadTimes []sql.LeadTime + + if len(allReleaseIds) > 0 { + allLeadTimes, err = impl.leadTimeRepository.FindByIds(allReleaseIds) + if err != nil { + impl.logger.Errorw("error getting bulk lead times for DORA metrics", "err", err) + return nil, err + } + } + + // 
Step 4: Calculate DORA metrics for all app-env pairs (without materials) + return impl.CalculateDoraMetricsForBulk(request.AppEnvPairs, releasesByAppEnv, allLeadTimes, *request.From, *request.To), nil +} + +// calculateCycleTimeBetweenReleases calculates the time between consecutive releases +func (impl DeploymentMetricServiceImpl) calculateCycleTimeBetweenReleases(releases []*dto.Metric, lastRelease *sql.AppRelease) { + if len(releases) == 0 { + return + } + + // Calculate cycle time between consecutive releases + for i := 0; i < len(releases)-1; i++ { + releases[i].CycleTime = releases[i].ReleaseTime.Sub(releases[i+1].ReleaseTime).Minutes() + } + + // Handle the last release + if lastRelease != nil { + releases[len(releases)-1].CycleTime = releases[len(releases)-1].ReleaseTime.Sub(lastRelease.TriggerTime).Minutes() + } else if len(releases) > 0 { + releases[len(releases)-1].CycleTime = 0 + } +} + +// populateMetricsWithImprovedLogic populates dto.Metrics using DORA calculation helper functions +func (impl DeploymentMetricServiceImpl) populateMetricsWithImprovedLogic(appReleases []sql.AppRelease, materials []*sql.PipelineMaterial, leadTimes []sql.LeadTime, lastRelease *sql.AppRelease, fromTime, toTime time.Time) (*dto.Metrics, error) { + releases := impl.transform(appReleases, materials, leadTimes) + + impl.calculateCycleTimeBetweenReleases(releases, lastRelease) + lastFailedTime := "" + recoveryTimeLastFailed := float64(0) + for i := 0; i < len(releases); i++ { + if releases[i].ReleaseStatus == dto.Failure { + if lastFailedTime == "" { + lastFailedTime = releases[i].ReleaseTime.Format(constants.Layout) + } + if i < len(releases)-1 && releases[i+1].ReleaseStatus == dto.Failure { + continue + } + for j := i - 1; j >= 0; j-- { + if releases[j].ReleaseStatus == dto.Success { + releases[i].RecoveryTime = releases[j].ReleaseTime.Sub(releases[i].ReleaseTime).Minutes() + if recoveryTimeLastFailed == 0 { + recoveryTimeLastFailed = releases[i].RecoveryTime + } + break + } 
+ } + } + } + + metrics := &dto.Metrics{ + Series: releases, + AverageCycleTime: impl.calculateDeploymentFrequency(releases, fromTime, toTime), + AverageLeadTime: impl.calculateMeanLeadTimeForChanges(releases), + ChangeFailureRate: impl.calculateChangeFailureRateNew(releases), + AverageRecoveryTime: impl.calculateMeanTimeToRecovery(releases), + LastFailedTime: lastFailedTime, + RecoveryTimeLastFailed: recoveryTimeLastFailed, + } + + // Calculate change size metrics + if len(metrics.Series) > 0 { + impl.calculateChangeSize(metrics) + } + + return metrics, nil +} + +// ProcessSingleDoraMetrics processes DORA metrics for a single app-env pair +func (impl DeploymentMetricServiceImpl) ProcessSingleDoraMetrics(request *dto.MetricRequest) (*dto.Metrics, error) { + from, to, err := utils2.ParseDateRange(request.From, request.To) + if err != nil { + return nil, err + } + + releases, err := impl.appReleaseRepository.GetReleaseBetween(request.AppId, request.EnvId, from, to) + if err != nil { + impl.logger.Errorw("error getting releases for DORA metrics", "err", err) + return nil, err + } + + if len(releases) == 0 { + return utils2.CreateEmptyMetrics(), nil + } + + var releaseIds []int + for _, v := range releases { + releaseIds = append(releaseIds, v.Id) + } + + materials, err := impl.pipelineMaterialRepository.FindByAppReleaseIds(releaseIds) + if err != nil { + impl.logger.Errorw("error getting materials for DORA metrics", "err", err) + return nil, err + } + + leadTimes, err := impl.leadTimeRepository.FindByIds(releaseIds) + if err != nil { + impl.logger.Errorw("error getting lead times for DORA metrics", "err", err) + return nil, err + } + + // Get previous release with bounds checking + var lastRelease *sql.AppRelease + if len(releases) > 0 { + lastId := releases[len(releases)-1].Id + lastRelease, err = impl.appReleaseRepository.GetPreviousRelease(request.AppId, request.EnvId, lastId) + if err != nil && !utils.IsErrNoRows(err) { + impl.logger.Errorw("error getting previous 
release from db ", "err", err) + // Don't return error, just continue without previous release + } + if utils.IsErrNoRows(err) { + lastRelease = nil + } + } + + return impl.populateMetricsWithImprovedLogic(releases, materials, leadTimes, lastRelease, from, to) +} diff --git a/lens/pkg/DeploymentMetricService_test.go b/lens/pkg/DeploymentMetricService_test.go index da9b19a36..d7cc676b8 100644 --- a/lens/pkg/DeploymentMetricService_test.go +++ b/lens/pkg/DeploymentMetricService_test.go @@ -17,11 +17,13 @@ package pkg import ( - "github.com/devtron-labs/lens/internal/sql" - "go.uber.org/zap" "reflect" "testing" "time" + + "github.com/devtron-labs/lens/internal/dto" + "github.com/devtron-labs/lens/internal/sql" + "go.uber.org/zap" ) func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { @@ -48,8 +50,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 10, ChangeSizeLineDeleted: 10, TriggerTime: currentTime.AddDate(0, 0, -1), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Success, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Success, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -65,8 +67,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 11, ChangeSizeLineDeleted: 11, TriggerTime: currentTime.AddDate(0, 0, -2), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Failure, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Failure, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -82,8 +84,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 12, ChangeSizeLineDeleted: 12, TriggerTime: currentTime.AddDate(0, 0, -3), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Failure, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Failure, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -118,8 +120,8 @@ func 
TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 10, ChangeSizeLineDeleted: 10, TriggerTime: currentTime.AddDate(0, 0, -1), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Failure, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Failure, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -135,8 +137,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 11, ChangeSizeLineDeleted: 11, TriggerTime: currentTime.AddDate(0, 0, -2), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Failure, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Failure, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -152,8 +154,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 12, ChangeSizeLineDeleted: 12, TriggerTime: currentTime.AddDate(0, 0, -3), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Failure, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Failure, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -170,8 +172,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 12, ChangeSizeLineDeleted: 12, TriggerTime: currentTime.AddDate(0, 0, -4), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Success, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Success, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -188,8 +190,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 10, ChangeSizeLineDeleted: 10, TriggerTime: currentTime.AddDate(0, 0, -1), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Failure, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Failure, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -205,8 +207,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 11, 
ChangeSizeLineDeleted: 11, TriggerTime: currentTime.AddDate(0, 0, -2), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Failure, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Failure, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -222,8 +224,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 12, ChangeSizeLineDeleted: 12, TriggerTime: currentTime.AddDate(0, 0, -3), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Success, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Success, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -240,8 +242,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 12, ChangeSizeLineDeleted: 12, TriggerTime: currentTime.AddDate(0, 0, -4), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Success, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Success, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -258,8 +260,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 10, ChangeSizeLineDeleted: 10, TriggerTime: currentTime.AddDate(0, 0, -1), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Success, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Success, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -275,8 +277,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 11, ChangeSizeLineDeleted: 11, TriggerTime: currentTime.AddDate(0, 0, -2), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Failure, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Failure, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -292,8 +294,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 12, ChangeSizeLineDeleted: 12, TriggerTime: currentTime.AddDate(0, 0, -3), - ReleaseType: sql.RollForward, - 
ReleaseStatus: sql.Failure, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Failure, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -310,8 +312,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 12, ChangeSizeLineDeleted: 12, TriggerTime: currentTime.AddDate(0, 0, -4), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Success, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Success, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -328,8 +330,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 10, ChangeSizeLineDeleted: 10, TriggerTime: currentTime.AddDate(0, 0, -1), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Success, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Success, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -345,8 +347,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 11, ChangeSizeLineDeleted: 11, TriggerTime: currentTime.AddDate(0, 0, -2), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Failure, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Failure, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -362,8 +364,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 12, ChangeSizeLineDeleted: 12, TriggerTime: currentTime.AddDate(0, 0, -3), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Success, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Success, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -380,8 +382,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 12, ChangeSizeLineDeleted: 12, TriggerTime: currentTime.AddDate(0, 0, -4), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Success, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Success, ProcessStage: 0, 
CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -398,8 +400,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 10, ChangeSizeLineDeleted: 10, TriggerTime: currentTime.AddDate(0, 0, -1), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Success, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Success, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -415,8 +417,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 11, ChangeSizeLineDeleted: 11, TriggerTime: currentTime.AddDate(0, 0, -2), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Success, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Success, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -432,8 +434,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 12, ChangeSizeLineDeleted: 12, TriggerTime: currentTime.AddDate(0, 0, -3), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Success, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Success, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -450,8 +452,8 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { ChangeSizeLineAdded: 12, ChangeSizeLineDeleted: 12, TriggerTime: currentTime.AddDate(0, 0, -4), - ReleaseType: sql.RollForward, - ReleaseStatus: sql.Success, + ReleaseType: dto.RollForward, + ReleaseStatus: dto.Success, ProcessStage: 0, CreatedTime: time.Time{}, UpdatedTime: time.Time{}, @@ -461,7 +463,7 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { name string fields fields args args - want *Metrics + want *dto.Metrics wantErr bool }{ { @@ -478,7 +480,7 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { leadTimes: []sql.LeadTime{}, lastRelease: nil, }, - want: &Metrics{ + want: &dto.Metrics{ Series: nil, AverageCycleTime: 0, AverageLeadTime: 0, @@ -503,7 +505,7 @@ 
func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { leadTimes: []sql.LeadTime{}, lastRelease: nil, }, - want: &Metrics{ + want: &dto.Metrics{ Series: nil, AverageCycleTime: 24, AverageLeadTime: 0, @@ -528,7 +530,7 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { leadTimes: []sql.LeadTime{}, lastRelease: &lastReleaseS, }, - want: &Metrics{ + want: &dto.Metrics{ Series: nil, AverageCycleTime: 24, AverageLeadTime: 0, @@ -553,7 +555,7 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { leadTimes: []sql.LeadTime{}, lastRelease: &lastReleaseF, }, - want: &Metrics{ + want: &dto.Metrics{ Series: nil, AverageCycleTime: 24, AverageLeadTime: 0, @@ -578,7 +580,7 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { leadTimes: []sql.LeadTime{}, lastRelease: &lastReleaseF1, }, - want: &Metrics{ + want: &dto.Metrics{ Series: nil, AverageCycleTime: 24, AverageLeadTime: 0, @@ -603,7 +605,7 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { leadTimes: []sql.LeadTime{}, lastRelease: &lastReleaseF2, }, - want: &Metrics{ + want: &dto.Metrics{ Series: nil, AverageCycleTime: 24, AverageLeadTime: 0, @@ -628,7 +630,7 @@ func TestDeploymentMetricServiceImpl_populateMetrics(t *testing.T) { leadTimes: []sql.LeadTime{}, lastRelease: &lastReleaseF3, }, - want: &Metrics{ + want: &dto.Metrics{ Series: nil, AverageCycleTime: 24, AverageLeadTime: 0, diff --git a/lens/pkg/IngestionService.go b/lens/pkg/IngestionService.go index 90937944a..d8b885476 100644 --- a/lens/pkg/IngestionService.go +++ b/lens/pkg/IngestionService.go @@ -18,10 +18,12 @@ package pkg import ( "context" + "time" + "github.com/caarlos0/env" "github.com/devtron-labs/lens/bean" + "github.com/devtron-labs/lens/internal/dto" pb "github.com/devtron-labs/protos/gitSensor" - "time" "github.com/devtron-labs/lens/client/gitSensor" "github.com/devtron-labs/lens/internal/sql" @@ -103,7 +105,7 @@ func (impl *IngestionServiceImpl) 
ProcessDeploymentEvent(deploymentEvent *Deploy if err != nil { return nil, err } - if appRelease.ReleaseType == sql.RollBack { + if appRelease.ReleaseType == dto.RollBack { //no need to fetch git detail return return appRelease, nil //FIXME } @@ -134,7 +136,7 @@ func (impl *IngestionServiceImpl) markPreviousTriggerFail(release *sql.AppReleas } if previousAppRelease != nil { impl.logger.Infow("pipeline failure detected", "PreviousappRelease", previousAppRelease) - previousAppRelease.ReleaseStatus = sql.Failure + previousAppRelease.ReleaseStatus = dto.Failure previousAppRelease.UpdatedTime = time.Now() _, err = impl.appReleaseRepository.Update(previousAppRelease) if err != nil { @@ -142,7 +144,7 @@ func (impl *IngestionServiceImpl) markPreviousTriggerFail(release *sql.AppReleas return err } //mark this release as patch - release.ReleaseType = sql.Patch + release.ReleaseType = dto.Patch release.UpdatedTime = time.Now() _, err = impl.appReleaseRepository.Update(release) if err != nil { @@ -242,7 +244,7 @@ func (impl *IngestionServiceImpl) fetchAndSaveChangesFromGit(appRelease *sql.App } appRelease.UpdatedTime = time.Now() - appRelease.ProcessStage = sql.LeadTimeFetch + appRelease.ProcessStage = dto.LeadTimeFetch appRelease.ChangeSizeLineAdded = lineAdded appRelease.ChangeSizeLineDeleted = lineRemoved appRelease, err = impl.appReleaseRepository.Update(appRelease) @@ -264,8 +266,8 @@ func (impl *IngestionServiceImpl) saveAppRelease(deploymentEvent *DeploymentEven UpdatedTime: time.Now(), PipelineOverrideId: deploymentEvent.PipelineOverrideId, ReleaseId: deploymentEvent.ReleaseId, - ProcessStage: sql.Init, - ReleaseType: sql.Unknown, + ProcessStage: dto.Init, + ReleaseType: dto.Unknown, } appRelease, err := impl.appReleaseRepository.Save(appRelease) if err != nil { @@ -301,11 +303,11 @@ func (impl *IngestionServiceImpl) checkAndUpdateReleaseType(appRelease *sql.AppR return appRelease, err } if duplicate { - appRelease.ReleaseType = sql.RollBack + appRelease.ReleaseType = 
dto.RollBack } else { - appRelease.ReleaseType = sql.RollForward + appRelease.ReleaseType = dto.RollForward } - appRelease.ProcessStage = sql.ReleaseTypeDetermined + appRelease.ProcessStage = dto.ReleaseTypeDetermined appRelease.UpdatedTime = time.Now() appRelease, err = impl.appReleaseRepository.Update(appRelease) diff --git a/lens/pkg/constants/constants.go b/lens/pkg/constants/constants.go new file mode 100644 index 000000000..bd4c77233 --- /dev/null +++ b/lens/pkg/constants/constants.go @@ -0,0 +1,5 @@ +package constants + +const ( + Layout = "2006-01-02T15:04:05.000Z" +) diff --git a/lens/pkg/utils/utils.go b/lens/pkg/utils/utils.go new file mode 100644 index 000000000..0b4efc72d --- /dev/null +++ b/lens/pkg/utils/utils.go @@ -0,0 +1,32 @@ +package utils + +import ( + "fmt" + "time" + + "github.com/devtron-labs/lens/internal/dto" + "github.com/devtron-labs/lens/pkg/constants" +) + +// ParseDateRange parses from and to date strings +func ParseDateRange(from, to string) (time.Time, time.Time, error) { + fromTime, err := time.Parse(constants.Layout, from) + if err != nil { + return time.Time{}, time.Time{}, err + } + toTime, err := time.Parse(constants.Layout, to) + if err != nil { + return time.Time{}, time.Time{}, err + } + return fromTime, toTime, nil +} + +// GenerateAppEnvKey creates a consistent key for app-env pair mapping +func GenerateAppEnvKey(appId, envId int) string { + return fmt.Sprintf("%d-%d", appId, envId) +} + +// CreateEmptyMetrics creates an empty metrics response +func CreateEmptyMetrics() *dto.Metrics { + return &dto.Metrics{Series: []*dto.Metric{}} +} diff --git a/lens/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go b/lens/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go index b7434bb27..cfd7f98e4 100644 --- a/lens/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go +++ b/lens/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go @@ -124,6 +124,21 @@ const ( 
INFRA_HELM_RELEASE_ACTION_TOPIC string = "INFRA_HELM_RELEASE_ACTION_TOPIC" INFRA_HELM_RELEASE_ACTION_GROUP string = "INFRA_HELM_RELEASE_ACTION_GROUP" INFRA_HELM_RELEASE_ACTION_DURABLE string = "INFRA_HELM_RELEASE_ACTION_DURABLE" + COST_MODULE_INSTALLATION_TOPIC string = "COST_MODULE_INSTALLATION_TOPIC" + COST_MODULE_INSTALLATION_GROUP string = "COST_MODULE_INSTALLATION_GROUP" + COST_MODULE_INSTALLATION_DURABLE string = "COST_MODULE_INSTALLATION_DURABLE" + COST_MODULE_GPU_INSTALLATION_TOPIC string = "COST_MODULE_GPU_INSTALLATION_TOPIC" + COST_MODULE_GPU_INSTALLATION_GROUP string = "COST_MODULE_GPU_INSTALLATION_GROUP" + COST_MODULE_GPU_INSTALLATION_DURABLE string = "COST_MODULE_GPU_INSTALLATION_DURABLE" + STORAGE_MODULE_TOPIC string = "STORAGE_MODULE_TOPIC" + STORAGE_MODULE_GROUP string = "STORAGE_MODULE_GROUP" + STORAGE_MODULE_DURABLE string = "STORAGE_MODULE_DURABLE" + STORAGE_VELERO_INSTALL_TOPIC string = "STORAGE_VELERO_INSTALL_TOPIC" + STORAGE_VELERO_INSTALL_GROUP string = "STORAGE_VELERO_INSTALL_GROUP" + STORAGE_VELERO_INSTALL_DURABLE string = "STORAGE_VELERO_INSTALL_DURABLE" + STORAGE_VELERO_POST_INSTALLATION_TOPIC string = "STORAGE_VELERO_POST_INSTALLATION_TOPIC" + STORAGE_VELERO_POST_INSTALLATION_GROUP string = "STORAGE_VELERO_POST_INSTALLATION_GROUP" + STORAGE_VELERO_POST_INSTALLATION_DURABLE string = "STORAGE_VELERO_POST_INSTALLATION_DURABLE" ) type NatsTopic struct { @@ -179,6 +194,11 @@ var natsTopicMapping = map[string]NatsTopic{ INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC: {topicName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_GROUP, consumerName: INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE}, 
INFRA_HELM_RELEASE_ACTION_TOPIC: {topicName: INFRA_HELM_RELEASE_ACTION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: INFRA_HELM_RELEASE_ACTION_GROUP, consumerName: INFRA_HELM_RELEASE_ACTION_DURABLE}, + COST_MODULE_INSTALLATION_TOPIC: {topicName: COST_MODULE_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_INSTALLATION_GROUP, consumerName: COST_MODULE_INSTALLATION_DURABLE}, + COST_MODULE_GPU_INSTALLATION_TOPIC: {topicName: COST_MODULE_GPU_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: COST_MODULE_GPU_INSTALLATION_GROUP, consumerName: COST_MODULE_GPU_INSTALLATION_DURABLE}, + STORAGE_MODULE_TOPIC: {topicName: STORAGE_MODULE_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_MODULE_GROUP, consumerName: STORAGE_MODULE_DURABLE}, + STORAGE_VELERO_INSTALL_TOPIC: {topicName: STORAGE_VELERO_INSTALL_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_VELERO_INSTALL_GROUP, consumerName: STORAGE_VELERO_INSTALL_DURABLE}, + STORAGE_VELERO_POST_INSTALLATION_TOPIC: {topicName: STORAGE_VELERO_POST_INSTALLATION_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: STORAGE_VELERO_POST_INSTALLATION_GROUP, consumerName: STORAGE_VELERO_POST_INSTALLATION_DURABLE}, } var NatsStreamWiseConfigMapping = map[string]NatsStreamConfig{ @@ -221,6 +241,11 @@ var NatsConsumerWiseConfigMapping = map[string]NatsConsumerConfig{ INFRASTRACTURE_INSTALLATION_SUCCESS_DURABLE: {}, INFRASTRACTURE_INSTALLATION_DELETE_SUCCESS_DURABLE: {}, INFRA_HELM_RELEASE_ACTION_DURABLE: {}, + COST_MODULE_INSTALLATION_DURABLE: {}, + COST_MODULE_GPU_INSTALLATION_DURABLE: {}, + STORAGE_MODULE_DURABLE: {}, + STORAGE_VELERO_INSTALL_DURABLE: {}, + STORAGE_VELERO_POST_INSTALLATION_DURABLE: {}, } // getConsumerConfigMap will fetch the consumer wise config from the json string diff --git a/lens/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go b/lens/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go new file mode 100644 index 000000000..372765014 --- 
/dev/null +++ b/lens/vendor/github.com/devtron-labs/common-lib/utils/TimeUtils.go @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "fmt" + "strings" + "time" +) + +type TimeRangeRequest struct { + From *time.Time `json:"from" schema:"from"` + To *time.Time `json:"to" schema:"to"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=today yesterday week month quarter lastWeek lastMonth lastQuarter last24Hours last7Days last30Days last90Days"` +} + +func NewTimeRangeRequest(from *time.Time, to *time.Time) *TimeRangeRequest { + return &TimeRangeRequest{ + From: from, + To: to, + } +} + +func NewTimeWindowRequest(timeWindow TimeWindows) *TimeRangeRequest { + return &TimeRangeRequest{ + TimeWindow: &timeWindow, + } +} + +// TimeWindows is a string type that represents different time windows +type TimeWindows string + +func (timeRange TimeWindows) String() string { + return string(timeRange) +} + +// Define constants for different time windows +const ( + Today TimeWindows = "today" + Yesterday TimeWindows = "yesterday" + Week TimeWindows = "week" + Month TimeWindows = "month" + Quarter TimeWindows = "quarter" + LastWeek TimeWindows = "lastWeek" + LastMonth TimeWindows = "lastMonth" + Year TimeWindows = "year" + LastQuarter TimeWindows = "lastQuarter" + Last24Hours TimeWindows = "last24Hours" + Last7Days TimeWindows = "last7Days" + 
Last30Days TimeWindows = "last30Days" + Last90Days TimeWindows = "last90Days" +) + +func (timeRange *TimeRangeRequest) ParseAndValidateTimeRange() (*TimeRangeRequest, error) { + if timeRange == nil { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("invalid time range request. either from/to or timeWindow must be provided") + } + now := time.Now() + // If timeWindow is provided, it takes preference over from/to + if timeRange.TimeWindow != nil { + switch *timeRange.TimeWindow { + case Today: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Yesterday: + start := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()).Add(-24 * time.Hour) + end := start.Add(24 * time.Hour) + return NewTimeRangeRequest(&start, &end), nil + case Week: + // Current week (Monday to Sunday) + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + start := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Month: + start := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Quarter: + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterStart := time.Month((quarter-1)*3 + 1) + start := time.Date(now.Year(), quarterStart, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case LastWeek: + weekday := int(now.Weekday()) + if weekday == 0 { // Sunday + weekday = 7 + } + thisWeekStart := now.AddDate(0, 0, -(weekday - 1)).Truncate(24 * time.Hour) + lastWeekStart := thisWeekStart.AddDate(0, 0, -7) + lastWeekEnd := thisWeekStart.Add(-time.Second) + return NewTimeRangeRequest(&lastWeekStart, &lastWeekEnd), nil + case LastMonth: + thisMonthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + lastMonthStart := thisMonthStart.AddDate(0, -1, 0) + lastMonthEnd := 
thisMonthStart.Add(-time.Second) + return NewTimeRangeRequest(&lastMonthStart, &lastMonthEnd), nil + case LastQuarter: + // Calculate current quarter + currentQuarter := ((int(now.Month()) - 1) / 3) + 1 + + // Calculate previous quarter + var prevQuarter int + var prevYear int + if currentQuarter == 1 { + // If current quarter is Q1, previous quarter is Q4 of previous year + prevQuarter = 4 + prevYear = now.Year() - 1 + } else { + // Otherwise, previous quarter is in the same year + prevQuarter = currentQuarter - 1 + prevYear = now.Year() + } + + // Calculate start and end of previous quarter + prevQuarterStartMonth := time.Month((prevQuarter-1)*3 + 1) + prevQuarterStart := time.Date(prevYear, prevQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + + // End of previous quarter is the start of current quarter minus 1 second + currentQuarterStartMonth := time.Month((currentQuarter-1)*3 + 1) + currentQuarterStart := time.Date(now.Year(), currentQuarterStartMonth, 1, 0, 0, 0, 0, now.Location()) + if currentQuarter == 1 { + // If current quarter is Q1, we need to calculate Q4 end of previous year + currentQuarterStart = time.Date(now.Year(), time.January, 1, 0, 0, 0, 0, now.Location()) + } + prevQuarterEnd := currentQuarterStart.Add(-time.Second) + + return NewTimeRangeRequest(&prevQuarterStart, &prevQuarterEnd), nil + case Year: + start := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + return NewTimeRangeRequest(&start, &now), nil + case Last24Hours: + start := now.Add(-24 * time.Hour) + return NewTimeRangeRequest(&start, &now), nil + case Last7Days: + start := now.AddDate(0, 0, -7) + return NewTimeRangeRequest(&start, &now), nil + case Last30Days: + start := now.AddDate(0, 0, -30) + return NewTimeRangeRequest(&start, &now), nil + case Last90Days: + start := now.AddDate(0, 0, -90) + return NewTimeRangeRequest(&start, &now), nil + default: + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("unsupported time window: %q", 
*timeRange.TimeWindow) + } + } + + // Use from/to dates if provided + if timeRange.From != nil && timeRange.To != nil { + if timeRange.From.After(*timeRange.To) { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from date cannot be after to date") + } + return NewTimeRangeRequest(timeRange.From, timeRange.To), nil + } else { + return NewTimeRangeRequest(&time.Time{}, &time.Time{}), fmt.Errorf("from and to dates are required if time window is not provided") + } +} + +// TimeBoundariesRequest represents the request for time boundary frames +type TimeBoundariesRequest struct { + TimeWindowBoundaries []string `json:"timeWindowBoundaries" schema:"timeWindowBoundaries" validate:"omitempty,min=1"` + TimeWindow *TimeWindows `json:"timeWindow" schema:"timeWindow" validate:"omitempty,oneof=week month quarter year"` // week, month, quarter, year + Iterations int `json:"iterations" schema:"iterations" validate:"omitempty,min=1"` +} + +// TimeWindowBoundaries represents the start and end times for a time window +type TimeWindowBoundaries struct { + StartTime time.Time + EndTime time.Time +} + +func (timeBoundaries *TimeBoundariesRequest) ParseAndValidateTimeBoundaries() ([]TimeWindowBoundaries, error) { + if timeBoundaries == nil { + return []TimeWindowBoundaries{}, fmt.Errorf("invalid time boundaries request") + } + // If timeWindow is provided, it takes preference over timeWindowBoundaries + if timeBoundaries.TimeWindow != nil { + switch *timeBoundaries.TimeWindow { + case Week: + return GetWeeklyTimeBoundaries(timeBoundaries.Iterations), nil + case Month: + return GetMonthlyTimeBoundaries(timeBoundaries.Iterations), nil + case Quarter: + return GetQuarterlyTimeBoundaries(timeBoundaries.Iterations), nil + case Year: + return GetYearlyTimeBoundaries(timeBoundaries.Iterations), nil + default: + return []TimeWindowBoundaries{}, fmt.Errorf("unsupported time window: %q", *timeBoundaries.TimeWindow) + } + } else if len(timeBoundaries.TimeWindowBoundaries) != 0 
{ + // Validate time window + return DecodeAndValidateTimeWindowBoundaries(timeBoundaries.TimeWindowBoundaries) + } else { + return []TimeWindowBoundaries{}, fmt.Errorf("time window boundaries are required if time window is not provided") + } +} + +func GetWeeklyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + weekday := int(now.Weekday()) + if weekday == 0 { + weekday = 7 + } + // Get start of this week (Monday) + weekStart := now.AddDate(0, 0, -(weekday - 1)) + // Set time to midnight + weekStart = time.Date(weekStart.Year(), weekStart.Month(), weekStart.Day(), 0, 0, 0, 0, weekStart.Location()) + + for i := 0; i < iterations; i++ { + start := weekStart.AddDate(0, 0, -7*i) + end := start.AddDate(0, 0, 7) + // For the current week, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetMonthlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this month (1st) + monthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := monthStart.AddDate(0, -i, 0) + end := start.AddDate(0, 1, 0) + // For the current month, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetQuarterlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + quarter := ((int(now.Month()) - 1) / 3) + 1 + quarterMonth := 
time.Month((quarter-1)*3 + 1) + // Get start of this quarter (1st of the month) + quarterStart := time.Date(now.Year(), quarterMonth, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := quarterStart.AddDate(0, -3*i, 0) + end := start.AddDate(0, 3, 0) + // For the current quarter, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func GetYearlyTimeBoundaries(iterations int) []TimeWindowBoundaries { + if iterations <= 0 { + return []TimeWindowBoundaries{} + } + boundaries := make([]TimeWindowBoundaries, iterations) + now := time.Now() + // Get start of this year (1st of January) + yearStart := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, now.Location()) + for i := 0; i < iterations; i++ { + start := yearStart.AddDate(-i, 0, 0) + end := start.AddDate(1, 0, 0) + // For the current year, if now < end, set end = now + if i == 0 && now.Before(end) { + end = now + } + boundaries[i] = TimeWindowBoundaries{ + StartTime: start, + EndTime: end, + } + } + return boundaries +} + +func DecodeAndValidateTimeWindowBoundaries(timeWindowBoundaries []string) ([]TimeWindowBoundaries, error) { + boundaries := make([]TimeWindowBoundaries, 0, len(timeWindowBoundaries)) + for _, boundary := range timeWindowBoundaries { + parts := strings.Split(boundary, "|") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid time window boundary format: %q", boundary) + } + startTime, err := time.Parse(time.RFC3339, parts[0]) + if err != nil { + return nil, fmt.Errorf("invalid start time format: %q. expected format: %q", parts[0], time.RFC3339) + } + endTime, err := time.Parse(time.RFC3339, parts[1]) + if err != nil { + return nil, fmt.Errorf("invalid end time format: %q. 
expected format: %q", parts[1], time.RFC3339) + } + if startTime.After(endTime) { + return nil, fmt.Errorf("start time cannot be after end time: %q", boundary) + } + boundaries = append(boundaries, TimeWindowBoundaries{ + StartTime: startTime, + EndTime: endTime, + }) + } + return boundaries, nil +} diff --git a/lens/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go b/lens/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go index ea16a2f72..2bbfa1dc1 100644 --- a/lens/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go +++ b/lens/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go @@ -74,7 +74,9 @@ type PgQueryMonitoringConfig struct { } func GetPgQueryMonitoringConfig(serviceName string) (PgQueryMonitoringConfig, error) { - cfg := &PgQueryMonitoringConfig{} + cfg := &PgQueryMonitoringConfig{ + ServiceName: serviceName, + } err := env.Parse(cfg) return *cfg, err } diff --git a/lens/vendor/modules.txt b/lens/vendor/modules.txt index 8b34d3667..bca6f98a8 100644 --- a/lens/vendor/modules.txt +++ b/lens/vendor/modules.txt @@ -7,7 +7,7 @@ github.com/caarlos0/env # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/devtron-labs/common-lib v0.19.1 => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +# github.com/devtron-labs/common-lib v0.19.1 => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713 ## explicit; go 1.24.0 github.com/devtron-labs/common-lib/constants github.com/devtron-labs/common-lib/fetchAllEnv @@ -304,4 +304,4 @@ google.golang.org/protobuf/types/known/timestamppb # mellium.im/sasl v0.3.2 ## explicit; go 1.20 mellium.im/sasl -# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251104082051-1f7627ecb6be +# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251205120949-62ef158e4713