diff --git a/Makefile b/Makefile index 6de73e89..051a8013 100644 --- a/Makefile +++ b/Makefile @@ -56,6 +56,9 @@ generate-v1beta1-serverless-client: install-openapi-generator ## Generate server @echo "==> Generating serverless cdc client" rm -rf pkg/tidbcloud/v1beta1/serverless/cdc cd tools/openapi-generator && npx openapi-generator-cli generate --inline-schema-options RESOLVE_INLINE_ENUMS=true --additional-properties=withGoMod=false,enumClassPrefix=true,disallowAdditionalPropertiesIfNotPresent=false --global-property=apiTests=false,apiDocs=false,modelDocs=false,modelTests=false -i ../../pkg/tidbcloud/v1beta1/serverless/cdc.swagger.json -g go -o ../../pkg/tidbcloud/v1beta1/serverless/cdc --package-name cdc -c go/config.yaml + @echo "==> Generating serverless migration client" + rm -rf pkg/tidbcloud/v1beta1/serverless/migration + cd tools/openapi-generator && npx openapi-generator-cli generate --inline-schema-options RESOLVE_INLINE_ENUMS=true --additional-properties=withGoMod=false,enumClassPrefix=true,disallowAdditionalPropertiesIfNotPresent=false --global-property=apiTests=false,apiDocs=false,modelDocs=false,modelTests=false -i ../../pkg/tidbcloud/v1beta1/serverless/dm.swagger.json -g go -o ../../pkg/tidbcloud/v1beta1/serverless/migration --package-name migration -c go/config.yaml cd pkg && go fmt ./tidbcloud/v1beta1/serverless/... && goimports -w . 
@echo "==> Generating serverless privatelink client" rm -rf pkg/tidbcloud/v1beta1/serverless/privatelink diff --git a/go.mod b/go.mod index d949bff1..1496ce93 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.24.0 require ( github.com/AlecAivazis/survey/v2 v2.3.6 + github.com/AlekSi/pointer v1.2.0 github.com/aws/aws-sdk-go-v2 v1.27.1 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.23 github.com/charmbracelet/bubbles v0.17.1 @@ -31,6 +32,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.18.2 github.com/stretchr/testify v1.9.0 + github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a github.com/tidbcloud/tidbcloud-cli/pkg v0.0.1 github.com/xo/usql v0.19.2 github.com/zalando/go-keyring v0.2.3 diff --git a/go.sum b/go.sum index 4c870cc0..b23afa23 100644 --- a/go.sum +++ b/go.sum @@ -27,6 +27,8 @@ github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XB github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= github.com/AlecAivazis/survey/v2 v2.3.6 h1:NvTuVHISgTHEHeBFqt6BHOe4Ny/NwGZr7w+F8S9ziyw= github.com/AlecAivazis/survey/v2 v2.3.6/go.mod h1:4AuI9b7RjAR+G7v9+C4YSlX/YL3K3cWNXgWXOhllqvI= +github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= +github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= @@ -641,6 +643,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod 
h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a h1:a6TNDN9CgG+cYjaeN8l2mc4kSz2iMiCDQxPEyltUV/I= +github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo= github.com/thda/tds v0.1.7 h1:s29kbnJK0agL3ps85A/sb9XS2uxgKF5UJ6AZjbyqXX4= github.com/thda/tds v0.1.7/go.mod h1:isLIF1oZdXfkqVMJM8RyNrsjlHPlTKnPlnsBs7ngZcM= github.com/trinodb/trino-go-client v0.315.0 h1:9mU+42VGw9Hnp9R1hkhWlIrQp9o+V01Gx1KlHjTkM1c= diff --git a/internal/cli/serverless/changefeed/list.go b/internal/cli/serverless/changefeed/list.go index 87c94a41..8b5269de 100644 --- a/internal/cli/serverless/changefeed/list.go +++ b/internal/cli/serverless/changefeed/list.go @@ -16,6 +16,7 @@ package changefeed import ( "fmt" + "time" "github.com/juju/errors" "github.com/spf13/cobra" @@ -138,7 +139,7 @@ func ListCmd(h *internal.Helper) *cobra.Command { *item.DisplayName, string(item.Sink.Type), string(*item.State), - item.CreateTime.String(), + item.CreateTime.Format(time.RFC3339), }) } err := output.PrintHumanTable(h.IOStreams.Out, columns, rows) diff --git a/internal/cli/serverless/cluster.go b/internal/cli/serverless/cluster.go index 7bce6336..16abdfb2 100644 --- a/internal/cli/serverless/cluster.go +++ b/internal/cli/serverless/cluster.go @@ -22,6 +22,7 @@ import ( "github.com/tidbcloud/tidbcloud-cli/internal/cli/serverless/changefeed" "github.com/tidbcloud/tidbcloud-cli/internal/cli/serverless/dataimport" "github.com/tidbcloud/tidbcloud-cli/internal/cli/serverless/export" + "github.com/tidbcloud/tidbcloud-cli/internal/cli/serverless/migration" "github.com/tidbcloud/tidbcloud-cli/internal/cli/serverless/privatelink" "github.com/tidbcloud/tidbcloud-cli/internal/cli/serverless/sqluser" @@ -54,6 +55,7 @@ func Cmd(h *internal.Helper) *cobra.Command { serverlessCmd.AddCommand(authorizednetwork.AuthorizedNetworkCmd(h)) serverlessCmd.AddCommand(changefeed.ChangefeedCmd(h)) 
serverlessCmd.AddCommand(privatelink.PrivateLinkConnectionCmd(h)) + serverlessCmd.AddCommand(migration.MigrationCmd(h)) return serverlessCmd } diff --git a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go new file mode 100644 index 00000000..5e91c2bd --- /dev/null +++ b/internal/cli/serverless/migration/create.go @@ -0,0 +1,303 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package migration + +import ( + "context" + "encoding/json" + "fmt" + "os" + "slices" + "strings" + "time" + + "github.com/AlekSi/pointer" + aws "github.com/aws/aws-sdk-go-v2/aws" + "github.com/fatih/color" + "github.com/juju/errors" + "github.com/spf13/cobra" + "github.com/tailscale/hujson" + + "github.com/tidbcloud/tidbcloud-cli/internal" + "github.com/tidbcloud/tidbcloud-cli/internal/config" + "github.com/tidbcloud/tidbcloud-cli/internal/flag" + "github.com/tidbcloud/tidbcloud-cli/internal/output" + "github.com/tidbcloud/tidbcloud-cli/internal/service/cloud" + pkgmigration "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" +) + +func CreateCmd(h *internal.Helper) *cobra.Command { + var cmd = &cobra.Command{ + Use: "create", + Short: "Create a migration", + Args: cobra.NoArgs, + Example: fmt.Sprintf(` Create a migration: + $ %[1]s serverless migration create -c --display-name --config-file --dry-run + $ %[1]s serverless migration create -c --display-name --config-file +`, 
config.CliName), + PreRunE: func(cmd *cobra.Command, args []string) error { + return markCreateMigrationRequiredFlags(cmd) + }, + RunE: func(cmd *cobra.Command, args []string) error { + d, err := h.Client() + if err != nil { + return err + } + ctx := cmd.Context() + + dryRun, err := cmd.Flags().GetBool(flag.DryRun) + if err != nil { + return errors.Trace(err) + } + clusterID, err := cmd.Flags().GetString(flag.ClusterID) + if err != nil { + return errors.Trace(err) + } + name, err := cmd.Flags().GetString(flag.DisplayName) + if err != nil { + return errors.Trace(err) + } + if strings.TrimSpace(name) == "" { + return errors.New("display name is required") + } + configPath, err := cmd.Flags().GetString(flag.MigrationConfigFile) + if err != nil { + return errors.Trace(err) + } + configPath = strings.TrimSpace(configPath) + if configPath == "" { + return errors.New("config file path is required") + } + definitionBytes, err := os.ReadFile(configPath) + if err != nil { + return errors.Annotatef(err, "failed to read config file %q", configPath) + } + definitionStr := string(definitionBytes) + + sources, target, mode, err := parseMigrationDefinition(definitionStr) + if err != nil { + return err + } + + if dryRun { + precheckBody := &pkgmigration.MigrationServicePrecheckBody{ + DisplayName: name, + Sources: sources, + Target: target, + Mode: mode, + } + return runMigrationPrecheck(ctx, d, clusterID, precheckBody, h) + } + + createBody := &pkgmigration.MigrationServiceCreateMigrationBody{ + DisplayName: name, + Sources: sources, + Target: target, + Mode: mode, + } + + resp, err := d.CreateMigration(ctx, clusterID, createBody) + if err != nil { + return errors.Trace(err) + } + + migrationID := aws.ToString(resp.MigrationId) + fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration %s(%s) created", name, migrationID)) + return nil + }, + } + + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "The ID of the target cluster.") + cmd.Flags().StringP(flag.DisplayName, 
flag.DisplayNameShort, "", "Display name for the migration.") + cmd.Flags().String(flag.MigrationConfigFile, "", "Path to a migration config JSON file. Use \"ticloud serverless migration template --mode \" to print templates.") + cmd.Flags().Bool(flag.DryRun, false, "Run a migration precheck (dry run) with the provided inputs without creating a migration.") + + return cmd +} + +func markCreateMigrationRequiredFlags(cmd *cobra.Command) error { + for _, fn := range []string{flag.ClusterID, flag.DisplayName, flag.MigrationConfigFile} { + if err := cmd.MarkFlagRequired(fn); err != nil { + return err + } + } + return nil +} + +const ( + precheckPollInterval = 5 * time.Second + precheckPollTimeout = 2 * time.Minute +) + +func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clusterID string, body *pkgmigration.MigrationServicePrecheckBody, h *internal.Helper) error { + resp, err := client.CreateMigrationPrecheck(ctx, clusterID, body) + if err != nil { + return errors.Trace(err) + } + if resp.PrecheckId == nil || *resp.PrecheckId == "" { + return errors.New("precheck created but ID is empty") + } + precheckID := *resp.PrecheckId + fmt.Fprintf(h.IOStreams.Out, "migration precheck %s created, polling results...\n", precheckID) + + ticker := time.NewTicker(precheckPollInterval) + defer ticker.Stop() + pollCtx, cancel := context.WithTimeout(ctx, precheckPollTimeout) + defer cancel() + + // Poll precheck status until it finishes or the overall timeout is hit. 
+ for { + select { + case <-pollCtx.Done(): + if pollCtx.Err() == context.DeadlineExceeded { + return errors.Errorf("migration precheck polling timed out after %s", precheckPollTimeout) + } + return pollCtx.Err() + case <-ticker.C: + result, err := client.GetMigrationPrecheck(pollCtx, clusterID, precheckID) + if err != nil { + return errors.Trace(err) + } + finished, err := printPrecheckSummary(result, h) + if err != nil { + return err + } + if !finished { + continue + } + if result.GetStatus() == pkgmigration.MIGRATIONPRECHECKSTATUS_FAILED { + fmt.Fprintln(h.IOStreams.Out, color.RedString("migration precheck %s failed", precheckID)) + return errors.New("migration precheck failed") + } + fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration precheck %s passed", precheckID)) + return nil + } + } +} + +func isPrecheckUnfinished(status pkgmigration.MigrationPrecheckStatus) bool { + switch status { + case pkgmigration.MIGRATIONPRECHECKSTATUS_PENDING, + pkgmigration.MIGRATIONPRECHECKSTATUS_RUNNING: + return true + default: + return false + } +} + +func printPrecheckSummary(result *pkgmigration.MigrationPrecheck, h *internal.Helper) (bool, error) { + if isPrecheckUnfinished(result.GetStatus()) { + fmt.Fprintf(h.IOStreams.Out, "precheck %s summary (status %s)\n", result.GetPrecheckId(), result.GetStatus()) + fmt.Fprintf(h.IOStreams.Out, "Total: %d, Success: %d, Warn: %d, Failed: %d\n", + aws.ToInt32(result.Total), aws.ToInt32(result.SuccessCnt), aws.ToInt32(result.WarnCnt), aws.ToInt32(result.FailedCnt)) + return false, nil + } + + fmt.Fprintf(h.IOStreams.Out, "precheck %s finished with status %s\n", result.GetPrecheckId(), result.GetStatus()) + fmt.Fprintf(h.IOStreams.Out, "Total: %d, Success: %d, Warn: %d, Failed: %d\n", + aws.ToInt32(result.Total), aws.ToInt32(result.SuccessCnt), aws.ToInt32(result.WarnCnt), aws.ToInt32(result.FailedCnt)) + if len(result.Items) == 0 { + return true, nil + } + columns := []output.Column{"Type", "Status", "Description", "Reason", 
"Solution"} + rows := make([]output.Row, 0, len(result.Items)) + for _, item := range result.Items { + if !shouldPrintPrecheckItem(item.Status) { + continue + } + rows = append(rows, output.Row{ + string(pointer.Get(item.Type)), + string(pointer.Get(item.Status)), + pointer.Get(item.Description), + pointer.Get(item.Reason), + pointer.Get(item.Solution), + }) + } + if len(rows) == 0 { + return true, nil + } + return true, output.PrintHumanTable(h.IOStreams.Out, columns, rows) +} + +// shouldPrintPrecheckItem reports whether a precheck item should be shown to users. +// Currently only WARNING and FAILED statuses surface because SUCCESS does not +// provide actionable information. +func shouldPrintPrecheckItem(status *pkgmigration.PrecheckItemStatus) bool { + if status == nil { + return false + } + switch *status { + case pkgmigration.PRECHECKITEMSTATUS_WARNING, + pkgmigration.PRECHECKITEMSTATUS_FAILED: + return true + default: + return false + } +} + +func parseMigrationDefinition(value string) ([]pkgmigration.Source, pkgmigration.Target, pkgmigration.TaskMode, error) { + trimmed := strings.TrimSpace(value) + if trimmed == "" { + return nil, pkgmigration.Target{}, "", errors.New("migration config is required; use --config-file") + } + var payload struct { + Sources []pkgmigration.Source `json:"sources"` + Target *pkgmigration.Target `json:"target"` + Mode string `json:"mode"` + } + stdJson, err := standardizeJSON([]byte(trimmed)) + if err != nil { + return nil, pkgmigration.Target{}, "", errors.Annotate(err, "invalid migration definition JSON") + } + if err := json.Unmarshal(stdJson, &payload); err != nil { + return nil, pkgmigration.Target{}, "", errors.Annotate(err, "invalid migration definition JSON") + } + if len(payload.Sources) == 0 { + return nil, pkgmigration.Target{}, "", errors.New("migration definition must include at least one source") + } + if payload.Target == nil { + return nil, pkgmigration.Target{}, "", errors.New("migration definition must include 
 the target block") + } + mode, err := parseMigrationMode(payload.Mode) + if err != nil { + return nil, pkgmigration.Target{}, "", err + } + return payload.Sources, *payload.Target, mode, nil +} + +func parseMigrationMode(value string) (pkgmigration.TaskMode, error) { + trimmed := strings.TrimSpace(value) + if trimmed == "" { + return "", errors.New("mode is required in the migration config file") + } + normalized := strings.ToUpper(trimmed) + mode := pkgmigration.TaskMode(normalized) + if slices.Contains(pkgmigration.AllowedTaskModeEnumValues, mode) { + return mode, nil + } + return "", errors.Errorf("invalid mode %q, allowed values: %s", value, pkgmigration.AllowedTaskModeEnumValues) +} + +// standardizeJSON accepts JSON With Commas and Comments (JWCC, see +// https://nigeltao.github.io/blog/2021/json-with-commas-comments.html) and +// returns a standard JSON byte slice ready for json.Unmarshal. +func standardizeJSON(b []byte) ([]byte, error) { + ast, err := hujson.Parse(b) + if err != nil { + return b, err + } + ast.Standardize() + return ast.Pack(), nil +} diff --git a/internal/cli/serverless/migration/delete.go b/internal/cli/serverless/migration/delete.go new file mode 100644 index 00000000..1abe641c --- /dev/null +++ b/internal/cli/serverless/migration/delete.go @@ -0,0 +1,149 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package migration + +import ( + "fmt" + + "github.com/AlecAivazis/survey/v2" + "github.com/AlecAivazis/survey/v2/terminal" + "github.com/fatih/color" + "github.com/juju/errors" + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" + "github.com/tidbcloud/tidbcloud-cli/internal/config" + "github.com/tidbcloud/tidbcloud-cli/internal/flag" + "github.com/tidbcloud/tidbcloud-cli/internal/service/cloud" + "github.com/tidbcloud/tidbcloud-cli/internal/util" +) + +type DeleteOpts struct { + interactive bool +} + +func (c DeleteOpts) NonInteractiveFlags() []string { + return []string{ + flag.ClusterID, + flag.MigrationID, + } +} + +func (c *DeleteOpts) MarkInteractive(cmd *cobra.Command) error { + for _, fn := range c.NonInteractiveFlags() { + f := cmd.Flags().Lookup(fn) + if f != nil && f.Changed { + c.interactive = false + break + } + } + if !c.interactive { + for _, fn := range c.NonInteractiveFlags() { + if err := cmd.MarkFlagRequired(fn); err != nil { + return err + } + } + } + return nil +} + +func DeleteCmd(h *internal.Helper) *cobra.Command { + opts := DeleteOpts{interactive: true} + var force bool + + var cmd = &cobra.Command{ + Use: "delete", + Short: "Delete a migration", + Aliases: []string{"rm"}, + Args: cobra.NoArgs, + Example: fmt.Sprintf(` Delete a migration in interactive mode: + $ %[1]s serverless migration delete + + Delete a migration in non-interactive mode: + $ %[1]s serverless migration delete -c --migration-id `, config.CliName), + PreRunE: func(cmd *cobra.Command, args []string) error { + return opts.MarkInteractive(cmd) + }, + RunE: func(cmd *cobra.Command, args []string) error { + d, err := h.Client() + if err != nil { + return err + } + ctx := cmd.Context() + + var clusterID, migrationID string + if opts.interactive { + if !h.IOStreams.CanPrompt { + return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") + } + project, err := cloud.GetSelectedProject(ctx, h.QueryPageSize, d) + 
if err != nil { + return err + } + cluster, err := cloud.GetSelectedCluster(ctx, project.ID, h.QueryPageSize, d) + if err != nil { + return err + } + clusterID = cluster.ID + migration, err := cloud.GetSelectedMigration(ctx, clusterID, h.QueryPageSize, d) + if err != nil { + return err + } + migrationID = migration.ID + } else { + var err error + clusterID, err = cmd.Flags().GetString(flag.ClusterID) + if err != nil { + return errors.Trace(err) + } + migrationID, err = cmd.Flags().GetString(flag.MigrationID) + if err != nil { + return errors.Trace(err) + } + } + + if !force { + if !h.IOStreams.CanPrompt { + return errors.New("The terminal doesn't support prompt, please run with --force to delete the migration") + } + prompt := &survey.Input{ + Message: fmt.Sprintf("%s %s %s", color.BlueString("Please type"), color.HiBlueString(config.Confirmed), color.BlueString("to confirm:")), + } + var confirmation string + if err := survey.AskOne(prompt, &confirmation); err != nil { + if errors.Is(err, terminal.InterruptErr) { + return util.InterruptError + } + return err + } + if confirmation != config.Confirmed { + return errors.New("Incorrect confirm string entered, skipping migration deletion") + } + } + + if _, err := d.DeleteMigration(ctx, clusterID, migrationID); err != nil { + return errors.Trace(err) + } + + fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration %s deleted", migrationID)) + return nil + }, + } + + cmd.Flags().BoolVar(&force, flag.Force, false, "Delete without confirmation.") + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration.") + cmd.Flags().StringP(flag.MigrationID, flag.MigrationIDShort, "", "ID of the migration to delete.") + return cmd +} diff --git a/internal/cli/serverless/migration/describe.go b/internal/cli/serverless/migration/describe.go new file mode 100644 index 00000000..fb0a68f7 --- /dev/null +++ b/internal/cli/serverless/migration/describe.go @@ -0,0 +1,126 @@ +// Copyright 2025 
PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package migration + +import ( + "fmt" + + "github.com/juju/errors" + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" + "github.com/tidbcloud/tidbcloud-cli/internal/config" + "github.com/tidbcloud/tidbcloud-cli/internal/flag" + "github.com/tidbcloud/tidbcloud-cli/internal/output" + "github.com/tidbcloud/tidbcloud-cli/internal/service/cloud" +) + +type DescribeOpts struct { + interactive bool +} + +func (c DescribeOpts) NonInteractiveFlags() []string { + return []string{ + flag.ClusterID, + flag.MigrationID, + } +} + +func (c *DescribeOpts) MarkInteractive(cmd *cobra.Command) error { + for _, fn := range c.NonInteractiveFlags() { + f := cmd.Flags().Lookup(fn) + if f != nil && f.Changed { + c.interactive = false + break + } + } + if !c.interactive { + for _, fn := range c.NonInteractiveFlags() { + if err := cmd.MarkFlagRequired(fn); err != nil { + return err + } + } + } + return nil +} + +func DescribeCmd(h *internal.Helper) *cobra.Command { + opts := DescribeOpts{interactive: true} + + var cmd = &cobra.Command{ + Use: "describe", + Short: "Describe a migration", + Aliases: []string{"get"}, + Args: cobra.NoArgs, + Example: fmt.Sprintf(` Describe a migration in interactive mode: + $ %[1]s serverless migration describe + + Describe a migration in non-interactive mode: + $ %[1]s serverless migration describe -c --migration-id `, config.CliName), + PreRunE: func(cmd 
*cobra.Command, args []string) error { + return opts.MarkInteractive(cmd) + }, + RunE: func(cmd *cobra.Command, args []string) error { + d, err := h.Client() + if err != nil { + return err + } + ctx := cmd.Context() + + var clusterID, migrationID string + if opts.interactive { + if !h.IOStreams.CanPrompt { + return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") + } + project, err := cloud.GetSelectedProject(ctx, h.QueryPageSize, d) + if err != nil { + return err + } + cluster, err := cloud.GetSelectedCluster(ctx, project.ID, h.QueryPageSize, d) + if err != nil { + return err + } + clusterID = cluster.ID + + migration, err := cloud.GetSelectedMigration(ctx, clusterID, h.QueryPageSize, d) + if err != nil { + return err + } + migrationID = migration.ID + } else { + var err error + clusterID, err = cmd.Flags().GetString(flag.ClusterID) + if err != nil { + return errors.Trace(err) + } + migrationID, err = cmd.Flags().GetString(flag.MigrationID) + if err != nil { + return errors.Trace(err) + } + } + + resp, err := d.GetMigration(ctx, clusterID, migrationID) + if err != nil { + return errors.Trace(err) + } + + return errors.Trace(output.PrintJson(h.IOStreams.Out, resp)) + }, + } + + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration.") + cmd.Flags().StringP(flag.MigrationID, flag.MigrationIDShort, "", "ID of the migration to describe.") + return cmd +} diff --git a/internal/cli/serverless/migration/list.go b/internal/cli/serverless/migration/list.go new file mode 100644 index 00000000..798d3576 --- /dev/null +++ b/internal/cli/serverless/migration/list.go @@ -0,0 +1,138 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package migration + +import ( + "fmt" + "time" + + "github.com/AlekSi/pointer" + "github.com/juju/errors" + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" + "github.com/tidbcloud/tidbcloud-cli/internal/config" + "github.com/tidbcloud/tidbcloud-cli/internal/flag" + "github.com/tidbcloud/tidbcloud-cli/internal/output" + "github.com/tidbcloud/tidbcloud-cli/internal/service/cloud" +) + +type ListOpts struct { + interactive bool +} + +func (c ListOpts) NonInteractiveFlags() []string { + return []string{flag.ClusterID} +} + +func (c *ListOpts) MarkInteractive(cmd *cobra.Command) error { + for _, fn := range c.NonInteractiveFlags() { + f := cmd.Flags().Lookup(fn) + if f != nil && f.Changed { + c.interactive = false + break + } + } + if !c.interactive { + for _, fn := range c.NonInteractiveFlags() { + if err := cmd.MarkFlagRequired(fn); err != nil { + return err + } + } + } + return nil +} + +func ListCmd(h *internal.Helper) *cobra.Command { + opts := ListOpts{interactive: true} + + var cmd = &cobra.Command{ + Use: "list", + Short: "List migrations", + Aliases: []string{"ls"}, + Args: cobra.NoArgs, + Example: fmt.Sprintf(` List migrations in interactive mode: + $ %[1]s serverless migration list + + List migrations in non-interactive mode with JSON output: + $ %[1]s serverless migration list -c -o json`, config.CliName), + PreRunE: func(cmd *cobra.Command, args []string) error { + return opts.MarkInteractive(cmd) + }, + RunE: func(cmd *cobra.Command, args []string) error { + d, err := h.Client() + if err != nil { + return err 
+ } + ctx := cmd.Context() + + var clusterID string + if opts.interactive { + if !h.IOStreams.CanPrompt { + return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") + } + project, err := cloud.GetSelectedProject(ctx, h.QueryPageSize, d) + if err != nil { + return err + } + cluster, err := cloud.GetSelectedCluster(ctx, project.ID, h.QueryPageSize, d) + if err != nil { + return err + } + clusterID = cluster.ID + } else { + var err error + clusterID, err = cmd.Flags().GetString(flag.ClusterID) + if err != nil { + return errors.Trace(err) + } + } + + pageSize := int32(h.QueryPageSize) + resp, err := d.ListMigrations(ctx, clusterID, &pageSize, nil, nil) + if err != nil { + return errors.Trace(err) + } + + format, err := cmd.Flags().GetString(flag.Output) + if err != nil { + return errors.Trace(err) + } + + if format == output.JsonFormat || !h.IOStreams.CanPrompt { + return errors.Trace(output.PrintJson(h.IOStreams.Out, resp)) + } + + if format != output.HumanFormat { + return fmt.Errorf("unsupported output format: %s", format) + } + + columns := []output.Column{"ID", "Name", "Mode", "State", "CreatedAt"} + var rows []output.Row + for _, task := range resp.Migrations { + id := pointer.Get(task.MigrationId) + name := pointer.Get(task.DisplayName) + mode := string(pointer.Get(task.Mode)) + state := string(pointer.Get(task.State)) + rows = append(rows, output.Row{id, name, mode, state, task.CreateTime.Format(time.RFC3339)}) + } + return errors.Trace(output.PrintHumanTable(h.IOStreams.Out, columns, rows)) + }, + } + + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "The cluster ID of the migration tasks to list.") + cmd.Flags().StringP(flag.Output, flag.OutputShort, output.HumanFormat, flag.OutputHelp) + return cmd +} diff --git a/internal/cli/serverless/migration/migration.go b/internal/cli/serverless/migration/migration.go new file mode 100644 index 00000000..1ad0195d --- /dev/null +++ 
b/internal/cli/serverless/migration/migration.go @@ -0,0 +1,39 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package migration + +import ( + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" +) + +func MigrationCmd(h *internal.Helper) *cobra.Command { + var cmd = &cobra.Command{ + Use: "migration", + Short: "Manage TiDB Cloud Serverless migrations", + Aliases: []string{"dm"}, + } + + cmd.AddCommand(CreateCmd(h)) + cmd.AddCommand(DescribeCmd(h)) + cmd.AddCommand(ListCmd(h)) + cmd.AddCommand(DeleteCmd(h)) + cmd.AddCommand(TemplateCmd(h)) + cmd.AddCommand(PauseCmd(h)) + cmd.AddCommand(ResumeCmd(h)) + + return cmd +} diff --git a/internal/cli/serverless/migration/pause.go b/internal/cli/serverless/migration/pause.go new file mode 100644 index 00000000..01b0c207 --- /dev/null +++ b/internal/cli/serverless/migration/pause.go @@ -0,0 +1,123 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package migration + +import ( + "fmt" + + "github.com/juju/errors" + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" + "github.com/tidbcloud/tidbcloud-cli/internal/config" + "github.com/tidbcloud/tidbcloud-cli/internal/flag" + "github.com/tidbcloud/tidbcloud-cli/internal/service/cloud" +) + +type PauseOpts struct { + interactive bool +} + +func (c PauseOpts) NonInteractiveFlags() []string { + return []string{ + flag.ClusterID, + flag.MigrationID, + } +} + +func (c *PauseOpts) MarkInteractive(cmd *cobra.Command) error { + for _, fn := range c.NonInteractiveFlags() { + f := cmd.Flags().Lookup(fn) + if f != nil && f.Changed { + c.interactive = false + break + } + } + if !c.interactive { + for _, fn := range c.NonInteractiveFlags() { + if err := cmd.MarkFlagRequired(fn); err != nil { + return err + } + } + } + return nil +} + +func PauseCmd(h *internal.Helper) *cobra.Command { + opts := PauseOpts{interactive: true} + + var cmd = &cobra.Command{ + Use: "pause", + Short: "Pause a migration", + Args: cobra.NoArgs, + Example: fmt.Sprintf(` Pause a migration in interactive mode: + $ %[1]s serverless migration pause + + Pause a migration in non-interactive mode: + $ %[1]s serverless migration pause -c --migration-id `, config.CliName), + PreRunE: func(cmd *cobra.Command, args []string) error { + return opts.MarkInteractive(cmd) + }, + RunE: func(cmd *cobra.Command, args []string) error { + d, err := h.Client() + if err != nil { + return err + } + ctx := cmd.Context() + + var clusterID, migrationID string + if opts.interactive { + if !h.IOStreams.CanPrompt { + return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") + } + project, err := cloud.GetSelectedProject(ctx, h.QueryPageSize, d) + if err != nil { + return err + } + cluster, err := cloud.GetSelectedCluster(ctx, project.ID, h.QueryPageSize, d) 
+ if err != nil { + return err + } + clusterID = cluster.ID + migration, err := cloud.GetSelectedMigration(ctx, clusterID, h.QueryPageSize, d) + if err != nil { + return err + } + migrationID = migration.ID + } else { + var err error + clusterID, err = cmd.Flags().GetString(flag.ClusterID) + if err != nil { + return errors.Trace(err) + } + migrationID, err = cmd.Flags().GetString(flag.MigrationID) + if err != nil { + return errors.Trace(err) + } + } + + if err := d.PauseMigration(ctx, clusterID, migrationID); err != nil { + return errors.Trace(err) + } + + fmt.Fprintf(h.IOStreams.Out, "migration %s paused\n", migrationID) + return nil + }, + } + + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration.") + cmd.Flags().StringP(flag.MigrationID, flag.MigrationIDShort, "", "ID of the migration to pause.") + return cmd +} diff --git a/internal/cli/serverless/migration/resume.go b/internal/cli/serverless/migration/resume.go new file mode 100644 index 00000000..100fa6ae --- /dev/null +++ b/internal/cli/serverless/migration/resume.go @@ -0,0 +1,123 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package migration
+
+import (
+	"fmt"
+
+	"github.com/juju/errors"
+	"github.com/spf13/cobra"
+
+	"github.com/tidbcloud/tidbcloud-cli/internal"
+	"github.com/tidbcloud/tidbcloud-cli/internal/config"
+	"github.com/tidbcloud/tidbcloud-cli/internal/flag"
+	"github.com/tidbcloud/tidbcloud-cli/internal/service/cloud"
+)
+
+type ResumeOpts struct {
+	interactive bool
+}
+
+func (c ResumeOpts) NonInteractiveFlags() []string {
+	return []string{
+		flag.ClusterID,
+		flag.MigrationID,
+	}
+}
+
+func (c *ResumeOpts) MarkInteractive(cmd *cobra.Command) error {
+	for _, fn := range c.NonInteractiveFlags() {
+		f := cmd.Flags().Lookup(fn)
+		if f != nil && f.Changed {
+			c.interactive = false
+			break
+		}
+	}
+	if !c.interactive {
+		for _, fn := range c.NonInteractiveFlags() {
+			if err := cmd.MarkFlagRequired(fn); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func ResumeCmd(h *internal.Helper) *cobra.Command {
+	opts := ResumeOpts{interactive: true}
+
+	var cmd = &cobra.Command{
+		Use:   "resume",
+		Short: "Resume a paused migration",
+		Args:  cobra.NoArgs,
+		Example: fmt.Sprintf(`  Resume a migration in interactive mode:
+  $ %[1]s serverless migration resume
+
+  Resume a migration in non-interactive mode:
+  $ %[1]s serverless migration resume -c <cluster-id> --migration-id <migration-id>`, config.CliName),
+		PreRunE: func(cmd *cobra.Command, args []string) error {
+			return opts.MarkInteractive(cmd)
+		},
+		RunE: func(cmd *cobra.Command, args []string) error {
+			d, err := h.Client()
+			if err != nil {
+				return err
+			}
+			ctx := cmd.Context()
+
+			var clusterID, migrationID string
+			if opts.interactive {
+				if !h.IOStreams.CanPrompt {
+					return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode")
+				}
+				project, err := cloud.GetSelectedProject(ctx, h.QueryPageSize, d)
+				if err != nil {
+					return err
+				}
+				cluster, err := cloud.GetSelectedCluster(ctx, project.ID, h.QueryPageSize, d)
+				if err != nil {
+					return err
+				}
+				clusterID = cluster.ID
+				migration, err :=
cloud.GetSelectedMigration(ctx, clusterID, h.QueryPageSize, d) + if err != nil { + return err + } + migrationID = migration.ID + } else { + var err error + clusterID, err = cmd.Flags().GetString(flag.ClusterID) + if err != nil { + return errors.Trace(err) + } + migrationID, err = cmd.Flags().GetString(flag.MigrationID) + if err != nil { + return errors.Trace(err) + } + } + + if err := d.ResumeMigration(ctx, clusterID, migrationID); err != nil { + return errors.Trace(err) + } + + fmt.Fprintf(h.IOStreams.Out, "migration %s resumed\n", migrationID) + return nil + }, + } + + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration.") + cmd.Flags().StringP(flag.MigrationID, flag.MigrationIDShort, "", "ID of the migration to resume.") + return cmd +} diff --git a/internal/cli/serverless/migration/template.go b/internal/cli/serverless/migration/template.go new file mode 100644 index 00000000..1f6ddd12 --- /dev/null +++ b/internal/cli/serverless/migration/template.go @@ -0,0 +1,236 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package migration + +import ( + "fmt" + "strings" + + "github.com/fatih/color" + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" + "github.com/tidbcloud/tidbcloud-cli/internal/config" + "github.com/tidbcloud/tidbcloud-cli/internal/flag" + pkgmigration "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" +) + +const ( + migrationDefinitionAllTemplate = `{ + // Required migration mode. Use "ALL" for full + incremental. + "mode": "ALL", + // Target TiDB Cloud user credentials used by the migration + "target": { + "user": "migration_user", + "password": "Passw0rd!" + }, + // List at least one migration source + "sources": [ + { + // Required: source database type. Supported values: MYSQL, ALICLOUD_RDS_MYSQL, AWS_RDS_MYSQL + "sourceType": "MYSQL", + "connProfile": { + // Required connection type. Supported values: PUBLIC, PRIVATE_LINK + // PUBLIC connections require host + "connType": "PUBLIC", + "host": "10.0.0.8", + // PRIVATE_LINK connections use endpointId. Get endpointId by 'ticloud plc' commands. 
+ "connType": "PRIVATE_LINK", + "endpointId": "pl-xxxxxxxx", + "host": "10.0.0.2", + "port": 3306, + "user": "dm_sync_user", + "password": "Passw0rd!", + // optional TLS settings + "security": { + // TLS materials must be Base64 encoded + "sslCaContent": "", + "sslCertContent": "", + "sslKeyContent": "", + "certAllowedCn": ["client-cn"] + } + }, + // Optional block/allow rules to control synced schemas/tables (mutually exclusive with routeRules) + "baRules": { + "doDbs": ["app_db"], + "doTables": [ + {"schema": "app_db", "table": "orders"}, + {"schema": "app_db", "table": "customers"} + ] + }, + // Optional route rules to rename objects during migration (mutually exclusive with baRules) + "routeRules": [ + { + "sourceTable": { + "schemaPattern": "app_db", + "tablePattern": "orders" + }, + "targetTable": { + "schema": "app_db", + "table": "orders_copy" + } + } + ], + } + ] +}` + + migrationDefinitionIncrementalTemplate = `{ + // Incremental-only mode keeps the source and target in sync + "mode": "INCREMENTAL", + // Target TiDB Cloud user credentials used by the migration + "target": { + "user": "migration_user", + "password": "Passw0rd!" + }, + "sources": [ + { + // Required: source database type. Supported values: MYSQL, ALICLOUD_RDS_MYSQL, AWS_RDS_MYSQL + "sourceType": "MYSQL", + "connProfile": { + // Required connection type. Supported values: PUBLIC, PRIVATE_LINK + // PUBLIC connections require host + "connType": "PUBLIC", + "host": "10.0.0.8", + // PRIVATE_LINK connections use endpointId. Get endpointId by 'ticloud plc' commands. 
+ "connType": "PRIVATE_LINK", + "endpointId": "pl-xxxxxxxx", + "port": 3306, + "user": "dm_sync_user", + "password": "Passw0rd!", + // optional TLS settings + "security": { + // TLS materials must be Base64 encoded + "sslCaContent": "", + "sslCertContent": "", + "sslKeyContent": "", + "certAllowedCn": ["client-cn"] + } + }, + // Optional block/allow rules when only part of the data should be replicated (mutually exclusive with routeRules) + "baRules": { + "doDbs": ["app_db"], + "doTables": [ + {"schema": "app_db", "table": "orders"} + ] + }, + // Optional route rule sample for remapping objects during incremental sync (mutually exclusive with baRules) + "routeRules": [ + { + "sourceTable": { + "schemaPattern": "app_db", + "tablePattern": "orders" + }, + "targetTable": { + "schema": "app_db", + "table": "orders_copy" + } + } + ], + // Optional start position for incremental sync (binlog position or GTID) + "binlogName": "mysql-bin.000001", + "binlogPos": 4, + "binlogGtid": "3E11FA47-71CA-11E1-9E33-C80AA9429562:1-12345" + } + ] +}` +) + +type templateVariant struct { + heading string + body string +} + +var allowedTemplateModes = []pkgmigration.TaskMode{pkgmigration.TASKMODE_ALL, pkgmigration.TASKMODE_INCREMENTAL} + +var definitionTemplates = map[pkgmigration.TaskMode]templateVariant{ + pkgmigration.TASKMODE_ALL: { + heading: "Definition template (mode = ALL)", + body: migrationDefinitionAllTemplate, + }, + pkgmigration.TASKMODE_INCREMENTAL: { + heading: "Definition template (mode = INCREMENTAL)", + body: migrationDefinitionIncrementalTemplate, + }, +} + +func TemplateCmd(h *internal.Helper) *cobra.Command { + cmd := &cobra.Command{ + Use: "template", + Short: "Show migration JSON templates", + Args: cobra.NoArgs, + Example: fmt.Sprintf(` Show the ALL mode migration template: + $ %[1]s serverless migration template --mode all + + Show the INCREMENTAL migration template: + $ %[1]s serverless migration template --mode incremental`, config.CliName), + PreRunE: func(cmd 
*cobra.Command, args []string) error { + return cmd.MarkFlagRequired(flag.MigrationMode) + }, + RunE: func(cmd *cobra.Command, args []string) error { + modeValue, err := cmd.Flags().GetString(flag.MigrationMode) + if err != nil { + return err + } + mode, err := parseTemplateMode(modeValue) + if err != nil { + return err + } + return renderMigrationTemplate(h, mode) + }, + } + + cmd.Flags().String( + flag.MigrationMode, + "", + fmt.Sprintf( + "Migration mode template to show, one of [%s].", + strings.Join(allowedTemplateModeStrings(), ", "), + ), + ) + return cmd +} + +func renderMigrationTemplate(h *internal.Helper, mode pkgmigration.TaskMode) error { + variant, ok := definitionTemplates[mode] + if !ok { + return fmt.Errorf("unknown mode %q, allowed values: %s", mode, strings.Join(allowedTemplateModeStrings(), ", ")) + } + + fmt.Fprintln(h.IOStreams.Out, color.GreenString(variant.heading)) + fmt.Fprintln(h.IOStreams.Out, variant.body) + return nil +} + +func parseTemplateMode(raw string) (pkgmigration.TaskMode, error) { + trimmed := strings.TrimSpace(raw) + if trimmed == "" { + return "", fmt.Errorf("mode is required; use --%s", flag.MigrationMode) + } + normalized := strings.ToUpper(trimmed) + mode := pkgmigration.TaskMode(normalized) + if _, ok := definitionTemplates[mode]; ok { + return mode, nil + } + return "", fmt.Errorf("unknown mode %q, allowed values: %s", trimmed, strings.Join(allowedTemplateModeStrings(), ", ")) +} + +func allowedTemplateModeStrings() []string { + values := make([]string, 0, len(allowedTemplateModes)) + for _, mode := range allowedTemplateModes { + values = append(values, strings.ToLower(string(mode))) + } + return values +} diff --git a/internal/cli/serverless/privatelink/list.go b/internal/cli/serverless/privatelink/list.go index 0e6eaab4..c7deb220 100644 --- a/internal/cli/serverless/privatelink/list.go +++ b/internal/cli/serverless/privatelink/list.go @@ -16,6 +16,7 @@ package privatelink import ( "fmt" + "time" 
"github.com/juju/errors" "github.com/spf13/cobra" @@ -130,7 +131,7 @@ func ListCmd(h *internal.Helper) *cobra.Command { item.DisplayName, string(item.Type), string(*item.State), - item.CreateTime.String(), + item.CreateTime.Format(time.RFC3339), }) } err := output.PrintHumanTable(h.IOStreams.Out, columns, rows) diff --git a/internal/flag/flag.go b/internal/flag/flag.go index 6c429080..97529487 100644 --- a/internal/flag/flag.go +++ b/internal/flag/flag.go @@ -124,6 +124,12 @@ const ( AWSEndpointServiceName string = "aws.endpoint-service-name" AWSEndpointServiceRegion string = "aws.endpoint-service-region" AlicloudEndpointServiceName string = "alicloud.endpoint-service-name" + + MigrationID string = "migration-id" + MigrationIDShort string = "m" + MigrationConfigFile string = "config-file" + MigrationMode string = "mode" + DryRun string = "dry-run" ) const OutputHelp = "Output format, one of [\"human\" \"json\"]. For the complete result, please use json format." diff --git a/internal/mock/api_client.go b/internal/mock/api_client.go index b980617d..95bada0f 100644 --- a/internal/mock/api_client.go +++ b/internal/mock/api_client.go @@ -20,6 +20,8 @@ import ( imp "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/imp" + migration "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" + mock "github.com/stretchr/testify/mock" privatelink "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/privatelink" @@ -78,6 +80,24 @@ func (_m *TiDBCloudClient) CancelImport(ctx context.Context, clusterId string, i return r0 } +// CancelMigrationPrecheck provides a mock function with given fields: ctx, clusterId, precheckId +func (_m *TiDBCloudClient) CancelMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) error { + ret := _m.Called(ctx, clusterId, precheckId) + + if len(ret) == 0 { + panic("no return value specified for CancelMigrationPrecheck") + } + + var r0 error + if rf, ok := 
ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, clusterId, precheckId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // CancelUpload provides a mock function with given fields: ctx, clusterId, uploadId func (_m *TiDBCloudClient) CancelUpload(ctx context.Context, clusterId string, uploadId *string) error { ret := _m.Called(ctx, clusterId, uploadId) @@ -294,6 +314,66 @@ func (_m *TiDBCloudClient) CreateImport(ctx context.Context, clusterId string, b return r0, r1 } +// CreateMigration provides a mock function with given fields: ctx, clusterId, body +func (_m *TiDBCloudClient) CreateMigration(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error) { + ret := _m.Called(ctx, clusterId, body) + + if len(ret) == 0 { + panic("no return value specified for CreateMigration") + } + + var r0 *migration.Migration + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error)); ok { + return rf(ctx, clusterId, body) + } + if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServiceCreateMigrationBody) *migration.Migration); ok { + r0 = rf(ctx, clusterId, body) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*migration.Migration) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, *migration.MigrationServiceCreateMigrationBody) error); ok { + r1 = rf(ctx, clusterId, body) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateMigrationPrecheck provides a mock function with given fields: ctx, clusterId, body +func (_m *TiDBCloudClient) CreateMigrationPrecheck(ctx context.Context, clusterId string, body *migration.MigrationServicePrecheckBody) (*migration.CreateMigrationPrecheckResp, error) { + ret := _m.Called(ctx, clusterId, body) + + if len(ret) == 0 { + panic("no return value specified for CreateMigrationPrecheck") + } + + var r0 
*migration.CreateMigrationPrecheckResp + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServicePrecheckBody) (*migration.CreateMigrationPrecheckResp, error)); ok { + return rf(ctx, clusterId, body) + } + if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServicePrecheckBody) *migration.CreateMigrationPrecheckResp); ok { + r0 = rf(ctx, clusterId, body) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*migration.CreateMigrationPrecheckResp) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, *migration.MigrationServicePrecheckBody) error); ok { + r1 = rf(ctx, clusterId, body) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // CreatePrivateLinkConnection provides a mock function with given fields: ctx, clusterId, body func (_m *TiDBCloudClient) CreatePrivateLinkConnection(ctx context.Context, clusterId string, body *privatelink.PrivateLinkConnectionServiceCreatePrivateLinkConnectionBody) (*privatelink.PrivateLinkConnection, error) { ret := _m.Called(ctx, clusterId, body) @@ -534,6 +614,36 @@ func (_m *TiDBCloudClient) DeleteExport(ctx context.Context, clusterId string, e return r0, r1 } +// DeleteMigration provides a mock function with given fields: ctx, clusterId, taskId +func (_m *TiDBCloudClient) DeleteMigration(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { + ret := _m.Called(ctx, clusterId, taskId) + + if len(ret) == 0 { + panic("no return value specified for DeleteMigration") + } + + var r0 *migration.Migration + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.Migration, error)); ok { + return rf(ctx, clusterId, taskId) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.Migration); ok { + r0 = rf(ctx, clusterId, taskId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*migration.Migration) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, 
string, string) error); ok { + r1 = rf(ctx, clusterId, taskId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // DeletePrivateLinkConnection provides a mock function with given fields: ctx, clusterId, privateLinkConnectionId func (_m *TiDBCloudClient) DeletePrivateLinkConnection(ctx context.Context, clusterId string, privateLinkConnectionId string) (*privatelink.PrivateLinkConnection, error) { ret := _m.Called(ctx, clusterId, privateLinkConnectionId) @@ -954,6 +1064,66 @@ func (_m *TiDBCloudClient) GetImport(ctx context.Context, clusterId string, id s return r0, r1 } +// GetMigration provides a mock function with given fields: ctx, clusterId, taskId +func (_m *TiDBCloudClient) GetMigration(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { + ret := _m.Called(ctx, clusterId, taskId) + + if len(ret) == 0 { + panic("no return value specified for GetMigration") + } + + var r0 *migration.Migration + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.Migration, error)); ok { + return rf(ctx, clusterId, taskId) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.Migration); ok { + r0 = rf(ctx, clusterId, taskId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*migration.Migration) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, clusterId, taskId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetMigrationPrecheck provides a mock function with given fields: ctx, clusterId, precheckId +func (_m *TiDBCloudClient) GetMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (*migration.MigrationPrecheck, error) { + ret := _m.Called(ctx, clusterId, precheckId) + + if len(ret) == 0 { + panic("no return value specified for GetMigrationPrecheck") + } + + var r0 *migration.MigrationPrecheck + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) 
(*migration.MigrationPrecheck, error)); ok { + return rf(ctx, clusterId, precheckId) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.MigrationPrecheck); ok { + r0 = rf(ctx, clusterId, precheckId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*migration.MigrationPrecheck) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, clusterId, precheckId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetPrivateLinkAvailabilityZones provides a mock function with given fields: ctx, clusterId func (_m *TiDBCloudClient) GetPrivateLinkAvailabilityZones(ctx context.Context, clusterId string) (*privatelink.GetAvailabilityZonesResponse, error) { ret := _m.Called(ctx, clusterId) @@ -1314,6 +1484,36 @@ func (_m *TiDBCloudClient) ListImports(ctx context.Context, clusterId string, pa return r0, r1 } +// ListMigrations provides a mock function with given fields: ctx, clusterId, pageSize, pageToken, orderBy +func (_m *TiDBCloudClient) ListMigrations(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationsResp, error) { + ret := _m.Called(ctx, clusterId, pageSize, pageToken, orderBy) + + if len(ret) == 0 { + panic("no return value specified for ListMigrations") + } + + var r0 *migration.ListMigrationsResp + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, *int32, *string, *string) (*migration.ListMigrationsResp, error)); ok { + return rf(ctx, clusterId, pageSize, pageToken, orderBy) + } + if rf, ok := ret.Get(0).(func(context.Context, string, *int32, *string, *string) *migration.ListMigrationsResp); ok { + r0 = rf(ctx, clusterId, pageSize, pageToken, orderBy) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*migration.ListMigrationsResp) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, *int32, *string, *string) error); ok { + r1 = rf(ctx, clusterId, pageSize, pageToken, orderBy) + 
} else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // ListPrivateLinkConnections provides a mock function with given fields: ctx, clusterId, pageSize, pageToken func (_m *TiDBCloudClient) ListPrivateLinkConnections(ctx context.Context, clusterId string, pageSize *int32, pageToken *string) (*privatelink.ListPrivateLinkConnectionsResponse, error) { ret := _m.Called(ctx, clusterId, pageSize, pageToken) @@ -1464,6 +1664,24 @@ func (_m *TiDBCloudClient) PartialUpdateCluster(ctx context.Context, clusterId s return r0, r1 } +// PauseMigration provides a mock function with given fields: ctx, clusterId, taskId +func (_m *TiDBCloudClient) PauseMigration(ctx context.Context, clusterId string, taskId string) error { + ret := _m.Called(ctx, clusterId, taskId) + + if len(ret) == 0 { + panic("no return value specified for PauseMigration") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, clusterId, taskId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ResetBranch provides a mock function with given fields: ctx, clusterId, branchId func (_m *TiDBCloudClient) ResetBranch(ctx context.Context, clusterId string, branchId string) (*branch.Branch, error) { ret := _m.Called(ctx, clusterId, branchId) @@ -1524,6 +1742,24 @@ func (_m *TiDBCloudClient) Restore(ctx context.Context, body *br.V1beta1RestoreR return r0, r1 } +// ResumeMigration provides a mock function with given fields: ctx, clusterId, taskId +func (_m *TiDBCloudClient) ResumeMigration(ctx context.Context, clusterId string, taskId string) error { + ret := _m.Called(ctx, clusterId, taskId) + + if len(ret) == 0 { + panic("no return value specified for ResumeMigration") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, clusterId, taskId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // StartChangefeed provides a mock function with given fields: ctx, clusterId, changefeedId func (_m 
*TiDBCloudClient) StartChangefeed(ctx context.Context, clusterId string, changefeedId string) (*cdc.Changefeed, error) { ret := _m.Called(ctx, clusterId, changefeedId) diff --git a/internal/service/cloud/api_client.go b/internal/service/cloud/api_client.go index 72b25591..d70291a6 100644 --- a/internal/service/cloud/api_client.go +++ b/internal/service/cloud/api_client.go @@ -34,6 +34,7 @@ import ( "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/cluster" "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/export" "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/imp" + "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/privatelink" "github.com/icholy/digest" @@ -105,6 +106,24 @@ type TiDBCloudClient interface { DownloadExportFiles(ctx context.Context, clusterId string, exportId string, body *export.ExportServiceDownloadExportFilesBody) (*export.DownloadExportFilesResponse, error) + CancelMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) error + + DeleteMigration(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) + + CreateMigrationPrecheck(ctx context.Context, clusterId string, body *migration.MigrationServicePrecheckBody) (*migration.CreateMigrationPrecheckResp, error) + + CreateMigration(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error) + + GetMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (*migration.MigrationPrecheck, error) + + GetMigration(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) + + ListMigrations(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationsResp, error) + + PauseMigration(ctx context.Context, clusterId string, taskId string) error + + 
ResumeMigration(ctx context.Context, clusterId string, taskId string) error + ListSQLUsers(ctx context.Context, clusterID string, pageSize *int32, pageToken *string) (*iam.ApiListSqlUsersRsp, error) CreateSQLUser(ctx context.Context, clusterID string, body *iam.ApiCreateSqlUserReq) (*iam.ApiSqlUser, error) @@ -162,11 +181,12 @@ type ClientDelegate struct { alc *auditlog.APIClient cdc *cdc.APIClient plc *privatelink.APIClient + mc *migration.APIClient } func NewClientDelegateWithToken(token string, serverlessEndpoint string, iamEndpoint string) (*ClientDelegate, error) { transport := NewBearTokenTransport(token) - bc, sc, brc, sic, ec, ic, alc, cdc, plc, err := NewApiClient(transport, serverlessEndpoint, iamEndpoint) + bc, sc, brc, sic, ec, ic, alc, cdc, plc, mc, err := NewApiClient(transport, serverlessEndpoint, iamEndpoint) if err != nil { return nil, err } @@ -180,12 +200,13 @@ func NewClientDelegateWithToken(token string, serverlessEndpoint string, iamEndp alc: alc, cdc: cdc, plc: plc, + mc: mc, }, nil } func NewClientDelegateWithApiKey(publicKey string, privateKey string, serverlessEndpoint string, iamEndpoint string) (*ClientDelegate, error) { transport := NewDigestTransport(publicKey, privateKey) - bc, sc, brc, sic, ec, ic, alc, cdc, plc, err := NewApiClient(transport, serverlessEndpoint, iamEndpoint) + bc, sc, brc, sic, ec, ic, alc, cdc, plc, mc, err := NewApiClient(transport, serverlessEndpoint, iamEndpoint) if err != nil { return nil, err } @@ -199,6 +220,7 @@ func NewClientDelegateWithApiKey(publicKey string, privateKey string, serverless alc: alc, cdc: cdc, plc: plc, + mc: mc, }, nil } @@ -479,6 +501,71 @@ func (d *ClientDelegate) DownloadExportFiles(ctx context.Context, clusterId stri return res, parseError(err, h) } +func (d *ClientDelegate) CancelMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) error { + _, h, err := d.mc.MigrationAPI.MigrationServiceCancelPrecheck(ctx, clusterId, precheckId).Execute() + return 
parseError(err, h) +} + +func (d *ClientDelegate) DeleteMigration(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { + res, h, err := d.mc.MigrationAPI.MigrationServiceDeleteMigration(ctx, clusterId, taskId).Execute() + return res, parseError(err, h) +} + +func (d *ClientDelegate) CreateMigrationPrecheck(ctx context.Context, clusterId string, body *migration.MigrationServicePrecheckBody) (*migration.CreateMigrationPrecheckResp, error) { + r := d.mc.MigrationAPI.MigrationServicePrecheck(ctx, clusterId) + if body != nil { + r = r.Body(*body) + } + res, h, err := r.Execute() + return res, parseError(err, h) +} + +func (d *ClientDelegate) CreateMigration(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error) { + r := d.mc.MigrationAPI.MigrationServiceCreateMigration(ctx, clusterId) + if body != nil { + r = r.Body(*body) + } + res, h, err := r.Execute() + return res, parseError(err, h) +} + +func (d *ClientDelegate) GetMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (*migration.MigrationPrecheck, error) { + res, h, err := d.mc.MigrationAPI.MigrationServiceGetPrecheck(ctx, clusterId, precheckId).Execute() + return res, parseError(err, h) +} + +func (d *ClientDelegate) GetMigration(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { + res, h, err := d.mc.MigrationAPI.MigrationServiceGetMigration(ctx, clusterId, taskId).Execute() + return res, parseError(err, h) +} + +func (d *ClientDelegate) ListMigrations(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationsResp, error) { + r := d.mc.MigrationAPI.MigrationServiceListMigrations(ctx, clusterId) + if pageToken != nil { + r = r.PageToken(*pageToken) + } + if pageSize != nil { + r = r.PageSize(*pageSize) + } + if orderBy != nil { + r = r.OrderBy(*orderBy) + } + res, h, err := r.Execute() + return res, 
parseError(err, h) +} + +func (d *ClientDelegate) PauseMigration(ctx context.Context, clusterId string, taskId string) error { + payload := map[string]interface{}{} + _, h, err := d.mc.MigrationAPI.MigrationServicePauseMigration(ctx, clusterId, taskId).Body(payload).Execute() + return parseError(err, h) +} + +func (d *ClientDelegate) ResumeMigration(ctx context.Context, clusterId string, taskId string) error { + payload := map[string]interface{}{} + _, h, err := d.mc.MigrationAPI.MigrationServiceResumeMigration(ctx, clusterId, taskId).Body(payload).Execute() + return parseError(err, h) +} + func (d *ClientDelegate) ListSQLUsers(ctx context.Context, clusterID string, pageSize *int32, pageToken *string) (*iam.ApiListSqlUsersRsp, error) { r := d.ic.AccountAPI.V1beta1ClustersClusterIdSqlUsersGet(ctx, clusterID) if pageSize != nil { @@ -631,7 +718,7 @@ func (d *ClientDelegate) GetPrivateLinkAvailabilityZones(ctx context.Context, cl // ===== Private Link Connection ===== -func NewApiClient(rt http.RoundTripper, serverlessEndpoint string, iamEndpoint string) (*branch.APIClient, *cluster.APIClient, *br.APIClient, *imp.APIClient, *export.APIClient, *iam.APIClient, *auditlog.APIClient, *cdc.APIClient, *privatelink.APIClient, error) { +func NewApiClient(rt http.RoundTripper, serverlessEndpoint string, iamEndpoint string) (*branch.APIClient, *cluster.APIClient, *br.APIClient, *imp.APIClient, *export.APIClient, *iam.APIClient, *auditlog.APIClient, *cdc.APIClient, *privatelink.APIClient, *migration.APIClient, error) { httpclient := &http.Client{ Transport: rt, } @@ -639,12 +726,12 @@ func NewApiClient(rt http.RoundTripper, serverlessEndpoint string, iamEndpoint s // v1beta1 api (serverless) serverlessURL, err := prop.ValidateApiUrl(serverlessEndpoint) if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, err } iamURL, err := prop.ValidateApiUrl(iamEndpoint) if err != nil { - return nil, nil, nil, nil, 
nil, nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, err } userAgent := fmt.Sprintf("%s/%s", config.CliName, version.Version) @@ -694,12 +781,16 @@ func NewApiClient(rt http.RoundTripper, serverlessEndpoint string, iamEndpoint s privateLinkCfg.Host = serverlessURL.Host privateLinkCfg.UserAgent = userAgent - return branch.NewAPIClient(branchCfg), cluster.NewAPIClient(clusterCfg), - br.NewAPIClient(backupRestoreCfg), - imp.NewAPIClient(importCfg), export.NewAPIClient(exportCfg), - iam.NewAPIClient(iamCfg), auditlog.NewAPIClient(auditLogCfg), cdc.NewAPIClient(cdcCfg), - privatelink.NewAPIClient(privateLinkCfg), nil + migrationCfg := migration.NewConfiguration() + migrationCfg.HTTPClient = httpclient + migrationCfg.Host = serverlessURL.Host + migrationCfg.UserAgent = userAgent + return branch.NewAPIClient(branchCfg), cluster.NewAPIClient(clusterCfg), + br.NewAPIClient(backupRestoreCfg), imp.NewAPIClient(importCfg), + export.NewAPIClient(exportCfg), iam.NewAPIClient(iamCfg), + auditlog.NewAPIClient(auditLogCfg), cdc.NewAPIClient(cdcCfg), + privatelink.NewAPIClient(privateLinkCfg), migration.NewAPIClient(migrationCfg), nil } func NewDigestTransport(publicKey, privateKey string) http.RoundTripper { diff --git a/internal/service/cloud/logic.go b/internal/service/cloud/logic.go index ecada978..f9f26c3a 100644 --- a/internal/service/cloud/logic.go +++ b/internal/service/cloud/logic.go @@ -31,6 +31,7 @@ import ( "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/cluster" "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/export" "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/imp" + "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" tea "github.com/charmbracelet/bubbletea" "github.com/juju/errors" @@ -88,6 +89,12 @@ type Changefeed struct { Type string } +type MigrationTask struct { + ID string + Name string + State string +} + type AuditLogFilterRule 
struct { FilterRuleId string DisplayName string @@ -160,6 +167,13 @@ func (c Changefeed) String() string { return fmt.Sprintf("%s(%s)", c.Name, c.ID) } +func (m MigrationTask) String() string { + if m.Name == "" || m.Name == m.ID { + return fmt.Sprintf("%s[%s]", m.ID, m.State) + } + return fmt.Sprintf("%s(%s)[%s]", m.Name, m.ID, m.State) +} + func GetSelectedProject(ctx context.Context, pageSize int64, client TiDBCloudClient) (*Project, error) { _, projectItems, err := RetrieveProjects(ctx, pageSize, client) if err != nil { @@ -1041,6 +1055,69 @@ func GetSelectedChangefeed(ctx context.Context, clusterID string, pageSize int64 return changefeed.(*Changefeed), nil } +func GetSelectedMigration(ctx context.Context, clusterID string, pageSize int64, client TiDBCloudClient) (*MigrationTask, error) { + var items = make([]interface{}, 0) + pageSizeInt32 := int32(pageSize) + resp, err := client.ListMigrations(ctx, clusterID, &pageSizeInt32, nil, nil) + if err != nil { + return nil, errors.Trace(err) + } + appendMigrationTaskItems := func(tasks []migration.Migration) { + for _, item := range tasks { + if item.MigrationId == nil { + continue + } + name := *item.MigrationId + if item.DisplayName != nil && *item.DisplayName != "" { + name = *item.DisplayName + } + state := "" + if item.State != nil { + state = string(*item.State) + } + items = append(items, &MigrationTask{ + ID: *item.MigrationId, + Name: name, + State: state, + }) + } + } + appendMigrationTaskItems(resp.Migrations) + for resp.NextPageToken != nil && *resp.NextPageToken != "" { + resp, err = client.ListMigrations(ctx, clusterID, &pageSizeInt32, resp.NextPageToken, nil) + if err != nil { + return nil, errors.Trace(err) + } + appendMigrationTaskItems(resp.Migrations) + } + + if len(items) == 0 { + return nil, fmt.Errorf("no available migration tasks found") + } + + model, err := ui.InitialSelectModel(items, "Choose the migration task:") + if err != nil { + return nil, errors.Trace(err) + } + itemsPerPage := 6 + 
model.EnablePagination(itemsPerPage) + model.EnableFilter() + + p := tea.NewProgram(model) + migrationModel, err := p.Run() + if err != nil { + return nil, errors.Trace(err) + } + if m, _ := migrationModel.(ui.SelectModel); m.Interrupted { + return nil, util.InterruptError + } + selected := migrationModel.(ui.SelectModel).GetSelectedItem() + if selected == nil { + return nil, errors.New("no migration task selected") + } + return selected.(*MigrationTask), nil +} + func GetSelectedFilterRule(ctx context.Context, clusterID string, client TiDBCloudClient) (*AuditLogFilterRule, error) { rulesResp, err := client.ListAuditLogFilterRules(ctx, clusterID) if err != nil { diff --git a/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json b/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json new file mode 100644 index 00000000..b58bf9f7 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json @@ -0,0 +1,1238 @@ +{ + "swagger": "2.0", + "info": { + "title": "TiDB Cloud Starter and Essential API", + "description": "TiDB Cloud Starter and Essential API", + "version": "v1beta1" + }, + "tags": [ + { + "name": "MigrationService" + } + ], + "host": "serverless.tidbapi.com", + "schemes": [ + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/v1beta1/clusters/{clusterId}/migrations": { + "get": { + "summary": "List migrations", + "operationId": "MigrationService_ListMigrations", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/ListMigrationsResp" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster to list migrations for.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "pageToken", + "description": "Optional. 
The page token, default is empty.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "pageSize", + "description": "Optional. The page size, default is 10.", + "in": "query", + "required": false, + "type": "integer", + "format": "int32", + "default": 10 + }, + { + "name": "orderBy", + "description": "Specifies the sorting order of results. Use a comma-separated list of field names, optionally appending `desc` for descending order. For example, `createTime desc`. By default, fields are sorted in ascending order.\n\nSupported field: `createTime`.", + "in": "query", + "required": false, + "type": "string" + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" + } + ] + }, + "post": { + "summary": "Create a migration", + "operationId": "MigrationService_CreateMigration", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/Migration" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster to create the migration in.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/MigrationService.CreateMigrationBody" + } + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json' \\\n+--data '{\\n \"name\": 
\"my-migration\",\\n \"sources\": [\\n {\\n \"connProfile\": {\\n \"connType\": \"PUBLIC\",\\n \"host\": \"1.2.3.4\",\\n \"port\": 3306,\\n \"user\": \"root\",\\n \"password\": \"secret\"\\n },\\n \"sourceType\": \"MYSQL\"\\n }\\n ],\\n \"target\": {\\n \"user\": \"tidb\",\\n \"password\": \"tidb_password\"\\n },\\n \"mode\": \"ALL\"\\n}'" + } + ] + } + }, + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}": { + "get": { + "summary": "Get a migration", + "operationId": "MigrationService_GetMigration", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/Migration" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "migrationId", + "description": "The ID of the migration.", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" + } + ] + }, + "delete": { + "summary": "Delete a migration", + "operationId": "MigrationService_DeleteMigration", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/Migration" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "migrationId", + "description": "The ID of the migration to delete.", + "in": "path", + "required": 
true, + "type": "string" + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" + } + ] + } + }, + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}:pause": { + "post": { + "summary": "Pause a running migration", + "operationId": "MigrationService_PauseMigration", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": {} + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "migrationId", + "description": "The ID of the migration to pause.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/MigrationService.PauseMigrationBody" + } + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}:pause' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json'" + } + ] + } + }, + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}:resume": { + "post": { + "summary": "Resume a paused migration", + "operationId": "MigrationService_ResumeMigration", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": {} + } + }, + "default": { + "description": "An unexpected 
error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "migrationId", + "description": "The ID of the migration to resume.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/MigrationService.ResumeMigrationBody" + } + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}:resume' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json'" + } + ] + } + }, + "/v1beta1/clusters/{clusterId}/migrationPrechecks": { + "post": { + "summary": "Run a precheck for a migration", + "operationId": "MigrationService_Precheck", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/CreateMigrationPrecheckResp" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster to create the migration in.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/MigrationService.PrecheckBody" + } + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: 
application/json' \\\n+--data '{\\n \"name\": \"my-migration\",\\n \"sources\": [\\n {\\n \"connProfile\": {\\n \"connType\": \"PUBLIC\",\\n \"host\": \"1.2.3.4\",\\n \"port\": 3306,\\n \"user\": \"root\",\\n \"password\": \"secret\"\\n },\\n \"sourceType\": \"MYSQL\"\\n }\\n ],\\n \"target\": {\\n \"user\": \"tidb\",\\n \"password\": \"tidb_password\"\\n },\\n \"mode\": \"ALL\"\\n}'" + } + ] + } + }, + "/v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId}": { + "get": { + "summary": "Get a migration precheck", + "operationId": "MigrationService_GetPrecheck", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/MigrationPrecheck" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "precheckId", + "description": "The ID of the precheck.", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{precheck_id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" + } + ] + }, + "delete": { + "summary": "Cancel a migration precheck", + "operationId": "MigrationService_CancelPrecheck", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": {} + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "precheckId", + 
"description": "The ID of the precheck to cancel.", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{precheck_id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" + } + ] + } + } + }, + "definitions": { + "Any": { + "type": "object", + "properties": { + "@type": { + "type": "string" + } + }, + "additionalProperties": {} + }, + "BlockAllowRules": { + "type": "object", + "properties": { + "doDbs": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Database names to include in migration." + }, + "doTables": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/Table" + }, + "description": "Table-level allow-list rules." + } + } + }, + "ConnProfile": { + "type": "object", + "properties": { + "connType": { + "description": "Connection type (e.g., PUBLIC, PRIVATE_LINK).", + "allOf": [ + { + "$ref": "#/definitions/ConnType" + } + ] + }, + "endpointId": { + "type": "string", + "description": "Private link endpoint ID." + }, + "host": { + "type": "string", + "description": "Source host." + }, + "port": { + "type": "integer", + "format": "int32", + "description": "Source port." + }, + "user": { + "type": "string", + "description": "Source user." + }, + "password": { + "type": "string", + "description": "Source password." 
+ }, + "security": { + "description": "TLS/SSL settings; if not set, use defaults.", + "allOf": [ + { + "$ref": "#/definitions/Security" + } + ] + } + }, + "required": [ + "connType", + "port", + "user", + "password" + ] + }, + "ConnType": { + "type": "string", + "enum": [ + "PUBLIC", + "PRIVATE_LINK" + ], + "description": "The connection type used to connect to the source database.\n\n - PUBLIC: Connect over the public internet.\n - PRIVATE_LINK: Connect via Private Link/Private Endpoint." + }, + "CreateMigrationPrecheckResp": { + "type": "object", + "properties": { + "precheckId": { + "type": "string", + "description": "The ID of the created precheck.", + "readOnly": true + } + } + }, + "DumpDetail": { + "type": "object", + "properties": { + "bps": { + "type": "string", + "format": "int64", + "description": "Bytes per second processed during dump.", + "readOnly": true + }, + "progress": { + "type": "number", + "format": "double", + "description": "Progress of dump phase (0-100).", + "readOnly": true + }, + "totalTables": { + "type": "string", + "format": "int64", + "description": "Total number of tables to dump.", + "readOnly": true + }, + "completedTables": { + "type": "string", + "format": "int64", + "description": "Number of tables completed dumping.", + "readOnly": true + }, + "finishedBytes": { + "type": "string", + "format": "int64", + "description": "Total bytes finished dumping.", + "readOnly": true + }, + "finishedRows": { + "type": "string", + "format": "int64", + "description": "Total rows finished dumping.", + "readOnly": true + } + } + }, + "ListMigrationsResp": { + "type": "object", + "properties": { + "migrations": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/Migration" + }, + "description": "The list of migrations.", + "readOnly": true + }, + "totalSize": { + "type": "integer", + "format": "int64", + "description": "The total number of migrations matching the query.", + "readOnly": true + }, + "nextPageToken": { + 
"type": "string", + "description": "Token to retrieve the next page of results.", + "readOnly": true + } + } + }, + "LoadDetail": { + "type": "object", + "properties": { + "bps": { + "type": "string", + "format": "int64", + "description": "Bytes per second processed during load.", + "readOnly": true + }, + "progress": { + "type": "number", + "format": "double", + "description": "Progress of load phase (0-100).", + "readOnly": true + }, + "finishedBytes": { + "type": "string", + "format": "int64", + "description": "Total bytes finished loading.", + "readOnly": true + }, + "totalBytes": { + "type": "string", + "format": "int64", + "description": "Total bytes to load.", + "readOnly": true + } + } + }, + "Migration": { + "type": "object", + "properties": { + "migrationId": { + "type": "string", + "description": "The unique ID of the migration.", + "readOnly": true + }, + "displayName": { + "type": "string", + "description": "The display name of the migration.", + "readOnly": true + }, + "subTasks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/SubTask" + }, + "description": "The list of subtasks composing this migration.", + "readOnly": true + }, + "targetUser": { + "type": "string", + "description": "The target database username used by the migration.", + "readOnly": true + }, + "createTime": { + "type": "string", + "format": "date-time", + "description": "The timestamp when the migration was created.", + "readOnly": true + }, + "mode": { + "description": "The migration mode of the migration.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/TaskMode" + } + ] + }, + "state": { + "description": "The current state of the migration.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/Migration.State" + } + ] + } + } + }, + "Migration.State": { + "type": "string", + "enum": [ + "CREATING", + "RUNNING", + "PAUSED", + "FAILED", + "DELETING" + ], + "description": "Overall state of a migration.\n\n - CREATING: Task is 
being created.\n - RUNNING: Task is actively running.\n - PAUSED: Task is paused.\n - FAILED: Task failed with error.\n - DELETING: Task is being deleted." + }, + "MigrationPrecheck": { + "type": "object", + "properties": { + "precheckId": { + "type": "string", + "description": "The ID of the precheck.", + "readOnly": true + }, + "total": { + "type": "integer", + "format": "int32", + "description": "Total number of precheck items.", + "readOnly": true + }, + "failedCnt": { + "type": "integer", + "format": "int32", + "description": "Number of failed items.", + "readOnly": true + }, + "warnCnt": { + "type": "integer", + "format": "int32", + "description": "Number of items with warnings.", + "readOnly": true + }, + "successCnt": { + "type": "integer", + "format": "int32", + "description": "Number of successful items.", + "readOnly": true + }, + "status": { + "description": "Overall status of the precheck.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/MigrationPrecheck.Status" + } + ] + }, + "items": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/PrecheckItem" + }, + "description": "Details for each precheck item.", + "readOnly": true + } + } + }, + "MigrationPrecheck.Status": { + "type": "string", + "enum": [ + "RUNNING", + "FINISHED", + "PENDING", + "FAILED", + "CANCELED" + ], + "description": " - RUNNING: Precheck is in progress.\n - FINISHED: Precheck finished successfully.\n - PENDING: Precheck is pending.\n - FAILED: Precheck failed.\n - CANCELED: Precheck is canceled." + }, + "MigrationService.CreateMigrationBody": { + "type": "object", + "properties": { + "displayName": { + "type": "string", + "description": "The display name of the migration." + }, + "sources": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/Source" + }, + "description": "The data sources to migrate from." 
+ }, + "target": { + "description": "The target database credentials.", + "allOf": [ + { + "$ref": "#/definitions/Target" + } + ] + }, + "mode": { + "description": "The migration mode (full+incremental or incremental-only).", + "allOf": [ + { + "$ref": "#/definitions/TaskMode" + } + ] + } + }, + "required": [ + "displayName", + "sources", + "target", + "mode" + ] + }, + "MigrationService.PauseMigrationBody": { + "type": "object", + "title": "PauseMigrationReq is used to pause a migration" + }, + "MigrationService.PrecheckBody": { + "type": "object", + "properties": { + "displayName": { + "type": "string", + "description": "The display name of the migration." + }, + "sources": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/Source" + }, + "description": "The data sources to migrate from." + }, + "target": { + "description": "The target database credentials.", + "allOf": [ + { + "$ref": "#/definitions/Target" + } + ] + }, + "mode": { + "description": "The migration mode (full+incremental or incremental-only).", + "allOf": [ + { + "$ref": "#/definitions/TaskMode" + } + ] + } + }, + "required": [ + "displayName", + "sources", + "target", + "mode" + ] + }, + "MigrationService.ResumeMigrationBody": { + "type": "object", + "title": "ResumeMigrationReq is used to resume a paused migration" + }, + "PrecheckItem": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "Human-readable description of the check.", + "readOnly": true + }, + "status": { + "description": "Status of this check.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/PrecheckItem.Status" + } + ] + }, + "solution": { + "type": "string", + "description": "Suggested solution if the check failed or warned.", + "readOnly": true + }, + "reason": { + "type": "string", + "description": "Reason for the failure or warning.", + "readOnly": true + }, + "solutionDocUrl": { + "type": "string", + "description": "Documentation URL for 
the solution.", + "readOnly": true + }, + "type": { + "description": "The type of precheck.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/PrecheckItemType" + } + ] + } + } + }, + "PrecheckItem.Status": { + "type": "string", + "enum": [ + "SUCCESS", + "WARNING", + "FAILED" + ], + "description": " - SUCCESS: Check passed successfully.\n - WARNING: Check resulted in a warning.\n - FAILED: Check failed." + }, + "PrecheckItemType": { + "type": "string", + "enum": [ + "DUMP_PRIVILEGE_CHECKING", + "REPLICATION_PRIVILEGE_CHECKING", + "VERSION_CHECKING", + "SERVER_ID_CHECKING", + "BINLOG_ENABLE_CHECKING", + "BINLOG_FORMAT_CHECKING", + "BINLOG_ROW_IMAGE_CHECKING", + "TABLE_SCHEMA_CHECKING", + "BINLOG_DB_CHECKING", + "CONN_NUMBER_CHECKING", + "TARGET_DB_PRIVILEGE_CHECKING", + "META_POSITION_CHECKING", + "LIGHTNING_TABLE_EMPTY_CHECKING", + "PRIMARY_KEY_CHECKING" + ], + "description": "Types of prechecks performed before starting a migration.\n\n - DUMP_PRIVILEGE_CHECKING: Check source has required dump/export privileges.\n - REPLICATION_PRIVILEGE_CHECKING: Check source has required replication privileges.\n - VERSION_CHECKING: Check source database version compatibility.\n - SERVER_ID_CHECKING: Check source server_id configuration.\n - BINLOG_ENABLE_CHECKING: Check whether binlog is enabled on source.\n - BINLOG_FORMAT_CHECKING: Check binlog format (e.g., ROW) on source.\n - BINLOG_ROW_IMAGE_CHECKING: Check binlog row image setting.\n - TABLE_SCHEMA_CHECKING: Check table/schema compatibility with target.\n - BINLOG_DB_CHECKING: Check binlog database-level filtering configuration.\n - CONN_NUMBER_CHECKING: Check concurrent connections limit/availability.\n - TARGET_DB_PRIVILEGE_CHECKING: Check target database privileges.\n - META_POSITION_CHECKING: Check saved meta/binlog position validity.\n - LIGHTNING_TABLE_EMPTY_CHECKING: Check target tables are empty for Lightning load.\n - PRIMARY_KEY_CHECKING: Check primary key settings on source tables." 
+ }, + "RouteRule": { + "type": "object", + "properties": { + "sourceTable": { + "description": "Source table pattern to match.", + "allOf": [ + { + "$ref": "#/definitions/RouteRule.Source" + } + ] + }, + "targetTable": { + "description": "Target table to route to.", + "allOf": [ + { + "$ref": "#/definitions/Table" + } + ] + } + } + }, + "RouteRule.Source": { + "type": "object", + "properties": { + "schemaPattern": { + "type": "string", + "description": "Schema pattern of the source, supports wildcards." + }, + "tablePattern": { + "type": "string", + "description": "Table pattern of the source, supports wildcards." + } + } + }, + "Security": { + "type": "object", + "properties": { + "certAllowedCn": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Allowed certificate Common Names." + }, + "sslCaContent": { + "type": "string", + "format": "byte", + "description": "CA certificate content in PEM." + }, + "sslCertContent": { + "type": "string", + "format": "byte", + "description": "Client certificate content in PEM." + }, + "sslKeyContent": { + "type": "string", + "format": "byte", + "description": "Client private key in PEM." + } + } + }, + "Source": { + "type": "object", + "properties": { + "connProfile": { + "description": "Connection profile for the source database.", + "allOf": [ + { + "$ref": "#/definitions/ConnProfile" + } + ] + }, + "baRules": { + "description": "Block/allow rules for databases and tables, which is mutually exclusive with route_rules.", + "allOf": [ + { + "$ref": "#/definitions/BlockAllowRules" + } + ] + }, + "routeRules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/RouteRule" + }, + "description": "Table route rules, which is mutually exclusive with ba_rules." + }, + "binlogName": { + "type": "string", + "x-nullable": true, + "description": "Starting binlog file name for incremental sync."
+ }, + "binlogPosition": { + "type": "integer", + "format": "int32", + "x-nullable": true, + "description": "Starting binlog position for incremental sync." + }, + "binlogGtid": { + "type": "string", + "x-nullable": true, + "description": "Starting GTID set for incremental sync." + }, + "sourceType": { + "description": "Source type (e.g., MySQL).", + "allOf": [ + { + "$ref": "#/definitions/Source.SourceType" + } + ] + } + }, + "required": [ + "connProfile", + "sourceType" + ] + }, + "Source.SourceType": { + "type": "string", + "enum": [ + "MYSQL", + "ALICLOUD_RDS_MYSQL", + "AWS_RDS_MYSQL" + ], + "description": "The source database type.\n\n - MYSQL: Self-managed MySQL.\n - ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL.\n - AWS_RDS_MYSQL: Amazon RDS for MySQL." + }, + "Status": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/Any" + } + } + } + }, + "SubTask": { + "type": "object", + "properties": { + "source": { + "description": "Source configuration for this subtask.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/Source" + } + ] + }, + "currentStep": { + "description": "Current step of the subtask.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/SubTask.Step" + } + ] + }, + "stage": { + "description": "Current stage of the subtask.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/SubTask.Stage" + } + ] + }, + "dumpDetail": { + "description": "Detail of dump phase, if applicable.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/DumpDetail" + } + ] + }, + "loadDetail": { + "description": "Detail of load phase, if applicable.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/LoadDetail" + } + ] + }, + "syncDetail": { + "description": "Detail of sync phase, if applicable.", + "readOnly": true, + "allOf": [ + { + 
"$ref": "#/definitions/SyncDetail" + } + ] + }, + "errorMsg": { + "type": "string", + "x-nullable": true, + "description": "Error message when the subtask fails.", + "readOnly": true + } + } + }, + "SubTask.Stage": { + "type": "string", + "enum": [ + "RUNNING", + "PAUSED", + "FAILED", + "FINISHED", + "UNKNOWN" + ], + "description": "The high-level lifecycle stage of a subtask.\n\n - RUNNING: Subtask is running.\n - PAUSED: Subtask is paused.\n - FAILED: Subtask failed.\n - FINISHED: Subtask finished successfully.\n - UNKNOWN: Subtask stage is unknown." + }, + "SubTask.Step": { + "type": "string", + "enum": [ + "DUMP", + "LOAD", + "SYNC" + ], + "description": "The current step within a subtask.\n\n - DUMP: Dump/export data from source.\n - LOAD: Load/import data into target.\n - SYNC: Sync/replicate binlog changes." + }, + "SyncDetail": { + "type": "object", + "properties": { + "rps": { + "type": "string", + "format": "int64", + "description": "Rows processed per second during sync.", + "readOnly": true + }, + "latency": { + "type": "string", + "format": "int64", + "description": "Replication latency in seconds.", + "readOnly": true + }, + "checkpoint": { + "type": "string", + "description": "Synchronization checkpoint.", + "readOnly": true + } + } + }, + "Table": { + "type": "object", + "properties": { + "schema": { + "type": "string", + "description": "Schema name." + }, + "table": { + "type": "string", + "description": "Table name." + } + } + }, + "Target": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "Target database user." + }, + "password": { + "type": "string", + "description": "Target database password." + } + }, + "required": [ + "user", + "password" + ] + }, + "TaskMode": { + "type": "string", + "enum": [ + "ALL", + "INCREMENTAL" + ], + "description": "Migration task mode.\n\n - ALL: Full + incremental migration (all phases).\n - INCREMENTAL: Incremental-only migration (replication)." 
+ } + } +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/.gitignore b/pkg/tidbcloud/v1beta1/serverless/migration/.gitignore new file mode 100644 index 00000000..daf913b1 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator-ignore b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator-ignore new file mode 100644 index 00000000..7484ee59 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator-ignore @@ -0,0 +1,23 @@ +# OpenAPI Generator Ignore +# Generated by openapi-generator https://github.com/openapitools/openapi-generator + +# Use this file to prevent files from being overwritten by the generator. +# The patterns follow closely to .gitignore or .dockerignore. + +# As an example, the C# client generator defines ApiClient.cs. +# You can make changes and tell OpenAPI Generator to ignore just this file by uncommenting the following line: +#ApiClient.cs + +# You can match any string of characters against a directory, file or extension with a single asterisk (*): +#foo/*/qux +# The above matches foo/bar/qux and foo/baz/qux, but not foo/bar/baz/qux + +# You can recursively match patterns against a directory, file or extension with a double asterisk (**): +#foo/**/qux +# This matches foo/bar/qux, foo/baz/qux, and foo/bar/baz/qux + +# You can also negate patterns with an exclamation (!). 
+# For example, you can ignore all files in a docs folder with the file extension .md: +#docs/*.md +# Then explicitly reverse the ignore rule for a single file: +#!docs/README.md diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES new file mode 100644 index 00000000..78f02545 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES @@ -0,0 +1,41 @@ +.gitignore +.openapi-generator-ignore +.travis.yml +README.md +api/openapi.yaml +api_migration.go +client.go +configuration.go +git_push.sh +model_any.go +model_block_allow_rules.go +model_conn_profile.go +model_conn_type.go +model_create_migration_precheck_resp.go +model_dump_detail.go +model_list_migrations_resp.go +model_load_detail.go +model_migration.go +model_migration_precheck.go +model_migration_precheck_status.go +model_migration_service_create_migration_body.go +model_migration_service_precheck_body.go +model_migration_state.go +model_precheck_item.go +model_precheck_item_status.go +model_precheck_item_type.go +model_route_rule.go +model_route_rule_source.go +model_security.go +model_source.go +model_source_source_type.go +model_status.go +model_sub_task.go +model_sub_task_stage.go +model_sub_task_step.go +model_sync_detail.go +model_table.go +model_target.go +model_task_mode.go +response.go +utils.go diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/VERSION b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/VERSION new file mode 100644 index 00000000..5f84a81d --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/VERSION @@ -0,0 +1 @@ +7.12.0 diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/.travis.yml b/pkg/tidbcloud/v1beta1/serverless/migration/.travis.yml new file mode 100644 index 00000000..f5cb2ce9 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/.travis.yml @@ -0,0 +1,8 @@ +language: go + 
+install: + - go get -d -v . + +script: + - go build -v ./ + diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/README.md b/pkg/tidbcloud/v1beta1/serverless/migration/README.md new file mode 100644 index 00000000..03b7dac1 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/README.md @@ -0,0 +1,150 @@ +# Go API client for migration + +TiDB Cloud Starter and Essential API + +## Overview +This API client was generated by the [OpenAPI Generator](https://openapi-generator.tech) project. By using the [OpenAPI-spec](https://www.openapis.org/) from a remote server, you can easily generate an API client. + +- API version: v1beta1 +- Package version: 1.0.0 +- Generator version: 7.12.0 +- Build package: org.openapitools.codegen.languages.GoClientCodegen + +## Installation + +Install the following dependencies: + +```sh +go get github.com/stretchr/testify/assert +go get golang.org/x/net/context +``` + +Put the package under your project folder and add the following in import: + +```go +import migration "github.com/GIT_USER_ID/GIT_REPO_ID" +``` + +To use a proxy, set the environment variable `HTTP_PROXY`: + +```go +os.Setenv("HTTP_PROXY", "http://proxy_name:proxy_port") +``` + +## Configuration of Server URL + +Default configuration comes with `Servers` field that contains server objects as defined in the OpenAPI specification. + +### Select Server Configuration + +For using other server than the one defined on index 0 set context value `migration.ContextServerIndex` of type `int`. + +```go +ctx := context.WithValue(context.Background(), migration.ContextServerIndex, 1) +``` + +### Templated Server URL + +Templated server URL is formatted using default variables from configuration or from context value `migration.ContextServerVariables` of type `map[string]string`. 
+ +```go +ctx := context.WithValue(context.Background(), migration.ContextServerVariables, map[string]string{ + "basePath": "v2", +}) +``` + +Note, enum values are always validated and all unused variables are silently ignored. + +### URLs Configuration per Operation + +Each operation can use different server URL defined using `OperationServers` map in the `Configuration`. +An operation is uniquely identified by `"{classname}Service.{nickname}"` string. +Similar rules for overriding default operation server index and variables applies by using `migration.ContextOperationServerIndices` and `migration.ContextOperationServerVariables` context maps. + +```go +ctx := context.WithValue(context.Background(), migration.ContextOperationServerIndices, map[string]int{ + "{classname}Service.{nickname}": 2, +}) +ctx = context.WithValue(context.Background(), migration.ContextOperationServerVariables, map[string]map[string]string{ + "{classname}Service.{nickname}": { + "port": "8443", + }, +}) +``` + +## Documentation for API Endpoints + +All URIs are relative to *https://serverless.tidbapi.com* + +Class | Method | HTTP request | Description +------------ | ------------- | ------------- | ------------- +*MigrationAPI* | [**MigrationServiceCancelPrecheck**](docs/MigrationAPI.md#migrationservicecancelprecheck) | **Delete** /v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId} | Cancel a migration precheck +*MigrationAPI* | [**MigrationServiceCreateMigration**](docs/MigrationAPI.md#migrationservicecreatemigration) | **Post** /v1beta1/clusters/{clusterId}/migrations | Create a migration +*MigrationAPI* | [**MigrationServiceDeleteMigration**](docs/MigrationAPI.md#migrationservicedeletemigration) | **Delete** /v1beta1/clusters/{clusterId}/migrations/{migrationId} | Delete a migration +*MigrationAPI* | [**MigrationServiceGetMigration**](docs/MigrationAPI.md#migrationservicegetmigration) | **Get** /v1beta1/clusters/{clusterId}/migrations/{migrationId} | Get a migration 
+*MigrationAPI* | [**MigrationServiceGetPrecheck**](docs/MigrationAPI.md#migrationservicegetprecheck) | **Get** /v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId} | Get a migration precheck +*MigrationAPI* | [**MigrationServiceListMigrations**](docs/MigrationAPI.md#migrationservicelistmigrations) | **Get** /v1beta1/clusters/{clusterId}/migrations | List migrations +*MigrationAPI* | [**MigrationServicePauseMigration**](docs/MigrationAPI.md#migrationservicepausemigration) | **Post** /v1beta1/clusters/{clusterId}/migrations/{migrationId}:pause | Pause a running migration +*MigrationAPI* | [**MigrationServicePrecheck**](docs/MigrationAPI.md#migrationserviceprecheck) | **Post** /v1beta1/clusters/{clusterId}/migrationPrechecks | Run a precheck for a migration +*MigrationAPI* | [**MigrationServiceResumeMigration**](docs/MigrationAPI.md#migrationserviceresumemigration) | **Post** /v1beta1/clusters/{clusterId}/migrations/{migrationId}:resume | Resume a paused migration + + +## Documentation For Models + + - [Any](docs/Any.md) + - [BlockAllowRules](docs/BlockAllowRules.md) + - [ConnProfile](docs/ConnProfile.md) + - [ConnType](docs/ConnType.md) + - [CreateMigrationPrecheckResp](docs/CreateMigrationPrecheckResp.md) + - [DumpDetail](docs/DumpDetail.md) + - [ListMigrationsResp](docs/ListMigrationsResp.md) + - [LoadDetail](docs/LoadDetail.md) + - [Migration](docs/Migration.md) + - [MigrationPrecheck](docs/MigrationPrecheck.md) + - [MigrationPrecheckStatus](docs/MigrationPrecheckStatus.md) + - [MigrationServiceCreateMigrationBody](docs/MigrationServiceCreateMigrationBody.md) + - [MigrationServicePrecheckBody](docs/MigrationServicePrecheckBody.md) + - [MigrationState](docs/MigrationState.md) + - [PrecheckItem](docs/PrecheckItem.md) + - [PrecheckItemStatus](docs/PrecheckItemStatus.md) + - [PrecheckItemType](docs/PrecheckItemType.md) + - [RouteRule](docs/RouteRule.md) + - [RouteRuleSource](docs/RouteRuleSource.md) + - [Security](docs/Security.md) + - 
[Source](docs/Source.md) + - [SourceSourceType](docs/SourceSourceType.md) + - [Status](docs/Status.md) + - [SubTask](docs/SubTask.md) + - [SubTaskStage](docs/SubTaskStage.md) + - [SubTaskStep](docs/SubTaskStep.md) + - [SyncDetail](docs/SyncDetail.md) + - [Table](docs/Table.md) + - [Target](docs/Target.md) + - [TaskMode](docs/TaskMode.md) + + +## Documentation For Authorization + +Endpoints do not require authorization. + + +## Documentation for Utility Methods + +Due to the fact that model structure members are all pointers, this package contains +a number of utility functions to easily obtain pointers to values of basic types. +Each of these functions takes a value of the given basic type and returns a pointer to it: + +* `PtrBool` +* `PtrInt` +* `PtrInt32` +* `PtrInt64` +* `PtrFloat` +* `PtrFloat32` +* `PtrFloat64` +* `PtrString` +* `PtrTime` + +## Author + + + diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml b/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml new file mode 100644 index 00000000..19a6b41b --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml @@ -0,0 +1,1113 @@ +openapi: 3.0.1 +info: + description: TiDB Cloud Starter and Essential API + title: TiDB Cloud Starter and Essential API + version: v1beta1 +servers: +- url: https://serverless.tidbapi.com/ +tags: +- name: MigrationService +paths: + /v1beta1/clusters/{clusterId}/migrations: + get: + operationId: MigrationService_ListMigrations + parameters: + - description: The ID of the cluster to list migrations for. + in: path + name: clusterId + required: true + schema: + type: string + - description: "Optional. The page token, default is empty." + in: query + name: pageToken + schema: + type: string + - description: "Optional. The page size, default is 10." + in: query + name: pageSize + schema: + default: 10 + format: int32 + type: integer + - description: |- + Specifies the sorting order of results. 
Use a comma-separated list of field names, optionally appending `desc` for descending order. For example, `createTime desc`. By default, fields are sorted in ascending order. + + Supported field: `createTime`. + in: query + name: orderBy + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/ListMigrationsResp' + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. + summary: List migrations + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Accept: application/json' + post: + operationId: MigrationService_CreateMigration + parameters: + - description: The ID of the cluster to create the migration in. + in: path + name: clusterId + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/MigrationService.CreateMigrationBody' + required: true + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/Migration' + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. 
+ summary: Create a migration + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Content-Type: application/json' \ + +--header 'Accept: application/json' \ + +--data '{\n "name": "my-migration",\n "sources": [\n {\n "connProfile": {\n "connType": "PUBLIC",\n "host": "1.2.3.4",\n "port": 3306,\n "user": "root",\n "password": "secret"\n },\n "sourceType": "MYSQL"\n }\n ],\n "target": {\n "user": "tidb",\n "password": "tidb_password"\n },\n "mode": "ALL"\n}' + x-codegen-request-body-name: body + /v1beta1/clusters/{clusterId}/migrations/{migrationId}: + delete: + operationId: MigrationService_DeleteMigration + parameters: + - description: The ID of the cluster. + in: path + name: clusterId + required: true + schema: + type: string + - description: The ID of the migration to delete. + in: path + name: migrationId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/Migration' + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. + summary: Delete a migration + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Accept: application/json' + get: + operationId: MigrationService_GetMigration + parameters: + - description: The ID of the cluster. + in: path + name: clusterId + required: true + schema: + type: string + - description: The ID of the migration. 
+ in: path + name: migrationId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/Migration' + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. + summary: Get a migration + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Accept: application/json' + /v1beta1/clusters/{clusterId}/migrations/{migrationId}:pause: + post: + operationId: MigrationService_PauseMigration + parameters: + - description: The ID of the cluster. + in: path + name: clusterId + required: true + schema: + type: string + - description: The ID of the migration to pause. + in: path + name: migrationId + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/MigrationService.PauseMigrationBody' + required: true + responses: + "200": + content: + application/json: + schema: + type: object + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. 
+ summary: Pause a running migration + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}:pause' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Content-Type: application/json' \ + +--header 'Accept: application/json' + x-codegen-request-body-name: body + /v1beta1/clusters/{clusterId}/migrations/{migrationId}:resume: + post: + operationId: MigrationService_ResumeMigration + parameters: + - description: The ID of the cluster. + in: path + name: clusterId + required: true + schema: + type: string + - description: The ID of the migration to resume. + in: path + name: migrationId + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/MigrationService.ResumeMigrationBody' + required: true + responses: + "200": + content: + application/json: + schema: + type: object + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. + summary: Resume a paused migration + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}:resume' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Content-Type: application/json' \ + +--header 'Accept: application/json' + x-codegen-request-body-name: body + /v1beta1/clusters/{clusterId}/migrationPrechecks: + post: + operationId: MigrationService_Precheck + parameters: + - description: The ID of the cluster to create the migration in. 
+ in: path + name: clusterId + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/MigrationService.PrecheckBody' + required: true + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/CreateMigrationPrecheckResp' + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. + summary: Run a precheck for a migration + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Content-Type: application/json' \ + +--header 'Accept: application/json' \ + +--data '{\n "name": "my-migration",\n "sources": [\n {\n "connProfile": {\n "connType": "PUBLIC",\n "host": "1.2.3.4",\n "port": 3306,\n "user": "root",\n "password": "secret"\n },\n "sourceType": "MYSQL"\n }\n ],\n "target": {\n "user": "tidb",\n "password": "tidb_password"\n },\n "mode": "ALL"\n}' + x-codegen-request-body-name: body + /v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId}: + delete: + operationId: MigrationService_CancelPrecheck + parameters: + - description: The ID of the cluster. + in: path + name: clusterId + required: true + schema: + type: string + - description: The ID of the precheck to cancel. + in: path + name: precheckId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + type: object + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. 
+ summary: Cancel a migration precheck + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{precheck_id}' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Accept: application/json' + get: + operationId: MigrationService_GetPrecheck + parameters: + - description: The ID of the cluster. + in: path + name: clusterId + required: true + schema: + type: string + - description: The ID of the precheck. + in: path + name: precheckId + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/MigrationPrecheck' + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. + summary: Get a migration precheck + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{precheck_id}' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Accept: application/json' +components: + schemas: + Any: + additionalProperties: + type: object + example: + '@type': '@type' + properties: + '@type': + type: string + type: object + BlockAllowRules: + properties: + doDbs: + description: Database names to include in migration. + items: + type: string + type: array + doTables: + description: Table-level allow-list rules. + items: + $ref: '#/components/schemas/Table' + type: array + type: object + ConnProfile: + properties: + connType: + allOf: + - $ref: '#/components/schemas/ConnType' + description: "Connection type (e.g., PUBLIC, PRIVATE_LINK)." + type: object + endpointId: + description: Private link endpoint ID. + type: string + host: + description: Source host. + type: string + port: + description: Source port. 
+ format: int32 + type: integer + user: + description: Source user. + type: string + password: + description: Source password. + type: string + security: + allOf: + - $ref: '#/components/schemas/Security' + description: "TLS/SSL settings; if not set, use defaults." + type: object + required: + - connType + - password + - port + - user + type: object + ConnType: + description: |- + The connection type used to connect to the source database. + + - PUBLIC: Connect over the public internet. + - PRIVATE_LINK: Connect via Private Link/Private Endpoint. + enum: + - PUBLIC + - PRIVATE_LINK + type: string + CreateMigrationPrecheckResp: + example: + precheckId: precheckId + properties: + precheckId: + description: The ID of the created precheck. + readOnly: true + type: string + type: object + DumpDetail: + properties: + bps: + description: Bytes per second processed during dump. + format: int64 + readOnly: true + type: string + progress: + description: Progress of dump phase (0-100). + format: double + readOnly: true + type: number + totalTables: + description: Total number of tables to dump. + format: int64 + readOnly: true + type: string + completedTables: + description: Number of tables completed dumping. + format: int64 + readOnly: true + type: string + finishedBytes: + description: Total bytes finished dumping. + format: int64 + readOnly: true + type: string + finishedRows: + description: Total rows finished dumping. 
+ format: int64 + readOnly: true + type: string + type: object + ListMigrationsResp: + example: + totalSize: 0 + migrations: + - mode: "{}" + migrationId: migrationId + createTime: 2000-01-23T04:56:07.000+00:00 + displayName: displayName + subTasks: + - currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + - currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + state: "{}" + targetUser: targetUser + - mode: "{}" + migrationId: migrationId + createTime: 2000-01-23T04:56:07.000+00:00 + displayName: displayName + subTasks: + - currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + - currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + state: "{}" + targetUser: targetUser + nextPageToken: nextPageToken + properties: + migrations: + description: The list of migrations. + items: + $ref: '#/components/schemas/Migration' + readOnly: true + type: array + totalSize: + description: The total number of migrations matching the query. + format: int64 + readOnly: true + type: integer + nextPageToken: + description: Token to retrieve the next page of results. + readOnly: true + type: string + type: object + LoadDetail: + properties: + bps: + description: Bytes per second processed during load. + format: int64 + readOnly: true + type: string + progress: + description: Progress of load phase (0-100). + format: double + readOnly: true + type: number + finishedBytes: + description: Total bytes finished loading. + format: int64 + readOnly: true + type: string + totalBytes: + description: Total bytes to load. 
+ format: int64 + readOnly: true + type: string + type: object + Migration: + example: + mode: "{}" + migrationId: migrationId + createTime: 2000-01-23T04:56:07.000+00:00 + displayName: displayName + subTasks: + - currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + - currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + state: "{}" + targetUser: targetUser + properties: + migrationId: + description: The unique ID of the migration. + readOnly: true + type: string + displayName: + description: The display name of the migration. + readOnly: true + type: string + subTasks: + description: The list of subtasks composing this migration. + items: + $ref: '#/components/schemas/SubTask' + readOnly: true + type: array + targetUser: + description: The target database username used by the migration. + readOnly: true + type: string + createTime: + description: The timestamp when the migration was created. + format: date-time + readOnly: true + type: string + mode: + allOf: + - $ref: '#/components/schemas/TaskMode' + description: The migration mode of the migration. + type: object + state: + allOf: + - $ref: '#/components/schemas/Migration.State' + description: The current state of the migration. + type: object + type: object + Migration.State: + description: |- + Overall state of a migration. + + - CREATING: Task is being created. + - RUNNING: Task is actively running. + - PAUSED: Task is paused. + - FAILED: Task failed with error. + - DELETING: Task is being deleted. 
+ enum: + - CREATING + - RUNNING + - PAUSED + - FAILED + - DELETING + type: string + MigrationPrecheck: + example: + precheckId: precheckId + failedCnt: 6 + total: 0 + successCnt: 5 + warnCnt: 1 + items: + - reason: reason + solution: solution + solutionDocUrl: solutionDocUrl + description: description + type: "{}" + status: "{}" + - reason: reason + solution: solution + solutionDocUrl: solutionDocUrl + description: description + type: "{}" + status: "{}" + status: "{}" + properties: + precheckId: + description: The ID of the precheck. + readOnly: true + type: string + total: + description: Total number of precheck items. + format: int32 + readOnly: true + type: integer + failedCnt: + description: Number of failed items. + format: int32 + readOnly: true + type: integer + warnCnt: + description: Number of items with warnings. + format: int32 + readOnly: true + type: integer + successCnt: + description: Number of successful items. + format: int32 + readOnly: true + type: integer + status: + allOf: + - $ref: '#/components/schemas/MigrationPrecheck.Status' + description: Overall status of the precheck. + type: object + items: + description: Details for each precheck item. + items: + $ref: '#/components/schemas/PrecheckItem' + readOnly: true + type: array + type: object + MigrationPrecheck.Status: + description: |2- + - RUNNING: Precheck is in progress. + - FINISHED: Precheck finished successfully. + - PENDING: Precheck is pending. + - FAILED: Precheck failed. + - CANCELED: Precheck is canceled. + enum: + - RUNNING + - FINISHED + - PENDING + - FAILED + - CANCELED + type: string + MigrationService.CreateMigrationBody: + properties: + displayName: + description: The display name of the migration. + type: string + sources: + description: The data sources to migrate from. + items: + $ref: '#/components/schemas/Source' + type: array + target: + allOf: + - $ref: '#/components/schemas/Target' + description: The target database credentials. 
+ type: object + mode: + allOf: + - $ref: '#/components/schemas/TaskMode' + description: The migration mode (full+incremental or incremental-only). + type: object + required: + - displayName + - mode + - sources + - target + type: object + MigrationService.PauseMigrationBody: + title: PauseMigrationReq is used to pause a migration + type: object + MigrationService.PrecheckBody: + properties: + displayName: + description: The display name of the migration. + type: string + sources: + description: The data sources to migrate from. + items: + $ref: '#/components/schemas/Source' + type: array + target: + allOf: + - $ref: '#/components/schemas/Target' + description: The target database credentials. + type: object + mode: + allOf: + - $ref: '#/components/schemas/TaskMode' + description: The migration mode (full+incremental or incremental-only). + type: object + required: + - displayName + - mode + - sources + - target + type: object + MigrationService.ResumeMigrationBody: + title: ResumeMigrationReq is used to resume a paused migration + type: object + PrecheckItem: + example: + reason: reason + solution: solution + solutionDocUrl: solutionDocUrl + description: description + type: "{}" + status: "{}" + properties: + description: + description: Human-readable description of the check. + readOnly: true + type: string + status: + allOf: + - $ref: '#/components/schemas/PrecheckItem.Status' + description: Status of this check. + type: object + solution: + description: Suggested solution if the check failed or warned. + readOnly: true + type: string + reason: + description: Reason for the failure or warning. + readOnly: true + type: string + solutionDocUrl: + description: Documentation URL for the solution. + readOnly: true + type: string + type: + allOf: + - $ref: '#/components/schemas/PrecheckItemType' + description: The type of precheck. + type: object + type: object + PrecheckItem.Status: + description: |2- + - SUCCESS: Check passed successfully. 
+ - WARNING: Check resulted in a warning. + - FAILED: Check failed. + enum: + - SUCCESS + - WARNING + - FAILED + type: string + PrecheckItemType: + description: |- + Types of prechecks performed before starting a migration. + + - DUMP_PRIVILEGE_CHECKING: Check source has required dump/export privileges. + - REPLICATION_PRIVILEGE_CHECKING: Check source has required replication privileges. + - VERSION_CHECKING: Check source database version compatibility. + - SERVER_ID_CHECKING: Check source server_id configuration. + - BINLOG_ENABLE_CHECKING: Check whether binlog is enabled on source. + - BINLOG_FORMAT_CHECKING: Check binlog format (e.g., ROW) on source. + - BINLOG_ROW_IMAGE_CHECKING: Check binlog row image setting. + - TABLE_SCHEMA_CHECKING: Check table/schema compatibility with target. + - BINLOG_DB_CHECKING: Check binlog database-level filtering configuration. + - CONN_NUMBER_CHECKING: Check concurrent connections limit/availability. + - TARGET_DB_PRIVILEGE_CHECKING: Check target database privileges. + - META_POSITION_CHECKING: Check saved meta/binlog position validity. + - LIGHTNING_TABLE_EMPTY_CHECKING: Check target tables are empty for Lightning load. + - PRIMARY_KEY_CHECKING: Check primary key settings on source tables. + enum: + - DUMP_PRIVILEGE_CHECKING + - REPLICATION_PRIVILEGE_CHECKING + - VERSION_CHECKING + - SERVER_ID_CHECKING + - BINLOG_ENABLE_CHECKING + - BINLOG_FORMAT_CHECKING + - BINLOG_ROW_IMAGE_CHECKING + - TABLE_SCHEMA_CHECKING + - BINLOG_DB_CHECKING + - CONN_NUMBER_CHECKING + - TARGET_DB_PRIVILEGE_CHECKING + - META_POSITION_CHECKING + - LIGHTNING_TABLE_EMPTY_CHECKING + - PRIMARY_KEY_CHECKING + type: string + RouteRule: + properties: + sourceTable: + allOf: + - $ref: '#/components/schemas/RouteRule.Source' + description: Source table pattern to match. + type: object + targetTable: + allOf: + - $ref: '#/components/schemas/Table' + description: Target table to route to. 
+ type: object + type: object + RouteRule.Source: + properties: + schemaPattern: + description: "Schema pattern of the source, supports wildcards." + type: string + tablePattern: + description: "Table pattern of the source, supports wildcards." + type: string + type: object + Security: + properties: + certAllowedCn: + description: Allowed certificate Common Names. + items: + type: string + type: array + sslCaContent: + description: CA certificate content in PEM. + format: byte + pattern: "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" + type: string + sslCertContent: + description: Client certificate content in PEM. + format: byte + pattern: "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" + type: string + sslKeyContent: + description: Client private key in PEM. + format: byte + pattern: "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" + type: string + type: object + Source: + properties: + connProfile: + allOf: + - $ref: '#/components/schemas/ConnProfile' + description: Connection profile for the source database. + type: object + baRules: + allOf: + - $ref: '#/components/schemas/BlockAllowRules' + description: "Block/allow rules for databases and tables, which is exclusive\ + \ with route_rules." + type: object + routeRules: + description: Table route rules,which is exclusive with ba_rules. + items: + $ref: '#/components/schemas/RouteRule' + type: array + binlogName: + description: Starting binlog file name for incremental sync. + nullable: true + type: string + binlogPosition: + description: Starting binlog position for incremental sync. + format: int32 + nullable: true + type: integer + binlogGtid: + description: Starting GTID set for incremental sync. + nullable: true + type: string + sourceType: + allOf: + - $ref: '#/components/schemas/Source.SourceType' + description: "Source type (e.g., MySQL)." 
+ type: object + required: + - connProfile + - sourceType + type: object + Source.SourceType: + description: |- + The source database type. + + - MYSQL: Self-managed MySQL. + - ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL. + - AWS_RDS_MYSQL: Amazon RDS for MySQL. + enum: + - MYSQL + - ALICLOUD_RDS_MYSQL + - AWS_RDS_MYSQL + type: string + Status: + example: + code: 6 + details: + - '@type': '@type' + - '@type': '@type' + message: message + properties: + code: + format: int32 + type: integer + message: + type: string + details: + items: + $ref: '#/components/schemas/Any' + type: array + type: object + SubTask: + example: + currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + properties: + source: + allOf: + - $ref: '#/components/schemas/Source' + description: Source configuration for this subtask. + type: object + currentStep: + allOf: + - $ref: '#/components/schemas/SubTask.Step' + description: Current step of the subtask. + type: object + stage: + allOf: + - $ref: '#/components/schemas/SubTask.Stage' + description: Current stage of the subtask. + type: object + dumpDetail: + allOf: + - $ref: '#/components/schemas/DumpDetail' + description: "Detail of dump phase, if applicable." + type: object + loadDetail: + allOf: + - $ref: '#/components/schemas/LoadDetail' + description: "Detail of load phase, if applicable." + type: object + syncDetail: + allOf: + - $ref: '#/components/schemas/SyncDetail' + description: "Detail of sync phase, if applicable." + type: object + errorMsg: + description: Error message when the subtask fails. + nullable: true + readOnly: true + type: string + type: object + SubTask.Stage: + description: |- + The high-level lifecycle stage of a subtask. + + - RUNNING: Subtask is running. + - PAUSED: Subtask is paused. + - FAILED: Subtask failed. + - FINISHED: Subtask finished successfully. + - UNKNOWN: Subtask stage is unknown. 
+ enum: + - RUNNING + - PAUSED + - FAILED + - FINISHED + - UNKNOWN + type: string + SubTask.Step: + description: |- + The current step within a subtask. + + - DUMP: Dump/export data from source. + - LOAD: Load/import data into target. + - SYNC: Sync/replicate binlog changes. + enum: + - DUMP + - LOAD + - SYNC + type: string + SyncDetail: + properties: + rps: + description: Rows processed per second during sync. + format: int64 + readOnly: true + type: string + latency: + description: Replication latency in seconds. + format: int64 + readOnly: true + type: string + checkpoint: + description: Synchronization checkpoint. + readOnly: true + type: string + type: object + Table: + properties: + schema: + description: Schema name. + type: string + table: + description: Table name. + type: string + type: object + Target: + properties: + user: + description: Target database user. + type: string + password: + description: Target database password. + type: string + required: + - password + - user + type: object + TaskMode: + description: |- + Migration task mode. + + - ALL: Full + incremental migration (all phases). + - INCREMENTAL: Incremental-only migration (replication). + enum: + - ALL + - INCREMENTAL + type: string +x-original-swagger-version: "2.0" diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go b/pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go new file mode 100644 index 00000000..f1844f06 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go @@ -0,0 +1,1114 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package migration + +import ( + "bytes" + "context" + "io" + "net/http" + "net/url" + "strings" +) + +// MigrationAPIService MigrationAPI service +type MigrationAPIService service + +type ApiMigrationServiceCancelPrecheckRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + precheckId string +} + +func (r ApiMigrationServiceCancelPrecheckRequest) Execute() (map[string]interface{}, *http.Response, error) { + return r.ApiService.MigrationServiceCancelPrecheckExecute(r) +} + +/* +MigrationServiceCancelPrecheck Cancel a migration precheck + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster. + @param precheckId The ID of the precheck to cancel. + @return ApiMigrationServiceCancelPrecheckRequest +*/ +func (a *MigrationAPIService) MigrationServiceCancelPrecheck(ctx context.Context, clusterId string, precheckId string) ApiMigrationServiceCancelPrecheckRequest { + return ApiMigrationServiceCancelPrecheckRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + precheckId: precheckId, + } +} + +// Execute executes the request +// +// @return map[string]interface{} +func (a *MigrationAPIService) MigrationServiceCancelPrecheckExecute(r ApiMigrationServiceCancelPrecheckRequest) (map[string]interface{}, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodDelete + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue map[string]interface{} + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCancelPrecheck") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId}" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", 
url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"precheckId"+"}", url.PathEscape(parameterValueToString(r.precheckId, "precheckId")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = 
formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServiceCreateMigrationRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + body *MigrationServiceCreateMigrationBody +} + +func (r ApiMigrationServiceCreateMigrationRequest) Body(body MigrationServiceCreateMigrationBody) ApiMigrationServiceCreateMigrationRequest { + r.body = &body + return r +} + +func (r ApiMigrationServiceCreateMigrationRequest) Execute() (*Migration, *http.Response, error) { + return r.ApiService.MigrationServiceCreateMigrationExecute(r) +} + +/* +MigrationServiceCreateMigration Create a migration + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster to create the migration in. 
+ @return ApiMigrationServiceCreateMigrationRequest +*/ +func (a *MigrationAPIService) MigrationServiceCreateMigration(ctx context.Context, clusterId string) ApiMigrationServiceCreateMigrationRequest { + return ApiMigrationServiceCreateMigrationRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + } +} + +// Execute executes the request +// +// @return Migration +func (a *MigrationAPIService) MigrationServiceCreateMigrationExecute(r ApiMigrationServiceCreateMigrationRequest) (*Migration, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodPost + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue *Migration + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCreateMigration") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + if r.body == nil { + return localVarReturnValue, nil, reportError("body is required and must be specified") + } + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{"application/json"} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + // body params + localVarPostBody 
= r.body + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServiceDeleteMigrationRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + migrationId string +} + +func (r ApiMigrationServiceDeleteMigrationRequest) Execute() (*Migration, *http.Response, error) { + return r.ApiService.MigrationServiceDeleteMigrationExecute(r) +} + +/* +MigrationServiceDeleteMigration Delete a migration + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. 
Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster. + @param migrationId The ID of the migration to delete. + @return ApiMigrationServiceDeleteMigrationRequest +*/ +func (a *MigrationAPIService) MigrationServiceDeleteMigration(ctx context.Context, clusterId string, migrationId string) ApiMigrationServiceDeleteMigrationRequest { + return ApiMigrationServiceDeleteMigrationRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + migrationId: migrationId, + } +} + +// Execute executes the request +// +// @return Migration +func (a *MigrationAPIService) MigrationServiceDeleteMigrationExecute(r ApiMigrationServiceDeleteMigrationRequest) (*Migration, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodDelete + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue *Migration + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceDeleteMigration") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"migrationId"+"}", url.PathEscape(parameterValueToString(r.migrationId, "migrationId")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set 
Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServiceGetMigrationRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + migrationId string +} + +func (r ApiMigrationServiceGetMigrationRequest) Execute() (*Migration, *http.Response, error) { + return 
r.ApiService.MigrationServiceGetMigrationExecute(r) +} + +/* +MigrationServiceGetMigration Get a migration + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster. + @param migrationId The ID of the migration. + @return ApiMigrationServiceGetMigrationRequest +*/ +func (a *MigrationAPIService) MigrationServiceGetMigration(ctx context.Context, clusterId string, migrationId string) ApiMigrationServiceGetMigrationRequest { + return ApiMigrationServiceGetMigrationRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + migrationId: migrationId, + } +} + +// Execute executes the request +// +// @return Migration +func (a *MigrationAPIService) MigrationServiceGetMigrationExecute(r ApiMigrationServiceGetMigrationRequest) (*Migration, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodGet + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue *Migration + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceGetMigration") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"migrationId"+"}", url.PathEscape(parameterValueToString(r.migrationId, "migrationId")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + 
localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServiceGetPrecheckRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + 
precheckId string +} + +func (r ApiMigrationServiceGetPrecheckRequest) Execute() (*MigrationPrecheck, *http.Response, error) { + return r.ApiService.MigrationServiceGetPrecheckExecute(r) +} + +/* +MigrationServiceGetPrecheck Get a migration precheck + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster. + @param precheckId The ID of the precheck. + @return ApiMigrationServiceGetPrecheckRequest +*/ +func (a *MigrationAPIService) MigrationServiceGetPrecheck(ctx context.Context, clusterId string, precheckId string) ApiMigrationServiceGetPrecheckRequest { + return ApiMigrationServiceGetPrecheckRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + precheckId: precheckId, + } +} + +// Execute executes the request +// +// @return MigrationPrecheck +func (a *MigrationAPIService) MigrationServiceGetPrecheckExecute(r ApiMigrationServiceGetPrecheckRequest) (*MigrationPrecheck, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodGet + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue *MigrationPrecheck + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceGetPrecheck") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId}" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"precheckId"+"}", url.PathEscape(parameterValueToString(r.precheckId, "precheckId")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + + // to determine the Content-Type header + localVarHTTPContentTypes := 
[]string{} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, 
localVarHTTPResponse, nil +} + +type ApiMigrationServiceListMigrationsRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + pageToken *string + pageSize *int32 + orderBy *string +} + +// Optional. The page token, default is empty. +func (r ApiMigrationServiceListMigrationsRequest) PageToken(pageToken string) ApiMigrationServiceListMigrationsRequest { + r.pageToken = &pageToken + return r +} + +// Optional. The page size, default is 10. +func (r ApiMigrationServiceListMigrationsRequest) PageSize(pageSize int32) ApiMigrationServiceListMigrationsRequest { + r.pageSize = &pageSize + return r +} + +// Specifies the sorting order of results. Use a comma-separated list of field names, optionally appending `desc` for descending order. For example, `createTime desc`. By default, fields are sorted in ascending order. Supported field: `createTime`. +func (r ApiMigrationServiceListMigrationsRequest) OrderBy(orderBy string) ApiMigrationServiceListMigrationsRequest { + r.orderBy = &orderBy + return r +} + +func (r ApiMigrationServiceListMigrationsRequest) Execute() (*ListMigrationsResp, *http.Response, error) { + return r.ApiService.MigrationServiceListMigrationsExecute(r) +} + +/* +MigrationServiceListMigrations List migrations + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster to list migrations for. 
+ @return ApiMigrationServiceListMigrationsRequest +*/ +func (a *MigrationAPIService) MigrationServiceListMigrations(ctx context.Context, clusterId string) ApiMigrationServiceListMigrationsRequest { + return ApiMigrationServiceListMigrationsRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + } +} + +// Execute executes the request +// +// @return ListMigrationsResp +func (a *MigrationAPIService) MigrationServiceListMigrationsExecute(r ApiMigrationServiceListMigrationsRequest) (*ListMigrationsResp, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodGet + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue *ListMigrationsResp + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceListMigrations") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + + if r.pageToken != nil { + parameterAddToHeaderOrQuery(localVarQueryParams, "pageToken", r.pageToken, "", "") + } + if r.pageSize != nil { + parameterAddToHeaderOrQuery(localVarQueryParams, "pageSize", r.pageSize, "", "") + } else { + var defaultValue int32 = 10 + r.pageSize = &defaultValue + } + if r.orderBy != nil { + parameterAddToHeaderOrQuery(localVarQueryParams, "orderBy", r.orderBy, "", "") + } + // to determine the Content-Type header + localVarHTTPContentTypes := []string{} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + 
localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServicePauseMigrationRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + migrationId string + body *map[string]interface{} +} + +func (r ApiMigrationServicePauseMigrationRequest) 
Body(body map[string]interface{}) ApiMigrationServicePauseMigrationRequest { + r.body = &body + return r +} + +func (r ApiMigrationServicePauseMigrationRequest) Execute() (map[string]interface{}, *http.Response, error) { + return r.ApiService.MigrationServicePauseMigrationExecute(r) +} + +/* +MigrationServicePauseMigration Pause a running migration + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster. + @param migrationId The ID of the migration to pause. + @return ApiMigrationServicePauseMigrationRequest +*/ +func (a *MigrationAPIService) MigrationServicePauseMigration(ctx context.Context, clusterId string, migrationId string) ApiMigrationServicePauseMigrationRequest { + return ApiMigrationServicePauseMigrationRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + migrationId: migrationId, + } +} + +// Execute executes the request +// +// @return map[string]interface{} +func (a *MigrationAPIService) MigrationServicePauseMigrationExecute(r ApiMigrationServicePauseMigrationRequest) (map[string]interface{}, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodPost + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue map[string]interface{} + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServicePauseMigration") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}:pause" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"migrationId"+"}", url.PathEscape(parameterValueToString(r.migrationId, "migrationId")), -1) + + localVarHeaderParams := make(map[string]string) + 
localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + if r.body == nil { + return localVarReturnValue, nil, reportError("body is required and must be specified") + } + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{"application/json"} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + // body params + localVarPostBody = r.body + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, 
localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServicePrecheckRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + body *MigrationServicePrecheckBody +} + +func (r ApiMigrationServicePrecheckRequest) Body(body MigrationServicePrecheckBody) ApiMigrationServicePrecheckRequest { + r.body = &body + return r +} + +func (r ApiMigrationServicePrecheckRequest) Execute() (*CreateMigrationPrecheckResp, *http.Response, error) { + return r.ApiService.MigrationServicePrecheckExecute(r) +} + +/* +MigrationServicePrecheck Run a precheck for a migration + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster to create the migration in. 
+ @return ApiMigrationServicePrecheckRequest +*/ +func (a *MigrationAPIService) MigrationServicePrecheck(ctx context.Context, clusterId string) ApiMigrationServicePrecheckRequest { + return ApiMigrationServicePrecheckRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + } +} + +// Execute executes the request +// +// @return CreateMigrationPrecheckResp +func (a *MigrationAPIService) MigrationServicePrecheckExecute(r ApiMigrationServicePrecheckRequest) (*CreateMigrationPrecheckResp, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodPost + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue *CreateMigrationPrecheckResp + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServicePrecheck") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrationPrechecks" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + if r.body == nil { + return localVarReturnValue, nil, reportError("body is required and must be specified") + } + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{"application/json"} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + // body params + 
localVarPostBody = r.body + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServiceResumeMigrationRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + migrationId string + body *map[string]interface{} +} + +func (r ApiMigrationServiceResumeMigrationRequest) Body(body map[string]interface{}) ApiMigrationServiceResumeMigrationRequest { + r.body = &body + return r +} + +func (r ApiMigrationServiceResumeMigrationRequest) Execute() (map[string]interface{}, *http.Response, error) { + return 
r.ApiService.MigrationServiceResumeMigrationExecute(r) +} + +/* +MigrationServiceResumeMigration Resume a paused migration + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster. + @param migrationId The ID of the migration to resume. + @return ApiMigrationServiceResumeMigrationRequest +*/ +func (a *MigrationAPIService) MigrationServiceResumeMigration(ctx context.Context, clusterId string, migrationId string) ApiMigrationServiceResumeMigrationRequest { + return ApiMigrationServiceResumeMigrationRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + migrationId: migrationId, + } +} + +// Execute executes the request +// +// @return map[string]interface{} +func (a *MigrationAPIService) MigrationServiceResumeMigrationExecute(r ApiMigrationServiceResumeMigrationRequest) (map[string]interface{}, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodPost + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue map[string]interface{} + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceResumeMigration") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}:resume" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"migrationId"+"}", url.PathEscape(parameterValueToString(r.migrationId, "migrationId")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + if r.body == nil { + return localVarReturnValue, nil, reportError("body is required and must be specified") + } + + // to determine the 
Content-Type header + localVarHTTPContentTypes := []string{"application/json"} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + // body params + localVarPostBody = r.body + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: 
err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/client.go b/pkg/tidbcloud/v1beta1/serverless/migration/client.go new file mode 100644 index 00000000..a3518159 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/client.go @@ -0,0 +1,655 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io" + "log" + "mime/multipart" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path/filepath" + "reflect" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +var ( + JsonCheck = regexp.MustCompile(`(?i:(?:application|text)/(?:[^;]+\+)?json)`) + XmlCheck = regexp.MustCompile(`(?i:(?:application|text)/(?:[^;]+\+)?xml)`) + queryParamSplit = regexp.MustCompile(`(^|&)([^&]+)`) + queryDescape = strings.NewReplacer("%5B", "[", "%5D", "]") +) + +// APIClient manages communication with the TiDB Cloud Starter and Essential API API vv1beta1 +// In most cases there should be only one, shared, APIClient. +type APIClient struct { + cfg *Configuration + common service // Reuse a single struct instead of allocating one for each service on the heap. + + // API Services + + MigrationAPI *MigrationAPIService +} + +type service struct { + client *APIClient +} + +// NewAPIClient creates a new API client. Requires a userAgent string describing your application. +// optionally a custom http.Client to allow for advanced features such as caching. 
+func NewAPIClient(cfg *Configuration) *APIClient { + if cfg.HTTPClient == nil { + cfg.HTTPClient = http.DefaultClient + } + + c := &APIClient{} + c.cfg = cfg + c.common.client = c + + // API Services + c.MigrationAPI = (*MigrationAPIService)(&c.common) + + return c +} + +func atoi(in string) (int, error) { + return strconv.Atoi(in) +} + +// selectHeaderContentType select a content type from the available list. +func selectHeaderContentType(contentTypes []string) string { + if len(contentTypes) == 0 { + return "" + } + if contains(contentTypes, "application/json") { + return "application/json" + } + return contentTypes[0] // use the first content type specified in 'consumes' +} + +// selectHeaderAccept join all accept types and return +func selectHeaderAccept(accepts []string) string { + if len(accepts) == 0 { + return "" + } + + if contains(accepts, "application/json") { + return "application/json" + } + + return strings.Join(accepts, ",") +} + +// contains is a case insensitive match, finding needle in a haystack +func contains(haystack []string, needle string) bool { + for _, a := range haystack { + if strings.EqualFold(a, needle) { + return true + } + } + return false +} + +// Verify optional parameters are of the correct type. +func typeCheckParameter(obj interface{}, expected string, name string) error { + // Make sure there is an object. + if obj == nil { + return nil + } + + // Check the type is as expected. 
+ if reflect.TypeOf(obj).String() != expected { + return fmt.Errorf("expected %s to be of type %s but received %s", name, expected, reflect.TypeOf(obj).String()) + } + return nil +} + +func parameterValueToString(obj interface{}, key string) string { + if reflect.TypeOf(obj).Kind() != reflect.Ptr { + if actualObj, ok := obj.(interface{ GetActualInstanceValue() interface{} }); ok { + return fmt.Sprintf("%v", actualObj.GetActualInstanceValue()) + } + + return fmt.Sprintf("%v", obj) + } + var param, ok = obj.(MappedNullable) + if !ok { + return "" + } + dataMap, err := param.ToMap() + if err != nil { + return "" + } + return fmt.Sprintf("%v", dataMap[key]) +} + +// parameterAddToHeaderOrQuery adds the provided object to the request header or url query +// supporting deep object syntax +func parameterAddToHeaderOrQuery(headerOrQueryParams interface{}, keyPrefix string, obj interface{}, style string, collectionType string) { + var v = reflect.ValueOf(obj) + var value = "" + if v == reflect.ValueOf(nil) { + value = "null" + } else { + switch v.Kind() { + case reflect.Invalid: + value = "invalid" + + case reflect.Struct: + if t, ok := obj.(MappedNullable); ok { + dataMap, err := t.ToMap() + if err != nil { + return + } + parameterAddToHeaderOrQuery(headerOrQueryParams, keyPrefix, dataMap, style, collectionType) + return + } + if t, ok := obj.(time.Time); ok { + parameterAddToHeaderOrQuery(headerOrQueryParams, keyPrefix, t.Format(time.RFC3339Nano), style, collectionType) + return + } + value = v.Type().String() + " value" + case reflect.Slice: + var indValue = reflect.ValueOf(obj) + if indValue == reflect.ValueOf(nil) { + return + } + var lenIndValue = indValue.Len() + for i := 0; i < lenIndValue; i++ { + var arrayValue = indValue.Index(i) + var keyPrefixForCollectionType = keyPrefix + if style == "deepObject" { + keyPrefixForCollectionType = keyPrefix + "[" + strconv.Itoa(i) + "]" + } + parameterAddToHeaderOrQuery(headerOrQueryParams, keyPrefixForCollectionType, 
arrayValue.Interface(), style, collectionType) + } + return + + case reflect.Map: + var indValue = reflect.ValueOf(obj) + if indValue == reflect.ValueOf(nil) { + return + } + iter := indValue.MapRange() + for iter.Next() { + k, v := iter.Key(), iter.Value() + parameterAddToHeaderOrQuery(headerOrQueryParams, fmt.Sprintf("%s[%s]", keyPrefix, k.String()), v.Interface(), style, collectionType) + } + return + + case reflect.Interface: + fallthrough + case reflect.Ptr: + parameterAddToHeaderOrQuery(headerOrQueryParams, keyPrefix, v.Elem().Interface(), style, collectionType) + return + + case reflect.Int, reflect.Int8, reflect.Int16, + reflect.Int32, reflect.Int64: + value = strconv.FormatInt(v.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64, reflect.Uintptr: + value = strconv.FormatUint(v.Uint(), 10) + case reflect.Float32, reflect.Float64: + value = strconv.FormatFloat(v.Float(), 'g', -1, 32) + case reflect.Bool: + value = strconv.FormatBool(v.Bool()) + case reflect.String: + value = v.String() + default: + value = v.Type().String() + " value" + } + } + + switch valuesMap := headerOrQueryParams.(type) { + case url.Values: + if collectionType == "csv" && valuesMap.Get(keyPrefix) != "" { + valuesMap.Set(keyPrefix, valuesMap.Get(keyPrefix)+","+value) + } else { + valuesMap.Add(keyPrefix, value) + } + break + case map[string]string: + valuesMap[keyPrefix] = value + break + } +} + +// helper for converting interface{} parameters to json strings +func parameterToJson(obj interface{}) (string, error) { + jsonBuf, err := json.Marshal(obj) + if err != nil { + return "", err + } + return string(jsonBuf), err +} + +// callAPI do the request. 
+func (c *APIClient) callAPI(request *http.Request) (*http.Response, error) { + if c.cfg.Debug { + dump, err := httputil.DumpRequestOut(request, true) + if err != nil { + return nil, err + } + log.Printf("\n%s\n", string(dump)) + } + + resp, err := c.cfg.HTTPClient.Do(request) + if err != nil { + return resp, err + } + + if c.cfg.Debug { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return resp, err + } + log.Printf("\n%s\n", string(dump)) + } + return resp, err +} + +// Allow modification of underlying config for alternate implementations and testing +// Caution: modifying the configuration while live can cause data races and potentially unwanted behavior +func (c *APIClient) GetConfig() *Configuration { + return c.cfg +} + +type formFile struct { + fileBytes []byte + fileName string + formFileName string +} + +// prepareRequest build the request +func (c *APIClient) prepareRequest( + ctx context.Context, + path string, method string, + postBody interface{}, + headerParams map[string]string, + queryParams url.Values, + formParams url.Values, + formFiles []formFile) (localVarRequest *http.Request, err error) { + + var body *bytes.Buffer + + // Detect postBody type and post. + if postBody != nil { + contentType := headerParams["Content-Type"] + if contentType == "" { + contentType = detectContentType(postBody) + headerParams["Content-Type"] = contentType + } + + body, err = setBody(postBody, contentType) + if err != nil { + return nil, err + } + } + + // add form parameters and file if available. 
+ if strings.HasPrefix(headerParams["Content-Type"], "multipart/form-data") && len(formParams) > 0 || (len(formFiles) > 0) { + if body != nil { + return nil, errors.New("Cannot specify postBody and multipart form at the same time.") + } + body = &bytes.Buffer{} + w := multipart.NewWriter(body) + + for k, v := range formParams { + for _, iv := range v { + if strings.HasPrefix(k, "@") { // file + err = addFile(w, k[1:], iv) + if err != nil { + return nil, err + } + } else { // form value + w.WriteField(k, iv) + } + } + } + for _, formFile := range formFiles { + if len(formFile.fileBytes) > 0 && formFile.fileName != "" { + w.Boundary() + part, err := w.CreateFormFile(formFile.formFileName, filepath.Base(formFile.fileName)) + if err != nil { + return nil, err + } + _, err = part.Write(formFile.fileBytes) + if err != nil { + return nil, err + } + } + } + + // Set the Boundary in the Content-Type + headerParams["Content-Type"] = w.FormDataContentType() + + // Set Content-Length + headerParams["Content-Length"] = fmt.Sprintf("%d", body.Len()) + w.Close() + } + + if strings.HasPrefix(headerParams["Content-Type"], "application/x-www-form-urlencoded") && len(formParams) > 0 { + if body != nil { + return nil, errors.New("Cannot specify postBody and x-www-form-urlencoded form at the same time.") + } + body = &bytes.Buffer{} + body.WriteString(formParams.Encode()) + // Set Content-Length + headerParams["Content-Length"] = fmt.Sprintf("%d", body.Len()) + } + + // Setup path and query parameters + url, err := url.Parse(path) + if err != nil { + return nil, err + } + + // Override request host, if applicable + if c.cfg.Host != "" { + url.Host = c.cfg.Host + } + + // Override request scheme, if applicable + if c.cfg.Scheme != "" { + url.Scheme = c.cfg.Scheme + } + + // Adding Query Param + query := url.Query() + for k, v := range queryParams { + for _, iv := range v { + query.Add(k, iv) + } + } + + // Encode the parameters. 
+ url.RawQuery = queryParamSplit.ReplaceAllStringFunc(query.Encode(), func(s string) string { + pieces := strings.Split(s, "=") + pieces[0] = queryDescape.Replace(pieces[0]) + return strings.Join(pieces, "=") + }) + + // Generate a new request + if body != nil { + localVarRequest, err = http.NewRequest(method, url.String(), body) + } else { + localVarRequest, err = http.NewRequest(method, url.String(), nil) + } + if err != nil { + return nil, err + } + + // add header parameters, if any + if len(headerParams) > 0 { + headers := http.Header{} + for h, v := range headerParams { + headers[h] = []string{v} + } + localVarRequest.Header = headers + } + + // Add the user agent to the request. + localVarRequest.Header.Add("User-Agent", c.cfg.UserAgent) + + if ctx != nil { + // add context to the request + localVarRequest = localVarRequest.WithContext(ctx) + + // Walk through any authentication. + + } + + for header, value := range c.cfg.DefaultHeader { + localVarRequest.Header.Add(header, value) + } + return localVarRequest, nil +} + +func (c *APIClient) decode(v interface{}, b []byte, contentType string) (err error) { + if len(b) == 0 { + return nil + } + if s, ok := v.(*string); ok { + *s = string(b) + return nil + } + if f, ok := v.(*os.File); ok { + f, err = os.CreateTemp("", "HttpClientFile") + if err != nil { + return + } + _, err = f.Write(b) + if err != nil { + return + } + _, err = f.Seek(0, io.SeekStart) + return + } + if f, ok := v.(**os.File); ok { + *f, err = os.CreateTemp("", "HttpClientFile") + if err != nil { + return + } + _, err = (*f).Write(b) + if err != nil { + return + } + _, err = (*f).Seek(0, io.SeekStart) + return + } + if XmlCheck.MatchString(contentType) { + if err = xml.Unmarshal(b, v); err != nil { + return err + } + return nil + } + if JsonCheck.MatchString(contentType) { + if actualObj, ok := v.(interface{ GetActualInstance() interface{} }); ok { // oneOf, anyOf schemas + if unmarshalObj, ok := actualObj.(interface{ UnmarshalJSON([]byte) 
error }); ok { // make sure it has UnmarshalJSON defined + if err = unmarshalObj.UnmarshalJSON(b); err != nil { + return err + } + } else { + return errors.New("Unknown type with GetActualInstance but no unmarshalObj.UnmarshalJSON defined") + } + } else if err = json.Unmarshal(b, v); err != nil { // simple model + return err + } + return nil + } + return errors.New("undefined response type") +} + +// Add a file to the multipart request +func addFile(w *multipart.Writer, fieldName, path string) error { + file, err := os.Open(filepath.Clean(path)) + if err != nil { + return err + } + err = file.Close() + if err != nil { + return err + } + + part, err := w.CreateFormFile(fieldName, filepath.Base(path)) + if err != nil { + return err + } + _, err = io.Copy(part, file) + + return err +} + +// Set request body from an interface{} +func setBody(body interface{}, contentType string) (bodyBuf *bytes.Buffer, err error) { + if bodyBuf == nil { + bodyBuf = &bytes.Buffer{} + } + + if reader, ok := body.(io.Reader); ok { + _, err = bodyBuf.ReadFrom(reader) + } else if fp, ok := body.(*os.File); ok { + _, err = bodyBuf.ReadFrom(fp) + } else if b, ok := body.([]byte); ok { + _, err = bodyBuf.Write(b) + } else if s, ok := body.(string); ok { + _, err = bodyBuf.WriteString(s) + } else if s, ok := body.(*string); ok { + _, err = bodyBuf.WriteString(*s) + } else if JsonCheck.MatchString(contentType) { + err = json.NewEncoder(bodyBuf).Encode(body) + } else if XmlCheck.MatchString(contentType) { + var bs []byte + bs, err = xml.Marshal(body) + if err == nil { + bodyBuf.Write(bs) + } + } + + if err != nil { + return nil, err + } + + if bodyBuf.Len() == 0 { + err = fmt.Errorf("invalid body type %s\n", contentType) + return nil, err + } + return bodyBuf, nil +} + +// detectContentType method is used to figure out `Request.Body` content type for request header +func detectContentType(body interface{}) string { + contentType := "text/plain; charset=utf-8" + kind := reflect.TypeOf(body).Kind() 
+ + switch kind { + case reflect.Struct, reflect.Map, reflect.Ptr: + contentType = "application/json; charset=utf-8" + case reflect.String: + contentType = "text/plain; charset=utf-8" + default: + if b, ok := body.([]byte); ok { + contentType = http.DetectContentType(b) + } else if kind == reflect.Slice { + contentType = "application/json; charset=utf-8" + } + } + + return contentType +} + +// Ripped from https://github.com/gregjones/httpcache/blob/master/httpcache.go +type cacheControl map[string]string + +func parseCacheControl(headers http.Header) cacheControl { + cc := cacheControl{} + ccHeader := headers.Get("Cache-Control") + for _, part := range strings.Split(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + keyval := strings.Split(part, "=") + cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") + } else { + cc[part] = "" + } + } + return cc +} + +// CacheExpires helper function to determine remaining time before repeating a request. +func CacheExpires(r *http.Response) time.Time { + // Figure out when the cache expires. + var expires time.Time + now, err := time.Parse(time.RFC1123, r.Header.Get("date")) + if err != nil { + return time.Now() + } + respCacheControl := parseCacheControl(r.Header) + + if maxAge, ok := respCacheControl["max-age"]; ok { + lifetime, err := time.ParseDuration(maxAge + "s") + if err != nil { + expires = now + } else { + expires = now.Add(lifetime) + } + } else { + expiresHeader := r.Header.Get("Expires") + if expiresHeader != "" { + expires, err = time.Parse(time.RFC1123, expiresHeader) + if err != nil { + expires = now + } + } + } + return expires +} + +func strlen(s string) int { + return utf8.RuneCountInString(s) +} + +// GenericOpenAPIError Provides access to the body, error and model on returned errors. 
+type GenericOpenAPIError struct { + body []byte + error string + model interface{} +} + +// Error returns non-empty string if there was an error. +func (e GenericOpenAPIError) Error() string { + return e.error +} + +// Body returns the raw bytes of the response +func (e GenericOpenAPIError) Body() []byte { + return e.body +} + +// Model returns the unpacked model of the error +func (e GenericOpenAPIError) Model() interface{} { + return e.model +} + +// format error message using title and detail when model implements rfc7807 +func formatErrorMessage(status string, v interface{}) string { + str := "" + metaValue := reflect.ValueOf(v).Elem() + + if metaValue.Kind() == reflect.Struct { + field := metaValue.FieldByName("Title") + if field != (reflect.Value{}) { + str = fmt.Sprintf("%s", field.Interface()) + } + + field = metaValue.FieldByName("Detail") + if field != (reflect.Value{}) { + str = fmt.Sprintf("%s (%s)", str, field.Interface()) + } + } + + return strings.TrimSpace(fmt.Sprintf("%s %s", status, str)) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/configuration.go b/pkg/tidbcloud/v1beta1/serverless/migration/configuration.go new file mode 100644 index 00000000..8d803723 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/configuration.go @@ -0,0 +1,214 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "context" + "fmt" + "net/http" + "strings" +) + +// contextKeys are used to identify the type of value in the context. +// Since these are string, it is possible to get a short description of the +// context key for logging and debugging using key.String(). + +type contextKey string + +func (c contextKey) String() string { + return "auth " + string(c) +} + +var ( + // ContextServerIndex uses a server configuration from the index. 
+ ContextServerIndex = contextKey("serverIndex") + + // ContextOperationServerIndices uses a server configuration from the index mapping. + ContextOperationServerIndices = contextKey("serverOperationIndices") + + // ContextServerVariables overrides a server configuration variables. + ContextServerVariables = contextKey("serverVariables") + + // ContextOperationServerVariables overrides a server configuration variables using operation specific values. + ContextOperationServerVariables = contextKey("serverOperationVariables") +) + +// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth +type BasicAuth struct { + UserName string `json:"userName,omitempty"` + Password string `json:"password,omitempty"` +} + +// APIKey provides API key based authentication to a request passed via context using ContextAPIKey +type APIKey struct { + Key string + Prefix string +} + +// ServerVariable stores the information about a server variable +type ServerVariable struct { + Description string + DefaultValue string + EnumValues []string +} + +// ServerConfiguration stores the information about a server +type ServerConfiguration struct { + URL string + Description string + Variables map[string]ServerVariable +} + +// ServerConfigurations stores multiple ServerConfiguration items +type ServerConfigurations []ServerConfiguration + +// Configuration stores the configuration of the API client +type Configuration struct { + Host string `json:"host,omitempty"` + Scheme string `json:"scheme,omitempty"` + DefaultHeader map[string]string `json:"defaultHeader,omitempty"` + UserAgent string `json:"userAgent,omitempty"` + Debug bool `json:"debug,omitempty"` + Servers ServerConfigurations + OperationServers map[string]ServerConfigurations + HTTPClient *http.Client +} + +// NewConfiguration returns a new Configuration object +func NewConfiguration() *Configuration { + cfg := &Configuration{ + DefaultHeader: make(map[string]string), + UserAgent: 
"OpenAPI-Generator/1.0.0/go", + Debug: false, + Servers: ServerConfigurations{ + { + URL: "https://serverless.tidbapi.com", + Description: "No description provided", + }, + }, + OperationServers: map[string]ServerConfigurations{}, + } + return cfg +} + +// AddDefaultHeader adds a new HTTP header to the default header in the request +func (c *Configuration) AddDefaultHeader(key string, value string) { + c.DefaultHeader[key] = value +} + +// URL formats template on a index using given variables +func (sc ServerConfigurations) URL(index int, variables map[string]string) (string, error) { + if index < 0 || len(sc) <= index { + return "", fmt.Errorf("index %v out of range %v", index, len(sc)-1) + } + server := sc[index] + url := server.URL + + // go through variables and replace placeholders + for name, variable := range server.Variables { + if value, ok := variables[name]; ok { + found := bool(len(variable.EnumValues) == 0) + for _, enumValue := range variable.EnumValues { + if value == enumValue { + found = true + } + } + if !found { + return "", fmt.Errorf("the variable %s in the server URL has invalid value %v. 
Must be %v", name, value, variable.EnumValues) + } + url = strings.Replace(url, "{"+name+"}", value, -1) + } else { + url = strings.Replace(url, "{"+name+"}", variable.DefaultValue, -1) + } + } + return url, nil +} + +// ServerURL returns URL based on server settings +func (c *Configuration) ServerURL(index int, variables map[string]string) (string, error) { + return c.Servers.URL(index, variables) +} + +func getServerIndex(ctx context.Context) (int, error) { + si := ctx.Value(ContextServerIndex) + if si != nil { + if index, ok := si.(int); ok { + return index, nil + } + return 0, reportError("Invalid type %T should be int", si) + } + return 0, nil +} + +func getServerOperationIndex(ctx context.Context, endpoint string) (int, error) { + osi := ctx.Value(ContextOperationServerIndices) + if osi != nil { + if operationIndices, ok := osi.(map[string]int); !ok { + return 0, reportError("Invalid type %T should be map[string]int", osi) + } else { + index, ok := operationIndices[endpoint] + if ok { + return index, nil + } + } + } + return getServerIndex(ctx) +} + +func getServerVariables(ctx context.Context) (map[string]string, error) { + sv := ctx.Value(ContextServerVariables) + if sv != nil { + if variables, ok := sv.(map[string]string); ok { + return variables, nil + } + return nil, reportError("ctx value of ContextServerVariables has invalid type %T should be map[string]string", sv) + } + return nil, nil +} + +func getServerOperationVariables(ctx context.Context, endpoint string) (map[string]string, error) { + osv := ctx.Value(ContextOperationServerVariables) + if osv != nil { + if operationVariables, ok := osv.(map[string]map[string]string); !ok { + return nil, reportError("ctx value of ContextOperationServerVariables has invalid type %T should be map[string]map[string]string", osv) + } else { + variables, ok := operationVariables[endpoint] + if ok { + return variables, nil + } + } + } + return getServerVariables(ctx) +} + +// ServerURLWithContext returns a new server 
URL given an endpoint +func (c *Configuration) ServerURLWithContext(ctx context.Context, endpoint string) (string, error) { + sc, ok := c.OperationServers[endpoint] + if !ok { + sc = c.Servers + } + + if ctx == nil { + return sc.URL(0, nil) + } + + index, err := getServerOperationIndex(ctx, endpoint) + if err != nil { + return "", err + } + + variables, err := getServerOperationVariables(ctx, endpoint) + if err != nil { + return "", err + } + + return sc.URL(index, variables) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/git_push.sh b/pkg/tidbcloud/v1beta1/serverless/migration/git_push.sh new file mode 100644 index 00000000..f53a75d4 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/git_push.sh @@ -0,0 +1,57 @@ +#!/bin/sh +# ref: https://help.github.com/articles/adding-an-existing-project-to-github-using-the-command-line/ +# +# Usage example: /bin/sh ./git_push.sh wing328 openapi-petstore-perl "minor update" "gitlab.com" + +git_user_id=$1 +git_repo_id=$2 +release_note=$3 +git_host=$4 + +if [ "$git_host" = "" ]; then + git_host="github.com" + echo "[INFO] No command line input provided. Set \$git_host to $git_host" +fi + +if [ "$git_user_id" = "" ]; then + git_user_id="GIT_USER_ID" + echo "[INFO] No command line input provided. Set \$git_user_id to $git_user_id" +fi + +if [ "$git_repo_id" = "" ]; then + git_repo_id="GIT_REPO_ID" + echo "[INFO] No command line input provided. Set \$git_repo_id to $git_repo_id" +fi + +if [ "$release_note" = "" ]; then + release_note="Minor update" + echo "[INFO] No command line input provided. Set \$release_note to $release_note" +fi + +# Initialize the local directory as a Git repository +git init + +# Adds the files in the local repository and stages them for commit. +git add . + +# Commits the tracked changes and prepares them to be pushed to a remote repository. 
+git commit -m "$release_note" + +# Sets the new remote +git_remote=$(git remote) +if [ "$git_remote" = "" ]; then # git remote not defined + + if [ "$GIT_TOKEN" = "" ]; then + echo "[INFO] \$GIT_TOKEN (environment variable) is not set. Using the git credential in your environment." + git remote add origin https://${git_host}/${git_user_id}/${git_repo_id}.git + else + git remote add origin https://${git_user_id}:"${GIT_TOKEN}"@${git_host}/${git_user_id}/${git_repo_id}.git + fi + +fi + +git pull origin master + +# Pushes (Forces) the changes in the local repository up to the remote repository +echo "Git pushing to https://${git_host}/${git_user_id}/${git_repo_id}.git" +git push origin master 2>&1 | grep -v 'To https' diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_any.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_any.go new file mode 100644 index 00000000..ecdfe9e3 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_any.go @@ -0,0 +1,153 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package migration + +import ( + "encoding/json" +) + +// checks if the Any type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Any{} + +// Any struct for Any +type Any struct { + Type *string `json:"@type,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _Any Any + +// NewAny instantiates a new Any object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewAny() *Any { + this := Any{} + return &this +} + +// NewAnyWithDefaults instantiates a new Any object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewAnyWithDefaults() *Any { + this := Any{} + return &this +} + +// GetType returns the Type field value if set, zero value otherwise. +func (o *Any) GetType() string { + if o == nil || IsNil(o.Type) { + var ret string + return ret + } + return *o.Type +} + +// GetTypeOk returns a tuple with the Type field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Any) GetTypeOk() (*string, bool) { + if o == nil || IsNil(o.Type) { + return nil, false + } + return o.Type, true +} + +// HasType returns a boolean if a field has been set. +func (o *Any) HasType() bool { + if o != nil && !IsNil(o.Type) { + return true + } + + return false +} + +// SetType gets a reference to the given string and assigns it to the Type field. 
+func (o *Any) SetType(v string) { + o.Type = &v +} + +func (o Any) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Any) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Type) { + toSerialize["@type"] = o.Type + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *Any) UnmarshalJSON(data []byte) (err error) { + varAny := _Any{} + + err = json.Unmarshal(data, &varAny) + + if err != nil { + return err + } + + *o = Any(varAny) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "@type") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableAny struct { + value *Any + isSet bool +} + +func (v NullableAny) Get() *Any { + return v.value +} + +func (v *NullableAny) Set(val *Any) { + v.value = val + v.isSet = true +} + +func (v NullableAny) IsSet() bool { + return v.isSet +} + +func (v *NullableAny) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableAny(val *Any) *NullableAny { + return &NullableAny{value: val, isSet: true} +} + +func (v NullableAny) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableAny) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules.go new file mode 100644 index 00000000..3ddf2549 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules.go @@ -0,0 +1,192 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator 
(https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the BlockAllowRules type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BlockAllowRules{} + +// BlockAllowRules struct for BlockAllowRules +type BlockAllowRules struct { + // Database names to include in migration. + DoDbs []string `json:"doDbs,omitempty"` + // Table-level allow-list rules. + DoTables []Table `json:"doTables,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _BlockAllowRules BlockAllowRules + +// NewBlockAllowRules instantiates a new BlockAllowRules object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBlockAllowRules() *BlockAllowRules { + this := BlockAllowRules{} + return &this +} + +// NewBlockAllowRulesWithDefaults instantiates a new BlockAllowRules object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewBlockAllowRulesWithDefaults() *BlockAllowRules { + this := BlockAllowRules{} + return &this +} + +// GetDoDbs returns the DoDbs field value if set, zero value otherwise. +func (o *BlockAllowRules) GetDoDbs() []string { + if o == nil || IsNil(o.DoDbs) { + var ret []string + return ret + } + return o.DoDbs +} + +// GetDoDbsOk returns a tuple with the DoDbs field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BlockAllowRules) GetDoDbsOk() ([]string, bool) { + if o == nil || IsNil(o.DoDbs) { + return nil, false + } + return o.DoDbs, true +} + +// HasDoDbs returns a boolean if a field has been set. 
+func (o *BlockAllowRules) HasDoDbs() bool { + if o != nil && !IsNil(o.DoDbs) { + return true + } + + return false +} + +// SetDoDbs gets a reference to the given []string and assigns it to the DoDbs field. +func (o *BlockAllowRules) SetDoDbs(v []string) { + o.DoDbs = v +} + +// GetDoTables returns the DoTables field value if set, zero value otherwise. +func (o *BlockAllowRules) GetDoTables() []Table { + if o == nil || IsNil(o.DoTables) { + var ret []Table + return ret + } + return o.DoTables +} + +// GetDoTablesOk returns a tuple with the DoTables field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BlockAllowRules) GetDoTablesOk() ([]Table, bool) { + if o == nil || IsNil(o.DoTables) { + return nil, false + } + return o.DoTables, true +} + +// HasDoTables returns a boolean if a field has been set. +func (o *BlockAllowRules) HasDoTables() bool { + if o != nil && !IsNil(o.DoTables) { + return true + } + + return false +} + +// SetDoTables gets a reference to the given []Table and assigns it to the DoTables field. 
+func (o *BlockAllowRules) SetDoTables(v []Table) { + o.DoTables = v +} + +func (o BlockAllowRules) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BlockAllowRules) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.DoDbs) { + toSerialize["doDbs"] = o.DoDbs + } + if !IsNil(o.DoTables) { + toSerialize["doTables"] = o.DoTables + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *BlockAllowRules) UnmarshalJSON(data []byte) (err error) { + varBlockAllowRules := _BlockAllowRules{} + + err = json.Unmarshal(data, &varBlockAllowRules) + + if err != nil { + return err + } + + *o = BlockAllowRules(varBlockAllowRules) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "doDbs") + delete(additionalProperties, "doTables") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableBlockAllowRules struct { + value *BlockAllowRules + isSet bool +} + +func (v NullableBlockAllowRules) Get() *BlockAllowRules { + return v.value +} + +func (v *NullableBlockAllowRules) Set(val *BlockAllowRules) { + v.value = val + v.isSet = true +} + +func (v NullableBlockAllowRules) IsSet() bool { + return v.isSet +} + +func (v *NullableBlockAllowRules) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBlockAllowRules(val *BlockAllowRules) *NullableBlockAllowRules { + return &NullableBlockAllowRules{value: val, isSet: true} +} + +func (v NullableBlockAllowRules) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBlockAllowRules) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git 
a/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_profile.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_profile.go new file mode 100644 index 00000000..e6103377 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_profile.go @@ -0,0 +1,371 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" + "fmt" +) + +// checks if the ConnProfile type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &ConnProfile{} + +// ConnProfile struct for ConnProfile +type ConnProfile struct { + // Connection type (e.g., PUBLIC, PRIVATE_LINK). + ConnType ConnType `json:"connType"` + // Private link endpoint ID. + EndpointId *string `json:"endpointId,omitempty"` + // Source host. + Host *string `json:"host,omitempty"` + // Source port. + Port int32 `json:"port"` + // Source user. + User string `json:"user"` + // Source password. + Password string `json:"password"` + // TLS/SSL settings; if not set, use defaults. 
+ Security *Security `json:"security,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _ConnProfile ConnProfile + +// NewConnProfile instantiates a new ConnProfile object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewConnProfile(connType ConnType, port int32, user string, password string) *ConnProfile { + this := ConnProfile{} + this.ConnType = connType + this.Port = port + this.User = user + this.Password = password + return &this +} + +// NewConnProfileWithDefaults instantiates a new ConnProfile object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewConnProfileWithDefaults() *ConnProfile { + this := ConnProfile{} + return &this +} + +// GetConnType returns the ConnType field value +func (o *ConnProfile) GetConnType() ConnType { + if o == nil { + var ret ConnType + return ret + } + + return o.ConnType +} + +// GetConnTypeOk returns a tuple with the ConnType field value +// and a boolean to check if the value has been set. +func (o *ConnProfile) GetConnTypeOk() (*ConnType, bool) { + if o == nil { + return nil, false + } + return &o.ConnType, true +} + +// SetConnType sets field value +func (o *ConnProfile) SetConnType(v ConnType) { + o.ConnType = v +} + +// GetEndpointId returns the EndpointId field value if set, zero value otherwise. +func (o *ConnProfile) GetEndpointId() string { + if o == nil || IsNil(o.EndpointId) { + var ret string + return ret + } + return *o.EndpointId +} + +// GetEndpointIdOk returns a tuple with the EndpointId field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *ConnProfile) GetEndpointIdOk() (*string, bool) { + if o == nil || IsNil(o.EndpointId) { + return nil, false + } + return o.EndpointId, true +} + +// HasEndpointId returns a boolean if a field has been set. +func (o *ConnProfile) HasEndpointId() bool { + if o != nil && !IsNil(o.EndpointId) { + return true + } + + return false +} + +// SetEndpointId gets a reference to the given string and assigns it to the EndpointId field. +func (o *ConnProfile) SetEndpointId(v string) { + o.EndpointId = &v +} + +// GetHost returns the Host field value if set, zero value otherwise. +func (o *ConnProfile) GetHost() string { + if o == nil || IsNil(o.Host) { + var ret string + return ret + } + return *o.Host +} + +// GetHostOk returns a tuple with the Host field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ConnProfile) GetHostOk() (*string, bool) { + if o == nil || IsNil(o.Host) { + return nil, false + } + return o.Host, true +} + +// HasHost returns a boolean if a field has been set. +func (o *ConnProfile) HasHost() bool { + if o != nil && !IsNil(o.Host) { + return true + } + + return false +} + +// SetHost gets a reference to the given string and assigns it to the Host field. +func (o *ConnProfile) SetHost(v string) { + o.Host = &v +} + +// GetPort returns the Port field value +func (o *ConnProfile) GetPort() int32 { + if o == nil { + var ret int32 + return ret + } + + return o.Port +} + +// GetPortOk returns a tuple with the Port field value +// and a boolean to check if the value has been set. 
+func (o *ConnProfile) GetPortOk() (*int32, bool) { + if o == nil { + return nil, false + } + return &o.Port, true +} + +// SetPort sets field value +func (o *ConnProfile) SetPort(v int32) { + o.Port = v +} + +// GetUser returns the User field value +func (o *ConnProfile) GetUser() string { + if o == nil { + var ret string + return ret + } + + return o.User +} + +// GetUserOk returns a tuple with the User field value +// and a boolean to check if the value has been set. +func (o *ConnProfile) GetUserOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.User, true +} + +// SetUser sets field value +func (o *ConnProfile) SetUser(v string) { + o.User = v +} + +// GetPassword returns the Password field value +func (o *ConnProfile) GetPassword() string { + if o == nil { + var ret string + return ret + } + + return o.Password +} + +// GetPasswordOk returns a tuple with the Password field value +// and a boolean to check if the value has been set. +func (o *ConnProfile) GetPasswordOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Password, true +} + +// SetPassword sets field value +func (o *ConnProfile) SetPassword(v string) { + o.Password = v +} + +// GetSecurity returns the Security field value if set, zero value otherwise. +func (o *ConnProfile) GetSecurity() Security { + if o == nil || IsNil(o.Security) { + var ret Security + return ret + } + return *o.Security +} + +// GetSecurityOk returns a tuple with the Security field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ConnProfile) GetSecurityOk() (*Security, bool) { + if o == nil || IsNil(o.Security) { + return nil, false + } + return o.Security, true +} + +// HasSecurity returns a boolean if a field has been set. +func (o *ConnProfile) HasSecurity() bool { + if o != nil && !IsNil(o.Security) { + return true + } + + return false +} + +// SetSecurity gets a reference to the given Security and assigns it to the Security field. 
+func (o *ConnProfile) SetSecurity(v Security) { + o.Security = &v +} + +func (o ConnProfile) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o ConnProfile) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["connType"] = o.ConnType + if !IsNil(o.EndpointId) { + toSerialize["endpointId"] = o.EndpointId + } + if !IsNil(o.Host) { + toSerialize["host"] = o.Host + } + toSerialize["port"] = o.Port + toSerialize["user"] = o.User + toSerialize["password"] = o.Password + if !IsNil(o.Security) { + toSerialize["security"] = o.Security + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *ConnProfile) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "connType", + "port", + "user", + "password", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err + } + + for _, requiredProperty := range requiredProperties { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varConnProfile := _ConnProfile{} + + err = json.Unmarshal(data, &varConnProfile) + + if err != nil { + return err + } + + *o = ConnProfile(varConnProfile) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "connType") + delete(additionalProperties, "endpointId") + delete(additionalProperties, "host") + delete(additionalProperties, "port") + delete(additionalProperties, "user") + delete(additionalProperties, "password") + delete(additionalProperties, "security") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableConnProfile struct { + value *ConnProfile + isSet bool +} + +func (v NullableConnProfile) Get() *ConnProfile { + return v.value +} + +func (v *NullableConnProfile) Set(val *ConnProfile) { + v.value = val + v.isSet = true +} + +func (v NullableConnProfile) IsSet() bool { + return v.isSet +} + +func (v *NullableConnProfile) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableConnProfile(val *ConnProfile) *NullableConnProfile { + return &NullableConnProfile{value: val, isSet: true} +} + +func (v NullableConnProfile) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableConnProfile) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_type.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_type.go new file mode 100644 index 
00000000..a0a2e39a --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_type.go @@ -0,0 +1,105 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// ConnType The connection type used to connect to the source database. - PUBLIC: Connect over the public internet. - PRIVATE_LINK: Connect via Private Link/Private Endpoint. +type ConnType string + +// List of ConnType +const ( + CONNTYPE_PUBLIC ConnType = "PUBLIC" + CONNTYPE_PRIVATE_LINK ConnType = "PRIVATE_LINK" +) + +// All allowed values of ConnType enum +var AllowedConnTypeEnumValues = []ConnType{ + "PUBLIC", + "PRIVATE_LINK", +} + +func (v *ConnType) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := ConnType(value) + for _, existing := range AllowedConnTypeEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = ConnType(value) + return nil +} + +// NewConnTypeFromValue returns a pointer to a valid ConnType for the value passed as argument +func NewConnTypeFromValue(v string) *ConnType { + ev := ConnType(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v ConnType) IsValid() bool { + for _, existing := range AllowedConnTypeEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to ConnType value +func (v ConnType) Ptr() *ConnType { + return &v +} + +type NullableConnType struct { + value *ConnType + isSet bool +} + +func (v NullableConnType) Get() *ConnType { + return v.value +} + +func (v *NullableConnType) Set(val *ConnType) { + v.value = val + v.isSet = true +} + +func (v NullableConnType) IsSet() bool { + return v.isSet +} + +func (v *NullableConnType) 
Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableConnType(val *ConnType) *NullableConnType { + return &NullableConnType{value: val, isSet: true} +} + +func (v NullableConnType) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableConnType) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_create_migration_precheck_resp.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_create_migration_precheck_resp.go new file mode 100644 index 00000000..70d4f4ca --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_create_migration_precheck_resp.go @@ -0,0 +1,154 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the CreateMigrationPrecheckResp type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &CreateMigrationPrecheckResp{} + +// CreateMigrationPrecheckResp struct for CreateMigrationPrecheckResp +type CreateMigrationPrecheckResp struct { + // The ID of the created precheck. 
+ PrecheckId *string `json:"precheckId,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _CreateMigrationPrecheckResp CreateMigrationPrecheckResp + +// NewCreateMigrationPrecheckResp instantiates a new CreateMigrationPrecheckResp object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewCreateMigrationPrecheckResp() *CreateMigrationPrecheckResp { + this := CreateMigrationPrecheckResp{} + return &this +} + +// NewCreateMigrationPrecheckRespWithDefaults instantiates a new CreateMigrationPrecheckResp object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewCreateMigrationPrecheckRespWithDefaults() *CreateMigrationPrecheckResp { + this := CreateMigrationPrecheckResp{} + return &this +} + +// GetPrecheckId returns the PrecheckId field value if set, zero value otherwise. +func (o *CreateMigrationPrecheckResp) GetPrecheckId() string { + if o == nil || IsNil(o.PrecheckId) { + var ret string + return ret + } + return *o.PrecheckId +} + +// GetPrecheckIdOk returns a tuple with the PrecheckId field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *CreateMigrationPrecheckResp) GetPrecheckIdOk() (*string, bool) { + if o == nil || IsNil(o.PrecheckId) { + return nil, false + } + return o.PrecheckId, true +} + +// HasPrecheckId returns a boolean if a field has been set. +func (o *CreateMigrationPrecheckResp) HasPrecheckId() bool { + if o != nil && !IsNil(o.PrecheckId) { + return true + } + + return false +} + +// SetPrecheckId gets a reference to the given string and assigns it to the PrecheckId field. 
+func (o *CreateMigrationPrecheckResp) SetPrecheckId(v string) { + o.PrecheckId = &v +} + +func (o CreateMigrationPrecheckResp) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o CreateMigrationPrecheckResp) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.PrecheckId) { + toSerialize["precheckId"] = o.PrecheckId + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *CreateMigrationPrecheckResp) UnmarshalJSON(data []byte) (err error) { + varCreateMigrationPrecheckResp := _CreateMigrationPrecheckResp{} + + err = json.Unmarshal(data, &varCreateMigrationPrecheckResp) + + if err != nil { + return err + } + + *o = CreateMigrationPrecheckResp(varCreateMigrationPrecheckResp) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "precheckId") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableCreateMigrationPrecheckResp struct { + value *CreateMigrationPrecheckResp + isSet bool +} + +func (v NullableCreateMigrationPrecheckResp) Get() *CreateMigrationPrecheckResp { + return v.value +} + +func (v *NullableCreateMigrationPrecheckResp) Set(val *CreateMigrationPrecheckResp) { + v.value = val + v.isSet = true +} + +func (v NullableCreateMigrationPrecheckResp) IsSet() bool { + return v.isSet +} + +func (v *NullableCreateMigrationPrecheckResp) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableCreateMigrationPrecheckResp(val *CreateMigrationPrecheckResp) *NullableCreateMigrationPrecheckResp { + return &NullableCreateMigrationPrecheckResp{value: val, isSet: true} +} + +func (v NullableCreateMigrationPrecheckResp) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v 
*NullableCreateMigrationPrecheckResp) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_dump_detail.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_dump_detail.go new file mode 100644 index 00000000..54496692 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_dump_detail.go @@ -0,0 +1,344 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the DumpDetail type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &DumpDetail{} + +// DumpDetail struct for DumpDetail +type DumpDetail struct { + // Bytes per second processed during dump. + Bps *string `json:"bps,omitempty"` + // Progress of dump phase (0-100). + Progress *float64 `json:"progress,omitempty"` + // Total number of tables to dump. + TotalTables *string `json:"totalTables,omitempty"` + // Number of tables completed dumping. + CompletedTables *string `json:"completedTables,omitempty"` + // Total bytes finished dumping. + FinishedBytes *string `json:"finishedBytes,omitempty"` + // Total rows finished dumping. 
+ FinishedRows *string `json:"finishedRows,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _DumpDetail DumpDetail + +// NewDumpDetail instantiates a new DumpDetail object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewDumpDetail() *DumpDetail { + this := DumpDetail{} + return &this +} + +// NewDumpDetailWithDefaults instantiates a new DumpDetail object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewDumpDetailWithDefaults() *DumpDetail { + this := DumpDetail{} + return &this +} + +// GetBps returns the Bps field value if set, zero value otherwise. +func (o *DumpDetail) GetBps() string { + if o == nil || IsNil(o.Bps) { + var ret string + return ret + } + return *o.Bps +} + +// GetBpsOk returns a tuple with the Bps field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DumpDetail) GetBpsOk() (*string, bool) { + if o == nil || IsNil(o.Bps) { + return nil, false + } + return o.Bps, true +} + +// HasBps returns a boolean if a field has been set. +func (o *DumpDetail) HasBps() bool { + if o != nil && !IsNil(o.Bps) { + return true + } + + return false +} + +// SetBps gets a reference to the given string and assigns it to the Bps field. +func (o *DumpDetail) SetBps(v string) { + o.Bps = &v +} + +// GetProgress returns the Progress field value if set, zero value otherwise. +func (o *DumpDetail) GetProgress() float64 { + if o == nil || IsNil(o.Progress) { + var ret float64 + return ret + } + return *o.Progress +} + +// GetProgressOk returns a tuple with the Progress field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *DumpDetail) GetProgressOk() (*float64, bool) { + if o == nil || IsNil(o.Progress) { + return nil, false + } + return o.Progress, true +} + +// HasProgress returns a boolean if a field has been set. +func (o *DumpDetail) HasProgress() bool { + if o != nil && !IsNil(o.Progress) { + return true + } + + return false +} + +// SetProgress gets a reference to the given float64 and assigns it to the Progress field. +func (o *DumpDetail) SetProgress(v float64) { + o.Progress = &v +} + +// GetTotalTables returns the TotalTables field value if set, zero value otherwise. +func (o *DumpDetail) GetTotalTables() string { + if o == nil || IsNil(o.TotalTables) { + var ret string + return ret + } + return *o.TotalTables +} + +// GetTotalTablesOk returns a tuple with the TotalTables field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DumpDetail) GetTotalTablesOk() (*string, bool) { + if o == nil || IsNil(o.TotalTables) { + return nil, false + } + return o.TotalTables, true +} + +// HasTotalTables returns a boolean if a field has been set. +func (o *DumpDetail) HasTotalTables() bool { + if o != nil && !IsNil(o.TotalTables) { + return true + } + + return false +} + +// SetTotalTables gets a reference to the given string and assigns it to the TotalTables field. +func (o *DumpDetail) SetTotalTables(v string) { + o.TotalTables = &v +} + +// GetCompletedTables returns the CompletedTables field value if set, zero value otherwise. +func (o *DumpDetail) GetCompletedTables() string { + if o == nil || IsNil(o.CompletedTables) { + var ret string + return ret + } + return *o.CompletedTables +} + +// GetCompletedTablesOk returns a tuple with the CompletedTables field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *DumpDetail) GetCompletedTablesOk() (*string, bool) { + if o == nil || IsNil(o.CompletedTables) { + return nil, false + } + return o.CompletedTables, true +} + +// HasCompletedTables returns a boolean if a field has been set. +func (o *DumpDetail) HasCompletedTables() bool { + if o != nil && !IsNil(o.CompletedTables) { + return true + } + + return false +} + +// SetCompletedTables gets a reference to the given string and assigns it to the CompletedTables field. +func (o *DumpDetail) SetCompletedTables(v string) { + o.CompletedTables = &v +} + +// GetFinishedBytes returns the FinishedBytes field value if set, zero value otherwise. +func (o *DumpDetail) GetFinishedBytes() string { + if o == nil || IsNil(o.FinishedBytes) { + var ret string + return ret + } + return *o.FinishedBytes +} + +// GetFinishedBytesOk returns a tuple with the FinishedBytes field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DumpDetail) GetFinishedBytesOk() (*string, bool) { + if o == nil || IsNil(o.FinishedBytes) { + return nil, false + } + return o.FinishedBytes, true +} + +// HasFinishedBytes returns a boolean if a field has been set. +func (o *DumpDetail) HasFinishedBytes() bool { + if o != nil && !IsNil(o.FinishedBytes) { + return true + } + + return false +} + +// SetFinishedBytes gets a reference to the given string and assigns it to the FinishedBytes field. +func (o *DumpDetail) SetFinishedBytes(v string) { + o.FinishedBytes = &v +} + +// GetFinishedRows returns the FinishedRows field value if set, zero value otherwise. +func (o *DumpDetail) GetFinishedRows() string { + if o == nil || IsNil(o.FinishedRows) { + var ret string + return ret + } + return *o.FinishedRows +} + +// GetFinishedRowsOk returns a tuple with the FinishedRows field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *DumpDetail) GetFinishedRowsOk() (*string, bool) { + if o == nil || IsNil(o.FinishedRows) { + return nil, false + } + return o.FinishedRows, true +} + +// HasFinishedRows returns a boolean if a field has been set. +func (o *DumpDetail) HasFinishedRows() bool { + if o != nil && !IsNil(o.FinishedRows) { + return true + } + + return false +} + +// SetFinishedRows gets a reference to the given string and assigns it to the FinishedRows field. +func (o *DumpDetail) SetFinishedRows(v string) { + o.FinishedRows = &v +} + +func (o DumpDetail) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o DumpDetail) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Bps) { + toSerialize["bps"] = o.Bps + } + if !IsNil(o.Progress) { + toSerialize["progress"] = o.Progress + } + if !IsNil(o.TotalTables) { + toSerialize["totalTables"] = o.TotalTables + } + if !IsNil(o.CompletedTables) { + toSerialize["completedTables"] = o.CompletedTables + } + if !IsNil(o.FinishedBytes) { + toSerialize["finishedBytes"] = o.FinishedBytes + } + if !IsNil(o.FinishedRows) { + toSerialize["finishedRows"] = o.FinishedRows + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *DumpDetail) UnmarshalJSON(data []byte) (err error) { + varDumpDetail := _DumpDetail{} + + err = json.Unmarshal(data, &varDumpDetail) + + if err != nil { + return err + } + + *o = DumpDetail(varDumpDetail) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "bps") + delete(additionalProperties, "progress") + delete(additionalProperties, "totalTables") + delete(additionalProperties, "completedTables") + delete(additionalProperties, "finishedBytes") + delete(additionalProperties, "finishedRows") + 
o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableDumpDetail struct { + value *DumpDetail + isSet bool +} + +func (v NullableDumpDetail) Get() *DumpDetail { + return v.value +} + +func (v *NullableDumpDetail) Set(val *DumpDetail) { + v.value = val + v.isSet = true +} + +func (v NullableDumpDetail) IsSet() bool { + return v.isSet +} + +func (v *NullableDumpDetail) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableDumpDetail(val *DumpDetail) *NullableDumpDetail { + return &NullableDumpDetail{value: val, isSet: true} +} + +func (v NullableDumpDetail) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableDumpDetail) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_list_migrations_resp.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_list_migrations_resp.go new file mode 100644 index 00000000..4fa97dc3 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_list_migrations_resp.go @@ -0,0 +1,230 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the ListMigrationsResp type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &ListMigrationsResp{} + +// ListMigrationsResp struct for ListMigrationsResp +type ListMigrationsResp struct { + // The list of migrations. + Migrations []Migration `json:"migrations,omitempty"` + // The total number of migrations matching the query. + TotalSize *int64 `json:"totalSize,omitempty"` + // Token to retrieve the next page of results. 
+ NextPageToken *string `json:"nextPageToken,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _ListMigrationsResp ListMigrationsResp + +// NewListMigrationsResp instantiates a new ListMigrationsResp object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewListMigrationsResp() *ListMigrationsResp { + this := ListMigrationsResp{} + return &this +} + +// NewListMigrationsRespWithDefaults instantiates a new ListMigrationsResp object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewListMigrationsRespWithDefaults() *ListMigrationsResp { + this := ListMigrationsResp{} + return &this +} + +// GetMigrations returns the Migrations field value if set, zero value otherwise. +func (o *ListMigrationsResp) GetMigrations() []Migration { + if o == nil || IsNil(o.Migrations) { + var ret []Migration + return ret + } + return o.Migrations +} + +// GetMigrationsOk returns a tuple with the Migrations field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ListMigrationsResp) GetMigrationsOk() ([]Migration, bool) { + if o == nil || IsNil(o.Migrations) { + return nil, false + } + return o.Migrations, true +} + +// HasMigrations returns a boolean if a field has been set. +func (o *ListMigrationsResp) HasMigrations() bool { + if o != nil && !IsNil(o.Migrations) { + return true + } + + return false +} + +// SetMigrations gets a reference to the given []Migration and assigns it to the Migrations field. +func (o *ListMigrationsResp) SetMigrations(v []Migration) { + o.Migrations = v +} + +// GetTotalSize returns the TotalSize field value if set, zero value otherwise. 
+func (o *ListMigrationsResp) GetTotalSize() int64 { + if o == nil || IsNil(o.TotalSize) { + var ret int64 + return ret + } + return *o.TotalSize +} + +// GetTotalSizeOk returns a tuple with the TotalSize field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ListMigrationsResp) GetTotalSizeOk() (*int64, bool) { + if o == nil || IsNil(o.TotalSize) { + return nil, false + } + return o.TotalSize, true +} + +// HasTotalSize returns a boolean if a field has been set. +func (o *ListMigrationsResp) HasTotalSize() bool { + if o != nil && !IsNil(o.TotalSize) { + return true + } + + return false +} + +// SetTotalSize gets a reference to the given int64 and assigns it to the TotalSize field. +func (o *ListMigrationsResp) SetTotalSize(v int64) { + o.TotalSize = &v +} + +// GetNextPageToken returns the NextPageToken field value if set, zero value otherwise. +func (o *ListMigrationsResp) GetNextPageToken() string { + if o == nil || IsNil(o.NextPageToken) { + var ret string + return ret + } + return *o.NextPageToken +} + +// GetNextPageTokenOk returns a tuple with the NextPageToken field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ListMigrationsResp) GetNextPageTokenOk() (*string, bool) { + if o == nil || IsNil(o.NextPageToken) { + return nil, false + } + return o.NextPageToken, true +} + +// HasNextPageToken returns a boolean if a field has been set. +func (o *ListMigrationsResp) HasNextPageToken() bool { + if o != nil && !IsNil(o.NextPageToken) { + return true + } + + return false +} + +// SetNextPageToken gets a reference to the given string and assigns it to the NextPageToken field. 
+func (o *ListMigrationsResp) SetNextPageToken(v string) { + o.NextPageToken = &v +} + +func (o ListMigrationsResp) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o ListMigrationsResp) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Migrations) { + toSerialize["migrations"] = o.Migrations + } + if !IsNil(o.TotalSize) { + toSerialize["totalSize"] = o.TotalSize + } + if !IsNil(o.NextPageToken) { + toSerialize["nextPageToken"] = o.NextPageToken + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *ListMigrationsResp) UnmarshalJSON(data []byte) (err error) { + varListMigrationsResp := _ListMigrationsResp{} + + err = json.Unmarshal(data, &varListMigrationsResp) + + if err != nil { + return err + } + + *o = ListMigrationsResp(varListMigrationsResp) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "migrations") + delete(additionalProperties, "totalSize") + delete(additionalProperties, "nextPageToken") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableListMigrationsResp struct { + value *ListMigrationsResp + isSet bool +} + +func (v NullableListMigrationsResp) Get() *ListMigrationsResp { + return v.value +} + +func (v *NullableListMigrationsResp) Set(val *ListMigrationsResp) { + v.value = val + v.isSet = true +} + +func (v NullableListMigrationsResp) IsSet() bool { + return v.isSet +} + +func (v *NullableListMigrationsResp) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableListMigrationsResp(val *ListMigrationsResp) *NullableListMigrationsResp { + return &NullableListMigrationsResp{value: val, isSet: true} +} + +func (v NullableListMigrationsResp) MarshalJSON() ([]byte, error) { + 
return json.Marshal(v.value) +} + +func (v *NullableListMigrationsResp) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_load_detail.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_load_detail.go new file mode 100644 index 00000000..d0d3b7ba --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_load_detail.go @@ -0,0 +1,268 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the LoadDetail type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &LoadDetail{} + +// LoadDetail struct for LoadDetail +type LoadDetail struct { + // Bytes per second processed during load. + Bps *string `json:"bps,omitempty"` + // Progress of load phase (0-100). + Progress *float64 `json:"progress,omitempty"` + // Total bytes finished loading. + FinishedBytes *string `json:"finishedBytes,omitempty"` + // Total bytes to load. 
+ TotalBytes *string `json:"totalBytes,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _LoadDetail LoadDetail + +// NewLoadDetail instantiates a new LoadDetail object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewLoadDetail() *LoadDetail { + this := LoadDetail{} + return &this +} + +// NewLoadDetailWithDefaults instantiates a new LoadDetail object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewLoadDetailWithDefaults() *LoadDetail { + this := LoadDetail{} + return &this +} + +// GetBps returns the Bps field value if set, zero value otherwise. +func (o *LoadDetail) GetBps() string { + if o == nil || IsNil(o.Bps) { + var ret string + return ret + } + return *o.Bps +} + +// GetBpsOk returns a tuple with the Bps field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *LoadDetail) GetBpsOk() (*string, bool) { + if o == nil || IsNil(o.Bps) { + return nil, false + } + return o.Bps, true +} + +// HasBps returns a boolean if a field has been set. +func (o *LoadDetail) HasBps() bool { + if o != nil && !IsNil(o.Bps) { + return true + } + + return false +} + +// SetBps gets a reference to the given string and assigns it to the Bps field. +func (o *LoadDetail) SetBps(v string) { + o.Bps = &v +} + +// GetProgress returns the Progress field value if set, zero value otherwise. +func (o *LoadDetail) GetProgress() float64 { + if o == nil || IsNil(o.Progress) { + var ret float64 + return ret + } + return *o.Progress +} + +// GetProgressOk returns a tuple with the Progress field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *LoadDetail) GetProgressOk() (*float64, bool) { + if o == nil || IsNil(o.Progress) { + return nil, false + } + return o.Progress, true +} + +// HasProgress returns a boolean if a field has been set. +func (o *LoadDetail) HasProgress() bool { + if o != nil && !IsNil(o.Progress) { + return true + } + + return false +} + +// SetProgress gets a reference to the given float64 and assigns it to the Progress field. +func (o *LoadDetail) SetProgress(v float64) { + o.Progress = &v +} + +// GetFinishedBytes returns the FinishedBytes field value if set, zero value otherwise. +func (o *LoadDetail) GetFinishedBytes() string { + if o == nil || IsNil(o.FinishedBytes) { + var ret string + return ret + } + return *o.FinishedBytes +} + +// GetFinishedBytesOk returns a tuple with the FinishedBytes field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *LoadDetail) GetFinishedBytesOk() (*string, bool) { + if o == nil || IsNil(o.FinishedBytes) { + return nil, false + } + return o.FinishedBytes, true +} + +// HasFinishedBytes returns a boolean if a field has been set. +func (o *LoadDetail) HasFinishedBytes() bool { + if o != nil && !IsNil(o.FinishedBytes) { + return true + } + + return false +} + +// SetFinishedBytes gets a reference to the given string and assigns it to the FinishedBytes field. +func (o *LoadDetail) SetFinishedBytes(v string) { + o.FinishedBytes = &v +} + +// GetTotalBytes returns the TotalBytes field value if set, zero value otherwise. +func (o *LoadDetail) GetTotalBytes() string { + if o == nil || IsNil(o.TotalBytes) { + var ret string + return ret + } + return *o.TotalBytes +} + +// GetTotalBytesOk returns a tuple with the TotalBytes field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *LoadDetail) GetTotalBytesOk() (*string, bool) { + if o == nil || IsNil(o.TotalBytes) { + return nil, false + } + return o.TotalBytes, true +} + +// HasTotalBytes returns a boolean if a field has been set. +func (o *LoadDetail) HasTotalBytes() bool { + if o != nil && !IsNil(o.TotalBytes) { + return true + } + + return false +} + +// SetTotalBytes gets a reference to the given string and assigns it to the TotalBytes field. +func (o *LoadDetail) SetTotalBytes(v string) { + o.TotalBytes = &v +} + +func (o LoadDetail) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o LoadDetail) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Bps) { + toSerialize["bps"] = o.Bps + } + if !IsNil(o.Progress) { + toSerialize["progress"] = o.Progress + } + if !IsNil(o.FinishedBytes) { + toSerialize["finishedBytes"] = o.FinishedBytes + } + if !IsNil(o.TotalBytes) { + toSerialize["totalBytes"] = o.TotalBytes + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *LoadDetail) UnmarshalJSON(data []byte) (err error) { + varLoadDetail := _LoadDetail{} + + err = json.Unmarshal(data, &varLoadDetail) + + if err != nil { + return err + } + + *o = LoadDetail(varLoadDetail) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "bps") + delete(additionalProperties, "progress") + delete(additionalProperties, "finishedBytes") + delete(additionalProperties, "totalBytes") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableLoadDetail struct { + value *LoadDetail + isSet bool +} + +func (v NullableLoadDetail) Get() *LoadDetail { + return v.value +} + +func (v *NullableLoadDetail) Set(val *LoadDetail) { + v.value = val + v.isSet = true 
+} + +func (v NullableLoadDetail) IsSet() bool { + return v.isSet +} + +func (v *NullableLoadDetail) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableLoadDetail(val *LoadDetail) *NullableLoadDetail { + return &NullableLoadDetail{value: val, isSet: true} +} + +func (v NullableLoadDetail) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableLoadDetail) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration.go new file mode 100644 index 00000000..1012d2ec --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration.go @@ -0,0 +1,383 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" + "time" +) + +// checks if the Migration type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Migration{} + +// Migration struct for Migration +type Migration struct { + // The unique ID of the migration. + MigrationId *string `json:"migrationId,omitempty"` + // The display name of the migration. + DisplayName *string `json:"displayName,omitempty"` + // The list of subtasks composing this migration. + SubTasks []SubTask `json:"subTasks,omitempty"` + // The target database username used by the migration. + TargetUser *string `json:"targetUser,omitempty"` + // The timestamp when the migration was created. + CreateTime *time.Time `json:"createTime,omitempty"` + // The migration mode of the migration. + Mode *TaskMode `json:"mode,omitempty"` + // The current state of the migration. 
+ State *MigrationState `json:"state,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _Migration Migration + +// NewMigration instantiates a new Migration object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewMigration() *Migration { + this := Migration{} + return &this +} + +// NewMigrationWithDefaults instantiates a new Migration object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewMigrationWithDefaults() *Migration { + this := Migration{} + return &this +} + +// GetMigrationId returns the MigrationId field value if set, zero value otherwise. +func (o *Migration) GetMigrationId() string { + if o == nil || IsNil(o.MigrationId) { + var ret string + return ret + } + return *o.MigrationId +} + +// GetMigrationIdOk returns a tuple with the MigrationId field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Migration) GetMigrationIdOk() (*string, bool) { + if o == nil || IsNil(o.MigrationId) { + return nil, false + } + return o.MigrationId, true +} + +// HasMigrationId returns a boolean if a field has been set. +func (o *Migration) HasMigrationId() bool { + if o != nil && !IsNil(o.MigrationId) { + return true + } + + return false +} + +// SetMigrationId gets a reference to the given string and assigns it to the MigrationId field. +func (o *Migration) SetMigrationId(v string) { + o.MigrationId = &v +} + +// GetDisplayName returns the DisplayName field value if set, zero value otherwise. 
+func (o *Migration) GetDisplayName() string { + if o == nil || IsNil(o.DisplayName) { + var ret string + return ret + } + return *o.DisplayName +} + +// GetDisplayNameOk returns a tuple with the DisplayName field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Migration) GetDisplayNameOk() (*string, bool) { + if o == nil || IsNil(o.DisplayName) { + return nil, false + } + return o.DisplayName, true +} + +// HasDisplayName returns a boolean if a field has been set. +func (o *Migration) HasDisplayName() bool { + if o != nil && !IsNil(o.DisplayName) { + return true + } + + return false +} + +// SetDisplayName gets a reference to the given string and assigns it to the DisplayName field. +func (o *Migration) SetDisplayName(v string) { + o.DisplayName = &v +} + +// GetSubTasks returns the SubTasks field value if set, zero value otherwise. +func (o *Migration) GetSubTasks() []SubTask { + if o == nil || IsNil(o.SubTasks) { + var ret []SubTask + return ret + } + return o.SubTasks +} + +// GetSubTasksOk returns a tuple with the SubTasks field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Migration) GetSubTasksOk() ([]SubTask, bool) { + if o == nil || IsNil(o.SubTasks) { + return nil, false + } + return o.SubTasks, true +} + +// HasSubTasks returns a boolean if a field has been set. +func (o *Migration) HasSubTasks() bool { + if o != nil && !IsNil(o.SubTasks) { + return true + } + + return false +} + +// SetSubTasks gets a reference to the given []SubTask and assigns it to the SubTasks field. +func (o *Migration) SetSubTasks(v []SubTask) { + o.SubTasks = v +} + +// GetTargetUser returns the TargetUser field value if set, zero value otherwise. 
+func (o *Migration) GetTargetUser() string { + if o == nil || IsNil(o.TargetUser) { + var ret string + return ret + } + return *o.TargetUser +} + +// GetTargetUserOk returns a tuple with the TargetUser field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Migration) GetTargetUserOk() (*string, bool) { + if o == nil || IsNil(o.TargetUser) { + return nil, false + } + return o.TargetUser, true +} + +// HasTargetUser returns a boolean if a field has been set. +func (o *Migration) HasTargetUser() bool { + if o != nil && !IsNil(o.TargetUser) { + return true + } + + return false +} + +// SetTargetUser gets a reference to the given string and assigns it to the TargetUser field. +func (o *Migration) SetTargetUser(v string) { + o.TargetUser = &v +} + +// GetCreateTime returns the CreateTime field value if set, zero value otherwise. +func (o *Migration) GetCreateTime() time.Time { + if o == nil || IsNil(o.CreateTime) { + var ret time.Time + return ret + } + return *o.CreateTime +} + +// GetCreateTimeOk returns a tuple with the CreateTime field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Migration) GetCreateTimeOk() (*time.Time, bool) { + if o == nil || IsNil(o.CreateTime) { + return nil, false + } + return o.CreateTime, true +} + +// HasCreateTime returns a boolean if a field has been set. +func (o *Migration) HasCreateTime() bool { + if o != nil && !IsNil(o.CreateTime) { + return true + } + + return false +} + +// SetCreateTime gets a reference to the given time.Time and assigns it to the CreateTime field. +func (o *Migration) SetCreateTime(v time.Time) { + o.CreateTime = &v +} + +// GetMode returns the Mode field value if set, zero value otherwise. 
+func (o *Migration) GetMode() TaskMode { + if o == nil || IsNil(o.Mode) { + var ret TaskMode + return ret + } + return *o.Mode +} + +// GetModeOk returns a tuple with the Mode field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Migration) GetModeOk() (*TaskMode, bool) { + if o == nil || IsNil(o.Mode) { + return nil, false + } + return o.Mode, true +} + +// HasMode returns a boolean if a field has been set. +func (o *Migration) HasMode() bool { + if o != nil && !IsNil(o.Mode) { + return true + } + + return false +} + +// SetMode gets a reference to the given TaskMode and assigns it to the Mode field. +func (o *Migration) SetMode(v TaskMode) { + o.Mode = &v +} + +// GetState returns the State field value if set, zero value otherwise. +func (o *Migration) GetState() MigrationState { + if o == nil || IsNil(o.State) { + var ret MigrationState + return ret + } + return *o.State +} + +// GetStateOk returns a tuple with the State field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Migration) GetStateOk() (*MigrationState, bool) { + if o == nil || IsNil(o.State) { + return nil, false + } + return o.State, true +} + +// HasState returns a boolean if a field has been set. +func (o *Migration) HasState() bool { + if o != nil && !IsNil(o.State) { + return true + } + + return false +} + +// SetState gets a reference to the given MigrationState and assigns it to the State field. 
+func (o *Migration) SetState(v MigrationState) { + o.State = &v +} + +func (o Migration) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Migration) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.MigrationId) { + toSerialize["migrationId"] = o.MigrationId + } + if !IsNil(o.DisplayName) { + toSerialize["displayName"] = o.DisplayName + } + if !IsNil(o.SubTasks) { + toSerialize["subTasks"] = o.SubTasks + } + if !IsNil(o.TargetUser) { + toSerialize["targetUser"] = o.TargetUser + } + if !IsNil(o.CreateTime) { + toSerialize["createTime"] = o.CreateTime + } + if !IsNil(o.Mode) { + toSerialize["mode"] = o.Mode + } + if !IsNil(o.State) { + toSerialize["state"] = o.State + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *Migration) UnmarshalJSON(data []byte) (err error) { + varMigration := _Migration{} + + err = json.Unmarshal(data, &varMigration) + + if err != nil { + return err + } + + *o = Migration(varMigration) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "migrationId") + delete(additionalProperties, "displayName") + delete(additionalProperties, "subTasks") + delete(additionalProperties, "targetUser") + delete(additionalProperties, "createTime") + delete(additionalProperties, "mode") + delete(additionalProperties, "state") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableMigration struct { + value *Migration + isSet bool +} + +func (v NullableMigration) Get() *Migration { + return v.value +} + +func (v *NullableMigration) Set(val *Migration) { + v.value = val + v.isSet = true +} + +func (v NullableMigration) IsSet() bool { + return v.isSet +} + +func (v *NullableMigration) Unset() { + 
v.value = nil + v.isSet = false +} + +func NewNullableMigration(val *Migration) *NullableMigration { + return &NullableMigration{value: val, isSet: true} +} + +func (v NullableMigration) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableMigration) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go new file mode 100644 index 00000000..6dc9411b --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go @@ -0,0 +1,382 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the MigrationPrecheck type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &MigrationPrecheck{} + +// MigrationPrecheck struct for MigrationPrecheck +type MigrationPrecheck struct { + // The ID of the precheck. + PrecheckId *string `json:"precheckId,omitempty"` + // Total number of precheck items. + Total *int32 `json:"total,omitempty"` + // Number of failed items. + FailedCnt *int32 `json:"failedCnt,omitempty"` + // Number of items with warnings. + WarnCnt *int32 `json:"warnCnt,omitempty"` + // Number of successful items. + SuccessCnt *int32 `json:"successCnt,omitempty"` + // Overall status of the precheck. + Status *MigrationPrecheckStatus `json:"status,omitempty"` + // Details for each precheck item. 
+ Items []PrecheckItem `json:"items,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _MigrationPrecheck MigrationPrecheck + +// NewMigrationPrecheck instantiates a new MigrationPrecheck object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewMigrationPrecheck() *MigrationPrecheck { + this := MigrationPrecheck{} + return &this +} + +// NewMigrationPrecheckWithDefaults instantiates a new MigrationPrecheck object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewMigrationPrecheckWithDefaults() *MigrationPrecheck { + this := MigrationPrecheck{} + return &this +} + +// GetPrecheckId returns the PrecheckId field value if set, zero value otherwise. +func (o *MigrationPrecheck) GetPrecheckId() string { + if o == nil || IsNil(o.PrecheckId) { + var ret string + return ret + } + return *o.PrecheckId +} + +// GetPrecheckIdOk returns a tuple with the PrecheckId field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationPrecheck) GetPrecheckIdOk() (*string, bool) { + if o == nil || IsNil(o.PrecheckId) { + return nil, false + } + return o.PrecheckId, true +} + +// HasPrecheckId returns a boolean if a field has been set. +func (o *MigrationPrecheck) HasPrecheckId() bool { + if o != nil && !IsNil(o.PrecheckId) { + return true + } + + return false +} + +// SetPrecheckId gets a reference to the given string and assigns it to the PrecheckId field. +func (o *MigrationPrecheck) SetPrecheckId(v string) { + o.PrecheckId = &v +} + +// GetTotal returns the Total field value if set, zero value otherwise. 
+func (o *MigrationPrecheck) GetTotal() int32 { + if o == nil || IsNil(o.Total) { + var ret int32 + return ret + } + return *o.Total +} + +// GetTotalOk returns a tuple with the Total field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationPrecheck) GetTotalOk() (*int32, bool) { + if o == nil || IsNil(o.Total) { + return nil, false + } + return o.Total, true +} + +// HasTotal returns a boolean if a field has been set. +func (o *MigrationPrecheck) HasTotal() bool { + if o != nil && !IsNil(o.Total) { + return true + } + + return false +} + +// SetTotal gets a reference to the given int32 and assigns it to the Total field. +func (o *MigrationPrecheck) SetTotal(v int32) { + o.Total = &v +} + +// GetFailedCnt returns the FailedCnt field value if set, zero value otherwise. +func (o *MigrationPrecheck) GetFailedCnt() int32 { + if o == nil || IsNil(o.FailedCnt) { + var ret int32 + return ret + } + return *o.FailedCnt +} + +// GetFailedCntOk returns a tuple with the FailedCnt field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationPrecheck) GetFailedCntOk() (*int32, bool) { + if o == nil || IsNil(o.FailedCnt) { + return nil, false + } + return o.FailedCnt, true +} + +// HasFailedCnt returns a boolean if a field has been set. +func (o *MigrationPrecheck) HasFailedCnt() bool { + if o != nil && !IsNil(o.FailedCnt) { + return true + } + + return false +} + +// SetFailedCnt gets a reference to the given int32 and assigns it to the FailedCnt field. +func (o *MigrationPrecheck) SetFailedCnt(v int32) { + o.FailedCnt = &v +} + +// GetWarnCnt returns the WarnCnt field value if set, zero value otherwise. +func (o *MigrationPrecheck) GetWarnCnt() int32 { + if o == nil || IsNil(o.WarnCnt) { + var ret int32 + return ret + } + return *o.WarnCnt +} + +// GetWarnCntOk returns a tuple with the WarnCnt field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *MigrationPrecheck) GetWarnCntOk() (*int32, bool) { + if o == nil || IsNil(o.WarnCnt) { + return nil, false + } + return o.WarnCnt, true +} + +// HasWarnCnt returns a boolean if a field has been set. +func (o *MigrationPrecheck) HasWarnCnt() bool { + if o != nil && !IsNil(o.WarnCnt) { + return true + } + + return false +} + +// SetWarnCnt gets a reference to the given int32 and assigns it to the WarnCnt field. +func (o *MigrationPrecheck) SetWarnCnt(v int32) { + o.WarnCnt = &v +} + +// GetSuccessCnt returns the SuccessCnt field value if set, zero value otherwise. +func (o *MigrationPrecheck) GetSuccessCnt() int32 { + if o == nil || IsNil(o.SuccessCnt) { + var ret int32 + return ret + } + return *o.SuccessCnt +} + +// GetSuccessCntOk returns a tuple with the SuccessCnt field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationPrecheck) GetSuccessCntOk() (*int32, bool) { + if o == nil || IsNil(o.SuccessCnt) { + return nil, false + } + return o.SuccessCnt, true +} + +// HasSuccessCnt returns a boolean if a field has been set. +func (o *MigrationPrecheck) HasSuccessCnt() bool { + if o != nil && !IsNil(o.SuccessCnt) { + return true + } + + return false +} + +// SetSuccessCnt gets a reference to the given int32 and assigns it to the SuccessCnt field. +func (o *MigrationPrecheck) SetSuccessCnt(v int32) { + o.SuccessCnt = &v +} + +// GetStatus returns the Status field value if set, zero value otherwise. +func (o *MigrationPrecheck) GetStatus() MigrationPrecheckStatus { + if o == nil || IsNil(o.Status) { + var ret MigrationPrecheckStatus + return ret + } + return *o.Status +} + +// GetStatusOk returns a tuple with the Status field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *MigrationPrecheck) GetStatusOk() (*MigrationPrecheckStatus, bool) { + if o == nil || IsNil(o.Status) { + return nil, false + } + return o.Status, true +} + +// HasStatus returns a boolean if a field has been set. +func (o *MigrationPrecheck) HasStatus() bool { + if o != nil && !IsNil(o.Status) { + return true + } + + return false +} + +// SetStatus gets a reference to the given MigrationPrecheckStatus and assigns it to the Status field. +func (o *MigrationPrecheck) SetStatus(v MigrationPrecheckStatus) { + o.Status = &v +} + +// GetItems returns the Items field value if set, zero value otherwise. +func (o *MigrationPrecheck) GetItems() []PrecheckItem { + if o == nil || IsNil(o.Items) { + var ret []PrecheckItem + return ret + } + return o.Items +} + +// GetItemsOk returns a tuple with the Items field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationPrecheck) GetItemsOk() ([]PrecheckItem, bool) { + if o == nil || IsNil(o.Items) { + return nil, false + } + return o.Items, true +} + +// HasItems returns a boolean if a field has been set. +func (o *MigrationPrecheck) HasItems() bool { + if o != nil && !IsNil(o.Items) { + return true + } + + return false +} + +// SetItems gets a reference to the given []PrecheckItem and assigns it to the Items field. 
+func (o *MigrationPrecheck) SetItems(v []PrecheckItem) { + o.Items = v +} + +func (o MigrationPrecheck) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o MigrationPrecheck) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.PrecheckId) { + toSerialize["precheckId"] = o.PrecheckId + } + if !IsNil(o.Total) { + toSerialize["total"] = o.Total + } + if !IsNil(o.FailedCnt) { + toSerialize["failedCnt"] = o.FailedCnt + } + if !IsNil(o.WarnCnt) { + toSerialize["warnCnt"] = o.WarnCnt + } + if !IsNil(o.SuccessCnt) { + toSerialize["successCnt"] = o.SuccessCnt + } + if !IsNil(o.Status) { + toSerialize["status"] = o.Status + } + if !IsNil(o.Items) { + toSerialize["items"] = o.Items + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *MigrationPrecheck) UnmarshalJSON(data []byte) (err error) { + varMigrationPrecheck := _MigrationPrecheck{} + + err = json.Unmarshal(data, &varMigrationPrecheck) + + if err != nil { + return err + } + + *o = MigrationPrecheck(varMigrationPrecheck) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "precheckId") + delete(additionalProperties, "total") + delete(additionalProperties, "failedCnt") + delete(additionalProperties, "warnCnt") + delete(additionalProperties, "successCnt") + delete(additionalProperties, "status") + delete(additionalProperties, "items") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableMigrationPrecheck struct { + value *MigrationPrecheck + isSet bool +} + +func (v NullableMigrationPrecheck) Get() *MigrationPrecheck { + return v.value +} + +func (v *NullableMigrationPrecheck) Set(val *MigrationPrecheck) { + v.value = val + v.isSet = true +} + +func (v 
NullableMigrationPrecheck) IsSet() bool { + return v.isSet +} + +func (v *NullableMigrationPrecheck) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableMigrationPrecheck(val *MigrationPrecheck) *NullableMigrationPrecheck { + return &NullableMigrationPrecheck{value: val, isSet: true} +} + +func (v NullableMigrationPrecheck) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableMigrationPrecheck) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck_status.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck_status.go new file mode 100644 index 00000000..12ec29f0 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck_status.go @@ -0,0 +1,111 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// MigrationPrecheckStatus - RUNNING: Precheck is in progress. - FINISHED: Precheck finished successfully. - PENDING: Precheck is pending. - FAILED: Precheck failed. - CANCELED: Precheck is canceled. 
+type MigrationPrecheckStatus string + +// List of MigrationPrecheck.Status +const ( + MIGRATIONPRECHECKSTATUS_RUNNING MigrationPrecheckStatus = "RUNNING" + MIGRATIONPRECHECKSTATUS_FINISHED MigrationPrecheckStatus = "FINISHED" + MIGRATIONPRECHECKSTATUS_PENDING MigrationPrecheckStatus = "PENDING" + MIGRATIONPRECHECKSTATUS_FAILED MigrationPrecheckStatus = "FAILED" + MIGRATIONPRECHECKSTATUS_CANCELED MigrationPrecheckStatus = "CANCELED" +) + +// All allowed values of MigrationPrecheckStatus enum +var AllowedMigrationPrecheckStatusEnumValues = []MigrationPrecheckStatus{ + "RUNNING", + "FINISHED", + "PENDING", + "FAILED", + "CANCELED", +} + +func (v *MigrationPrecheckStatus) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := MigrationPrecheckStatus(value) + for _, existing := range AllowedMigrationPrecheckStatusEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = MigrationPrecheckStatus(value) + return nil +} + +// NewMigrationPrecheckStatusFromValue returns a pointer to a valid MigrationPrecheckStatus for the value passed as argument +func NewMigrationPrecheckStatusFromValue(v string) *MigrationPrecheckStatus { + ev := MigrationPrecheckStatus(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v MigrationPrecheckStatus) IsValid() bool { + for _, existing := range AllowedMigrationPrecheckStatusEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to MigrationPrecheck.Status value +func (v MigrationPrecheckStatus) Ptr() *MigrationPrecheckStatus { + return &v +} + +type NullableMigrationPrecheckStatus struct { + value *MigrationPrecheckStatus + isSet bool +} + +func (v NullableMigrationPrecheckStatus) Get() *MigrationPrecheckStatus { + return v.value +} + +func (v *NullableMigrationPrecheckStatus) Set(val *MigrationPrecheckStatus) { + 
v.value = val + v.isSet = true +} + +func (v NullableMigrationPrecheckStatus) IsSet() bool { + return v.isSet +} + +func (v *NullableMigrationPrecheckStatus) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableMigrationPrecheckStatus(val *MigrationPrecheckStatus) *NullableMigrationPrecheckStatus { + return &NullableMigrationPrecheckStatus{value: val, isSet: true} +} + +func (v NullableMigrationPrecheckStatus) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableMigrationPrecheckStatus) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_migration_body.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_migration_body.go new file mode 100644 index 00000000..1c5f1ab0 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_migration_body.go @@ -0,0 +1,257 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" + "fmt" +) + +// checks if the MigrationServiceCreateMigrationBody type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &MigrationServiceCreateMigrationBody{} + +// MigrationServiceCreateMigrationBody struct for MigrationServiceCreateMigrationBody +type MigrationServiceCreateMigrationBody struct { + // The display name of the migration. + DisplayName string `json:"displayName"` + // The data sources to migrate from. + Sources []Source `json:"sources"` + // The target database credentials. + Target Target `json:"target"` + // The migration mode (full+incremental or incremental-only). 
+ Mode TaskMode `json:"mode"` + AdditionalProperties map[string]interface{} +} + +type _MigrationServiceCreateMigrationBody MigrationServiceCreateMigrationBody + +// NewMigrationServiceCreateMigrationBody instantiates a new MigrationServiceCreateMigrationBody object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewMigrationServiceCreateMigrationBody(displayName string, sources []Source, target Target, mode TaskMode) *MigrationServiceCreateMigrationBody { + this := MigrationServiceCreateMigrationBody{} + this.DisplayName = displayName + this.Sources = sources + this.Target = target + this.Mode = mode + return &this +} + +// NewMigrationServiceCreateMigrationBodyWithDefaults instantiates a new MigrationServiceCreateMigrationBody object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewMigrationServiceCreateMigrationBodyWithDefaults() *MigrationServiceCreateMigrationBody { + this := MigrationServiceCreateMigrationBody{} + return &this +} + +// GetDisplayName returns the DisplayName field value +func (o *MigrationServiceCreateMigrationBody) GetDisplayName() string { + if o == nil { + var ret string + return ret + } + + return o.DisplayName +} + +// GetDisplayNameOk returns a tuple with the DisplayName field value +// and a boolean to check if the value has been set. 
+func (o *MigrationServiceCreateMigrationBody) GetDisplayNameOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.DisplayName, true +} + +// SetDisplayName sets field value +func (o *MigrationServiceCreateMigrationBody) SetDisplayName(v string) { + o.DisplayName = v +} + +// GetSources returns the Sources field value +func (o *MigrationServiceCreateMigrationBody) GetSources() []Source { + if o == nil { + var ret []Source + return ret + } + + return o.Sources +} + +// GetSourcesOk returns a tuple with the Sources field value +// and a boolean to check if the value has been set. +func (o *MigrationServiceCreateMigrationBody) GetSourcesOk() ([]Source, bool) { + if o == nil { + return nil, false + } + return o.Sources, true +} + +// SetSources sets field value +func (o *MigrationServiceCreateMigrationBody) SetSources(v []Source) { + o.Sources = v +} + +// GetTarget returns the Target field value +func (o *MigrationServiceCreateMigrationBody) GetTarget() Target { + if o == nil { + var ret Target + return ret + } + + return o.Target +} + +// GetTargetOk returns a tuple with the Target field value +// and a boolean to check if the value has been set. +func (o *MigrationServiceCreateMigrationBody) GetTargetOk() (*Target, bool) { + if o == nil { + return nil, false + } + return &o.Target, true +} + +// SetTarget sets field value +func (o *MigrationServiceCreateMigrationBody) SetTarget(v Target) { + o.Target = v +} + +// GetMode returns the Mode field value +func (o *MigrationServiceCreateMigrationBody) GetMode() TaskMode { + if o == nil { + var ret TaskMode + return ret + } + + return o.Mode +} + +// GetModeOk returns a tuple with the Mode field value +// and a boolean to check if the value has been set. 
+func (o *MigrationServiceCreateMigrationBody) GetModeOk() (*TaskMode, bool) { + if o == nil { + return nil, false + } + return &o.Mode, true +} + +// SetMode sets field value +func (o *MigrationServiceCreateMigrationBody) SetMode(v TaskMode) { + o.Mode = v +} + +func (o MigrationServiceCreateMigrationBody) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o MigrationServiceCreateMigrationBody) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["displayName"] = o.DisplayName + toSerialize["sources"] = o.Sources + toSerialize["target"] = o.Target + toSerialize["mode"] = o.Mode + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *MigrationServiceCreateMigrationBody) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "displayName", + "sources", + "target", + "mode", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err + } + + for _, requiredProperty := range requiredProperties { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varMigrationServiceCreateMigrationBody := _MigrationServiceCreateMigrationBody{} + + err = json.Unmarshal(data, &varMigrationServiceCreateMigrationBody) + + if err != nil { + return err + } + + *o = MigrationServiceCreateMigrationBody(varMigrationServiceCreateMigrationBody) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "displayName") + delete(additionalProperties, "sources") + delete(additionalProperties, "target") + delete(additionalProperties, "mode") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableMigrationServiceCreateMigrationBody struct { + value *MigrationServiceCreateMigrationBody + isSet bool +} + +func (v NullableMigrationServiceCreateMigrationBody) Get() *MigrationServiceCreateMigrationBody { + return v.value +} + +func (v *NullableMigrationServiceCreateMigrationBody) Set(val *MigrationServiceCreateMigrationBody) { + v.value = val + v.isSet = true +} + +func (v NullableMigrationServiceCreateMigrationBody) IsSet() bool { + return v.isSet +} + +func (v *NullableMigrationServiceCreateMigrationBody) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableMigrationServiceCreateMigrationBody(val *MigrationServiceCreateMigrationBody) *NullableMigrationServiceCreateMigrationBody { + return &NullableMigrationServiceCreateMigrationBody{value: val, isSet: true} +} + +func (v NullableMigrationServiceCreateMigrationBody) MarshalJSON() ([]byte, error) { + return 
json.Marshal(v.value) +} + +func (v *NullableMigrationServiceCreateMigrationBody) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_precheck_body.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_precheck_body.go new file mode 100644 index 00000000..047968a8 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_precheck_body.go @@ -0,0 +1,257 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" + "fmt" +) + +// checks if the MigrationServicePrecheckBody type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &MigrationServicePrecheckBody{} + +// MigrationServicePrecheckBody struct for MigrationServicePrecheckBody +type MigrationServicePrecheckBody struct { + // The display name of the migration. + DisplayName string `json:"displayName"` + // The data sources to migrate from. + Sources []Source `json:"sources"` + // The target database credentials. + Target Target `json:"target"` + // The migration mode (full+incremental or incremental-only). 
+ Mode TaskMode `json:"mode"` + AdditionalProperties map[string]interface{} +} + +type _MigrationServicePrecheckBody MigrationServicePrecheckBody + +// NewMigrationServicePrecheckBody instantiates a new MigrationServicePrecheckBody object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewMigrationServicePrecheckBody(displayName string, sources []Source, target Target, mode TaskMode) *MigrationServicePrecheckBody { + this := MigrationServicePrecheckBody{} + this.DisplayName = displayName + this.Sources = sources + this.Target = target + this.Mode = mode + return &this +} + +// NewMigrationServicePrecheckBodyWithDefaults instantiates a new MigrationServicePrecheckBody object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewMigrationServicePrecheckBodyWithDefaults() *MigrationServicePrecheckBody { + this := MigrationServicePrecheckBody{} + return &this +} + +// GetDisplayName returns the DisplayName field value +func (o *MigrationServicePrecheckBody) GetDisplayName() string { + if o == nil { + var ret string + return ret + } + + return o.DisplayName +} + +// GetDisplayNameOk returns a tuple with the DisplayName field value +// and a boolean to check if the value has been set. 
+func (o *MigrationServicePrecheckBody) GetDisplayNameOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.DisplayName, true +} + +// SetDisplayName sets field value +func (o *MigrationServicePrecheckBody) SetDisplayName(v string) { + o.DisplayName = v +} + +// GetSources returns the Sources field value +func (o *MigrationServicePrecheckBody) GetSources() []Source { + if o == nil { + var ret []Source + return ret + } + + return o.Sources +} + +// GetSourcesOk returns a tuple with the Sources field value +// and a boolean to check if the value has been set. +func (o *MigrationServicePrecheckBody) GetSourcesOk() ([]Source, bool) { + if o == nil { + return nil, false + } + return o.Sources, true +} + +// SetSources sets field value +func (o *MigrationServicePrecheckBody) SetSources(v []Source) { + o.Sources = v +} + +// GetTarget returns the Target field value +func (o *MigrationServicePrecheckBody) GetTarget() Target { + if o == nil { + var ret Target + return ret + } + + return o.Target +} + +// GetTargetOk returns a tuple with the Target field value +// and a boolean to check if the value has been set. +func (o *MigrationServicePrecheckBody) GetTargetOk() (*Target, bool) { + if o == nil { + return nil, false + } + return &o.Target, true +} + +// SetTarget sets field value +func (o *MigrationServicePrecheckBody) SetTarget(v Target) { + o.Target = v +} + +// GetMode returns the Mode field value +func (o *MigrationServicePrecheckBody) GetMode() TaskMode { + if o == nil { + var ret TaskMode + return ret + } + + return o.Mode +} + +// GetModeOk returns a tuple with the Mode field value +// and a boolean to check if the value has been set. 
+func (o *MigrationServicePrecheckBody) GetModeOk() (*TaskMode, bool) { + if o == nil { + return nil, false + } + return &o.Mode, true +} + +// SetMode sets field value +func (o *MigrationServicePrecheckBody) SetMode(v TaskMode) { + o.Mode = v +} + +func (o MigrationServicePrecheckBody) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o MigrationServicePrecheckBody) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["displayName"] = o.DisplayName + toSerialize["sources"] = o.Sources + toSerialize["target"] = o.Target + toSerialize["mode"] = o.Mode + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *MigrationServicePrecheckBody) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "displayName", + "sources", + "target", + "mode", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err + } + + for _, requiredProperty := range requiredProperties { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varMigrationServicePrecheckBody := _MigrationServicePrecheckBody{} + + err = json.Unmarshal(data, &varMigrationServicePrecheckBody) + + if err != nil { + return err + } + + *o = MigrationServicePrecheckBody(varMigrationServicePrecheckBody) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "displayName") + delete(additionalProperties, "sources") + delete(additionalProperties, "target") + delete(additionalProperties, "mode") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableMigrationServicePrecheckBody struct { + value *MigrationServicePrecheckBody + isSet bool +} + +func (v NullableMigrationServicePrecheckBody) Get() *MigrationServicePrecheckBody { + return v.value +} + +func (v *NullableMigrationServicePrecheckBody) Set(val *MigrationServicePrecheckBody) { + v.value = val + v.isSet = true +} + +func (v NullableMigrationServicePrecheckBody) IsSet() bool { + return v.isSet +} + +func (v *NullableMigrationServicePrecheckBody) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableMigrationServicePrecheckBody(val *MigrationServicePrecheckBody) *NullableMigrationServicePrecheckBody { + return &NullableMigrationServicePrecheckBody{value: val, isSet: true} +} + +func (v NullableMigrationServicePrecheckBody) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableMigrationServicePrecheckBody) UnmarshalJSON(src []byte) error { + v.isSet = true + return 
json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_state.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_state.go new file mode 100644 index 00000000..703c78be --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_state.go @@ -0,0 +1,111 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// MigrationState Overall state of a migration. - CREATING: Task is being created. - RUNNING: Task is actively running. - PAUSED: Task is paused. - FAILED: Task failed with error. - DELETING: Task is being deleted. +type MigrationState string + +// List of Migration.State +const ( + MIGRATIONSTATE_CREATING MigrationState = "CREATING" + MIGRATIONSTATE_RUNNING MigrationState = "RUNNING" + MIGRATIONSTATE_PAUSED MigrationState = "PAUSED" + MIGRATIONSTATE_FAILED MigrationState = "FAILED" + MIGRATIONSTATE_DELETING MigrationState = "DELETING" +) + +// All allowed values of MigrationState enum +var AllowedMigrationStateEnumValues = []MigrationState{ + "CREATING", + "RUNNING", + "PAUSED", + "FAILED", + "DELETING", +} + +func (v *MigrationState) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := MigrationState(value) + for _, existing := range AllowedMigrationStateEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = MigrationState(value) + return nil +} + +// NewMigrationStateFromValue returns a pointer to a valid MigrationState for the value passed as argument +func NewMigrationStateFromValue(v string) *MigrationState { + ev := MigrationState(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v 
MigrationState) IsValid() bool { + for _, existing := range AllowedMigrationStateEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to Migration.State value +func (v MigrationState) Ptr() *MigrationState { + return &v +} + +type NullableMigrationState struct { + value *MigrationState + isSet bool +} + +func (v NullableMigrationState) Get() *MigrationState { + return v.value +} + +func (v *NullableMigrationState) Set(val *MigrationState) { + v.value = val + v.isSet = true +} + +func (v NullableMigrationState) IsSet() bool { + return v.isSet +} + +func (v *NullableMigrationState) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableMigrationState(val *MigrationState) *NullableMigrationState { + return &NullableMigrationState{value: val, isSet: true} +} + +func (v NullableMigrationState) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableMigrationState) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go new file mode 100644 index 00000000..f3a93501 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go @@ -0,0 +1,344 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the PrecheckItem type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &PrecheckItem{} + +// PrecheckItem struct for PrecheckItem +type PrecheckItem struct { + // Human-readable description of the check. + Description *string `json:"description,omitempty"` + // Status of this check. 
+ Status *PrecheckItemStatus `json:"status,omitempty"` + // Suggested solution if the check failed or warned. + Solution *string `json:"solution,omitempty"` + // Reason for the failure or warning. + Reason *string `json:"reason,omitempty"` + // Documentation URL for the solution. + SolutionDocUrl *string `json:"solutionDocUrl,omitempty"` + // The type of precheck. + Type *PrecheckItemType `json:"type,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _PrecheckItem PrecheckItem + +// NewPrecheckItem instantiates a new PrecheckItem object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewPrecheckItem() *PrecheckItem { + this := PrecheckItem{} + return &this +} + +// NewPrecheckItemWithDefaults instantiates a new PrecheckItem object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewPrecheckItemWithDefaults() *PrecheckItem { + this := PrecheckItem{} + return &this +} + +// GetDescription returns the Description field value if set, zero value otherwise. +func (o *PrecheckItem) GetDescription() string { + if o == nil || IsNil(o.Description) { + var ret string + return ret + } + return *o.Description +} + +// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *PrecheckItem) GetDescriptionOk() (*string, bool) { + if o == nil || IsNil(o.Description) { + return nil, false + } + return o.Description, true +} + +// HasDescription returns a boolean if a field has been set. 
+func (o *PrecheckItem) HasDescription() bool { + if o != nil && !IsNil(o.Description) { + return true + } + + return false +} + +// SetDescription gets a reference to the given string and assigns it to the Description field. +func (o *PrecheckItem) SetDescription(v string) { + o.Description = &v +} + +// GetStatus returns the Status field value if set, zero value otherwise. +func (o *PrecheckItem) GetStatus() PrecheckItemStatus { + if o == nil || IsNil(o.Status) { + var ret PrecheckItemStatus + return ret + } + return *o.Status +} + +// GetStatusOk returns a tuple with the Status field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *PrecheckItem) GetStatusOk() (*PrecheckItemStatus, bool) { + if o == nil || IsNil(o.Status) { + return nil, false + } + return o.Status, true +} + +// HasStatus returns a boolean if a field has been set. +func (o *PrecheckItem) HasStatus() bool { + if o != nil && !IsNil(o.Status) { + return true + } + + return false +} + +// SetStatus gets a reference to the given PrecheckItemStatus and assigns it to the Status field. +func (o *PrecheckItem) SetStatus(v PrecheckItemStatus) { + o.Status = &v +} + +// GetSolution returns the Solution field value if set, zero value otherwise. +func (o *PrecheckItem) GetSolution() string { + if o == nil || IsNil(o.Solution) { + var ret string + return ret + } + return *o.Solution +} + +// GetSolutionOk returns a tuple with the Solution field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *PrecheckItem) GetSolutionOk() (*string, bool) { + if o == nil || IsNil(o.Solution) { + return nil, false + } + return o.Solution, true +} + +// HasSolution returns a boolean if a field has been set. +func (o *PrecheckItem) HasSolution() bool { + if o != nil && !IsNil(o.Solution) { + return true + } + + return false +} + +// SetSolution gets a reference to the given string and assigns it to the Solution field. 
+func (o *PrecheckItem) SetSolution(v string) { + o.Solution = &v +} + +// GetReason returns the Reason field value if set, zero value otherwise. +func (o *PrecheckItem) GetReason() string { + if o == nil || IsNil(o.Reason) { + var ret string + return ret + } + return *o.Reason +} + +// GetReasonOk returns a tuple with the Reason field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *PrecheckItem) GetReasonOk() (*string, bool) { + if o == nil || IsNil(o.Reason) { + return nil, false + } + return o.Reason, true +} + +// HasReason returns a boolean if a field has been set. +func (o *PrecheckItem) HasReason() bool { + if o != nil && !IsNil(o.Reason) { + return true + } + + return false +} + +// SetReason gets a reference to the given string and assigns it to the Reason field. +func (o *PrecheckItem) SetReason(v string) { + o.Reason = &v +} + +// GetSolutionDocUrl returns the SolutionDocUrl field value if set, zero value otherwise. +func (o *PrecheckItem) GetSolutionDocUrl() string { + if o == nil || IsNil(o.SolutionDocUrl) { + var ret string + return ret + } + return *o.SolutionDocUrl +} + +// GetSolutionDocUrlOk returns a tuple with the SolutionDocUrl field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *PrecheckItem) GetSolutionDocUrlOk() (*string, bool) { + if o == nil || IsNil(o.SolutionDocUrl) { + return nil, false + } + return o.SolutionDocUrl, true +} + +// HasSolutionDocUrl returns a boolean if a field has been set. +func (o *PrecheckItem) HasSolutionDocUrl() bool { + if o != nil && !IsNil(o.SolutionDocUrl) { + return true + } + + return false +} + +// SetSolutionDocUrl gets a reference to the given string and assigns it to the SolutionDocUrl field. +func (o *PrecheckItem) SetSolutionDocUrl(v string) { + o.SolutionDocUrl = &v +} + +// GetType returns the Type field value if set, zero value otherwise. 
+func (o *PrecheckItem) GetType() PrecheckItemType { + if o == nil || IsNil(o.Type) { + var ret PrecheckItemType + return ret + } + return *o.Type +} + +// GetTypeOk returns a tuple with the Type field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *PrecheckItem) GetTypeOk() (*PrecheckItemType, bool) { + if o == nil || IsNil(o.Type) { + return nil, false + } + return o.Type, true +} + +// HasType returns a boolean if a field has been set. +func (o *PrecheckItem) HasType() bool { + if o != nil && !IsNil(o.Type) { + return true + } + + return false +} + +// SetType gets a reference to the given PrecheckItemType and assigns it to the Type field. +func (o *PrecheckItem) SetType(v PrecheckItemType) { + o.Type = &v +} + +func (o PrecheckItem) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o PrecheckItem) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Description) { + toSerialize["description"] = o.Description + } + if !IsNil(o.Status) { + toSerialize["status"] = o.Status + } + if !IsNil(o.Solution) { + toSerialize["solution"] = o.Solution + } + if !IsNil(o.Reason) { + toSerialize["reason"] = o.Reason + } + if !IsNil(o.SolutionDocUrl) { + toSerialize["solutionDocUrl"] = o.SolutionDocUrl + } + if !IsNil(o.Type) { + toSerialize["type"] = o.Type + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *PrecheckItem) UnmarshalJSON(data []byte) (err error) { + varPrecheckItem := _PrecheckItem{} + + err = json.Unmarshal(data, &varPrecheckItem) + + if err != nil { + return err + } + + *o = PrecheckItem(varPrecheckItem) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "description") + 
delete(additionalProperties, "status") + delete(additionalProperties, "solution") + delete(additionalProperties, "reason") + delete(additionalProperties, "solutionDocUrl") + delete(additionalProperties, "type") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullablePrecheckItem struct { + value *PrecheckItem + isSet bool +} + +func (v NullablePrecheckItem) Get() *PrecheckItem { + return v.value +} + +func (v *NullablePrecheckItem) Set(val *PrecheckItem) { + v.value = val + v.isSet = true +} + +func (v NullablePrecheckItem) IsSet() bool { + return v.isSet +} + +func (v *NullablePrecheckItem) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullablePrecheckItem(val *PrecheckItem) *NullablePrecheckItem { + return &NullablePrecheckItem{value: val, isSet: true} +} + +func (v NullablePrecheckItem) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullablePrecheckItem) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_status.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_status.go new file mode 100644 index 00000000..f17d7d00 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_status.go @@ -0,0 +1,107 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// PrecheckItemStatus - SUCCESS: Check passed successfully. - WARNING: Check resulted in a warning. - FAILED: Check failed. 
+type PrecheckItemStatus string + +// List of PrecheckItem.Status +const ( + PRECHECKITEMSTATUS_SUCCESS PrecheckItemStatus = "SUCCESS" + PRECHECKITEMSTATUS_WARNING PrecheckItemStatus = "WARNING" + PRECHECKITEMSTATUS_FAILED PrecheckItemStatus = "FAILED" +) + +// All allowed values of PrecheckItemStatus enum +var AllowedPrecheckItemStatusEnumValues = []PrecheckItemStatus{ + "SUCCESS", + "WARNING", + "FAILED", +} + +func (v *PrecheckItemStatus) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := PrecheckItemStatus(value) + for _, existing := range AllowedPrecheckItemStatusEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = PrecheckItemStatus(value) + return nil +} + +// NewPrecheckItemStatusFromValue returns a pointer to a valid PrecheckItemStatus for the value passed as argument +func NewPrecheckItemStatusFromValue(v string) *PrecheckItemStatus { + ev := PrecheckItemStatus(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v PrecheckItemStatus) IsValid() bool { + for _, existing := range AllowedPrecheckItemStatusEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to PrecheckItem.Status value +func (v PrecheckItemStatus) Ptr() *PrecheckItemStatus { + return &v +} + +type NullablePrecheckItemStatus struct { + value *PrecheckItemStatus + isSet bool +} + +func (v NullablePrecheckItemStatus) Get() *PrecheckItemStatus { + return v.value +} + +func (v *NullablePrecheckItemStatus) Set(val *PrecheckItemStatus) { + v.value = val + v.isSet = true +} + +func (v NullablePrecheckItemStatus) IsSet() bool { + return v.isSet +} + +func (v *NullablePrecheckItemStatus) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullablePrecheckItemStatus(val *PrecheckItemStatus) *NullablePrecheckItemStatus { + return &NullablePrecheckItemStatus{value: 
val, isSet: true} +} + +func (v NullablePrecheckItemStatus) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullablePrecheckItemStatus) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_type.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_type.go new file mode 100644 index 00000000..04749b6c --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_type.go @@ -0,0 +1,129 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// PrecheckItemType Types of prechecks performed before starting a migration. - DUMP_PRIVILEGE_CHECKING: Check source has required dump/export privileges. - REPLICATION_PRIVILEGE_CHECKING: Check source has required replication privileges. - VERSION_CHECKING: Check source database version compatibility. - SERVER_ID_CHECKING: Check source server_id configuration. - BINLOG_ENABLE_CHECKING: Check whether binlog is enabled on source. - BINLOG_FORMAT_CHECKING: Check binlog format (e.g., ROW) on source. - BINLOG_ROW_IMAGE_CHECKING: Check binlog row image setting. - TABLE_SCHEMA_CHECKING: Check table/schema compatibility with target. - BINLOG_DB_CHECKING: Check binlog database-level filtering configuration. - CONN_NUMBER_CHECKING: Check concurrent connections limit/availability. - TARGET_DB_PRIVILEGE_CHECKING: Check target database privileges. - META_POSITION_CHECKING: Check saved meta/binlog position validity. - LIGHTNING_TABLE_EMPTY_CHECKING: Check target tables are empty for Lightning load. - PRIMARY_KEY_CHECKING: Check primary key settings on source tables. 
+type PrecheckItemType string + +// List of PrecheckItemType +const ( + PRECHECKITEMTYPE_DUMP_PRIVILEGE_CHECKING PrecheckItemType = "DUMP_PRIVILEGE_CHECKING" + PRECHECKITEMTYPE_REPLICATION_PRIVILEGE_CHECKING PrecheckItemType = "REPLICATION_PRIVILEGE_CHECKING" + PRECHECKITEMTYPE_VERSION_CHECKING PrecheckItemType = "VERSION_CHECKING" + PRECHECKITEMTYPE_SERVER_ID_CHECKING PrecheckItemType = "SERVER_ID_CHECKING" + PRECHECKITEMTYPE_BINLOG_ENABLE_CHECKING PrecheckItemType = "BINLOG_ENABLE_CHECKING" + PRECHECKITEMTYPE_BINLOG_FORMAT_CHECKING PrecheckItemType = "BINLOG_FORMAT_CHECKING" + PRECHECKITEMTYPE_BINLOG_ROW_IMAGE_CHECKING PrecheckItemType = "BINLOG_ROW_IMAGE_CHECKING" + PRECHECKITEMTYPE_TABLE_SCHEMA_CHECKING PrecheckItemType = "TABLE_SCHEMA_CHECKING" + PRECHECKITEMTYPE_BINLOG_DB_CHECKING PrecheckItemType = "BINLOG_DB_CHECKING" + PRECHECKITEMTYPE_CONN_NUMBER_CHECKING PrecheckItemType = "CONN_NUMBER_CHECKING" + PRECHECKITEMTYPE_TARGET_DB_PRIVILEGE_CHECKING PrecheckItemType = "TARGET_DB_PRIVILEGE_CHECKING" + PRECHECKITEMTYPE_META_POSITION_CHECKING PrecheckItemType = "META_POSITION_CHECKING" + PRECHECKITEMTYPE_LIGHTNING_TABLE_EMPTY_CHECKING PrecheckItemType = "LIGHTNING_TABLE_EMPTY_CHECKING" + PRECHECKITEMTYPE_PRIMARY_KEY_CHECKING PrecheckItemType = "PRIMARY_KEY_CHECKING" +) + +// All allowed values of PrecheckItemType enum +var AllowedPrecheckItemTypeEnumValues = []PrecheckItemType{ + "DUMP_PRIVILEGE_CHECKING", + "REPLICATION_PRIVILEGE_CHECKING", + "VERSION_CHECKING", + "SERVER_ID_CHECKING", + "BINLOG_ENABLE_CHECKING", + "BINLOG_FORMAT_CHECKING", + "BINLOG_ROW_IMAGE_CHECKING", + "TABLE_SCHEMA_CHECKING", + "BINLOG_DB_CHECKING", + "CONN_NUMBER_CHECKING", + "TARGET_DB_PRIVILEGE_CHECKING", + "META_POSITION_CHECKING", + "LIGHTNING_TABLE_EMPTY_CHECKING", + "PRIMARY_KEY_CHECKING", +} + +func (v *PrecheckItemType) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := 
PrecheckItemType(value) + for _, existing := range AllowedPrecheckItemTypeEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = PrecheckItemType(value) + return nil +} + +// NewPrecheckItemTypeFromValue returns a pointer to a valid PrecheckItemType for the value passed as argument +func NewPrecheckItemTypeFromValue(v string) *PrecheckItemType { + ev := PrecheckItemType(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v PrecheckItemType) IsValid() bool { + for _, existing := range AllowedPrecheckItemTypeEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to PrecheckItemType value +func (v PrecheckItemType) Ptr() *PrecheckItemType { + return &v +} + +type NullablePrecheckItemType struct { + value *PrecheckItemType + isSet bool +} + +func (v NullablePrecheckItemType) Get() *PrecheckItemType { + return v.value +} + +func (v *NullablePrecheckItemType) Set(val *PrecheckItemType) { + v.value = val + v.isSet = true +} + +func (v NullablePrecheckItemType) IsSet() bool { + return v.isSet +} + +func (v *NullablePrecheckItemType) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullablePrecheckItemType(val *PrecheckItemType) *NullablePrecheckItemType { + return &NullablePrecheckItemType{value: val, isSet: true} +} + +func (v NullablePrecheckItemType) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullablePrecheckItemType) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule.go new file mode 100644 index 00000000..8b0dc20c --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule.go @@ -0,0 +1,192 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: 
v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the RouteRule type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &RouteRule{} + +// RouteRule struct for RouteRule +type RouteRule struct { + // Source table pattern to match. + SourceTable *RouteRuleSource `json:"sourceTable,omitempty"` + // Target table to route to. + TargetTable *Table `json:"targetTable,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _RouteRule RouteRule + +// NewRouteRule instantiates a new RouteRule object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewRouteRule() *RouteRule { + this := RouteRule{} + return &this +} + +// NewRouteRuleWithDefaults instantiates a new RouteRule object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewRouteRuleWithDefaults() *RouteRule { + this := RouteRule{} + return &this +} + +// GetSourceTable returns the SourceTable field value if set, zero value otherwise. +func (o *RouteRule) GetSourceTable() RouteRuleSource { + if o == nil || IsNil(o.SourceTable) { + var ret RouteRuleSource + return ret + } + return *o.SourceTable +} + +// GetSourceTableOk returns a tuple with the SourceTable field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *RouteRule) GetSourceTableOk() (*RouteRuleSource, bool) { + if o == nil || IsNil(o.SourceTable) { + return nil, false + } + return o.SourceTable, true +} + +// HasSourceTable returns a boolean if a field has been set. 
+func (o *RouteRule) HasSourceTable() bool { + if o != nil && !IsNil(o.SourceTable) { + return true + } + + return false +} + +// SetSourceTable gets a reference to the given RouteRuleSource and assigns it to the SourceTable field. +func (o *RouteRule) SetSourceTable(v RouteRuleSource) { + o.SourceTable = &v +} + +// GetTargetTable returns the TargetTable field value if set, zero value otherwise. +func (o *RouteRule) GetTargetTable() Table { + if o == nil || IsNil(o.TargetTable) { + var ret Table + return ret + } + return *o.TargetTable +} + +// GetTargetTableOk returns a tuple with the TargetTable field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *RouteRule) GetTargetTableOk() (*Table, bool) { + if o == nil || IsNil(o.TargetTable) { + return nil, false + } + return o.TargetTable, true +} + +// HasTargetTable returns a boolean if a field has been set. +func (o *RouteRule) HasTargetTable() bool { + if o != nil && !IsNil(o.TargetTable) { + return true + } + + return false +} + +// SetTargetTable gets a reference to the given Table and assigns it to the TargetTable field. 
+func (o *RouteRule) SetTargetTable(v Table) { + o.TargetTable = &v +} + +func (o RouteRule) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o RouteRule) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.SourceTable) { + toSerialize["sourceTable"] = o.SourceTable + } + if !IsNil(o.TargetTable) { + toSerialize["targetTable"] = o.TargetTable + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *RouteRule) UnmarshalJSON(data []byte) (err error) { + varRouteRule := _RouteRule{} + + err = json.Unmarshal(data, &varRouteRule) + + if err != nil { + return err + } + + *o = RouteRule(varRouteRule) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "sourceTable") + delete(additionalProperties, "targetTable") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableRouteRule struct { + value *RouteRule + isSet bool +} + +func (v NullableRouteRule) Get() *RouteRule { + return v.value +} + +func (v *NullableRouteRule) Set(val *RouteRule) { + v.value = val + v.isSet = true +} + +func (v NullableRouteRule) IsSet() bool { + return v.isSet +} + +func (v *NullableRouteRule) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableRouteRule(val *RouteRule) *NullableRouteRule { + return &NullableRouteRule{value: val, isSet: true} +} + +func (v NullableRouteRule) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableRouteRule) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule_source.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule_source.go new file mode 
100644 index 00000000..32a57aa9 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule_source.go @@ -0,0 +1,192 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the RouteRuleSource type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &RouteRuleSource{} + +// RouteRuleSource struct for RouteRuleSource +type RouteRuleSource struct { + // Schema pattern of the source, supports wildcards. + SchemaPattern *string `json:"schemaPattern,omitempty"` + // Table pattern of the source, supports wildcards. + TablePattern *string `json:"tablePattern,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _RouteRuleSource RouteRuleSource + +// NewRouteRuleSource instantiates a new RouteRuleSource object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewRouteRuleSource() *RouteRuleSource { + this := RouteRuleSource{} + return &this +} + +// NewRouteRuleSourceWithDefaults instantiates a new RouteRuleSource object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewRouteRuleSourceWithDefaults() *RouteRuleSource { + this := RouteRuleSource{} + return &this +} + +// GetSchemaPattern returns the SchemaPattern field value if set, zero value otherwise. 
+func (o *RouteRuleSource) GetSchemaPattern() string { + if o == nil || IsNil(o.SchemaPattern) { + var ret string + return ret + } + return *o.SchemaPattern +} + +// GetSchemaPatternOk returns a tuple with the SchemaPattern field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *RouteRuleSource) GetSchemaPatternOk() (*string, bool) { + if o == nil || IsNil(o.SchemaPattern) { + return nil, false + } + return o.SchemaPattern, true +} + +// HasSchemaPattern returns a boolean if a field has been set. +func (o *RouteRuleSource) HasSchemaPattern() bool { + if o != nil && !IsNil(o.SchemaPattern) { + return true + } + + return false +} + +// SetSchemaPattern gets a reference to the given string and assigns it to the SchemaPattern field. +func (o *RouteRuleSource) SetSchemaPattern(v string) { + o.SchemaPattern = &v +} + +// GetTablePattern returns the TablePattern field value if set, zero value otherwise. +func (o *RouteRuleSource) GetTablePattern() string { + if o == nil || IsNil(o.TablePattern) { + var ret string + return ret + } + return *o.TablePattern +} + +// GetTablePatternOk returns a tuple with the TablePattern field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *RouteRuleSource) GetTablePatternOk() (*string, bool) { + if o == nil || IsNil(o.TablePattern) { + return nil, false + } + return o.TablePattern, true +} + +// HasTablePattern returns a boolean if a field has been set. +func (o *RouteRuleSource) HasTablePattern() bool { + if o != nil && !IsNil(o.TablePattern) { + return true + } + + return false +} + +// SetTablePattern gets a reference to the given string and assigns it to the TablePattern field. 
+func (o *RouteRuleSource) SetTablePattern(v string) { + o.TablePattern = &v +} + +func (o RouteRuleSource) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o RouteRuleSource) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.SchemaPattern) { + toSerialize["schemaPattern"] = o.SchemaPattern + } + if !IsNil(o.TablePattern) { + toSerialize["tablePattern"] = o.TablePattern + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *RouteRuleSource) UnmarshalJSON(data []byte) (err error) { + varRouteRuleSource := _RouteRuleSource{} + + err = json.Unmarshal(data, &varRouteRuleSource) + + if err != nil { + return err + } + + *o = RouteRuleSource(varRouteRuleSource) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "schemaPattern") + delete(additionalProperties, "tablePattern") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableRouteRuleSource struct { + value *RouteRuleSource + isSet bool +} + +func (v NullableRouteRuleSource) Get() *RouteRuleSource { + return v.value +} + +func (v *NullableRouteRuleSource) Set(val *RouteRuleSource) { + v.value = val + v.isSet = true +} + +func (v NullableRouteRuleSource) IsSet() bool { + return v.isSet +} + +func (v *NullableRouteRuleSource) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableRouteRuleSource(val *RouteRuleSource) *NullableRouteRuleSource { + return &NullableRouteRuleSource{value: val, isSet: true} +} + +func (v NullableRouteRuleSource) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableRouteRuleSource) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git 
a/pkg/tidbcloud/v1beta1/serverless/migration/model_security.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_security.go new file mode 100644 index 00000000..007edc54 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_security.go @@ -0,0 +1,268 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the Security type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Security{} + +// Security struct for Security +type Security struct { + // Allowed certificate Common Names. + CertAllowedCn []string `json:"certAllowedCn,omitempty"` + // CA certificate content in PEM. + SslCaContent *string `json:"sslCaContent,omitempty" validate:"regexp=^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$"` + // Client certificate content in PEM. + SslCertContent *string `json:"sslCertContent,omitempty" validate:"regexp=^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$"` + // Client private key in PEM. 
+ SslKeyContent *string `json:"sslKeyContent,omitempty" validate:"regexp=^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$"` + AdditionalProperties map[string]interface{} +} + +type _Security Security + +// NewSecurity instantiates a new Security object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewSecurity() *Security { + this := Security{} + return &this +} + +// NewSecurityWithDefaults instantiates a new Security object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewSecurityWithDefaults() *Security { + this := Security{} + return &this +} + +// GetCertAllowedCn returns the CertAllowedCn field value if set, zero value otherwise. +func (o *Security) GetCertAllowedCn() []string { + if o == nil || IsNil(o.CertAllowedCn) { + var ret []string + return ret + } + return o.CertAllowedCn +} + +// GetCertAllowedCnOk returns a tuple with the CertAllowedCn field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Security) GetCertAllowedCnOk() ([]string, bool) { + if o == nil || IsNil(o.CertAllowedCn) { + return nil, false + } + return o.CertAllowedCn, true +} + +// HasCertAllowedCn returns a boolean if a field has been set. +func (o *Security) HasCertAllowedCn() bool { + if o != nil && !IsNil(o.CertAllowedCn) { + return true + } + + return false +} + +// SetCertAllowedCn gets a reference to the given []string and assigns it to the CertAllowedCn field. +func (o *Security) SetCertAllowedCn(v []string) { + o.CertAllowedCn = v +} + +// GetSslCaContent returns the SslCaContent field value if set, zero value otherwise. 
+func (o *Security) GetSslCaContent() string { + if o == nil || IsNil(o.SslCaContent) { + var ret string + return ret + } + return *o.SslCaContent +} + +// GetSslCaContentOk returns a tuple with the SslCaContent field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Security) GetSslCaContentOk() (*string, bool) { + if o == nil || IsNil(o.SslCaContent) { + return nil, false + } + return o.SslCaContent, true +} + +// HasSslCaContent returns a boolean if a field has been set. +func (o *Security) HasSslCaContent() bool { + if o != nil && !IsNil(o.SslCaContent) { + return true + } + + return false +} + +// SetSslCaContent gets a reference to the given string and assigns it to the SslCaContent field. +func (o *Security) SetSslCaContent(v string) { + o.SslCaContent = &v +} + +// GetSslCertContent returns the SslCertContent field value if set, zero value otherwise. +func (o *Security) GetSslCertContent() string { + if o == nil || IsNil(o.SslCertContent) { + var ret string + return ret + } + return *o.SslCertContent +} + +// GetSslCertContentOk returns a tuple with the SslCertContent field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Security) GetSslCertContentOk() (*string, bool) { + if o == nil || IsNil(o.SslCertContent) { + return nil, false + } + return o.SslCertContent, true +} + +// HasSslCertContent returns a boolean if a field has been set. +func (o *Security) HasSslCertContent() bool { + if o != nil && !IsNil(o.SslCertContent) { + return true + } + + return false +} + +// SetSslCertContent gets a reference to the given string and assigns it to the SslCertContent field. +func (o *Security) SetSslCertContent(v string) { + o.SslCertContent = &v +} + +// GetSslKeyContent returns the SslKeyContent field value if set, zero value otherwise. 
+func (o *Security) GetSslKeyContent() string { + if o == nil || IsNil(o.SslKeyContent) { + var ret string + return ret + } + return *o.SslKeyContent +} + +// GetSslKeyContentOk returns a tuple with the SslKeyContent field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Security) GetSslKeyContentOk() (*string, bool) { + if o == nil || IsNil(o.SslKeyContent) { + return nil, false + } + return o.SslKeyContent, true +} + +// HasSslKeyContent returns a boolean if a field has been set. +func (o *Security) HasSslKeyContent() bool { + if o != nil && !IsNil(o.SslKeyContent) { + return true + } + + return false +} + +// SetSslKeyContent gets a reference to the given string and assigns it to the SslKeyContent field. +func (o *Security) SetSslKeyContent(v string) { + o.SslKeyContent = &v +} + +func (o Security) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Security) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.CertAllowedCn) { + toSerialize["certAllowedCn"] = o.CertAllowedCn + } + if !IsNil(o.SslCaContent) { + toSerialize["sslCaContent"] = o.SslCaContent + } + if !IsNil(o.SslCertContent) { + toSerialize["sslCertContent"] = o.SslCertContent + } + if !IsNil(o.SslKeyContent) { + toSerialize["sslKeyContent"] = o.SslKeyContent + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *Security) UnmarshalJSON(data []byte) (err error) { + varSecurity := _Security{} + + err = json.Unmarshal(data, &varSecurity) + + if err != nil { + return err + } + + *o = Security(varSecurity) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "certAllowedCn") + delete(additionalProperties, "sslCaContent") + 
delete(additionalProperties, "sslCertContent") + delete(additionalProperties, "sslKeyContent") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableSecurity struct { + value *Security + isSet bool +} + +func (v NullableSecurity) Get() *Security { + return v.value +} + +func (v *NullableSecurity) Set(val *Security) { + v.value = val + v.isSet = true +} + +func (v NullableSecurity) IsSet() bool { + return v.isSet +} + +func (v *NullableSecurity) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSecurity(val *Security) *NullableSecurity { + return &NullableSecurity{value: val, isSet: true} +} + +func (v NullableSecurity) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableSecurity) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_source.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_source.go new file mode 100644 index 00000000..bad6131c --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_source.go @@ -0,0 +1,420 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" + "fmt" +) + +// checks if the Source type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Source{} + +// Source struct for Source +type Source struct { + // Connection profile for the source database. + ConnProfile ConnProfile `json:"connProfile"` + // Block/allow rules for databases and tables, which is exclusive with route_rules. + BaRules *BlockAllowRules `json:"baRules,omitempty"` + // Table route rules,which is exclusive with ba_rules. + RouteRules []RouteRule `json:"routeRules,omitempty"` + // Starting binlog file name for incremental sync. 
+ BinlogName NullableString `json:"binlogName,omitempty"` + // Starting binlog position for incremental sync. + BinlogPosition NullableInt32 `json:"binlogPosition,omitempty"` + // Starting GTID set for incremental sync. + BinlogGtid NullableString `json:"binlogGtid,omitempty"` + // Source type (e.g., MySQL). + SourceType SourceSourceType `json:"sourceType"` + AdditionalProperties map[string]interface{} +} + +type _Source Source + +// NewSource instantiates a new Source object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewSource(connProfile ConnProfile, sourceType SourceSourceType) *Source { + this := Source{} + this.ConnProfile = connProfile + this.SourceType = sourceType + return &this +} + +// NewSourceWithDefaults instantiates a new Source object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewSourceWithDefaults() *Source { + this := Source{} + return &this +} + +// GetConnProfile returns the ConnProfile field value +func (o *Source) GetConnProfile() ConnProfile { + if o == nil { + var ret ConnProfile + return ret + } + + return o.ConnProfile +} + +// GetConnProfileOk returns a tuple with the ConnProfile field value +// and a boolean to check if the value has been set. +func (o *Source) GetConnProfileOk() (*ConnProfile, bool) { + if o == nil { + return nil, false + } + return &o.ConnProfile, true +} + +// SetConnProfile sets field value +func (o *Source) SetConnProfile(v ConnProfile) { + o.ConnProfile = v +} + +// GetBaRules returns the BaRules field value if set, zero value otherwise. 
+func (o *Source) GetBaRules() BlockAllowRules { + if o == nil || IsNil(o.BaRules) { + var ret BlockAllowRules + return ret + } + return *o.BaRules +} + +// GetBaRulesOk returns a tuple with the BaRules field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Source) GetBaRulesOk() (*BlockAllowRules, bool) { + if o == nil || IsNil(o.BaRules) { + return nil, false + } + return o.BaRules, true +} + +// HasBaRules returns a boolean if a field has been set. +func (o *Source) HasBaRules() bool { + if o != nil && !IsNil(o.BaRules) { + return true + } + + return false +} + +// SetBaRules gets a reference to the given BlockAllowRules and assigns it to the BaRules field. +func (o *Source) SetBaRules(v BlockAllowRules) { + o.BaRules = &v +} + +// GetRouteRules returns the RouteRules field value if set, zero value otherwise. +func (o *Source) GetRouteRules() []RouteRule { + if o == nil || IsNil(o.RouteRules) { + var ret []RouteRule + return ret + } + return o.RouteRules +} + +// GetRouteRulesOk returns a tuple with the RouteRules field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Source) GetRouteRulesOk() ([]RouteRule, bool) { + if o == nil || IsNil(o.RouteRules) { + return nil, false + } + return o.RouteRules, true +} + +// HasRouteRules returns a boolean if a field has been set. +func (o *Source) HasRouteRules() bool { + if o != nil && !IsNil(o.RouteRules) { + return true + } + + return false +} + +// SetRouteRules gets a reference to the given []RouteRule and assigns it to the RouteRules field. +func (o *Source) SetRouteRules(v []RouteRule) { + o.RouteRules = v +} + +// GetBinlogName returns the BinlogName field value if set, zero value otherwise (both if not set or set to explicit null). 
+func (o *Source) GetBinlogName() string { + if o == nil || IsNil(o.BinlogName.Get()) { + var ret string + return ret + } + return *o.BinlogName.Get() +} + +// GetBinlogNameOk returns a tuple with the BinlogName field value if set, nil otherwise +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *Source) GetBinlogNameOk() (*string, bool) { + if o == nil { + return nil, false + } + return o.BinlogName.Get(), o.BinlogName.IsSet() +} + +// HasBinlogName returns a boolean if a field has been set. +func (o *Source) HasBinlogName() bool { + if o != nil && o.BinlogName.IsSet() { + return true + } + + return false +} + +// SetBinlogName gets a reference to the given NullableString and assigns it to the BinlogName field. +func (o *Source) SetBinlogName(v string) { + o.BinlogName.Set(&v) +} + +// SetBinlogNameNil sets the value for BinlogName to be an explicit nil +func (o *Source) SetBinlogNameNil() { + o.BinlogName.Set(nil) +} + +// UnsetBinlogName ensures that no value is present for BinlogName, not even an explicit nil +func (o *Source) UnsetBinlogName() { + o.BinlogName.Unset() +} + +// GetBinlogPosition returns the BinlogPosition field value if set, zero value otherwise (both if not set or set to explicit null). +func (o *Source) GetBinlogPosition() int32 { + if o == nil || IsNil(o.BinlogPosition.Get()) { + var ret int32 + return ret + } + return *o.BinlogPosition.Get() +} + +// GetBinlogPositionOk returns a tuple with the BinlogPosition field value if set, nil otherwise +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *Source) GetBinlogPositionOk() (*int32, bool) { + if o == nil { + return nil, false + } + return o.BinlogPosition.Get(), o.BinlogPosition.IsSet() +} + +// HasBinlogPosition returns a boolean if a field has been set. 
+func (o *Source) HasBinlogPosition() bool { + if o != nil && o.BinlogPosition.IsSet() { + return true + } + + return false +} + +// SetBinlogPosition gets a reference to the given NullableInt32 and assigns it to the BinlogPosition field. +func (o *Source) SetBinlogPosition(v int32) { + o.BinlogPosition.Set(&v) +} + +// SetBinlogPositionNil sets the value for BinlogPosition to be an explicit nil +func (o *Source) SetBinlogPositionNil() { + o.BinlogPosition.Set(nil) +} + +// UnsetBinlogPosition ensures that no value is present for BinlogPosition, not even an explicit nil +func (o *Source) UnsetBinlogPosition() { + o.BinlogPosition.Unset() +} + +// GetBinlogGtid returns the BinlogGtid field value if set, zero value otherwise (both if not set or set to explicit null). +func (o *Source) GetBinlogGtid() string { + if o == nil || IsNil(o.BinlogGtid.Get()) { + var ret string + return ret + } + return *o.BinlogGtid.Get() +} + +// GetBinlogGtidOk returns a tuple with the BinlogGtid field value if set, nil otherwise +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *Source) GetBinlogGtidOk() (*string, bool) { + if o == nil { + return nil, false + } + return o.BinlogGtid.Get(), o.BinlogGtid.IsSet() +} + +// HasBinlogGtid returns a boolean if a field has been set. +func (o *Source) HasBinlogGtid() bool { + if o != nil && o.BinlogGtid.IsSet() { + return true + } + + return false +} + +// SetBinlogGtid gets a reference to the given NullableString and assigns it to the BinlogGtid field. 
+func (o *Source) SetBinlogGtid(v string) { + o.BinlogGtid.Set(&v) +} + +// SetBinlogGtidNil sets the value for BinlogGtid to be an explicit nil +func (o *Source) SetBinlogGtidNil() { + o.BinlogGtid.Set(nil) +} + +// UnsetBinlogGtid ensures that no value is present for BinlogGtid, not even an explicit nil +func (o *Source) UnsetBinlogGtid() { + o.BinlogGtid.Unset() +} + +// GetSourceType returns the SourceType field value +func (o *Source) GetSourceType() SourceSourceType { + if o == nil { + var ret SourceSourceType + return ret + } + + return o.SourceType +} + +// GetSourceTypeOk returns a tuple with the SourceType field value +// and a boolean to check if the value has been set. +func (o *Source) GetSourceTypeOk() (*SourceSourceType, bool) { + if o == nil { + return nil, false + } + return &o.SourceType, true +} + +// SetSourceType sets field value +func (o *Source) SetSourceType(v SourceSourceType) { + o.SourceType = v +} + +func (o Source) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Source) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["connProfile"] = o.ConnProfile + if !IsNil(o.BaRules) { + toSerialize["baRules"] = o.BaRules + } + if !IsNil(o.RouteRules) { + toSerialize["routeRules"] = o.RouteRules + } + if o.BinlogName.IsSet() { + toSerialize["binlogName"] = o.BinlogName.Get() + } + if o.BinlogPosition.IsSet() { + toSerialize["binlogPosition"] = o.BinlogPosition.Get() + } + if o.BinlogGtid.IsSet() { + toSerialize["binlogGtid"] = o.BinlogGtid.Get() + } + toSerialize["sourceType"] = o.SourceType + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *Source) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic 
map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "connProfile", + "sourceType", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err + } + + for _, requiredProperty := range requiredProperties { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varSource := _Source{} + + err = json.Unmarshal(data, &varSource) + + if err != nil { + return err + } + + *o = Source(varSource) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "connProfile") + delete(additionalProperties, "baRules") + delete(additionalProperties, "routeRules") + delete(additionalProperties, "binlogName") + delete(additionalProperties, "binlogPosition") + delete(additionalProperties, "binlogGtid") + delete(additionalProperties, "sourceType") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableSource struct { + value *Source + isSet bool +} + +func (v NullableSource) Get() *Source { + return v.value +} + +func (v *NullableSource) Set(val *Source) { + v.value = val + v.isSet = true +} + +func (v NullableSource) IsSet() bool { + return v.isSet +} + +func (v *NullableSource) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSource(val *Source) *NullableSource { + return &NullableSource{value: val, isSet: true} +} + +func (v NullableSource) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableSource) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go new file 
mode 100644 index 00000000..dea0370f --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go @@ -0,0 +1,107 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// SourceSourceType The source database type. - MYSQL: Self-managed MySQL. - ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL. - AWS_RDS_MYSQL: Amazon RDS for MySQL. +type SourceSourceType string + +// List of Source.SourceType +const ( + SOURCESOURCETYPE_MYSQL SourceSourceType = "MYSQL" + SOURCESOURCETYPE_ALICLOUD_RDS_MYSQL SourceSourceType = "ALICLOUD_RDS_MYSQL" + SOURCESOURCETYPE_AWS_RDS_MYSQL SourceSourceType = "AWS_RDS_MYSQL" +) + +// All allowed values of SourceSourceType enum +var AllowedSourceSourceTypeEnumValues = []SourceSourceType{ + "MYSQL", + "ALICLOUD_RDS_MYSQL", + "AWS_RDS_MYSQL", +} + +func (v *SourceSourceType) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := SourceSourceType(value) + for _, existing := range AllowedSourceSourceTypeEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = SourceSourceType(value) + return nil +} + +// NewSourceSourceTypeFromValue returns a pointer to a valid SourceSourceType for the value passed as argument +func NewSourceSourceTypeFromValue(v string) *SourceSourceType { + ev := SourceSourceType(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v SourceSourceType) IsValid() bool { + for _, existing := range AllowedSourceSourceTypeEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to Source.SourceType value +func (v SourceSourceType) Ptr() *SourceSourceType { + return &v +} + +type 
NullableSourceSourceType struct { + value *SourceSourceType + isSet bool +} + +func (v NullableSourceSourceType) Get() *SourceSourceType { + return v.value +} + +func (v *NullableSourceSourceType) Set(val *SourceSourceType) { + v.value = val + v.isSet = true +} + +func (v NullableSourceSourceType) IsSet() bool { + return v.isSet +} + +func (v *NullableSourceSourceType) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSourceSourceType(val *SourceSourceType) *NullableSourceSourceType { + return &NullableSourceSourceType{value: val, isSet: true} +} + +func (v NullableSourceSourceType) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableSourceSourceType) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_status.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_status.go new file mode 100644 index 00000000..b46a9490 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_status.go @@ -0,0 +1,227 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package migration + +import ( + "encoding/json" +) + +// checks if the Status type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Status{} + +// Status struct for Status +type Status struct { + Code *int32 `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + Details []Any `json:"details,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _Status Status + +// NewStatus instantiates a new Status object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewStatus() *Status { + this := Status{} + return &this +} + +// NewStatusWithDefaults instantiates a new Status object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewStatusWithDefaults() *Status { + this := Status{} + return &this +} + +// GetCode returns the Code field value if set, zero value otherwise. +func (o *Status) GetCode() int32 { + if o == nil || IsNil(o.Code) { + var ret int32 + return ret + } + return *o.Code +} + +// GetCodeOk returns a tuple with the Code field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Status) GetCodeOk() (*int32, bool) { + if o == nil || IsNil(o.Code) { + return nil, false + } + return o.Code, true +} + +// HasCode returns a boolean if a field has been set. +func (o *Status) HasCode() bool { + if o != nil && !IsNil(o.Code) { + return true + } + + return false +} + +// SetCode gets a reference to the given int32 and assigns it to the Code field. +func (o *Status) SetCode(v int32) { + o.Code = &v +} + +// GetMessage returns the Message field value if set, zero value otherwise. 
+func (o *Status) GetMessage() string { + if o == nil || IsNil(o.Message) { + var ret string + return ret + } + return *o.Message +} + +// GetMessageOk returns a tuple with the Message field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Status) GetMessageOk() (*string, bool) { + if o == nil || IsNil(o.Message) { + return nil, false + } + return o.Message, true +} + +// HasMessage returns a boolean if a field has been set. +func (o *Status) HasMessage() bool { + if o != nil && !IsNil(o.Message) { + return true + } + + return false +} + +// SetMessage gets a reference to the given string and assigns it to the Message field. +func (o *Status) SetMessage(v string) { + o.Message = &v +} + +// GetDetails returns the Details field value if set, zero value otherwise. +func (o *Status) GetDetails() []Any { + if o == nil || IsNil(o.Details) { + var ret []Any + return ret + } + return o.Details +} + +// GetDetailsOk returns a tuple with the Details field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Status) GetDetailsOk() ([]Any, bool) { + if o == nil || IsNil(o.Details) { + return nil, false + } + return o.Details, true +} + +// HasDetails returns a boolean if a field has been set. +func (o *Status) HasDetails() bool { + if o != nil && !IsNil(o.Details) { + return true + } + + return false +} + +// SetDetails gets a reference to the given []Any and assigns it to the Details field. 
+func (o *Status) SetDetails(v []Any) { + o.Details = v +} + +func (o Status) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Status) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Code) { + toSerialize["code"] = o.Code + } + if !IsNil(o.Message) { + toSerialize["message"] = o.Message + } + if !IsNil(o.Details) { + toSerialize["details"] = o.Details + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *Status) UnmarshalJSON(data []byte) (err error) { + varStatus := _Status{} + + err = json.Unmarshal(data, &varStatus) + + if err != nil { + return err + } + + *o = Status(varStatus) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "code") + delete(additionalProperties, "message") + delete(additionalProperties, "details") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableStatus struct { + value *Status + isSet bool +} + +func (v NullableStatus) Get() *Status { + return v.value +} + +func (v *NullableStatus) Set(val *Status) { + v.value = val + v.isSet = true +} + +func (v NullableStatus) IsSet() bool { + return v.isSet +} + +func (v *NullableStatus) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableStatus(val *Status) *NullableStatus { + return &NullableStatus{value: val, isSet: true} +} + +func (v NullableStatus) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableStatus) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task.go new file mode 100644 index 00000000..b09a054c 
--- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task.go @@ -0,0 +1,393 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the SubTask type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &SubTask{} + +// SubTask struct for SubTask +type SubTask struct { + // Source configuration for this subtask. + Source *Source `json:"source,omitempty"` + // Current step of the subtask. + CurrentStep *SubTaskStep `json:"currentStep,omitempty"` + // Current stage of the subtask. + Stage *SubTaskStage `json:"stage,omitempty"` + // Detail of dump phase, if applicable. + DumpDetail *DumpDetail `json:"dumpDetail,omitempty"` + // Detail of load phase, if applicable. + LoadDetail *LoadDetail `json:"loadDetail,omitempty"` + // Detail of sync phase, if applicable. + SyncDetail *SyncDetail `json:"syncDetail,omitempty"` + // Error message when the subtask fails. + ErrorMsg NullableString `json:"errorMsg,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _SubTask SubTask + +// NewSubTask instantiates a new SubTask object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewSubTask() *SubTask { + this := SubTask{} + return &this +} + +// NewSubTaskWithDefaults instantiates a new SubTask object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewSubTaskWithDefaults() *SubTask { + this := SubTask{} + return &this +} + +// GetSource returns the Source field value if set, zero value otherwise. 
+func (o *SubTask) GetSource() Source { + if o == nil || IsNil(o.Source) { + var ret Source + return ret + } + return *o.Source +} + +// GetSourceOk returns a tuple with the Source field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *SubTask) GetSourceOk() (*Source, bool) { + if o == nil || IsNil(o.Source) { + return nil, false + } + return o.Source, true +} + +// HasSource returns a boolean if a field has been set. +func (o *SubTask) HasSource() bool { + if o != nil && !IsNil(o.Source) { + return true + } + + return false +} + +// SetSource gets a reference to the given Source and assigns it to the Source field. +func (o *SubTask) SetSource(v Source) { + o.Source = &v +} + +// GetCurrentStep returns the CurrentStep field value if set, zero value otherwise. +func (o *SubTask) GetCurrentStep() SubTaskStep { + if o == nil || IsNil(o.CurrentStep) { + var ret SubTaskStep + return ret + } + return *o.CurrentStep +} + +// GetCurrentStepOk returns a tuple with the CurrentStep field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *SubTask) GetCurrentStepOk() (*SubTaskStep, bool) { + if o == nil || IsNil(o.CurrentStep) { + return nil, false + } + return o.CurrentStep, true +} + +// HasCurrentStep returns a boolean if a field has been set. +func (o *SubTask) HasCurrentStep() bool { + if o != nil && !IsNil(o.CurrentStep) { + return true + } + + return false +} + +// SetCurrentStep gets a reference to the given SubTaskStep and assigns it to the CurrentStep field. +func (o *SubTask) SetCurrentStep(v SubTaskStep) { + o.CurrentStep = &v +} + +// GetStage returns the Stage field value if set, zero value otherwise. +func (o *SubTask) GetStage() SubTaskStage { + if o == nil || IsNil(o.Stage) { + var ret SubTaskStage + return ret + } + return *o.Stage +} + +// GetStageOk returns a tuple with the Stage field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *SubTask) GetStageOk() (*SubTaskStage, bool) { + if o == nil || IsNil(o.Stage) { + return nil, false + } + return o.Stage, true +} + +// HasStage returns a boolean if a field has been set. +func (o *SubTask) HasStage() bool { + if o != nil && !IsNil(o.Stage) { + return true + } + + return false +} + +// SetStage gets a reference to the given SubTaskStage and assigns it to the Stage field. +func (o *SubTask) SetStage(v SubTaskStage) { + o.Stage = &v +} + +// GetDumpDetail returns the DumpDetail field value if set, zero value otherwise. +func (o *SubTask) GetDumpDetail() DumpDetail { + if o == nil || IsNil(o.DumpDetail) { + var ret DumpDetail + return ret + } + return *o.DumpDetail +} + +// GetDumpDetailOk returns a tuple with the DumpDetail field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *SubTask) GetDumpDetailOk() (*DumpDetail, bool) { + if o == nil || IsNil(o.DumpDetail) { + return nil, false + } + return o.DumpDetail, true +} + +// HasDumpDetail returns a boolean if a field has been set. +func (o *SubTask) HasDumpDetail() bool { + if o != nil && !IsNil(o.DumpDetail) { + return true + } + + return false +} + +// SetDumpDetail gets a reference to the given DumpDetail and assigns it to the DumpDetail field. +func (o *SubTask) SetDumpDetail(v DumpDetail) { + o.DumpDetail = &v +} + +// GetLoadDetail returns the LoadDetail field value if set, zero value otherwise. +func (o *SubTask) GetLoadDetail() LoadDetail { + if o == nil || IsNil(o.LoadDetail) { + var ret LoadDetail + return ret + } + return *o.LoadDetail +} + +// GetLoadDetailOk returns a tuple with the LoadDetail field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *SubTask) GetLoadDetailOk() (*LoadDetail, bool) { + if o == nil || IsNil(o.LoadDetail) { + return nil, false + } + return o.LoadDetail, true +} + +// HasLoadDetail returns a boolean if a field has been set. 
+func (o *SubTask) HasLoadDetail() bool { + if o != nil && !IsNil(o.LoadDetail) { + return true + } + + return false +} + +// SetLoadDetail gets a reference to the given LoadDetail and assigns it to the LoadDetail field. +func (o *SubTask) SetLoadDetail(v LoadDetail) { + o.LoadDetail = &v +} + +// GetSyncDetail returns the SyncDetail field value if set, zero value otherwise. +func (o *SubTask) GetSyncDetail() SyncDetail { + if o == nil || IsNil(o.SyncDetail) { + var ret SyncDetail + return ret + } + return *o.SyncDetail +} + +// GetSyncDetailOk returns a tuple with the SyncDetail field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *SubTask) GetSyncDetailOk() (*SyncDetail, bool) { + if o == nil || IsNil(o.SyncDetail) { + return nil, false + } + return o.SyncDetail, true +} + +// HasSyncDetail returns a boolean if a field has been set. +func (o *SubTask) HasSyncDetail() bool { + if o != nil && !IsNil(o.SyncDetail) { + return true + } + + return false +} + +// SetSyncDetail gets a reference to the given SyncDetail and assigns it to the SyncDetail field. +func (o *SubTask) SetSyncDetail(v SyncDetail) { + o.SyncDetail = &v +} + +// GetErrorMsg returns the ErrorMsg field value if set, zero value otherwise (both if not set or set to explicit null). +func (o *SubTask) GetErrorMsg() string { + if o == nil || IsNil(o.ErrorMsg.Get()) { + var ret string + return ret + } + return *o.ErrorMsg.Get() +} + +// GetErrorMsgOk returns a tuple with the ErrorMsg field value if set, nil otherwise +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *SubTask) GetErrorMsgOk() (*string, bool) { + if o == nil { + return nil, false + } + return o.ErrorMsg.Get(), o.ErrorMsg.IsSet() +} + +// HasErrorMsg returns a boolean if a field has been set. 
+func (o *SubTask) HasErrorMsg() bool { + if o != nil && o.ErrorMsg.IsSet() { + return true + } + + return false +} + +// SetErrorMsg gets a reference to the given NullableString and assigns it to the ErrorMsg field. +func (o *SubTask) SetErrorMsg(v string) { + o.ErrorMsg.Set(&v) +} + +// SetErrorMsgNil sets the value for ErrorMsg to be an explicit nil +func (o *SubTask) SetErrorMsgNil() { + o.ErrorMsg.Set(nil) +} + +// UnsetErrorMsg ensures that no value is present for ErrorMsg, not even an explicit nil +func (o *SubTask) UnsetErrorMsg() { + o.ErrorMsg.Unset() +} + +func (o SubTask) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o SubTask) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Source) { + toSerialize["source"] = o.Source + } + if !IsNil(o.CurrentStep) { + toSerialize["currentStep"] = o.CurrentStep + } + if !IsNil(o.Stage) { + toSerialize["stage"] = o.Stage + } + if !IsNil(o.DumpDetail) { + toSerialize["dumpDetail"] = o.DumpDetail + } + if !IsNil(o.LoadDetail) { + toSerialize["loadDetail"] = o.LoadDetail + } + if !IsNil(o.SyncDetail) { + toSerialize["syncDetail"] = o.SyncDetail + } + if o.ErrorMsg.IsSet() { + toSerialize["errorMsg"] = o.ErrorMsg.Get() + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *SubTask) UnmarshalJSON(data []byte) (err error) { + varSubTask := _SubTask{} + + err = json.Unmarshal(data, &varSubTask) + + if err != nil { + return err + } + + *o = SubTask(varSubTask) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "source") + delete(additionalProperties, "currentStep") + delete(additionalProperties, "stage") + delete(additionalProperties, "dumpDetail") + delete(additionalProperties, 
"loadDetail") + delete(additionalProperties, "syncDetail") + delete(additionalProperties, "errorMsg") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableSubTask struct { + value *SubTask + isSet bool +} + +func (v NullableSubTask) Get() *SubTask { + return v.value +} + +func (v *NullableSubTask) Set(val *SubTask) { + v.value = val + v.isSet = true +} + +func (v NullableSubTask) IsSet() bool { + return v.isSet +} + +func (v *NullableSubTask) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSubTask(val *SubTask) *NullableSubTask { + return &NullableSubTask{value: val, isSet: true} +} + +func (v NullableSubTask) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableSubTask) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_stage.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_stage.go new file mode 100644 index 00000000..0c3ab74b --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_stage.go @@ -0,0 +1,111 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// SubTaskStage The high-level lifecycle stage of a subtask. - RUNNING: Subtask is running. - PAUSED: Subtask is paused. - FAILED: Subtask failed. - FINISHED: Subtask finished successfully. - UNKNOWN: Subtask stage is unknown. 
+type SubTaskStage string + +// List of SubTask.Stage +const ( + SUBTASKSTAGE_RUNNING SubTaskStage = "RUNNING" + SUBTASKSTAGE_PAUSED SubTaskStage = "PAUSED" + SUBTASKSTAGE_FAILED SubTaskStage = "FAILED" + SUBTASKSTAGE_FINISHED SubTaskStage = "FINISHED" + SUBTASKSTAGE_UNKNOWN SubTaskStage = "UNKNOWN" +) + +// All allowed values of SubTaskStage enum +var AllowedSubTaskStageEnumValues = []SubTaskStage{ + "RUNNING", + "PAUSED", + "FAILED", + "FINISHED", + "UNKNOWN", +} + +func (v *SubTaskStage) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := SubTaskStage(value) + for _, existing := range AllowedSubTaskStageEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = SubTaskStage(value) + return nil +} + +// NewSubTaskStageFromValue returns a pointer to a valid SubTaskStage for the value passed as argument +func NewSubTaskStageFromValue(v string) *SubTaskStage { + ev := SubTaskStage(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v SubTaskStage) IsValid() bool { + for _, existing := range AllowedSubTaskStageEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to SubTask.Stage value +func (v SubTaskStage) Ptr() *SubTaskStage { + return &v +} + +type NullableSubTaskStage struct { + value *SubTaskStage + isSet bool +} + +func (v NullableSubTaskStage) Get() *SubTaskStage { + return v.value +} + +func (v *NullableSubTaskStage) Set(val *SubTaskStage) { + v.value = val + v.isSet = true +} + +func (v NullableSubTaskStage) IsSet() bool { + return v.isSet +} + +func (v *NullableSubTaskStage) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSubTaskStage(val *SubTaskStage) *NullableSubTaskStage { + return &NullableSubTaskStage{value: val, isSet: true} +} + +func (v NullableSubTaskStage) MarshalJSON() ([]byte, error) { + return 
json.Marshal(v.value) +} + +func (v *NullableSubTaskStage) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_step.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_step.go new file mode 100644 index 00000000..006e9d99 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_step.go @@ -0,0 +1,107 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// SubTaskStep The current step within a subtask. - DUMP: Dump/export data from source. - LOAD: Load/import data into target. - SYNC: Sync/replicate binlog changes. +type SubTaskStep string + +// List of SubTask.Step +const ( + SUBTASKSTEP_DUMP SubTaskStep = "DUMP" + SUBTASKSTEP_LOAD SubTaskStep = "LOAD" + SUBTASKSTEP_SYNC SubTaskStep = "SYNC" +) + +// All allowed values of SubTaskStep enum +var AllowedSubTaskStepEnumValues = []SubTaskStep{ + "DUMP", + "LOAD", + "SYNC", +} + +func (v *SubTaskStep) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := SubTaskStep(value) + for _, existing := range AllowedSubTaskStepEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = SubTaskStep(value) + return nil +} + +// NewSubTaskStepFromValue returns a pointer to a valid SubTaskStep for the value passed as argument +func NewSubTaskStepFromValue(v string) *SubTaskStep { + ev := SubTaskStep(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v SubTaskStep) IsValid() bool { + for _, existing := range AllowedSubTaskStepEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns 
reference to SubTask.Step value +func (v SubTaskStep) Ptr() *SubTaskStep { + return &v +} + +type NullableSubTaskStep struct { + value *SubTaskStep + isSet bool +} + +func (v NullableSubTaskStep) Get() *SubTaskStep { + return v.value +} + +func (v *NullableSubTaskStep) Set(val *SubTaskStep) { + v.value = val + v.isSet = true +} + +func (v NullableSubTaskStep) IsSet() bool { + return v.isSet +} + +func (v *NullableSubTaskStep) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSubTaskStep(val *SubTaskStep) *NullableSubTaskStep { + return &NullableSubTaskStep{value: val, isSet: true} +} + +func (v NullableSubTaskStep) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableSubTaskStep) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_sync_detail.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_sync_detail.go new file mode 100644 index 00000000..4ca0f618 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_sync_detail.go @@ -0,0 +1,230 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the SyncDetail type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &SyncDetail{} + +// SyncDetail struct for SyncDetail +type SyncDetail struct { + // Rows processed per second during sync. + Rps *string `json:"rps,omitempty"` + // Replication latency in seconds. + Latency *string `json:"latency,omitempty"` + // Synchronization checkpoint. 
+ Checkpoint *string `json:"checkpoint,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _SyncDetail SyncDetail + +// NewSyncDetail instantiates a new SyncDetail object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewSyncDetail() *SyncDetail { + this := SyncDetail{} + return &this +} + +// NewSyncDetailWithDefaults instantiates a new SyncDetail object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewSyncDetailWithDefaults() *SyncDetail { + this := SyncDetail{} + return &this +} + +// GetRps returns the Rps field value if set, zero value otherwise. +func (o *SyncDetail) GetRps() string { + if o == nil || IsNil(o.Rps) { + var ret string + return ret + } + return *o.Rps +} + +// GetRpsOk returns a tuple with the Rps field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *SyncDetail) GetRpsOk() (*string, bool) { + if o == nil || IsNil(o.Rps) { + return nil, false + } + return o.Rps, true +} + +// HasRps returns a boolean if a field has been set. +func (o *SyncDetail) HasRps() bool { + if o != nil && !IsNil(o.Rps) { + return true + } + + return false +} + +// SetRps gets a reference to the given string and assigns it to the Rps field. +func (o *SyncDetail) SetRps(v string) { + o.Rps = &v +} + +// GetLatency returns the Latency field value if set, zero value otherwise. +func (o *SyncDetail) GetLatency() string { + if o == nil || IsNil(o.Latency) { + var ret string + return ret + } + return *o.Latency +} + +// GetLatencyOk returns a tuple with the Latency field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *SyncDetail) GetLatencyOk() (*string, bool) { + if o == nil || IsNil(o.Latency) { + return nil, false + } + return o.Latency, true +} + +// HasLatency returns a boolean if a field has been set. +func (o *SyncDetail) HasLatency() bool { + if o != nil && !IsNil(o.Latency) { + return true + } + + return false +} + +// SetLatency gets a reference to the given string and assigns it to the Latency field. +func (o *SyncDetail) SetLatency(v string) { + o.Latency = &v +} + +// GetCheckpoint returns the Checkpoint field value if set, zero value otherwise. +func (o *SyncDetail) GetCheckpoint() string { + if o == nil || IsNil(o.Checkpoint) { + var ret string + return ret + } + return *o.Checkpoint +} + +// GetCheckpointOk returns a tuple with the Checkpoint field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *SyncDetail) GetCheckpointOk() (*string, bool) { + if o == nil || IsNil(o.Checkpoint) { + return nil, false + } + return o.Checkpoint, true +} + +// HasCheckpoint returns a boolean if a field has been set. +func (o *SyncDetail) HasCheckpoint() bool { + if o != nil && !IsNil(o.Checkpoint) { + return true + } + + return false +} + +// SetCheckpoint gets a reference to the given string and assigns it to the Checkpoint field. 
+func (o *SyncDetail) SetCheckpoint(v string) { + o.Checkpoint = &v +} + +func (o SyncDetail) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o SyncDetail) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Rps) { + toSerialize["rps"] = o.Rps + } + if !IsNil(o.Latency) { + toSerialize["latency"] = o.Latency + } + if !IsNil(o.Checkpoint) { + toSerialize["checkpoint"] = o.Checkpoint + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *SyncDetail) UnmarshalJSON(data []byte) (err error) { + varSyncDetail := _SyncDetail{} + + err = json.Unmarshal(data, &varSyncDetail) + + if err != nil { + return err + } + + *o = SyncDetail(varSyncDetail) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "rps") + delete(additionalProperties, "latency") + delete(additionalProperties, "checkpoint") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableSyncDetail struct { + value *SyncDetail + isSet bool +} + +func (v NullableSyncDetail) Get() *SyncDetail { + return v.value +} + +func (v *NullableSyncDetail) Set(val *SyncDetail) { + v.value = val + v.isSet = true +} + +func (v NullableSyncDetail) IsSet() bool { + return v.isSet +} + +func (v *NullableSyncDetail) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSyncDetail(val *SyncDetail) *NullableSyncDetail { + return &NullableSyncDetail{value: val, isSet: true} +} + +func (v NullableSyncDetail) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableSyncDetail) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_table.go 
b/pkg/tidbcloud/v1beta1/serverless/migration/model_table.go new file mode 100644 index 00000000..5e5baa59 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_table.go @@ -0,0 +1,192 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the Table type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Table{} + +// Table struct for Table +type Table struct { + // Schema name. + Schema *string `json:"schema,omitempty"` + // Table name. + Table *string `json:"table,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _Table Table + +// NewTable instantiates a new Table object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewTable() *Table { + this := Table{} + return &this +} + +// NewTableWithDefaults instantiates a new Table object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewTableWithDefaults() *Table { + this := Table{} + return &this +} + +// GetSchema returns the Schema field value if set, zero value otherwise. +func (o *Table) GetSchema() string { + if o == nil || IsNil(o.Schema) { + var ret string + return ret + } + return *o.Schema +} + +// GetSchemaOk returns a tuple with the Schema field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Table) GetSchemaOk() (*string, bool) { + if o == nil || IsNil(o.Schema) { + return nil, false + } + return o.Schema, true +} + +// HasSchema returns a boolean if a field has been set. 
+func (o *Table) HasSchema() bool { + if o != nil && !IsNil(o.Schema) { + return true + } + + return false +} + +// SetSchema gets a reference to the given string and assigns it to the Schema field. +func (o *Table) SetSchema(v string) { + o.Schema = &v +} + +// GetTable returns the Table field value if set, zero value otherwise. +func (o *Table) GetTable() string { + if o == nil || IsNil(o.Table) { + var ret string + return ret + } + return *o.Table +} + +// GetTableOk returns a tuple with the Table field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Table) GetTableOk() (*string, bool) { + if o == nil || IsNil(o.Table) { + return nil, false + } + return o.Table, true +} + +// HasTable returns a boolean if a field has been set. +func (o *Table) HasTable() bool { + if o != nil && !IsNil(o.Table) { + return true + } + + return false +} + +// SetTable gets a reference to the given string and assigns it to the Table field. +func (o *Table) SetTable(v string) { + o.Table = &v +} + +func (o Table) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Table) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Schema) { + toSerialize["schema"] = o.Schema + } + if !IsNil(o.Table) { + toSerialize["table"] = o.Table + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *Table) UnmarshalJSON(data []byte) (err error) { + varTable := _Table{} + + err = json.Unmarshal(data, &varTable) + + if err != nil { + return err + } + + *o = Table(varTable) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "schema") + delete(additionalProperties, "table") + o.AdditionalProperties = additionalProperties + } + + return 
err +} + +type NullableTable struct { + value *Table + isSet bool +} + +func (v NullableTable) Get() *Table { + return v.value +} + +func (v *NullableTable) Set(val *Table) { + v.value = val + v.isSet = true +} + +func (v NullableTable) IsSet() bool { + return v.isSet +} + +func (v *NullableTable) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableTable(val *Table) *NullableTable { + return &NullableTable{value: val, isSet: true} +} + +func (v NullableTable) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableTable) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_target.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_target.go new file mode 100644 index 00000000..5b5458d1 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_target.go @@ -0,0 +1,197 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" + "fmt" +) + +// checks if the Target type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Target{} + +// Target struct for Target +type Target struct { + // Target database user. + User string `json:"user"` + // Target database password. 
+ Password string `json:"password"` + AdditionalProperties map[string]interface{} +} + +type _Target Target + +// NewTarget instantiates a new Target object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewTarget(user string, password string) *Target { + this := Target{} + this.User = user + this.Password = password + return &this +} + +// NewTargetWithDefaults instantiates a new Target object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewTargetWithDefaults() *Target { + this := Target{} + return &this +} + +// GetUser returns the User field value +func (o *Target) GetUser() string { + if o == nil { + var ret string + return ret + } + + return o.User +} + +// GetUserOk returns a tuple with the User field value +// and a boolean to check if the value has been set. +func (o *Target) GetUserOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.User, true +} + +// SetUser sets field value +func (o *Target) SetUser(v string) { + o.User = v +} + +// GetPassword returns the Password field value +func (o *Target) GetPassword() string { + if o == nil { + var ret string + return ret + } + + return o.Password +} + +// GetPasswordOk returns a tuple with the Password field value +// and a boolean to check if the value has been set. 
+func (o *Target) GetPasswordOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Password, true +} + +// SetPassword sets field value +func (o *Target) SetPassword(v string) { + o.Password = v +} + +func (o Target) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Target) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["user"] = o.User + toSerialize["password"] = o.Password + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *Target) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "user", + "password", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err + } + + for _, requiredProperty := range requiredProperties { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varTarget := _Target{} + + err = json.Unmarshal(data, &varTarget) + + if err != nil { + return err + } + + *o = Target(varTarget) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "user") + delete(additionalProperties, "password") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableTarget struct { + value *Target + isSet bool +} + +func (v NullableTarget) Get() *Target { + return v.value +} + +func (v *NullableTarget) Set(val *Target) { + v.value = val + v.isSet = true +} + +func 
(v NullableTarget) IsSet() bool { + return v.isSet +} + +func (v *NullableTarget) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableTarget(val *Target) *NullableTarget { + return &NullableTarget{value: val, isSet: true} +} + +func (v NullableTarget) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableTarget) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_task_mode.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_task_mode.go new file mode 100644 index 00000000..ba5c04bf --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_task_mode.go @@ -0,0 +1,105 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// TaskMode Migration task mode. - ALL: Full + incremental migration (all phases). - INCREMENTAL: Incremental-only migration (replication). 
type TaskMode string

// List of TaskMode
const (
	TASKMODE_ALL         TaskMode = "ALL"
	TASKMODE_INCREMENTAL TaskMode = "INCREMENTAL"
)

// AllowedTaskModeEnumValues lists every allowed value of the TaskMode enum.
var AllowedTaskModeEnumValues = []TaskMode{
	TASKMODE_ALL,
	TASKMODE_INCREMENTAL,
}

// UnmarshalJSON decodes a JSON string into a TaskMode. Values outside the
// known enum set are preserved as-is so that newer server-side values do not
// break decoding.
func (v *TaskMode) UnmarshalJSON(src []byte) error {
	var raw string
	if err := json.Unmarshal(src, &raw); err != nil {
		return err
	}
	candidate := TaskMode(raw)
	for _, allowed := range AllowedTaskModeEnumValues {
		if allowed == candidate {
			*v = allowed
			return nil
		}
	}
	*v = candidate
	return nil
}

// NewTaskModeFromValue returns a pointer to a TaskMode holding the value
// passed as argument.
func NewTaskModeFromValue(v string) *TaskMode {
	mode := TaskMode(v)
	return &mode
}

// IsValid reports whether the value is one of the allowed enum values.
func (v TaskMode) IsValid() bool {
	for _, allowed := range AllowedTaskModeEnumValues {
		if v == allowed {
			return true
		}
	}
	return false
}

// Ptr returns a pointer to the TaskMode value.
func (v TaskMode) Ptr() *TaskMode {
	return &v
}

// NullableTaskMode wraps a *TaskMode and additionally tracks whether a value
// has been explicitly set, distinguishing "unset" from "set to nil".
type NullableTaskMode struct {
	value *TaskMode
	isSet bool
}

// Get returns the wrapped value, which may be nil.
func (v NullableTaskMode) Get() *TaskMode {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableTaskMode) Set(val *TaskMode) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value (possibly nil) has been stored.
func (v NullableTaskMode) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableTaskMode) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableTaskMode returns a NullableTaskMode holding val, marked as set.
func NewNullableTaskMode(val *TaskMode) *NullableTaskMode {
	return &NullableTaskMode{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableTaskMode) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes src into the wrapped value and marks it as set.
func (v *NullableTaskMode) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
/dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/response.go @@ -0,0 +1,47 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "net/http" +) + +// APIResponse stores the API response returned by the server. +type APIResponse struct { + *http.Response `json:"-"` + Message string `json:"message,omitempty"` + // Operation is the name of the OpenAPI operation. + Operation string `json:"operation,omitempty"` + // RequestURL is the request URL. This value is always available, even if the + // embedded *http.Response is nil. + RequestURL string `json:"url,omitempty"` + // Method is the HTTP method used for the request. This value is always + // available, even if the embedded *http.Response is nil. + Method string `json:"method,omitempty"` + // Payload holds the contents of the response body (which may be nil or empty). + // This is provided here as the raw response.Body() reader will have already + // been drained. + Payload []byte `json:"-"` +} + +// NewAPIResponse returns a new APIResponse object. +func NewAPIResponse(r *http.Response) *APIResponse { + + response := &APIResponse{Response: r} + return response +} + +// NewAPIResponseWithError returns a new APIResponse object with the provided error message. 
+func NewAPIResponseWithError(errorMessage string) *APIResponse { + + response := &APIResponse{Message: errorMessage} + return response +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/utils.go b/pkg/tidbcloud/v1beta1/serverless/migration/utils.go new file mode 100644 index 00000000..4a26ea61 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/utils.go @@ -0,0 +1,361 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "time" +) + +// PtrBool is a helper routine that returns a pointer to given boolean value. +func PtrBool(v bool) *bool { return &v } + +// PtrInt is a helper routine that returns a pointer to given integer value. +func PtrInt(v int) *int { return &v } + +// PtrInt32 is a helper routine that returns a pointer to given integer value. +func PtrInt32(v int32) *int32 { return &v } + +// PtrInt64 is a helper routine that returns a pointer to given integer value. +func PtrInt64(v int64) *int64 { return &v } + +// PtrFloat32 is a helper routine that returns a pointer to given float value. +func PtrFloat32(v float32) *float32 { return &v } + +// PtrFloat64 is a helper routine that returns a pointer to given float value. +func PtrFloat64(v float64) *float64 { return &v } + +// PtrString is a helper routine that returns a pointer to given string value. +func PtrString(v string) *string { return &v } + +// PtrTime is helper routine that returns a pointer to given Time value. 
+func PtrTime(v time.Time) *time.Time { return &v } + +type NullableBool struct { + value *bool + isSet bool +} + +func (v NullableBool) Get() *bool { + return v.value +} + +func (v *NullableBool) Set(val *bool) { + v.value = val + v.isSet = true +} + +func (v NullableBool) IsSet() bool { + return v.isSet +} + +func (v *NullableBool) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBool(val *bool) *NullableBool { + return &NullableBool{value: val, isSet: true} +} + +func (v NullableBool) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBool) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableInt struct { + value *int + isSet bool +} + +func (v NullableInt) Get() *int { + return v.value +} + +func (v *NullableInt) Set(val *int) { + v.value = val + v.isSet = true +} + +func (v NullableInt) IsSet() bool { + return v.isSet +} + +func (v *NullableInt) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt(val *int) *NullableInt { + return &NullableInt{value: val, isSet: true} +} + +func (v NullableInt) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableInt32 struct { + value *int32 + isSet bool +} + +func (v NullableInt32) Get() *int32 { + return v.value +} + +func (v *NullableInt32) Set(val *int32) { + v.value = val + v.isSet = true +} + +func (v NullableInt32) IsSet() bool { + return v.isSet +} + +func (v *NullableInt32) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt32(val *int32) *NullableInt32 { + return &NullableInt32{value: val, isSet: true} +} + +func (v NullableInt32) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt32) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type 
NullableInt64 struct { + value *int64 + isSet bool +} + +func (v NullableInt64) Get() *int64 { + return v.value +} + +func (v *NullableInt64) Set(val *int64) { + v.value = val + v.isSet = true +} + +func (v NullableInt64) IsSet() bool { + return v.isSet +} + +func (v *NullableInt64) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt64(val *int64) *NullableInt64 { + return &NullableInt64{value: val, isSet: true} +} + +func (v NullableInt64) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt64) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableFloat32 struct { + value *float32 + isSet bool +} + +func (v NullableFloat32) Get() *float32 { + return v.value +} + +func (v *NullableFloat32) Set(val *float32) { + v.value = val + v.isSet = true +} + +func (v NullableFloat32) IsSet() bool { + return v.isSet +} + +func (v *NullableFloat32) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableFloat32(val *float32) *NullableFloat32 { + return &NullableFloat32{value: val, isSet: true} +} + +func (v NullableFloat32) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableFloat32) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableFloat64 struct { + value *float64 + isSet bool +} + +func (v NullableFloat64) Get() *float64 { + return v.value +} + +func (v *NullableFloat64) Set(val *float64) { + v.value = val + v.isSet = true +} + +func (v NullableFloat64) IsSet() bool { + return v.isSet +} + +func (v *NullableFloat64) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableFloat64(val *float64) *NullableFloat64 { + return &NullableFloat64{value: val, isSet: true} +} + +func (v NullableFloat64) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableFloat64) UnmarshalJSON(src []byte) error { + v.isSet = true + return 
json.Unmarshal(src, &v.value) +} + +type NullableString struct { + value *string + isSet bool +} + +func (v NullableString) Get() *string { + return v.value +} + +func (v *NullableString) Set(val *string) { + v.value = val + v.isSet = true +} + +func (v NullableString) IsSet() bool { + return v.isSet +} + +func (v *NullableString) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableString(val *string) *NullableString { + return &NullableString{value: val, isSet: true} +} + +func (v NullableString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableString) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableTime struct { + value *time.Time + isSet bool +} + +func (v NullableTime) Get() *time.Time { + return v.value +} + +func (v *NullableTime) Set(val *time.Time) { + v.value = val + v.isSet = true +} + +func (v NullableTime) IsSet() bool { + return v.isSet +} + +func (v *NullableTime) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableTime(val *time.Time) *NullableTime { + return &NullableTime{value: val, isSet: true} +} + +func (v NullableTime) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableTime) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +// IsNil checks if an input is nil +func IsNil(i interface{}) bool { + if i == nil { + return true + } + switch reflect.TypeOf(i).Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: + return reflect.ValueOf(i).IsNil() + case reflect.Array: + return reflect.ValueOf(i).IsZero() + } + return false +} + +type MappedNullable interface { + ToMap() (map[string]interface{}, error) +} + +// A wrapper for strict JSON decoding +func newStrictDecoder(data []byte) *json.Decoder { + dec := json.NewDecoder(bytes.NewBuffer(data)) + dec.DisallowUnknownFields() + 
return dec +} + +// Prevent trying to import "fmt" +func reportError(format string, a ...interface{}) error { + return fmt.Errorf(format, a...) +} diff --git a/tools/openapi-generator/openapitools.json b/tools/openapi-generator/openapitools.json index 6f7db3e8..dec90b31 100644 --- a/tools/openapi-generator/openapitools.json +++ b/tools/openapi-generator/openapitools.json @@ -2,6 +2,9 @@ "$schema": "./node_modules/@openapitools/openapi-generator-cli/config.schema.json", "spaces": 2, "generator-cli": { - "version": "7.12.0" + "version": "7.12.0", + "repository": { + "downloadUrl": "https://maven.aliyun.com/repository/public/org/openapitools/openapi-generator-cli/7.12.0/openapi-generator-cli-7.12.0.jar" + } } }