From 1933dae735002934b19820c7d5fae4297c7ddc71 Mon Sep 17 00:00:00 2001 From: yangxin Date: Wed, 19 Nov 2025 10:56:32 +0800 Subject: [PATCH 01/19] init --- Makefile | 3 + internal/cli/serverless/migration/create.go | 320 +++++ internal/cli/serverless/migration/delete.go | 135 ++ internal/cli/serverless/migration/describe.go | 112 ++ internal/cli/serverless/migration/helpers.go | 100 ++ internal/cli/serverless/migration/list.go | 147 ++ .../cli/serverless/migration/migration.go | 25 + internal/cli/serverless/migration/pause.go | 110 ++ internal/cli/serverless/migration/resume.go | 110 ++ internal/cli/serverless/migration/template.go | 161 +++ internal/flag/flag.go | 9 + internal/mock/api_client.go | 272 ++++ internal/service/cloud/api_client.go | 110 +- internal/service/cloud/logic.go | 77 ++ .../v1beta1/serverless/dm.swagger.json | 1212 +++++++++++++++++ .../v1beta1/serverless/migration/.gitignore | 24 + .../migration/.openapi-generator-ignore | 23 + .../migration/.openapi-generator/FILES | 40 + .../migration/.openapi-generator/VERSION | 1 + .../v1beta1/serverless/migration/.travis.yml | 8 + .../v1beta1/serverless/migration/README.md | 149 ++ .../serverless/migration/api/openapi.yaml | 1088 +++++++++++++++ .../serverless/migration/api_migration.go | 1114 +++++++++++++++ .../v1beta1/serverless/migration/client.go | 655 +++++++++ .../serverless/migration/configuration.go | 214 +++ .../v1beta1/serverless/migration/git_push.sh | 57 + .../v1beta1/serverless/migration/model_any.go | 153 +++ .../migration/model_block_allow_rules.go | 192 +++ .../model_block_allow_rules_table.go | 192 +++ .../migration/model_conn_profile.go | 379 ++++++ .../serverless/migration/model_conn_type.go | 105 ++ .../model_create_migration_precheck_resp.go | 154 +++ .../serverless/migration/model_dump_detail.go | 344 +++++ .../model_list_migration_tasks_resp.go | 230 ++++ .../serverless/migration/model_load_detail.go | 268 ++++ .../migration/model_migration_precheck.go | 344 +++++ 
...odel_migration_service_create_task_body.go | 319 +++++ .../model_migration_service_precheck_body.go | 319 +++++ .../migration/model_migration_task.go | 383 ++++++ .../migration/model_migration_task_state.go | 111 ++ .../migration/model_precheck_item.go | 344 +++++ .../migration/model_precheck_item_type.go | 129 ++ .../serverless/migration/model_route_rule.go | 192 +++ .../migration/model_route_rule_source.go | 192 +++ .../migration/model_route_rule_target.go | 192 +++ .../serverless/migration/model_security.go | 268 ++++ .../serverless/migration/model_source.go | 420 ++++++ .../migration/model_source_source_type.go | 105 ++ .../serverless/migration/model_status.go | 227 +++ .../serverless/migration/model_sub_task.go | 393 ++++++ .../migration/model_sub_task_stage.go | 111 ++ .../migration/model_sub_task_step.go | 107 ++ .../serverless/migration/model_sync_detail.go | 230 ++++ .../serverless/migration/model_target.go | 192 +++ .../serverless/migration/model_task_mode.go | 105 ++ .../v1beta1/serverless/migration/response.go | 47 + .../v1beta1/serverless/migration/utils.go | 361 +++++ tools/openapi-generator/openapitools.json | 5 +- 58 files changed, 13382 insertions(+), 7 deletions(-) create mode 100644 internal/cli/serverless/migration/create.go create mode 100644 internal/cli/serverless/migration/delete.go create mode 100644 internal/cli/serverless/migration/describe.go create mode 100644 internal/cli/serverless/migration/helpers.go create mode 100644 internal/cli/serverless/migration/list.go create mode 100644 internal/cli/serverless/migration/migration.go create mode 100644 internal/cli/serverless/migration/pause.go create mode 100644 internal/cli/serverless/migration/resume.go create mode 100644 internal/cli/serverless/migration/template.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/dm.swagger.json create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/.gitignore create mode 100644 
pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator-ignore create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/VERSION create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/.travis.yml create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/README.md create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/client.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/configuration.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/git_push.sh create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_any.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules_table.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_conn_profile.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_conn_type.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_create_migration_precheck_resp.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_dump_detail.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_list_migration_tasks_resp.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_load_detail.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_task_body.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_precheck_body.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task.go create mode 100644 
pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task_state.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_type.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule_source.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule_target.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_security.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_source.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_status.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_stage.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_step.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_sync_detail.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_target.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_task_mode.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/response.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/utils.go diff --git a/Makefile b/Makefile index 9f8f4d76..e0effba0 100644 --- a/Makefile +++ b/Makefile @@ -56,6 +56,9 @@ generate-v1beta1-serverless-client: install-openapi-generator ## Generate server @echo "==> Generating serverless cdc client" rm -rf pkg/tidbcloud/v1beta1/serverless/cdc cd tools/openapi-generator && npx openapi-generator-cli generate --inline-schema-options RESOLVE_INLINE_ENUMS=true --additional-properties=withGoMod=false,enumClassPrefix=true,disallowAdditionalPropertiesIfNotPresent=false 
--global-property=apiTests=false,apiDocs=false,modelDocs=false,modelTests=false -i ../../pkg/tidbcloud/v1beta1/serverless/cdc.swagger.json -g go -o ../../pkg/tidbcloud/v1beta1/serverless/cdc --package-name cdc -c go/config.yaml + @echo "==> Generating serverless cdc client" + rm -rf pkg/tidbcloud/v1beta1/serverless/migration + cd tools/openapi-generator && npx openapi-generator-cli generate --inline-schema-options RESOLVE_INLINE_ENUMS=true --additional-properties=withGoMod=false,enumClassPrefix=true,disallowAdditionalPropertiesIfNotPresent=false --global-property=apiTests=false,apiDocs=false,modelDocs=false,modelTests=false -i ../../pkg/tidbcloud/v1beta1/serverless/dm.swagger.json -g go -o ../../pkg/tidbcloud/v1beta1/serverless/migration --package-name migration -c go/config.yaml cd pkg && go fmt ./tidbcloud/v1beta1/serverless/... && goimports -w . .PHONY: generate-v1beta1-dedicated-client diff --git a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go new file mode 100644 index 00000000..b33c372d --- /dev/null +++ b/internal/cli/serverless/migration/create.go @@ -0,0 +1,320 @@ +package migration + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/fatih/color" + "github.com/juju/errors" + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" + "github.com/tidbcloud/tidbcloud-cli/internal/config" + "github.com/tidbcloud/tidbcloud-cli/internal/flag" + "github.com/tidbcloud/tidbcloud-cli/internal/output" + "github.com/tidbcloud/tidbcloud-cli/internal/service/cloud" + "github.com/tidbcloud/tidbcloud-cli/internal/ui" + pkgmigration "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" +) + +type CreateOpts struct { + interactive bool +} + +func (c CreateOpts) NonInteractiveFlags() []string { + return []string{ + flag.ClusterID, + flag.DisplayName, + flag.MigrationSources, + flag.MigrationTarget, + flag.MigrationMode, + flag.MigrationFullData, + } +} + +func (c 
CreateOpts) RequiredFlags() []string { + return []string{ + flag.ClusterID, + flag.MigrationSources, + } +} + +func (c *CreateOpts) MarkInteractive(cmd *cobra.Command) error { + flags := c.NonInteractiveFlags() + for _, fn := range flags { + f := cmd.Flags().Lookup(fn) + if f != nil && f.Changed { + c.interactive = false + break + } + } + if !c.interactive { + for _, fn := range c.RequiredFlags() { + if err := cmd.MarkFlagRequired(fn); err != nil { + return err + } + } + } + return nil +} + +func CreateCmd(h *internal.Helper) *cobra.Command { + opts := CreateOpts{interactive: true} + + var cmd = &cobra.Command{ + Use: "create", + Short: "Create a migration task", + Args: cobra.NoArgs, + Example: fmt.Sprintf(` Create a migration task in interactive mode: + $ %[1]s serverless migration create + + Create a migration task in non-interactive mode: + $ %[1]s serverless migration create -c --sources '' --target '' + + Run migration precheck only with shared inputs: + $ %[1]s serverless migration create --precheck-only`, config.CliName), + PreRunE: func(cmd *cobra.Command, args []string) error { + return opts.MarkInteractive(cmd) + }, + RunE: func(cmd *cobra.Command, args []string) error { + d, err := h.Client() + if err != nil { + return err + } + ctx := cmd.Context() + + var clusterID, name, sourcesStr, targetStr, modeStr, fullDataStr string + var fullDataPtr *bool + precheckOnly, err := cmd.Flags().GetBool(flag.MigrationPrecheckOnly) + if err != nil { + return errors.Trace(err) + } + if opts.interactive { + if !h.IOStreams.CanPrompt { + return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") + } + project, err := cloud.GetSelectedProject(ctx, h.QueryPageSize, d) + if err != nil { + return err + } + cluster, err := cloud.GetSelectedCluster(ctx, project.ID, h.QueryPageSize, d) + if err != nil { + return err + } + clusterID = cluster.ID + + inputs := []string{flag.DisplayName, flag.MigrationSources, flag.MigrationTarget, 
flag.MigrationMode, flag.MigrationFullData} + textInput, err := ui.InitialInputModel(inputs, migrationInputDescription) + if err != nil { + return err + } + name = textInput.Inputs[0].Value() + sourcesStr = textInput.Inputs[1].Value() + targetStr = textInput.Inputs[2].Value() + modeStr = textInput.Inputs[3].Value() + fullDataStr = textInput.Inputs[4].Value() + fullDataPtr, err = parseFullData(fullDataStr) + if err != nil { + return err + } + } else { + var err error + clusterID, err = cmd.Flags().GetString(flag.ClusterID) + if err != nil { + return errors.Trace(err) + } + name, err = cmd.Flags().GetString(flag.DisplayName) + if err != nil { + return errors.Trace(err) + } + sourcesStr, err = cmd.Flags().GetString(flag.MigrationSources) + if err != nil { + return errors.Trace(err) + } + targetStr, err = cmd.Flags().GetString(flag.MigrationTarget) + if err != nil { + return errors.Trace(err) + } + modeStr, err = cmd.Flags().GetString(flag.MigrationMode) + if err != nil { + return errors.Trace(err) + } + if cmd.Flags().Changed(flag.MigrationFullData) { + fullDataVal, err := cmd.Flags().GetBool(flag.MigrationFullData) + if err != nil { + return errors.Trace(err) + } + fullDataPtr = &fullDataVal + } + } + + sources, err := parseMigrationSources(sourcesStr) + if err != nil { + return err + } + target, err := parseMigrationTarget(targetStr) + if err != nil { + return err + } + mode, err := parseMigrationMode(modeStr) + if err != nil { + return err + } + + createBody := &pkgmigration.MigrationServiceCreateTaskBody{ + Sources: sources, + } + precheckBody := &pkgmigration.MigrationServicePrecheckBody{ + Sources: sources, + } + if name != "" { + createBody.Name = &name + precheckBody.Name = &name + } + if target != nil { + createBody.Target = target + precheckBody.Target = target + } + if mode != nil { + createBody.Mode = mode + precheckBody.Mode = mode + } + if fullDataPtr != nil { + createBody.FullDataMigration = fullDataPtr + precheckBody.FullDataMigration = fullDataPtr + } 
+ + if precheckOnly { + return runMigrationPrecheck(ctx, d, clusterID, precheckBody, h) + } + + resp, err := d.CreateMigrationTask(ctx, clusterID, createBody) + if err != nil { + return errors.Trace(err) + } + + taskID := "" + if resp.Id != nil { + taskID = *resp.Id + } else if resp.Name != nil { + taskID = *resp.Name + } + if taskID == "" { + taskID = "" + } + fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration task %s created", taskID)) + return nil + }, + } + + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "The ID of the target cluster.") + cmd.Flags().StringP(flag.DisplayName, flag.DisplayNameShort, "", "Display name for the migration task.") + cmd.Flags().String(flag.MigrationSources, "", "Sources definition in JSON. Use \"ticloud serverless migration template --type sources\" for a template.") + cmd.Flags().String(flag.MigrationTarget, "", "Target definition in JSON. Use \"ticloud serverless migration template --type target\" for a template.") + cmd.Flags().String(flag.MigrationMode, "", fmt.Sprintf("Migration mode, one of %v.", taskModeValues())) + cmd.Flags().Bool(flag.MigrationFullData, false, "Migrate all user data (equivalent to enabling every non-system database).") + cmd.Flags().Bool(flag.MigrationPrecheckOnly, false, "Run a migration precheck with the provided inputs and exit without creating a task.") + + return cmd +} + +const precheckPollInterval = 5 * time.Second + +func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clusterID string, body *pkgmigration.MigrationServicePrecheckBody, h *internal.Helper) error { + resp, err := client.CreateMigrationPrecheck(ctx, clusterID, body) + if err != nil { + return errors.Trace(err) + } + if resp.Id == nil || *resp.Id == "" { + return errors.New("precheck created but ID is empty") + } + precheckID := *resp.Id + fmt.Fprintf(h.IOStreams.Out, "migration precheck %s created, polling results...\n", precheckID) + + ticker := time.NewTicker(precheckPollInterval) + defer 
ticker.Stop() + var lastStatus string + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + result, err := client.GetMigrationPrecheck(ctx, clusterID, precheckID) + if err != nil { + return errors.Trace(err) + } + status := strings.ToUpper(ptrString(result.Status)) + if status == "" { + status = "PENDING" + } + if status != lastStatus { + fmt.Fprintf(h.IOStreams.Out, "precheck %s status: %s\n", precheckID, status) + lastStatus = status + } + if isPrecheckPending(status) { + continue + } + if err := printPrecheckSummary(precheckID, status, result, h); err != nil { + return err + } + if strings.EqualFold(status, "FAILED") || (result.FailedCnt != nil && *result.FailedCnt > 0) { + return errors.New("migration precheck failed") + } + fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration precheck %s passed", precheckID)) + return nil + } + } +} + +func isPrecheckPending(status string) bool { + switch status { + case "PENDING", "RUNNING", "PROCESSING", "IN_PROGRESS", "": + return true + default: + return false + } +} + +func printPrecheckSummary(id, status string, result *pkgmigration.MigrationPrecheck, h *internal.Helper) error { + fmt.Fprintf(h.IOStreams.Out, "precheck %s finished with status %s\n", id, status) + fmt.Fprintf(h.IOStreams.Out, "Total: %d, Success: %d, Warn: %d, Failed: %d\n", + ptrInt32(result.Total), ptrInt32(result.SuccessCnt), ptrInt32(result.WarnCnt), ptrInt32(result.FailedCnt)) + if len(result.Items) == 0 { + return nil + } + columns := []output.Column{"Type", "Status", "Description", "Reason", "Solution"} + var rows []output.Row + for _, item := range result.Items { + rows = append(rows, output.Row{ + precheckItemType(item.Type), + ptrString(item.Status), + ptrString(item.Desc), + ptrString(item.Reason), + ptrString(item.Solution), + }) + } + return output.PrintHumanTable(h.IOStreams.Out, columns, rows) +} + +func ptrString(value *string) string { + if value == nil { + return "" + } + return *value +} + +func 
ptrInt32(value *int32) int32 { + if value == nil { + return 0 + } + return *value +} + +func precheckItemType(value *pkgmigration.PrecheckItemType) string { + if value == nil { + return "" + } + return string(*value) +} diff --git a/internal/cli/serverless/migration/delete.go b/internal/cli/serverless/migration/delete.go new file mode 100644 index 00000000..23df21ea --- /dev/null +++ b/internal/cli/serverless/migration/delete.go @@ -0,0 +1,135 @@ +package migration + +import ( + "fmt" + + "github.com/AlecAivazis/survey/v2" + "github.com/AlecAivazis/survey/v2/terminal" + "github.com/fatih/color" + "github.com/juju/errors" + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" + "github.com/tidbcloud/tidbcloud-cli/internal/config" + "github.com/tidbcloud/tidbcloud-cli/internal/flag" + "github.com/tidbcloud/tidbcloud-cli/internal/service/cloud" + "github.com/tidbcloud/tidbcloud-cli/internal/util" +) + +type DeleteOpts struct { + interactive bool +} + +func (c DeleteOpts) NonInteractiveFlags() []string { + return []string{ + flag.ClusterID, + flag.MigrationTaskID, + } +} + +func (c *DeleteOpts) MarkInteractive(cmd *cobra.Command) error { + for _, fn := range c.NonInteractiveFlags() { + f := cmd.Flags().Lookup(fn) + if f != nil && f.Changed { + c.interactive = false + break + } + } + if !c.interactive { + for _, fn := range c.NonInteractiveFlags() { + if err := cmd.MarkFlagRequired(fn); err != nil { + return err + } + } + } + return nil +} + +func DeleteCmd(h *internal.Helper) *cobra.Command { + opts := DeleteOpts{interactive: true} + var force bool + + var cmd = &cobra.Command{ + Use: "delete", + Short: "Cancel a migration task", + Aliases: []string{"rm"}, + Args: cobra.NoArgs, + Example: fmt.Sprintf(` Cancel a migration task in interactive mode: + $ %[1]s serverless migration delete + + Cancel a migration task in non-interactive mode: + $ %[1]s serverless migration delete -c --migration-id `, config.CliName), + PreRunE: func(cmd *cobra.Command, 
args []string) error { + return opts.MarkInteractive(cmd) + }, + RunE: func(cmd *cobra.Command, args []string) error { + d, err := h.Client() + if err != nil { + return err + } + ctx := cmd.Context() + + var clusterID, taskID string + if opts.interactive { + if !h.IOStreams.CanPrompt { + return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") + } + project, err := cloud.GetSelectedProject(ctx, h.QueryPageSize, d) + if err != nil { + return err + } + cluster, err := cloud.GetSelectedCluster(ctx, project.ID, h.QueryPageSize, d) + if err != nil { + return err + } + clusterID = cluster.ID + task, err := cloud.GetSelectedMigrationTask(ctx, clusterID, h.QueryPageSize, d) + if err != nil { + return err + } + taskID = task.ID + } else { + var err error + clusterID, err = cmd.Flags().GetString(flag.ClusterID) + if err != nil { + return errors.Trace(err) + } + taskID, err = cmd.Flags().GetString(flag.MigrationTaskID) + if err != nil { + return errors.Trace(err) + } + } + + if !force { + if !h.IOStreams.CanPrompt { + return errors.New("The terminal doesn't support prompt, please run with --force to cancel the migration task") + } + prompt := &survey.Input{ + Message: fmt.Sprintf("%s %s %s", color.BlueString("Please type"), color.HiBlueString("yes"), color.BlueString("to confirm:")), + } + var confirmation string + if err := survey.AskOne(prompt, &confirmation); err != nil { + if err == terminal.InterruptErr { + return util.InterruptError + } + return err + } + if confirmation != "yes" { + return errors.New("Incorrect confirm string entered, skipping migration task cancellation") + } + } + + if _, err := d.CancelMigrationTask(ctx, clusterID, taskID); err != nil { + return errors.Trace(err) + } + + fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration task %s canceled", taskID)) + return nil + }, + } + + cmd.Flags().BoolVar(&force, flag.Force, false, "Cancel without confirmation.") + cmd.Flags().StringP(flag.ClusterID, 
flag.ClusterIDShort, "", "Cluster ID that owns the migration task.") + cmd.Flags().StringP(flag.MigrationTaskID, flag.MigrationTaskIDShort, "", "ID of the migration task to cancel.") + return cmd +} diff --git a/internal/cli/serverless/migration/describe.go b/internal/cli/serverless/migration/describe.go new file mode 100644 index 00000000..7fe20ab6 --- /dev/null +++ b/internal/cli/serverless/migration/describe.go @@ -0,0 +1,112 @@ +package migration + +import ( + "fmt" + + "github.com/juju/errors" + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" + "github.com/tidbcloud/tidbcloud-cli/internal/config" + "github.com/tidbcloud/tidbcloud-cli/internal/flag" + "github.com/tidbcloud/tidbcloud-cli/internal/output" + "github.com/tidbcloud/tidbcloud-cli/internal/service/cloud" +) + +type DescribeOpts struct { + interactive bool +} + +func (c DescribeOpts) NonInteractiveFlags() []string { + return []string{ + flag.ClusterID, + flag.MigrationTaskID, + } +} + +func (c *DescribeOpts) MarkInteractive(cmd *cobra.Command) error { + for _, fn := range c.NonInteractiveFlags() { + f := cmd.Flags().Lookup(fn) + if f != nil && f.Changed { + c.interactive = false + break + } + } + if !c.interactive { + for _, fn := range c.NonInteractiveFlags() { + if err := cmd.MarkFlagRequired(fn); err != nil { + return err + } + } + } + return nil +} + +func DescribeCmd(h *internal.Helper) *cobra.Command { + opts := DescribeOpts{interactive: true} + + var cmd = &cobra.Command{ + Use: "describe", + Short: "Describe a migration task", + Aliases: []string{"get"}, + Args: cobra.NoArgs, + Example: fmt.Sprintf(` Describe a migration task in interactive mode: + $ %[1]s serverless migration describe + + Describe a migration task in non-interactive mode: + $ %[1]s serverless migration describe -c --migration-id `, config.CliName), + PreRunE: func(cmd *cobra.Command, args []string) error { + return opts.MarkInteractive(cmd) + }, + RunE: func(cmd *cobra.Command, args []string) error { 
+ d, err := h.Client() + if err != nil { + return err + } + ctx := cmd.Context() + + var clusterID, taskID string + if opts.interactive { + if !h.IOStreams.CanPrompt { + return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") + } + project, err := cloud.GetSelectedProject(ctx, h.QueryPageSize, d) + if err != nil { + return err + } + cluster, err := cloud.GetSelectedCluster(ctx, project.ID, h.QueryPageSize, d) + if err != nil { + return err + } + clusterID = cluster.ID + + task, err := cloud.GetSelectedMigrationTask(ctx, clusterID, h.QueryPageSize, d) + if err != nil { + return err + } + taskID = task.ID + } else { + var err error + clusterID, err = cmd.Flags().GetString(flag.ClusterID) + if err != nil { + return errors.Trace(err) + } + taskID, err = cmd.Flags().GetString(flag.MigrationTaskID) + if err != nil { + return errors.Trace(err) + } + } + + resp, err := d.GetMigrationTask(ctx, clusterID, taskID) + if err != nil { + return errors.Trace(err) + } + + return errors.Trace(output.PrintJson(h.IOStreams.Out, resp)) + }, + } + + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration task.") + cmd.Flags().StringP(flag.MigrationTaskID, flag.MigrationTaskIDShort, "", "ID of the migration task to describe.") + return cmd +} diff --git a/internal/cli/serverless/migration/helpers.go b/internal/cli/serverless/migration/helpers.go new file mode 100644 index 00000000..61738db1 --- /dev/null +++ b/internal/cli/serverless/migration/helpers.go @@ -0,0 +1,100 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package migration + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/juju/errors" + + "github.com/tidbcloud/tidbcloud-cli/internal/flag" + pkgmigration "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" +) + +var migrationInputDescription = map[string]string{ + flag.DisplayName: "Optional display name for the migration task/precheck.", + flag.MigrationSources: "Sources definition in JSON. Use \"ticloud serverless migration template --type sources\" as a reference.", + flag.MigrationTarget: "Target definition in JSON. Use \"ticloud serverless migration template --type target\" as a reference.", + flag.MigrationMode: "Migration mode, one of MODE_ALL or MODE_INCREMENTAL. Leave blank to use the server default.", + flag.MigrationFullData: "Whether to migrate all user data. 
Enter true, false, or leave blank to use the server default.", +} + +func parseMigrationSources(value string) ([]pkgmigration.Source, error) { + trimmed := strings.TrimSpace(value) + if trimmed == "" { + return nil, errors.New("sources is required, use --sources or provide it in interactive mode") + } + var sources []pkgmigration.Source + if err := json.Unmarshal([]byte(trimmed), &sources); err != nil { + return nil, errors.Annotate(err, "invalid sources JSON") + } + if len(sources) == 0 { + return nil, errors.New("sources must contain at least one entry") + } + return sources, nil +} + +func parseMigrationTarget(value string) (*pkgmigration.Target, error) { + trimmed := strings.TrimSpace(value) + if trimmed == "" { + return nil, nil + } + var target pkgmigration.Target + if err := json.Unmarshal([]byte(trimmed), &target); err != nil { + return nil, errors.Annotate(err, "invalid target JSON") + } + return &target, nil +} + +func parseMigrationMode(value string) (*pkgmigration.TaskMode, error) { + trimmed := strings.TrimSpace(value) + if trimmed == "" { + return nil, nil + } + normalized := strings.ToUpper(trimmed) + if !strings.HasPrefix(normalized, "MODE_") { + normalized = fmt.Sprintf("MODE_%s", normalized) + } + mode := pkgmigration.TaskMode(normalized) + for _, allowed := range pkgmigration.AllowedTaskModeEnumValues { + if mode == allowed { + return &mode, nil + } + } + return nil, errors.Errorf("invalid mode %q, allowed values: %s", value, strings.Join(taskModeValues(), ", ")) +} + +func parseFullData(value string) (*bool, error) { + trimmed := strings.TrimSpace(value) + if trimmed == "" { + return nil, nil + } + boolValue, err := strconv.ParseBool(trimmed) + if err != nil { + return nil, errors.Annotate(err, "invalid boolean value for full-data") + } + return &boolValue, nil +} + +func taskModeValues() []string { + values := make([]string, 0, len(pkgmigration.AllowedTaskModeEnumValues)) + for _, mode := range pkgmigration.AllowedTaskModeEnumValues { + values 
= append(values, string(mode)) + } + return values +} diff --git a/internal/cli/serverless/migration/list.go b/internal/cli/serverless/migration/list.go new file mode 100644 index 00000000..6d4da7f7 --- /dev/null +++ b/internal/cli/serverless/migration/list.go @@ -0,0 +1,147 @@ +package migration + +import ( + "fmt" + "time" + + "github.com/juju/errors" + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" + "github.com/tidbcloud/tidbcloud-cli/internal/config" + "github.com/tidbcloud/tidbcloud-cli/internal/flag" + "github.com/tidbcloud/tidbcloud-cli/internal/output" + "github.com/tidbcloud/tidbcloud-cli/internal/service/cloud" +) + +type ListOpts struct { + interactive bool +} + +func (c ListOpts) NonInteractiveFlags() []string { + return []string{flag.ClusterID} +} + +func (c *ListOpts) MarkInteractive(cmd *cobra.Command) error { + for _, fn := range c.NonInteractiveFlags() { + f := cmd.Flags().Lookup(fn) + if f != nil && f.Changed { + c.interactive = false + break + } + } + if !c.interactive { + for _, fn := range c.NonInteractiveFlags() { + if err := cmd.MarkFlagRequired(fn); err != nil { + return err + } + } + } + return nil +} + +func ListCmd(h *internal.Helper) *cobra.Command { + opts := ListOpts{interactive: true} + + var cmd = &cobra.Command{ + Use: "list", + Short: "List migration tasks", + Aliases: []string{"ls"}, + Args: cobra.NoArgs, + Example: fmt.Sprintf(` List migrations in interactive mode: + $ %[1]s serverless migration list + + List migrations in non-interactive mode with JSON output: + $ %[1]s serverless migration list -c -o json`, config.CliName), + PreRunE: func(cmd *cobra.Command, args []string) error { + return opts.MarkInteractive(cmd) + }, + RunE: func(cmd *cobra.Command, args []string) error { + d, err := h.Client() + if err != nil { + return err + } + ctx := cmd.Context() + + var clusterID string + if opts.interactive { + if !h.IOStreams.CanPrompt { + return errors.New("The terminal doesn't support interactive mode, 
please use non-interactive mode") + } + project, err := cloud.GetSelectedProject(ctx, h.QueryPageSize, d) + if err != nil { + return err + } + cluster, err := cloud.GetSelectedCluster(ctx, project.ID, h.QueryPageSize, d) + if err != nil { + return err + } + clusterID = cluster.ID + } else { + var err error + clusterID, err = cmd.Flags().GetString(flag.ClusterID) + if err != nil { + return errors.Trace(err) + } + } + + pageSize := int32(h.QueryPageSize) + resp, err := d.ListMigrationTasks(ctx, clusterID, &pageSize, nil, nil) + if err != nil { + return errors.Trace(err) + } + + format, err := cmd.Flags().GetString(flag.Output) + if err != nil { + return errors.Trace(err) + } + + if format == output.JsonFormat || !h.IOStreams.CanPrompt { + return errors.Trace(output.PrintJson(h.IOStreams.Out, resp)) + } + + if format != output.HumanFormat { + return fmt.Errorf("unsupported output format: %s", format) + } + + columns := []output.Column{"ID", "Name", "Mode", "State", "CreatedAt"} + var rows []output.Row + for _, task := range resp.Tasks { + id := safeString(task.Id) + name := safeString(task.Name) + if name == "" { + name = id + } + mode := "" + if task.Mode != nil { + mode = string(*task.Mode) + } + state := "" + if task.State != nil { + state = string(*task.State) + } + created := formatTime(task.CreateTime) + rows = append(rows, output.Row{id, name, mode, state, created}) + } + return errors.Trace(output.PrintHumanTable(h.IOStreams.Out, columns, rows)) + }, + } + + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "The cluster ID of the migration tasks to list.") + cmd.Flags().StringP(flag.Output, flag.OutputShort, output.HumanFormat, flag.OutputHelp) + return cmd +} + +func safeString(value *string) string { + if value == nil { + return "" + } + return *value +} + +func formatTime(value *time.Time) string { + if value == nil { + return "" + } + return value.Format(time.RFC3339) +} diff --git a/internal/cli/serverless/migration/migration.go 
b/internal/cli/serverless/migration/migration.go new file mode 100644 index 00000000..1660a0e1 --- /dev/null +++ b/internal/cli/serverless/migration/migration.go @@ -0,0 +1,25 @@ +package migration + +import ( + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" +) + +func MigrationCmd(h *internal.Helper) *cobra.Command { + var cmd = &cobra.Command{ + Use: "migration", + Short: "Manage TiDB Cloud Serverless migrations", + Aliases: []string{"dm"}, + } + + cmd.AddCommand(CreateCmd(h)) + cmd.AddCommand(DescribeCmd(h)) + cmd.AddCommand(ListCmd(h)) + cmd.AddCommand(DeleteCmd(h)) + cmd.AddCommand(TemplateCmd(h)) + cmd.AddCommand(PauseCmd(h)) + cmd.AddCommand(ResumeCmd(h)) + + return cmd +} diff --git a/internal/cli/serverless/migration/pause.go b/internal/cli/serverless/migration/pause.go new file mode 100644 index 00000000..66302a53 --- /dev/null +++ b/internal/cli/serverless/migration/pause.go @@ -0,0 +1,110 @@ +package migration + +import ( + "fmt" + + "github.com/juju/errors" + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" + "github.com/tidbcloud/tidbcloud-cli/internal/config" + "github.com/tidbcloud/tidbcloud-cli/internal/flag" + "github.com/tidbcloud/tidbcloud-cli/internal/service/cloud" +) + +type PauseOpts struct { + interactive bool +} + +func (c PauseOpts) NonInteractiveFlags() []string { + return []string{ + flag.ClusterID, + flag.MigrationTaskID, + } +} + +func (c *PauseOpts) MarkInteractive(cmd *cobra.Command) error { + for _, fn := range c.NonInteractiveFlags() { + f := cmd.Flags().Lookup(fn) + if f != nil && f.Changed { + c.interactive = false + break + } + } + if !c.interactive { + for _, fn := range c.NonInteractiveFlags() { + if err := cmd.MarkFlagRequired(fn); err != nil { + return err + } + } + } + return nil +} + +func PauseCmd(h *internal.Helper) *cobra.Command { + opts := PauseOpts{interactive: true} + + var cmd = &cobra.Command{ + Use: "pause", + Short: "Pause a migration task", + Args: 
cobra.NoArgs, + Example: fmt.Sprintf(` Pause a migration task in interactive mode: + $ %[1]s serverless migration pause + + Pause a migration task in non-interactive mode: + $ %[1]s serverless migration pause -c <cluster-id> --migration-id <migration-id>`, config.CliName), + PreRunE: func(cmd *cobra.Command, args []string) error { + return opts.MarkInteractive(cmd) + }, + RunE: func(cmd *cobra.Command, args []string) error { + d, err := h.Client() + if err != nil { + return err + } + ctx := cmd.Context() + + var clusterID, taskID string + if opts.interactive { + if !h.IOStreams.CanPrompt { + return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") + } + project, err := cloud.GetSelectedProject(ctx, h.QueryPageSize, d) + if err != nil { + return err + } + cluster, err := cloud.GetSelectedCluster(ctx, project.ID, h.QueryPageSize, d) + if err != nil { + return err + } + clusterID = cluster.ID + task, err := cloud.GetSelectedMigrationTask(ctx, clusterID, h.QueryPageSize, d) + if err != nil { + return err + } + taskID = task.ID + } else { + var err error + clusterID, err = cmd.Flags().GetString(flag.ClusterID) + if err != nil { + return errors.Trace(err) + } + taskID, err = cmd.Flags().GetString(flag.MigrationTaskID) + if err != nil { + return errors.Trace(err) + } + } + + emptyBody := map[string]interface{}{} + if _, err := d.PauseMigrationTask(ctx, clusterID, taskID, &emptyBody); err != nil { + return errors.Trace(err) + } + + fmt.Fprintf(h.IOStreams.Out, "migration task %s paused\n", taskID) + return nil + }, + } + + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration task.") + cmd.Flags().StringP(flag.MigrationTaskID, flag.MigrationTaskIDShort, "", "ID of the migration task to pause.") + return cmd +} diff --git a/internal/cli/serverless/migration/resume.go b/internal/cli/serverless/migration/resume.go new file mode 100644 index 00000000..935b398a --- /dev/null +++ 
b/internal/cli/serverless/migration/resume.go @@ -0,0 +1,110 @@ +package migration + +import ( + "fmt" + + "github.com/juju/errors" + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" + "github.com/tidbcloud/tidbcloud-cli/internal/config" + "github.com/tidbcloud/tidbcloud-cli/internal/flag" + "github.com/tidbcloud/tidbcloud-cli/internal/service/cloud" +) + +type ResumeOpts struct { + interactive bool +} + +func (c ResumeOpts) NonInteractiveFlags() []string { + return []string{ + flag.ClusterID, + flag.MigrationTaskID, + } +} + +func (c *ResumeOpts) MarkInteractive(cmd *cobra.Command) error { + for _, fn := range c.NonInteractiveFlags() { + f := cmd.Flags().Lookup(fn) + if f != nil && f.Changed { + c.interactive = false + break + } + } + if !c.interactive { + for _, fn := range c.NonInteractiveFlags() { + if err := cmd.MarkFlagRequired(fn); err != nil { + return err + } + } + } + return nil +} + +func ResumeCmd(h *internal.Helper) *cobra.Command { + opts := ResumeOpts{interactive: true} + + var cmd = &cobra.Command{ + Use: "resume", + Short: "Resume a paused migration task", + Args: cobra.NoArgs, + Example: fmt.Sprintf(` Resume a migration task in interactive mode: + $ %[1]s serverless migration resume + + Resume a migration task in non-interactive mode: + $ %[1]s serverless migration resume -c <cluster-id> --migration-id <migration-id>`, config.CliName), + PreRunE: func(cmd *cobra.Command, args []string) error { + return opts.MarkInteractive(cmd) + }, + RunE: func(cmd *cobra.Command, args []string) error { + d, err := h.Client() + if err != nil { + return err + } + ctx := cmd.Context() + + var clusterID, taskID string + if opts.interactive { + if !h.IOStreams.CanPrompt { + return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") + } + project, err := cloud.GetSelectedProject(ctx, h.QueryPageSize, d) + if err != nil { + return err + } + cluster, err := cloud.GetSelectedCluster(ctx, project.ID, h.QueryPageSize, d) + if err != 
nil { + return err + } + clusterID = cluster.ID + task, err := cloud.GetSelectedMigrationTask(ctx, clusterID, h.QueryPageSize, d) + if err != nil { + return err + } + taskID = task.ID + } else { + var err error + clusterID, err = cmd.Flags().GetString(flag.ClusterID) + if err != nil { + return errors.Trace(err) + } + taskID, err = cmd.Flags().GetString(flag.MigrationTaskID) + if err != nil { + return errors.Trace(err) + } + } + + emptyBody := map[string]interface{}{} + if _, err := d.ResumeMigrationTask(ctx, clusterID, taskID, &emptyBody); err != nil { + return errors.Trace(err) + } + + fmt.Fprintf(h.IOStreams.Out, "migration task %s resumed\n", taskID) + return nil + }, + } + + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration task.") + cmd.Flags().StringP(flag.MigrationTaskID, flag.MigrationTaskIDShort, "", "ID of the migration task to resume.") + return cmd +} diff --git a/internal/cli/serverless/migration/template.go b/internal/cli/serverless/migration/template.go new file mode 100644 index 00000000..4dde6dc0 --- /dev/null +++ b/internal/cli/serverless/migration/template.go @@ -0,0 +1,161 @@ +package migration + +import ( + "fmt" + "strings" + + "github.com/fatih/color" + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" + "github.com/tidbcloud/tidbcloud-cli/internal/config" + "github.com/tidbcloud/tidbcloud-cli/internal/flag" +) + +const ( + migrationSourcesTemplateWithExplain = `[ + { + // Required: source database type. 
Supported: SOURCE_TYPE_MYSQL, SOURCE_TYPE_ALICLOUD_RDS_MYSQL + "sourceType": "SOURCE_TYPE_MYSQL", + "connProfile": { + // Optional connection type, PUBLIC or PRIVATE_LINK + "connType": "PUBLIC", + "host": "10.0.0.2", + "port": 3306, + "user": "dm_sync_user", + "password": "Passw0rd!", + // Optional when using private link + "endpointId": "pl-xxxxxxxx", + "security": { + // Optional TLS materials encoded in Base64 + "sslCaContent": "", + "sslCertContent": "", + "sslKeyContent": "", + "certAllowedCn": ["client-cn"] + } + }, + // Optional block/allow rules to whitelist schemas/tables + "baRules": { + "doDbs": ["app_db"], + "doTables": [ + {"schema": "app_db", "table": "orders"}, + {"schema": "app_db", "table": "customers"} + ] + }, + // Optional route rules for renaming schemas/tables + "routeRules": [ + { + "sourceTable": { + "schemaPattern": "app_db", + "tablePattern": "orders" + }, + "targetTable": { + "schema": "app_db", + "table": "orders_copy" + } + } + ], + // Optional start position for incremental sync. Provide binlogName+binlogPos or binlogGtid + "binlogName": "mysql-bin.000001", + "binlogPos": 4, + "binlogGtid": "3E11FA47-71CA-11E1-9E33-C80AA9429562:1-12345" + } + ]` + + migrationSourcesTemplate = `[ + { + "sourceType": "SOURCE_TYPE_MYSQL", + "connProfile": { + "connType": "PUBLIC", + "host": "10.0.0.2", + "port": 3306, + "user": "dm_sync_user", + "password": "Passw0rd!" + }, + "baRules": { + "doDbs": ["app_db"], + "doTables": [{"schema": "app_db", "table": "orders"}] + }, + "routeRules": [ + { + "sourceTable": {"schemaPattern": "app_db", "tablePattern": "orders"}, + "targetTable": {"schema": "app_db", "table": "orders_copy"} + } + ] + } + ]` + + migrationTargetTemplateWithExplain = `{ + // Target TiDB Cloud user used by the migration task + "user": "migration_user", + // Password corresponding to the target user + "password": "Passw0rd!" +}` + + migrationTargetTemplate = `{ + "user": "migration_user", + "password": "Passw0rd!" 
+}` +) + +func TemplateCmd(h *internal.Helper) *cobra.Command { + var cmd = &cobra.Command{ + Use: "template", + Short: "Show migration JSON templates", + Args: cobra.NoArgs, + Example: fmt.Sprintf(` Show all migration templates: + $ %[1]s serverless migration template + + Show the sources template with explanations: + $ %[1]s serverless migration template --type sources --explain`, config.CliName), + RunE: func(cmd *cobra.Command, args []string) error { + explain, err := cmd.Flags().GetBool(flag.Explain) + if err != nil { + return err + } + templateType, err := cmd.Flags().GetString(flag.MigrationTemplateType) + if err != nil { + return err + } + + return renderMigrationTemplate(h, strings.ToLower(templateType), explain) + }, + } + + cmd.Flags().Bool(flag.Explain, false, "Show template with inline explanations.") + cmd.Flags().String(flag.MigrationTemplateType, "", "Template type to show, one of [\"sources\", \"target\"]. Default prints both.") + return cmd +} + +func renderMigrationTemplate(h *internal.Helper, templateType string, explain bool) error { + switch templateType { + case "sources": + if explain { + fmt.Fprintln(h.IOStreams.Out, migrationSourcesTemplateWithExplain) + } else { + fmt.Fprintln(h.IOStreams.Out, migrationSourcesTemplate) + } + case "target": + if explain { + fmt.Fprintln(h.IOStreams.Out, migrationTargetTemplateWithExplain) + } else { + fmt.Fprintln(h.IOStreams.Out, migrationTargetTemplate) + } + case "": + fmt.Fprintln(h.IOStreams.Out, color.GreenString("Sources template:")) + if explain { + fmt.Fprintln(h.IOStreams.Out, migrationSourcesTemplateWithExplain) + } else { + fmt.Fprintln(h.IOStreams.Out, migrationSourcesTemplate) + } + fmt.Fprintln(h.IOStreams.Out, color.GreenString("Target template:")) + if explain { + fmt.Fprintln(h.IOStreams.Out, migrationTargetTemplateWithExplain) + } else { + fmt.Fprintln(h.IOStreams.Out, migrationTargetTemplate) + } + default: + return fmt.Errorf("unknown template type %q", templateType) + } + return nil 
+} diff --git a/internal/flag/flag.go b/internal/flag/flag.go index f73e3fc8..82c2ed8a 100644 --- a/internal/flag/flag.go +++ b/internal/flag/flag.go @@ -118,6 +118,15 @@ const ( ChangefeedIDShort string = "f" Explain string = "explain" ChangefeedTemplateType string = "type" + MigrationTaskID string = "migration-id" + MigrationTaskIDShort string = "m" + MigrationPrecheckID string = "precheck-id" + MigrationSources string = "sources" + MigrationTarget string = "target" + MigrationMode string = "mode" + MigrationFullData string = "full-data" + MigrationTemplateType string = "type" + MigrationPrecheckOnly string = "precheck-only" ) const OutputHelp = "Output format, one of [\"human\" \"json\"]. For the complete result, please use json format." diff --git a/internal/mock/api_client.go b/internal/mock/api_client.go index 4dc6c391..b80c8e29 100644 --- a/internal/mock/api_client.go +++ b/internal/mock/api_client.go @@ -20,6 +20,8 @@ import ( imp "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/imp" + migration "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" + mock "github.com/stretchr/testify/mock" ) @@ -76,6 +78,66 @@ func (_m *TiDBCloudClient) CancelImport(ctx context.Context, clusterId string, i return r0 } +// CancelMigrationPrecheck provides a mock function with given fields: ctx, clusterId, precheckId +func (_m *TiDBCloudClient) CancelMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (map[string]interface{}, error) { + ret := _m.Called(ctx, clusterId, precheckId) + + if len(ret) == 0 { + panic("no return value specified for CancelMigrationPrecheck") + } + + var r0 map[string]interface{} + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (map[string]interface{}, error)); ok { + return rf(ctx, clusterId, precheckId) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) map[string]interface{}); ok { + r0 = rf(ctx, clusterId, precheckId) + } else { + 
if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]interface{}) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, clusterId, precheckId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CancelMigrationTask provides a mock function with given fields: ctx, clusterId, taskId +func (_m *TiDBCloudClient) CancelMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.MigrationTask, error) { + ret := _m.Called(ctx, clusterId, taskId) + + if len(ret) == 0 { + panic("no return value specified for CancelMigrationTask") + } + + var r0 *migration.MigrationTask + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.MigrationTask, error)); ok { + return rf(ctx, clusterId, taskId) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.MigrationTask); ok { + r0 = rf(ctx, clusterId, taskId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*migration.MigrationTask) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, clusterId, taskId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // CancelUpload provides a mock function with given fields: ctx, clusterId, uploadId func (_m *TiDBCloudClient) CancelUpload(ctx context.Context, clusterId string, uploadId *string) error { ret := _m.Called(ctx, clusterId, uploadId) @@ -292,6 +354,66 @@ func (_m *TiDBCloudClient) CreateImport(ctx context.Context, clusterId string, b return r0, r1 } +// CreateMigrationPrecheck provides a mock function with given fields: ctx, clusterId, body +func (_m *TiDBCloudClient) CreateMigrationPrecheck(ctx context.Context, clusterId string, body *migration.MigrationServicePrecheckBody) (*migration.CreateMigrationPrecheckResp, error) { + ret := _m.Called(ctx, clusterId, body) + + if len(ret) == 0 { + panic("no return value specified for CreateMigrationPrecheck") + } + + var r0 
*migration.CreateMigrationPrecheckResp + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServicePrecheckBody) (*migration.CreateMigrationPrecheckResp, error)); ok { + return rf(ctx, clusterId, body) + } + if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServicePrecheckBody) *migration.CreateMigrationPrecheckResp); ok { + r0 = rf(ctx, clusterId, body) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*migration.CreateMigrationPrecheckResp) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, *migration.MigrationServicePrecheckBody) error); ok { + r1 = rf(ctx, clusterId, body) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateMigrationTask provides a mock function with given fields: ctx, clusterId, body +func (_m *TiDBCloudClient) CreateMigrationTask(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateTaskBody) (*migration.MigrationTask, error) { + ret := _m.Called(ctx, clusterId, body) + + if len(ret) == 0 { + panic("no return value specified for CreateMigrationTask") + } + + var r0 *migration.MigrationTask + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServiceCreateTaskBody) (*migration.MigrationTask, error)); ok { + return rf(ctx, clusterId, body) + } + if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServiceCreateTaskBody) *migration.MigrationTask); ok { + r0 = rf(ctx, clusterId, body) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*migration.MigrationTask) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, *migration.MigrationServiceCreateTaskBody) error); ok { + r1 = rf(ctx, clusterId, body) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // CreateSQLUser provides a mock function with given fields: ctx, clusterID, body func (_m *TiDBCloudClient) CreateSQLUser(ctx context.Context, clusterID string, body *iam.ApiCreateSqlUserReq) 
(*iam.ApiSqlUser, error) { ret := _m.Called(ctx, clusterID, body) @@ -892,6 +1014,66 @@ func (_m *TiDBCloudClient) GetImport(ctx context.Context, clusterId string, id s return r0, r1 } +// GetMigrationPrecheck provides a mock function with given fields: ctx, clusterId, precheckId +func (_m *TiDBCloudClient) GetMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (*migration.MigrationPrecheck, error) { + ret := _m.Called(ctx, clusterId, precheckId) + + if len(ret) == 0 { + panic("no return value specified for GetMigrationPrecheck") + } + + var r0 *migration.MigrationPrecheck + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.MigrationPrecheck, error)); ok { + return rf(ctx, clusterId, precheckId) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.MigrationPrecheck); ok { + r0 = rf(ctx, clusterId, precheckId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*migration.MigrationPrecheck) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, clusterId, precheckId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetMigrationTask provides a mock function with given fields: ctx, clusterId, taskId +func (_m *TiDBCloudClient) GetMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.MigrationTask, error) { + ret := _m.Called(ctx, clusterId, taskId) + + if len(ret) == 0 { + panic("no return value specified for GetMigrationTask") + } + + var r0 *migration.MigrationTask + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.MigrationTask, error)); ok { + return rf(ctx, clusterId, taskId) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.MigrationTask); ok { + r0 = rf(ctx, clusterId, taskId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*migration.MigrationTask) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, 
string, string) error); ok { + r1 = rf(ctx, clusterId, taskId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetSQLUser provides a mock function with given fields: ctx, clusterID, userName func (_m *TiDBCloudClient) GetSQLUser(ctx context.Context, clusterID string, userName string) (*iam.ApiSqlUser, error) { ret := _m.Called(ctx, clusterID, userName) @@ -1192,6 +1374,36 @@ func (_m *TiDBCloudClient) ListImports(ctx context.Context, clusterId string, pa return r0, r1 } +// ListMigrationTasks provides a mock function with given fields: ctx, clusterId, pageSize, pageToken, orderBy +func (_m *TiDBCloudClient) ListMigrationTasks(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationTasksResp, error) { + ret := _m.Called(ctx, clusterId, pageSize, pageToken, orderBy) + + if len(ret) == 0 { + panic("no return value specified for ListMigrationTasks") + } + + var r0 *migration.ListMigrationTasksResp + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, *int32, *string, *string) (*migration.ListMigrationTasksResp, error)); ok { + return rf(ctx, clusterId, pageSize, pageToken, orderBy) + } + if rf, ok := ret.Get(0).(func(context.Context, string, *int32, *string, *string) *migration.ListMigrationTasksResp); ok { + r0 = rf(ctx, clusterId, pageSize, pageToken, orderBy) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*migration.ListMigrationTasksResp) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, *int32, *string, *string) error); ok { + r1 = rf(ctx, clusterId, pageSize, pageToken, orderBy) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // ListProjects provides a mock function with given fields: ctx, pageSize, pageToken func (_m *TiDBCloudClient) ListProjects(ctx context.Context, pageSize *int32, pageToken *string) (*iam.ApiListProjectsRsp, error) { ret := _m.Called(ctx, pageSize, pageToken) @@ -1312,6 +1524,36 @@ func (_m *TiDBCloudClient) 
PartialUpdateCluster(ctx context.Context, clusterId s return r0, r1 } +// PauseMigrationTask provides a mock function with given fields: ctx, clusterId, taskId, body +func (_m *TiDBCloudClient) PauseMigrationTask(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { + ret := _m.Called(ctx, clusterId, taskId, body) + + if len(ret) == 0 { + panic("no return value specified for PauseMigrationTask") + } + + var r0 map[string]interface{} + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, *map[string]interface{}) (map[string]interface{}, error)); ok { + return rf(ctx, clusterId, taskId, body) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, *map[string]interface{}) map[string]interface{}); ok { + r0 = rf(ctx, clusterId, taskId, body) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]interface{}) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, *map[string]interface{}) error); ok { + r1 = rf(ctx, clusterId, taskId, body) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // ResetBranch provides a mock function with given fields: ctx, clusterId, branchId func (_m *TiDBCloudClient) ResetBranch(ctx context.Context, clusterId string, branchId string) (*branch.Branch, error) { ret := _m.Called(ctx, clusterId, branchId) @@ -1372,6 +1614,36 @@ func (_m *TiDBCloudClient) Restore(ctx context.Context, body *br.V1beta1RestoreR return r0, r1 } +// ResumeMigrationTask provides a mock function with given fields: ctx, clusterId, taskId, body +func (_m *TiDBCloudClient) ResumeMigrationTask(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { + ret := _m.Called(ctx, clusterId, taskId, body) + + if len(ret) == 0 { + panic("no return value specified for ResumeMigrationTask") + } + + var r0 map[string]interface{} + var r1 error + if rf, ok := 
ret.Get(0).(func(context.Context, string, string, *map[string]interface{}) (map[string]interface{}, error)); ok { + return rf(ctx, clusterId, taskId, body) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, *map[string]interface{}) map[string]interface{}); ok { + r0 = rf(ctx, clusterId, taskId, body) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]interface{}) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, *map[string]interface{}) error); ok { + r1 = rf(ctx, clusterId, taskId, body) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // StartChangefeed provides a mock function with given fields: ctx, clusterId, changefeedId func (_m *TiDBCloudClient) StartChangefeed(ctx context.Context, clusterId string, changefeedId string) (*cdc.Changefeed, error) { ret := _m.Called(ctx, clusterId, changefeedId) diff --git a/internal/service/cloud/api_client.go b/internal/service/cloud/api_client.go index b5b7f240..a9c5abc8 100644 --- a/internal/service/cloud/api_client.go +++ b/internal/service/cloud/api_client.go @@ -34,6 +34,7 @@ import ( "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/cluster" "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/export" "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/imp" + "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" "github.com/icholy/digest" ) @@ -104,6 +105,24 @@ type TiDBCloudClient interface { DownloadExportFiles(ctx context.Context, clusterId string, exportId string, body *export.ExportServiceDownloadExportFilesBody) (*export.DownloadExportFilesResponse, error) + CancelMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (map[string]interface{}, error) + + CancelMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.MigrationTask, error) + + CreateMigrationPrecheck(ctx context.Context, clusterId string, body 
*migration.MigrationServicePrecheckBody) (*migration.CreateMigrationPrecheckResp, error) + + CreateMigrationTask(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateTaskBody) (*migration.MigrationTask, error) + + GetMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (*migration.MigrationPrecheck, error) + + GetMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.MigrationTask, error) + + ListMigrationTasks(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationTasksResp, error) + + PauseMigrationTask(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) + + ResumeMigrationTask(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) + ListSQLUsers(ctx context.Context, clusterID string, pageSize *int32, pageToken *string) (*iam.ApiListSqlUsersRsp, error) CreateSQLUser(ctx context.Context, clusterID string, body *iam.ApiCreateSqlUserReq) (*iam.ApiSqlUser, error) @@ -153,11 +172,12 @@ type ClientDelegate struct { ec *export.APIClient alc *auditlog.APIClient cdc *cdc.APIClient + mc *migration.APIClient } func NewClientDelegateWithToken(token string, serverlessEndpoint string, iamEndpoint string) (*ClientDelegate, error) { transport := NewBearTokenTransport(token) - bc, sc, brc, sic, ec, ic, alc, cdc, err := NewApiClient(transport, serverlessEndpoint, iamEndpoint) + bc, sc, brc, sic, ec, ic, alc, cdc, mc, err := NewApiClient(transport, serverlessEndpoint, iamEndpoint) if err != nil { return nil, err } @@ -170,12 +190,13 @@ func NewClientDelegateWithToken(token string, serverlessEndpoint string, iamEndp sic: sic, alc: alc, cdc: cdc, + mc: mc, }, nil } func NewClientDelegateWithApiKey(publicKey string, privateKey string, serverlessEndpoint string, iamEndpoint string) (*ClientDelegate, error) { transport := 
NewDigestTransport(publicKey, privateKey) - bc, sc, brc, sic, ec, ic, alc, cdc, err := NewApiClient(transport, serverlessEndpoint, iamEndpoint) + bc, sc, brc, sic, ec, ic, alc, cdc, mc, err := NewApiClient(transport, serverlessEndpoint, iamEndpoint) if err != nil { return nil, err } @@ -188,6 +209,7 @@ func NewClientDelegateWithApiKey(publicKey string, privateKey string, serverless sic: sic, alc: alc, cdc: cdc, + mc: mc, }, nil } @@ -468,6 +490,77 @@ func (d *ClientDelegate) DownloadExportFiles(ctx context.Context, clusterId stri return res, parseError(err, h) } +func (d *ClientDelegate) CancelMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (map[string]interface{}, error) { + res, h, err := d.mc.MigrationAPI.MigrationServiceCancelPrecheck(ctx, clusterId, precheckId).Execute() + return res, parseError(err, h) +} + +func (d *ClientDelegate) CancelMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.MigrationTask, error) { + res, h, err := d.mc.MigrationAPI.MigrationServiceCancelTask(ctx, clusterId, taskId).Execute() + return res, parseError(err, h) +} + +func (d *ClientDelegate) CreateMigrationPrecheck(ctx context.Context, clusterId string, body *migration.MigrationServicePrecheckBody) (*migration.CreateMigrationPrecheckResp, error) { + r := d.mc.MigrationAPI.MigrationServicePrecheck(ctx, clusterId) + if body != nil { + r = r.Body(*body) + } + res, h, err := r.Execute() + return res, parseError(err, h) +} + +func (d *ClientDelegate) CreateMigrationTask(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateTaskBody) (*migration.MigrationTask, error) { + r := d.mc.MigrationAPI.MigrationServiceCreateTask(ctx, clusterId) + if body != nil { + r = r.Body(*body) + } + res, h, err := r.Execute() + return res, parseError(err, h) +} + +func (d *ClientDelegate) GetMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (*migration.MigrationPrecheck, error) { + res, h, err := 
d.mc.MigrationAPI.MigrationServiceGetPrecheck(ctx, clusterId, precheckId).Execute() + return res, parseError(err, h) +} + +func (d *ClientDelegate) GetMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.MigrationTask, error) { + res, h, err := d.mc.MigrationAPI.MigrationServiceGetTask(ctx, clusterId, taskId).Execute() + return res, parseError(err, h) +} + +func (d *ClientDelegate) ListMigrationTasks(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationTasksResp, error) { + r := d.mc.MigrationAPI.MigrationServiceListTasks(ctx, clusterId) + if pageToken != nil { + r = r.PageToken(*pageToken) + } + if pageSize != nil { + r = r.PageSize(*pageSize) + } + if orderBy != nil { + r = r.OrderBy(*orderBy) + } + res, h, err := r.Execute() + return res, parseError(err, h) +} + +func (d *ClientDelegate) PauseMigrationTask(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { + payload := map[string]interface{}{} + if body != nil { + payload = *body + } + res, h, err := d.mc.MigrationAPI.MigrationServicePauseTask(ctx, clusterId, taskId).Body(payload).Execute() + return res, parseError(err, h) +} + +func (d *ClientDelegate) ResumeMigrationTask(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { + payload := map[string]interface{}{} + if body != nil { + payload = *body + } + res, h, err := d.mc.MigrationAPI.MigrationServiceResumeTask(ctx, clusterId, taskId).Body(payload).Execute() + return res, parseError(err, h) +} + func (d *ClientDelegate) ListSQLUsers(ctx context.Context, clusterID string, pageSize *int32, pageToken *string) (*iam.ApiListSqlUsersRsp, error) { r := d.ic.AccountAPI.V1beta1ClustersClusterIdSqlUsersGet(ctx, clusterID) if pageSize != nil { @@ -581,7 +674,7 @@ func (d *ClientDelegate) GetAuditLogConfig(ctx context.Context, clusterID string return 
res, parseError(err, h) } -func NewApiClient(rt http.RoundTripper, serverlessEndpoint string, iamEndpoint string) (*branch.APIClient, *cluster.APIClient, *br.APIClient, *imp.APIClient, *export.APIClient, *iam.APIClient, *auditlog.APIClient, *cdc.APIClient, error) { +func NewApiClient(rt http.RoundTripper, serverlessEndpoint string, iamEndpoint string) (*branch.APIClient, *cluster.APIClient, *br.APIClient, *imp.APIClient, *export.APIClient, *iam.APIClient, *auditlog.APIClient, *cdc.APIClient, *migration.APIClient, error) { httpclient := &http.Client{ Transport: rt, } @@ -589,12 +682,12 @@ func NewApiClient(rt http.RoundTripper, serverlessEndpoint string, iamEndpoint s // v1beta1 api (serverless) serverlessURL, err := prop.ValidateApiUrl(serverlessEndpoint) if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, nil, nil, nil, nil, err } iamURL, err := prop.ValidateApiUrl(iamEndpoint) if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, nil, nil, nil, nil, err } userAgent := fmt.Sprintf("%s/%s", config.CliName, version.Version) @@ -639,10 +732,15 @@ func NewApiClient(rt http.RoundTripper, serverlessEndpoint string, iamEndpoint s cdcCfg.Host = serverlessURL.Host cdcCfg.UserAgent = userAgent + migrationCfg := migration.NewConfiguration() + migrationCfg.HTTPClient = httpclient + migrationCfg.Host = serverlessURL.Host + migrationCfg.UserAgent = userAgent + return branch.NewAPIClient(branchCfg), cluster.NewAPIClient(clusterCfg), br.NewAPIClient(backupRestoreCfg), imp.NewAPIClient(importCfg), export.NewAPIClient(exportCfg), - iam.NewAPIClient(iamCfg), auditlog.NewAPIClient(auditLogCfg), cdc.NewAPIClient(cdcCfg), nil + iam.NewAPIClient(iamCfg), auditlog.NewAPIClient(auditLogCfg), cdc.NewAPIClient(cdcCfg), migration.NewAPIClient(migrationCfg), nil } func NewDigestTransport(publicKey, privateKey string) http.RoundTripper { diff --git a/internal/service/cloud/logic.go 
b/internal/service/cloud/logic.go index a1be5567..5c5bc584 100644 --- a/internal/service/cloud/logic.go +++ b/internal/service/cloud/logic.go @@ -30,6 +30,7 @@ import ( "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/cluster" "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/export" "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/imp" + "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" tea "github.com/charmbracelet/bubbletea" "github.com/juju/errors" @@ -87,6 +88,12 @@ type Changefeed struct { Type string } +type MigrationTask struct { + ID string + Name string + State string +} + type AuditLogFilterRule struct { FilterRuleId string DisplayName string @@ -150,6 +157,13 @@ func (c Changefeed) String() string { return fmt.Sprintf("%s(%s)", c.Name, c.ID) } +func (m MigrationTask) String() string { + if m.Name == "" || m.Name == m.ID { + return fmt.Sprintf("%s[%s]", m.ID, m.State) + } + return fmt.Sprintf("%s(%s)[%s]", m.Name, m.ID, m.State) +} + func GetSelectedProject(ctx context.Context, pageSize int64, client TiDBCloudClient) (*Project, error) { _, projectItems, err := RetrieveProjects(ctx, pageSize, client) if err != nil { @@ -1031,6 +1045,69 @@ func GetSelectedChangefeed(ctx context.Context, clusterID string, pageSize int64 return changefeed.(*Changefeed), nil } +func GetSelectedMigrationTask(ctx context.Context, clusterID string, pageSize int64, client TiDBCloudClient) (*MigrationTask, error) { + var items = make([]interface{}, 0) + pageSizeInt32 := int32(pageSize) + resp, err := client.ListMigrationTasks(ctx, clusterID, &pageSizeInt32, nil, nil) + if err != nil { + return nil, errors.Trace(err) + } + appendMigrationTaskItems := func(tasks []migration.MigrationTask) { + for _, item := range tasks { + if item.Id == nil { + continue + } + name := *item.Id + if item.Name != nil && *item.Name != "" { + name = *item.Name + } + state := "" + if item.State != nil { + state = 
string(*item.State) + } + items = append(items, &MigrationTask{ + ID: *item.Id, + Name: name, + State: state, + }) + } + } + appendMigrationTaskItems(resp.Tasks) + for resp.NextPageToken != nil && *resp.NextPageToken != "" { + resp, err = client.ListMigrationTasks(ctx, clusterID, &pageSizeInt32, resp.NextPageToken, nil) + if err != nil { + return nil, errors.Trace(err) + } + appendMigrationTaskItems(resp.Tasks) + } + + if len(items) == 0 { + return nil, fmt.Errorf("no available migration tasks found") + } + + model, err := ui.InitialSelectModel(items, "Choose the migration task:") + if err != nil { + return nil, errors.Trace(err) + } + itemsPerPage := 6 + model.EnablePagination(itemsPerPage) + model.EnableFilter() + + p := tea.NewProgram(model) + migrationModel, err := p.Run() + if err != nil { + return nil, errors.Trace(err) + } + if m, _ := migrationModel.(ui.SelectModel); m.Interrupted { + return nil, util.InterruptError + } + selected := migrationModel.(ui.SelectModel).GetSelectedItem() + if selected == nil { + return nil, errors.New("no migration task selected") + } + return selected.(*MigrationTask), nil +} + func GetSelectedFilterRule(ctx context.Context, clusterID string, client TiDBCloudClient) (*AuditLogFilterRule, error) { rulesResp, err := client.ListAuditLogFilterRules(ctx, clusterID) if err != nil { diff --git a/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json b/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json new file mode 100644 index 00000000..2c7fbfb2 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json @@ -0,0 +1,1212 @@ +{ + "swagger": "2.0", + "info": { + "title": "TiDB Cloud Starter and Essential API", + "description": "TiDB Cloud Starter and Essential API", + "version": "v1beta1" + }, + "tags": [ + { + "name": "MigrationService" + } + ], + "host": "serverless.tidbapi.com", + "schemes": [ + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + 
"/v1beta1/clusters/{clusterId}/migrations": { + "get": { + "summary": "List migration tasks", + "operationId": "MigrationService_ListTasks", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/ListMigrationTasksResp" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster to list tasks for.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "pageToken", + "description": "Optional. The page token, default is empty.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "pageSize", + "description": "Optional. The page size, default is 10.", + "in": "query", + "required": false, + "type": "integer", + "format": "int32", + "default": 10 + }, + { + "name": "orderBy", + "description": "Specifies the sorting order of results. Use a comma-separated list of field names, optionally appending `desc` for descending order. For example, `createTime desc`. 
By default, fields are sorted in ascending order.\n\nSupported field: `createTime`.", + "in": "query", + "required": false, + "type": "string" + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" + } + ] + }, + "post": { + "summary": "Create a migration task", + "operationId": "MigrationService_CreateTask", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/MigrationTask" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster to create the migration task in.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/MigrationService.CreateTaskBody" + } + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json' \\\n+--data '{\\n \"name\": \"my-migration\",\\n \"sources\": [\\n {\\n \"connProfile\": {\\n \"connType\": \"PUBLIC\",\\n \"host\": \"1.2.3.4\",\\n \"port\": 3306,\\n \"user\": \"root\",\\n \"password\": \"secret\"\\n },\\n \"sourceType\": \"SOURCE_TYPE_MYSQL\"\\n }\\n ],\\n \"target\": {\\n \"user\": \"tidb\",\\n \"password\": \"tidb_password\"\\n },\\n \"mode\": \"MODE_ALL\",\\n \"fullDataMigration\": true\\n}'" + } + ] + } + }, + "/v1beta1/clusters/{clusterId}/migrations/{id}": { + "get": { + "summary": "Get 
a migration task", + "operationId": "MigrationService_GetTask", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/MigrationTask" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "id", + "description": "The ID of the migration task.", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" + } + ] + }, + "delete": { + "summary": "Cancel a migration task", + "operationId": "MigrationService_CancelTask", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/MigrationTask" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "id", + "description": "The ID of the migration task to cancel.", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" + } + ] + } + }, + "/v1beta1/clusters/{clusterId}/migrations/{id}:pause": { + "post": { + "summary": "Pause a 
running migration task", + "operationId": "MigrationService_PauseTask", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": {} + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "id", + "description": "The ID of the migration task to pause.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/MigrationService.PauseTaskBody" + } + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}:pause' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json' \\\n+--data '{}'" + } + ] + } + }, + "/v1beta1/clusters/{clusterId}/migrations/{id}:resume": { + "post": { + "summary": "Resume a paused migration task", + "operationId": "MigrationService_ResumeTask", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": {} + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "id", + "description": "The ID of the migration task to resume.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": 
"#/definitions/MigrationService.ResumeTaskBody" + } + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}:resume' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json' \\\n+--data '{}'" + } + ] + } + }, + "/v1beta1/clusters/{clusterId}/migrationPrechecks": { + "post": { + "summary": "Run a precheck for a migration task", + "operationId": "MigrationService_Precheck", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/CreateMigrationPrecheckResp" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster to create the migration task in.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/MigrationService.PrecheckBody" + } + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json' \\\n+--data '{\\n \"name\": \"my-migration\",\\n \"sources\": [\\n {\\n \"connProfile\": {\\n \"connType\": \"PUBLIC\",\\n \"host\": \"1.2.3.4\",\\n \"port\": 3306,\\n \"user\": \"root\",\\n \"password\": \"secret\"\\n },\\n \"sourceType\": \"SOURCE_TYPE_MYSQL\"\\n }\\n ],\\n \"target\": {\\n \"user\": \"tidb\",\\n \"password\": \"tidb_password\"\\n },\\n \"mode\": \"MODE_ALL\",\\n \"fullDataMigration\": true\\n}'" + } + ] + 
} + }, + "/v1beta1/clusters/{clusterId}/migrationPrechecks/{id}": { + "get": { + "summary": "Get a migration precheck", + "operationId": "MigrationService_GetPrecheck", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/MigrationPrecheck" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "id", + "description": "The ID of the precheck.", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" + } + ] + }, + "delete": { + "summary": "Cancel a migration precheck", + "operationId": "MigrationService_CancelPrecheck", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": {} + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "clusterId", + "description": "The ID of the cluster.", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "id", + "description": "The ID of the precheck to cancel.", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Migration" + ], + "x-codeSamples": [ + { + "label": "curl", + "lang": "cURL", + "source": "curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: 
application/json'" + } + ] + } + } + }, + "definitions": { + "Any": { + "type": "object", + "properties": { + "@type": { + "type": "string" + } + }, + "additionalProperties": {} + }, + "BlockAllowRules": { + "type": "object", + "properties": { + "doDbs": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Database names to include in migration." + }, + "doTables": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/BlockAllowRules.Table" + }, + "description": "Table-level allow-list rules." + } + } + }, + "BlockAllowRules.Table": { + "type": "object", + "properties": { + "schema": { + "type": "string", + "description": "Schema name." + }, + "table": { + "type": "string", + "description": "Table name." + } + } + }, + "ConnProfile": { + "type": "object", + "properties": { + "connType": { + "description": "Connection type (e.g., PUBLIC, PRIVATE_LINK).", + "allOf": [ + { + "$ref": "#/definitions/ConnType" + } + ] + }, + "endpointId": { + "type": "string", + "description": "Private link endpoint ID." + }, + "host": { + "type": "string", + "description": "Source host." + }, + "port": { + "type": "integer", + "format": "int32", + "description": "Source port." + }, + "user": { + "type": "string", + "description": "Source user." + }, + "password": { + "type": "string", + "description": "Source password." + }, + "security": { + "description": "TLS/SSL settings; if not set, use defaults.", + "allOf": [ + { + "$ref": "#/definitions/Security" + } + ] + } + }, + "required": [ + "port", + "user", + "password" + ] + }, + "ConnType": { + "type": "string", + "enum": [ + "PUBLIC", + "PRIVATE_LINK" + ], + "description": "The connection type used to connect to the source database.\n\n - PUBLIC: Connect over the public internet.\n - PRIVATE_LINK: Connect via Private Link/Private Endpoint." 
+ }, + "CreateMigrationPrecheckResp": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the created precheck.", + "readOnly": true + } + } + }, + "DumpDetail": { + "type": "object", + "properties": { + "bps": { + "type": "string", + "format": "int64", + "description": "Bytes per second processed during dump.", + "readOnly": true + }, + "progress": { + "type": "number", + "format": "double", + "description": "Progress of dump phase (0-100).", + "readOnly": true + }, + "totalTables": { + "type": "string", + "format": "int64", + "description": "Total number of tables to dump.", + "readOnly": true + }, + "completedTables": { + "type": "string", + "format": "int64", + "description": "Number of tables completed dumping.", + "readOnly": true + }, + "finishedBytes": { + "type": "string", + "format": "int64", + "description": "Total bytes finished dumping.", + "readOnly": true + }, + "finishedRows": { + "type": "string", + "format": "int64", + "description": "Total rows finished dumping.", + "readOnly": true + } + } + }, + "ListMigrationTasksResp": { + "type": "object", + "properties": { + "tasks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/MigrationTask" + }, + "description": "The list of migration tasks.", + "readOnly": true + }, + "totalSize": { + "type": "integer", + "format": "int64", + "description": "The total number of tasks matching the query.", + "readOnly": true + }, + "nextPageToken": { + "type": "string", + "description": "Token to retrieve the next page of results.", + "readOnly": true + } + } + }, + "LoadDetail": { + "type": "object", + "properties": { + "bps": { + "type": "string", + "format": "int64", + "description": "Bytes per second processed during load.", + "readOnly": true + }, + "progress": { + "type": "number", + "format": "double", + "description": "Progress of load phase (0-100).", + "readOnly": true + }, + "finishedBytes": { + "type": "string", + "format": 
"int64", + "description": "Total bytes finished loading.", + "readOnly": true + }, + "totalBytes": { + "type": "string", + "format": "int64", + "description": "Total bytes to load.", + "readOnly": true + } + } + }, + "MigrationPrecheck": { + "type": "object", + "properties": { + "total": { + "type": "integer", + "format": "int32", + "description": "Total number of precheck items.", + "readOnly": true + }, + "failedCnt": { + "type": "integer", + "format": "int32", + "description": "Number of failed items.", + "readOnly": true + }, + "warnCnt": { + "type": "integer", + "format": "int32", + "description": "Number of items with warnings.", + "readOnly": true + }, + "successCnt": { + "type": "integer", + "format": "int32", + "description": "Number of successful items.", + "readOnly": true + }, + "status": { + "type": "string", + "description": "Overall status of the precheck.", + "readOnly": true + }, + "items": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/PrecheckItem" + }, + "description": "Details for each precheck item.", + "readOnly": true + } + } + }, + "MigrationService.CreateTaskBody": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The display name of the migration task." + }, + "sources": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/Source" + }, + "description": "The data sources to migrate from." + }, + "target": { + "description": "The target database credentials.", + "allOf": [ + { + "$ref": "#/definitions/Target" + } + ] + }, + "mode": { + "description": "The migration mode (full+incremental or incremental-only).", + "allOf": [ + { + "$ref": "#/definitions/TaskMode" + } + ] + }, + "fullDataMigration": { + "type": "boolean", + "description": "If true, migrate all user data (equivalent to enabling all non-system databases and tables)." 
+ } + }, + "required": [ + "sources" + ] + }, + "MigrationService.PauseTaskBody": { + "type": "object", + "title": "PauseMigrationTaskReq is used to pause a migration task" + }, + "MigrationService.PrecheckBody": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The display name of the migration task." + }, + "sources": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/Source" + }, + "description": "The data sources to migrate from." + }, + "target": { + "description": "The target database credentials.", + "allOf": [ + { + "$ref": "#/definitions/Target" + } + ] + }, + "mode": { + "description": "The migration mode (full+incremental or incremental-only).", + "allOf": [ + { + "$ref": "#/definitions/TaskMode" + } + ] + }, + "fullDataMigration": { + "type": "boolean", + "description": "If true, migrate all user data (equivalent to enabling all non-system databases and tables)." + } + }, + "required": [ + "sources" + ] + }, + "MigrationService.ResumeTaskBody": { + "type": "object", + "title": "ResumeMigrationTaskReq is used to resume a paused migration task" + }, + "MigrationTask": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The unique ID of the migration task.", + "readOnly": true + }, + "name": { + "type": "string", + "description": "The display name of the migration task." 
+ }, + "subTasks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/SubTask" + }, + "description": "The list of subtasks composing this migration.", + "readOnly": true + }, + "targetUser": { + "type": "string", + "description": "The target database username used by the task.", + "readOnly": true + }, + "createTime": { + "type": "string", + "format": "date-time", + "description": "The timestamp when the task was created.", + "readOnly": true + }, + "mode": { + "description": "The migration mode of the task.", + "allOf": [ + { + "$ref": "#/definitions/TaskMode" + } + ] + }, + "state": { + "description": "The current state of the task.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/MigrationTask.State" + } + ] + } + } + }, + "MigrationTask.State": { + "type": "string", + "enum": [ + "CREATING", + "RUNNING", + "PAUSED", + "CANCELED", + "FAILED" + ], + "description": "Overall state of a migration task.\n\n - CREATING: Task is being created.\n - RUNNING: Task is actively running.\n - PAUSED: Task is paused.\n - CANCELED: Task has been canceled.\n - FAILED: Task failed with error." 
+ }, + "PrecheckItem": { + "type": "object", + "properties": { + "desc": { + "type": "string", + "description": "Human-readable description of the check.", + "readOnly": true + }, + "status": { + "type": "string", + "description": "Status of this check (e.g., SUCCESS, FAILED, WARN).", + "readOnly": true + }, + "solution": { + "type": "string", + "description": "Suggested solution if the check failed or warned.", + "readOnly": true + }, + "reason": { + "type": "string", + "description": "Reason for the failure or warning.", + "readOnly": true + }, + "solutionDocUrl": { + "type": "string", + "description": "Documentation URL for the solution.", + "readOnly": true + }, + "type": { + "description": "The type of precheck.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/PrecheckItemType" + } + ] + } + } + }, + "PrecheckItemType": { + "type": "string", + "enum": [ + "PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING", + "PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING", + "PRECHECK_ITEM_TYPE_VERSION_CHECKING", + "PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING", + "PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING", + "PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING", + "PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING", + "PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING", + "PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING", + "PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING", + "PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING", + "PRECHECK_ITEM_TYPE_META_POSITION_CHECKING", + "PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING", + "PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING" + ], + "description": "Types of prechecks performed before starting a migration.\n\n - PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING: Check source has required dump/export privileges.\n - PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING: Check source has required replication privileges.\n - PRECHECK_ITEM_TYPE_VERSION_CHECKING: Check source database version compatibility.\n - PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING: Check source server_id 
configuration.\n - PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING: Check whether binlog is enabled on source.\n - PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING: Check binlog format (e.g., ROW) on source.\n - PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING: Check binlog row image setting.\n - PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING: Check table/schema compatibility with target.\n - PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING: Check binlog database-level filtering configuration.\n - PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING: Check concurrent connections limit/availability.\n - PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING: Check target database privileges.\n - PRECHECK_ITEM_TYPE_META_POSITION_CHECKING: Check saved meta/binlog position validity.\n - PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING: Check target tables are empty for Lightning load.\n - PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING: Check primary key settings on source tables." + }, + "RouteRule": { + "type": "object", + "properties": { + "sourceTable": { + "description": "Source table pattern to match.", + "allOf": [ + { + "$ref": "#/definitions/RouteRule.Source" + } + ] + }, + "targetTable": { + "description": "Target table to route to.", + "allOf": [ + { + "$ref": "#/definitions/RouteRule.Target" + } + ] + } + } + }, + "RouteRule.Source": { + "type": "object", + "properties": { + "schemaPattern": { + "type": "string", + "description": "Schema pattern of the source, supports wildcards." + }, + "tablePattern": { + "type": "string", + "description": "Table pattern of the source, supports wildcards." + } + } + }, + "RouteRule.Target": { + "type": "object", + "properties": { + "schema": { + "type": "string", + "description": "Target schema name." + }, + "table": { + "type": "string", + "description": "Target table name." + } + } + }, + "Security": { + "type": "object", + "properties": { + "certAllowedCn": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Allowed certificate Common Names." 
+ }, + "sslCaContent": { + "type": "string", + "format": "byte", + "description": "CA certificate content in PEM." + }, + "sslCertContent": { + "type": "string", + "format": "byte", + "description": "Client certificate content in PEM." + }, + "sslKeyContent": { + "type": "string", + "format": "byte", + "description": "Client private key in PEM." + } + } + }, + "Source": { + "type": "object", + "properties": { + "connProfile": { + "description": "Connection profile for the source database.", + "allOf": [ + { + "$ref": "#/definitions/ConnProfile" + } + ] + }, + "baRules": { + "description": "Block/allow rules for databases and tables, which is exclusive with route_rules.", + "allOf": [ + { + "$ref": "#/definitions/BlockAllowRules" + } + ] + }, + "routeRules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/RouteRule" + }, + "description": "Table route rules\uff0cwhich is exclusive with ba_rules." + }, + "binlogName": { + "type": "string", + "x-nullable": true, + "description": "Starting binlog file name for incremental sync." + }, + "binlogPos": { + "type": "integer", + "format": "int32", + "x-nullable": true, + "description": "Starting binlog position for incremental sync." + }, + "binlogGtid": { + "type": "string", + "x-nullable": true, + "description": "Starting GTID set for incremental sync." + }, + "sourceType": { + "description": "Source type (e.g., MySQL).", + "allOf": [ + { + "$ref": "#/definitions/Source.SourceType" + } + ] + } + }, + "required": [ + "connProfile", + "sourceType" + ] + }, + "Source.SourceType": { + "type": "string", + "enum": [ + "SOURCE_TYPE_MYSQL", + "SOURCE_TYPE_ALICLOUD_RDS_MYSQL" + ], + "description": "The source database type.\n\n - SOURCE_TYPE_MYSQL: Self-managed MySQL.\n - SOURCE_TYPE_ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL." 
+ }, + "Status": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/Any" + } + } + } + }, + "SubTask": { + "type": "object", + "properties": { + "source": { + "description": "Source configuration for this subtask.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/Source" + } + ] + }, + "currentStep": { + "description": "Current step of the subtask.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/SubTask.Step" + } + ] + }, + "stage": { + "description": "Current stage of the subtask.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/SubTask.Stage" + } + ] + }, + "dumpDetail": { + "description": "Detail of dump phase, if applicable.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/DumpDetail" + } + ] + }, + "loadDetail": { + "description": "Detail of load phase, if applicable.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/LoadDetail" + } + ] + }, + "syncDetail": { + "description": "Detail of sync phase, if applicable.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/SyncDetail" + } + ] + }, + "errorMsg": { + "type": "string", + "x-nullable": true, + "description": "Error message when the subtask fails.", + "readOnly": true + } + } + }, + "SubTask.Stage": { + "type": "string", + "enum": [ + "STAGE_RUNNING", + "STAGE_PAUSED", + "STAGE_FAILED", + "STAGE_FINISHED", + "STAGE_UNKNOWN" + ], + "description": "The high-level lifecycle stage of a subtask.\n\n - STAGE_RUNNING: Subtask is running.\n - STAGE_PAUSED: Subtask is paused.\n - STAGE_FAILED: Subtask failed.\n - STAGE_FINISHED: Subtask finished successfully.\n - STAGE_UNKNOWN: Subtask stage is unknown." 
+ }, + "SubTask.Step": { + "type": "string", + "enum": [ + "STEP_DUMP", + "STEP_LOAD", + "STEP_SYNC" + ], + "description": "The current step within a subtask.\n\n - STEP_DUMP: Dump/export data from source.\n - STEP_LOAD: Load/import data into target.\n - STEP_SYNC: Sync/replicate binlog changes." + }, + "SyncDetail": { + "type": "object", + "properties": { + "rps": { + "type": "string", + "format": "int64", + "description": "Rows processed per second during sync.", + "readOnly": true + }, + "latency": { + "type": "string", + "format": "int64", + "description": "Replication latency in seconds.", + "readOnly": true + }, + "checkpoint": { + "type": "string", + "description": "Synchronization checkpoint.", + "readOnly": true + } + } + }, + "Target": { + "type": "object", + "properties": { + "user": { + "type": "string", + "description": "Target database user." + }, + "password": { + "type": "string", + "description": "Target database password." + } + } + }, + "TaskMode": { + "type": "string", + "enum": [ + "MODE_ALL", + "MODE_INCREMENTAL" + ], + "description": "Migration task mode.\n\n - MODE_ALL: Full + incremental migration (all phases).\n - MODE_INCREMENTAL: Incremental-only migration (replication)." 
+ } + } +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/.gitignore b/pkg/tidbcloud/v1beta1/serverless/migration/.gitignore new file mode 100644 index 00000000..daf913b1 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator-ignore b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator-ignore new file mode 100644 index 00000000..7484ee59 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator-ignore @@ -0,0 +1,23 @@ +# OpenAPI Generator Ignore +# Generated by openapi-generator https://github.com/openapitools/openapi-generator + +# Use this file to prevent files from being overwritten by the generator. +# The patterns follow closely to .gitignore or .dockerignore. + +# As an example, the C# client generator defines ApiClient.cs. +# You can make changes and tell OpenAPI Generator to ignore just this file by uncommenting the following line: +#ApiClient.cs + +# You can match any string of characters against a directory, file or extension with a single asterisk (*): +#foo/*/qux +# The above matches foo/bar/qux and foo/baz/qux, but not foo/bar/baz/qux + +# You can recursively match patterns against a directory, file or extension with a double asterisk (**): +#foo/**/qux +# This matches foo/bar/qux, foo/baz/qux, and foo/bar/baz/qux + +# You can also negate patterns with an exclamation (!). 
+# For example, you can ignore all files in a docs folder with the file extension .md: +#docs/*.md +# Then explicitly reverse the ignore rule for a single file: +#!docs/README.md diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES new file mode 100644 index 00000000..de7f3a50 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES @@ -0,0 +1,40 @@ +.gitignore +.openapi-generator-ignore +.travis.yml +README.md +api/openapi.yaml +api_migration.go +client.go +configuration.go +git_push.sh +model_any.go +model_block_allow_rules.go +model_block_allow_rules_table.go +model_conn_profile.go +model_conn_type.go +model_create_migration_precheck_resp.go +model_dump_detail.go +model_list_migration_tasks_resp.go +model_load_detail.go +model_migration_precheck.go +model_migration_service_create_task_body.go +model_migration_service_precheck_body.go +model_migration_task.go +model_migration_task_state.go +model_precheck_item.go +model_precheck_item_type.go +model_route_rule.go +model_route_rule_source.go +model_route_rule_target.go +model_security.go +model_source.go +model_source_source_type.go +model_status.go +model_sub_task.go +model_sub_task_stage.go +model_sub_task_step.go +model_sync_detail.go +model_target.go +model_task_mode.go +response.go +utils.go diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/VERSION b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/VERSION new file mode 100644 index 00000000..5f84a81d --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/VERSION @@ -0,0 +1 @@ +7.12.0 diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/.travis.yml b/pkg/tidbcloud/v1beta1/serverless/migration/.travis.yml new file mode 100644 index 00000000..f5cb2ce9 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/.travis.yml @@ -0,0 +1,8 @@ +language: go + +install: + 
- go get -d -v . + +script: + - go build -v ./ + diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/README.md b/pkg/tidbcloud/v1beta1/serverless/migration/README.md new file mode 100644 index 00000000..05493d37 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/README.md @@ -0,0 +1,149 @@ +# Go API client for migration + +TiDB Cloud Starter and Essential API + +## Overview +This API client was generated by the [OpenAPI Generator](https://openapi-generator.tech) project. By using the [OpenAPI-spec](https://www.openapis.org/) from a remote server, you can easily generate an API client. + +- API version: v1beta1 +- Package version: 1.0.0 +- Generator version: 7.12.0 +- Build package: org.openapitools.codegen.languages.GoClientCodegen + +## Installation + +Install the following dependencies: + +```sh +go get github.com/stretchr/testify/assert +go get golang.org/x/net/context +``` + +Put the package under your project folder and add the following in import: + +```go +import migration "github.com/GIT_USER_ID/GIT_REPO_ID" +``` + +To use a proxy, set the environment variable `HTTP_PROXY`: + +```go +os.Setenv("HTTP_PROXY", "http://proxy_name:proxy_port") +``` + +## Configuration of Server URL + +Default configuration comes with `Servers` field that contains server objects as defined in the OpenAPI specification. + +### Select Server Configuration + +For using other server than the one defined on index 0 set context value `migration.ContextServerIndex` of type `int`. + +```go +ctx := context.WithValue(context.Background(), migration.ContextServerIndex, 1) +``` + +### Templated Server URL + +Templated server URL is formatted using default variables from configuration or from context value `migration.ContextServerVariables` of type `map[string]string`. 
+ +```go +ctx := context.WithValue(context.Background(), migration.ContextServerVariables, map[string]string{ + "basePath": "v2", +}) +``` + +Note, enum values are always validated and all unused variables are silently ignored. + +### URLs Configuration per Operation + +Each operation can use different server URL defined using `OperationServers` map in the `Configuration`. +An operation is uniquely identified by `"{classname}Service.{nickname}"` string. +Similar rules for overriding default operation server index and variables applies by using `migration.ContextOperationServerIndices` and `migration.ContextOperationServerVariables` context maps. + +```go +ctx := context.WithValue(context.Background(), migration.ContextOperationServerIndices, map[string]int{ + "{classname}Service.{nickname}": 2, +}) +ctx = context.WithValue(context.Background(), migration.ContextOperationServerVariables, map[string]map[string]string{ + "{classname}Service.{nickname}": { + "port": "8443", + }, +}) +``` + +## Documentation for API Endpoints + +All URIs are relative to *https://serverless.tidbapi.com* + +Class | Method | HTTP request | Description +------------ | ------------- | ------------- | ------------- +*MigrationAPI* | [**MigrationServiceCancelPrecheck**](docs/MigrationAPI.md#migrationservicecancelprecheck) | **Delete** /v1beta1/clusters/{clusterId}/migrationPrechecks/{id} | Cancel a migration precheck +*MigrationAPI* | [**MigrationServiceCancelTask**](docs/MigrationAPI.md#migrationservicecanceltask) | **Delete** /v1beta1/clusters/{clusterId}/migrations/{id} | Cancel a migration task +*MigrationAPI* | [**MigrationServiceCreateTask**](docs/MigrationAPI.md#migrationservicecreatetask) | **Post** /v1beta1/clusters/{clusterId}/migrations | Create a migration task +*MigrationAPI* | [**MigrationServiceGetPrecheck**](docs/MigrationAPI.md#migrationservicegetprecheck) | **Get** /v1beta1/clusters/{clusterId}/migrationPrechecks/{id} | Get a migration precheck +*MigrationAPI* | 
[**MigrationServiceGetTask**](docs/MigrationAPI.md#migrationservicegettask) | **Get** /v1beta1/clusters/{clusterId}/migrations/{id} | Get a migration task +*MigrationAPI* | [**MigrationServiceListTasks**](docs/MigrationAPI.md#migrationservicelisttasks) | **Get** /v1beta1/clusters/{clusterId}/migrations | List migration tasks +*MigrationAPI* | [**MigrationServicePauseTask**](docs/MigrationAPI.md#migrationservicepausetask) | **Post** /v1beta1/clusters/{clusterId}/migrations/{id}:pause | Pause a running migration task +*MigrationAPI* | [**MigrationServicePrecheck**](docs/MigrationAPI.md#migrationserviceprecheck) | **Post** /v1beta1/clusters/{clusterId}/migrationPrechecks | Run a precheck for a migration task +*MigrationAPI* | [**MigrationServiceResumeTask**](docs/MigrationAPI.md#migrationserviceresumetask) | **Post** /v1beta1/clusters/{clusterId}/migrations/{id}:resume | Resume a paused migration task + + +## Documentation For Models + + - [Any](docs/Any.md) + - [BlockAllowRules](docs/BlockAllowRules.md) + - [BlockAllowRulesTable](docs/BlockAllowRulesTable.md) + - [ConnProfile](docs/ConnProfile.md) + - [ConnType](docs/ConnType.md) + - [CreateMigrationPrecheckResp](docs/CreateMigrationPrecheckResp.md) + - [DumpDetail](docs/DumpDetail.md) + - [ListMigrationTasksResp](docs/ListMigrationTasksResp.md) + - [LoadDetail](docs/LoadDetail.md) + - [MigrationPrecheck](docs/MigrationPrecheck.md) + - [MigrationServiceCreateTaskBody](docs/MigrationServiceCreateTaskBody.md) + - [MigrationServicePrecheckBody](docs/MigrationServicePrecheckBody.md) + - [MigrationTask](docs/MigrationTask.md) + - [MigrationTaskState](docs/MigrationTaskState.md) + - [PrecheckItem](docs/PrecheckItem.md) + - [PrecheckItemType](docs/PrecheckItemType.md) + - [RouteRule](docs/RouteRule.md) + - [RouteRuleSource](docs/RouteRuleSource.md) + - [RouteRuleTarget](docs/RouteRuleTarget.md) + - [Security](docs/Security.md) + - [Source](docs/Source.md) + - [SourceSourceType](docs/SourceSourceType.md) + - 
[Status](docs/Status.md) + - [SubTask](docs/SubTask.md) + - [SubTaskStage](docs/SubTaskStage.md) + - [SubTaskStep](docs/SubTaskStep.md) + - [SyncDetail](docs/SyncDetail.md) + - [Target](docs/Target.md) + - [TaskMode](docs/TaskMode.md) + + +## Documentation For Authorization + +Endpoints do not require authorization. + + +## Documentation for Utility Methods + +Due to the fact that model structure members are all pointers, this package contains +a number of utility functions to easily obtain pointers to values of basic types. +Each of these functions takes a value of the given basic type and returns a pointer to it: + +* `PtrBool` +* `PtrInt` +* `PtrInt32` +* `PtrInt64` +* `PtrFloat` +* `PtrFloat32` +* `PtrFloat64` +* `PtrString` +* `PtrTime` + +## Author + + + diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml b/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml new file mode 100644 index 00000000..a2b1404c --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml @@ -0,0 +1,1088 @@ +openapi: 3.0.1 +info: + description: TiDB Cloud Starter and Essential API + title: TiDB Cloud Starter and Essential API + version: v1beta1 +servers: +- url: https://serverless.tidbapi.com/ +tags: +- name: MigrationService +paths: + /v1beta1/clusters/{clusterId}/migrations: + get: + operationId: MigrationService_ListTasks + parameters: + - description: The ID of the cluster to list tasks for. + in: path + name: clusterId + required: true + schema: + type: string + - description: "Optional. The page token, default is empty." + in: query + name: pageToken + schema: + type: string + - description: "Optional. The page size, default is 10." + in: query + name: pageSize + schema: + default: 10 + format: int32 + type: integer + - description: |- + Specifies the sorting order of results. Use a comma-separated list of field names, optionally appending `desc` for descending order. For example, `createTime desc`. 
By default, fields are sorted in ascending order. + + Supported field: `createTime`. + in: query + name: orderBy + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/ListMigrationTasksResp' + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. + summary: List migration tasks + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Accept: application/json' + post: + operationId: MigrationService_CreateTask + parameters: + - description: The ID of the cluster to create the migration task in. + in: path + name: clusterId + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/MigrationService.CreateTaskBody' + required: true + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/MigrationTask' + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. 
+ summary: Create a migration task + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Content-Type: application/json' \ + +--header 'Accept: application/json' \ + +--data '{\n "name": "my-migration",\n "sources": [\n {\n "connProfile": {\n "connType": "PUBLIC",\n "host": "1.2.3.4",\n "port": 3306,\n "user": "root",\n "password": "secret"\n },\n "sourceType": "SOURCE_TYPE_MYSQL"\n }\n ],\n "target": {\n "user": "tidb",\n "password": "tidb_password"\n },\n "mode": "MODE_ALL",\n "fullDataMigration": true\n}' + x-codegen-request-body-name: body + /v1beta1/clusters/{clusterId}/migrations/{id}: + delete: + operationId: MigrationService_CancelTask + parameters: + - description: The ID of the cluster. + in: path + name: clusterId + required: true + schema: + type: string + - description: The ID of the migration task to cancel. + in: path + name: id + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/MigrationTask' + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. + summary: Cancel a migration task + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Accept: application/json' + get: + operationId: MigrationService_GetTask + parameters: + - description: The ID of the cluster. + in: path + name: clusterId + required: true + schema: + type: string + - description: The ID of the migration task. 
+ in: path + name: id + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/MigrationTask' + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. + summary: Get a migration task + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Accept: application/json' + /v1beta1/clusters/{clusterId}/migrations/{id}:pause: + post: + operationId: MigrationService_PauseTask + parameters: + - description: The ID of the cluster. + in: path + name: clusterId + required: true + schema: + type: string + - description: The ID of the migration task to pause. + in: path + name: id + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/MigrationService.PauseTaskBody' + required: true + responses: + "200": + content: + application/json: + schema: + type: object + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. + summary: Pause a running migration task + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}:pause' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Content-Type: application/json' \ + +--header 'Accept: application/json' \ + +--data '{}' + x-codegen-request-body-name: body + /v1beta1/clusters/{clusterId}/migrations/{id}:resume: + post: + operationId: MigrationService_ResumeTask + parameters: + - description: The ID of the cluster. 
+ in: path + name: clusterId + required: true + schema: + type: string + - description: The ID of the migration task to resume. + in: path + name: id + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/MigrationService.ResumeTaskBody' + required: true + responses: + "200": + content: + application/json: + schema: + type: object + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. + summary: Resume a paused migration task + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}:resume' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Content-Type: application/json' \ + +--header 'Accept: application/json' \ + +--data '{}' + x-codegen-request-body-name: body + /v1beta1/clusters/{clusterId}/migrationPrechecks: + post: + operationId: MigrationService_Precheck + parameters: + - description: The ID of the cluster to create the migration task in. + in: path + name: clusterId + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/MigrationService.PrecheckBody' + required: true + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/CreateMigrationPrecheckResp' + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. 
+ summary: Run a precheck for a migration task + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Content-Type: application/json' \ + +--header 'Accept: application/json' \ + +--data '{\n "name": "my-migration",\n "sources": [\n {\n "connProfile": {\n "connType": "PUBLIC",\n "host": "1.2.3.4",\n "port": 3306,\n "user": "root",\n "password": "secret"\n },\n "sourceType": "SOURCE_TYPE_MYSQL"\n }\n ],\n "target": {\n "user": "tidb",\n "password": "tidb_password"\n },\n "mode": "MODE_ALL",\n "fullDataMigration": true\n}' + x-codegen-request-body-name: body + /v1beta1/clusters/{clusterId}/migrationPrechecks/{id}: + delete: + operationId: MigrationService_CancelPrecheck + parameters: + - description: The ID of the cluster. + in: path + name: clusterId + required: true + schema: + type: string + - description: The ID of the precheck to cancel. + in: path + name: id + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + type: object + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. + summary: Cancel a migration precheck + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{id}' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Accept: application/json' + get: + operationId: MigrationService_GetPrecheck + parameters: + - description: The ID of the cluster. + in: path + name: clusterId + required: true + schema: + type: string + - description: The ID of the precheck. 
+ in: path + name: id + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/MigrationPrecheck' + description: A successful response. + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Status' + description: An unexpected error response. + summary: Get a migration precheck + tags: + - Migration + x-codeSamples: + - label: curl + lang: cURL + source: |- + curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{id}' \ + +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ + +--header 'Accept: application/json' +components: + schemas: + Any: + additionalProperties: + type: object + example: + '@type': '@type' + properties: + '@type': + type: string + type: object + BlockAllowRules: + properties: + doDbs: + description: Database names to include in migration. + items: + type: string + type: array + doTables: + description: Table-level allow-list rules. + items: + $ref: '#/components/schemas/BlockAllowRules.Table' + type: array + type: object + BlockAllowRules.Table: + properties: + schema: + description: Schema name. + type: string + table: + description: Table name. + type: string + type: object + ConnProfile: + properties: + connType: + allOf: + - $ref: '#/components/schemas/ConnType' + description: "Connection type (e.g., PUBLIC, PRIVATE_LINK)." + type: object + endpointId: + description: Private link endpoint ID. + type: string + host: + description: Source host. + type: string + port: + description: Source port. + format: int32 + type: integer + user: + description: Source user. + type: string + password: + description: Source password. + type: string + security: + allOf: + - $ref: '#/components/schemas/Security' + description: "TLS/SSL settings; if not set, use defaults." 
+ type: object + required: + - password + - port + - user + type: object + ConnType: + description: |- + The connection type used to connect to the source database. + + - PUBLIC: Connect over the public internet. + - PRIVATE_LINK: Connect via Private Link/Private Endpoint. + enum: + - PUBLIC + - PRIVATE_LINK + type: string + CreateMigrationPrecheckResp: + example: + id: id + properties: + id: + description: The ID of the created precheck. + readOnly: true + type: string + type: object + DumpDetail: + properties: + bps: + description: Bytes per second processed during dump. + format: int64 + readOnly: true + type: string + progress: + description: Progress of dump phase (0-100). + format: double + readOnly: true + type: number + totalTables: + description: Total number of tables to dump. + format: int64 + readOnly: true + type: string + completedTables: + description: Number of tables completed dumping. + format: int64 + readOnly: true + type: string + finishedBytes: + description: Total bytes finished dumping. + format: int64 + readOnly: true + type: string + finishedRows: + description: Total rows finished dumping. 
+ format: int64 + readOnly: true + type: string + type: object + ListMigrationTasksResp: + example: + totalSize: 0 + nextPageToken: nextPageToken + tasks: + - mode: "{}" + createTime: 2000-01-23T04:56:07.000+00:00 + subTasks: + - currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + - currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + name: name + id: id + state: "{}" + targetUser: targetUser + - mode: "{}" + createTime: 2000-01-23T04:56:07.000+00:00 + subTasks: + - currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + - currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + name: name + id: id + state: "{}" + targetUser: targetUser + properties: + tasks: + description: The list of migration tasks. + items: + $ref: '#/components/schemas/MigrationTask' + readOnly: true + type: array + totalSize: + description: The total number of tasks matching the query. + format: int64 + readOnly: true + type: integer + nextPageToken: + description: Token to retrieve the next page of results. + readOnly: true + type: string + type: object + LoadDetail: + properties: + bps: + description: Bytes per second processed during load. + format: int64 + readOnly: true + type: string + progress: + description: Progress of load phase (0-100). + format: double + readOnly: true + type: number + finishedBytes: + description: Total bytes finished loading. + format: int64 + readOnly: true + type: string + totalBytes: + description: Total bytes to load. 
+ format: int64 + readOnly: true + type: string + type: object + MigrationPrecheck: + example: + failedCnt: 6 + total: 0 + successCnt: 5 + warnCnt: 1 + items: + - reason: reason + solution: solution + solutionDocUrl: solutionDocUrl + type: "{}" + desc: desc + status: status + - reason: reason + solution: solution + solutionDocUrl: solutionDocUrl + type: "{}" + desc: desc + status: status + status: status + properties: + total: + description: Total number of precheck items. + format: int32 + readOnly: true + type: integer + failedCnt: + description: Number of failed items. + format: int32 + readOnly: true + type: integer + warnCnt: + description: Number of items with warnings. + format: int32 + readOnly: true + type: integer + successCnt: + description: Number of successful items. + format: int32 + readOnly: true + type: integer + status: + description: Overall status of the precheck. + readOnly: true + type: string + items: + description: Details for each precheck item. + items: + $ref: '#/components/schemas/PrecheckItem' + readOnly: true + type: array + type: object + MigrationService.CreateTaskBody: + properties: + name: + description: The display name of the migration task. + type: string + sources: + description: The data sources to migrate from. + items: + $ref: '#/components/schemas/Source' + type: array + target: + allOf: + - $ref: '#/components/schemas/Target' + description: The target database credentials. + type: object + mode: + allOf: + - $ref: '#/components/schemas/TaskMode' + description: The migration mode (full+incremental or incremental-only). + type: object + fullDataMigration: + description: "If true, migrate all user data (equivalent to enabling all\ + \ non-system databases and tables)." 
+ type: boolean + required: + - sources + type: object + MigrationService.PauseTaskBody: + title: PauseMigrationTaskReq is used to pause a migration task + type: object + MigrationService.PrecheckBody: + properties: + name: + description: The display name of the migration task. + type: string + sources: + description: The data sources to migrate from. + items: + $ref: '#/components/schemas/Source' + type: array + target: + allOf: + - $ref: '#/components/schemas/Target' + description: The target database credentials. + type: object + mode: + allOf: + - $ref: '#/components/schemas/TaskMode' + description: The migration mode (full+incremental or incremental-only). + type: object + fullDataMigration: + description: "If true, migrate all user data (equivalent to enabling all\ + \ non-system databases and tables)." + type: boolean + required: + - sources + type: object + MigrationService.ResumeTaskBody: + title: ResumeMigrationTaskReq is used to resume a paused migration task + type: object + MigrationTask: + example: + mode: "{}" + createTime: 2000-01-23T04:56:07.000+00:00 + subTasks: + - currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + - currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + name: name + id: id + state: "{}" + targetUser: targetUser + properties: + id: + description: The unique ID of the migration task. + readOnly: true + type: string + name: + description: The display name of the migration task. + type: string + subTasks: + description: The list of subtasks composing this migration. + items: + $ref: '#/components/schemas/SubTask' + readOnly: true + type: array + targetUser: + description: The target database username used by the task. + readOnly: true + type: string + createTime: + description: The timestamp when the task was created. 
+ format: date-time + readOnly: true + type: string + mode: + allOf: + - $ref: '#/components/schemas/TaskMode' + description: The migration mode of the task. + type: object + state: + allOf: + - $ref: '#/components/schemas/MigrationTask.State' + description: The current state of the task. + type: object + type: object + MigrationTask.State: + description: |- + Overall state of a migration task. + + - CREATING: Task is being created. + - RUNNING: Task is actively running. + - PAUSED: Task is paused. + - CANCELED: Task has been canceled. + - FAILED: Task failed with error. + enum: + - CREATING + - RUNNING + - PAUSED + - CANCELED + - FAILED + type: string + PrecheckItem: + example: + reason: reason + solution: solution + solutionDocUrl: solutionDocUrl + type: "{}" + desc: desc + status: status + properties: + desc: + description: Human-readable description of the check. + readOnly: true + type: string + status: + description: "Status of this check (e.g., SUCCESS, FAILED, WARN)." + readOnly: true + type: string + solution: + description: Suggested solution if the check failed or warned. + readOnly: true + type: string + reason: + description: Reason for the failure or warning. + readOnly: true + type: string + solutionDocUrl: + description: Documentation URL for the solution. + readOnly: true + type: string + type: + allOf: + - $ref: '#/components/schemas/PrecheckItemType' + description: The type of precheck. + type: object + type: object + PrecheckItemType: + description: |- + Types of prechecks performed before starting a migration. + + - PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING: Check source has required dump/export privileges. + - PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING: Check source has required replication privileges. + - PRECHECK_ITEM_TYPE_VERSION_CHECKING: Check source database version compatibility. + - PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING: Check source server_id configuration. 
+ - PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING: Check whether binlog is enabled on source. + - PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING: Check binlog format (e.g., ROW) on source. + - PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING: Check binlog row image setting. + - PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING: Check table/schema compatibility with target. + - PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING: Check binlog database-level filtering configuration. + - PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING: Check concurrent connections limit/availability. + - PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING: Check target database privileges. + - PRECHECK_ITEM_TYPE_META_POSITION_CHECKING: Check saved meta/binlog position validity. + - PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING: Check target tables are empty for Lightning load. + - PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING: Check primary key settings on source tables. + enum: + - PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING + - PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING + - PRECHECK_ITEM_TYPE_VERSION_CHECKING + - PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING + - PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING + - PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING + - PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING + - PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING + - PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING + - PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING + - PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING + - PRECHECK_ITEM_TYPE_META_POSITION_CHECKING + - PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING + - PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING + type: string + RouteRule: + properties: + sourceTable: + allOf: + - $ref: '#/components/schemas/RouteRule.Source' + description: Source table pattern to match. + type: object + targetTable: + allOf: + - $ref: '#/components/schemas/RouteRule.Target' + description: Target table to route to. 
+ type: object + type: object + RouteRule.Source: + properties: + schemaPattern: + description: "Schema pattern of the source, supports wildcards." + type: string + tablePattern: + description: "Table pattern of the source, supports wildcards." + type: string + type: object + RouteRule.Target: + properties: + schema: + description: Target schema name. + type: string + table: + description: Target table name. + type: string + type: object + Security: + properties: + certAllowedCn: + description: Allowed certificate Common Names. + items: + type: string + type: array + sslCaContent: + description: CA certificate content in PEM. + format: byte + pattern: "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" + type: string + sslCertContent: + description: Client certificate content in PEM. + format: byte + pattern: "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" + type: string + sslKeyContent: + description: Client private key in PEM. + format: byte + pattern: "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" + type: string + type: object + Source: + properties: + connProfile: + allOf: + - $ref: '#/components/schemas/ConnProfile' + description: Connection profile for the source database. + type: object + baRules: + allOf: + - $ref: '#/components/schemas/BlockAllowRules' + description: "Block/allow rules for databases and tables, which is exclusive\ + \ with route_rules." + type: object + routeRules: + description: Table route rules,which is exclusive with ba_rules. + items: + $ref: '#/components/schemas/RouteRule' + type: array + binlogName: + description: Starting binlog file name for incremental sync. + nullable: true + type: string + binlogPos: + description: Starting binlog position for incremental sync. + format: int32 + nullable: true + type: integer + binlogGtid: + description: Starting GTID set for incremental sync. 
+ nullable: true + type: string + sourceType: + allOf: + - $ref: '#/components/schemas/Source.SourceType' + description: "Source type (e.g., MySQL)." + type: object + required: + - connProfile + - sourceType + type: object + Source.SourceType: + description: |- + The source database type. + + - SOURCE_TYPE_MYSQL: Self-managed MySQL. + - SOURCE_TYPE_ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL. + enum: + - SOURCE_TYPE_MYSQL + - SOURCE_TYPE_ALICLOUD_RDS_MYSQL + type: string + Status: + example: + code: 6 + details: + - '@type': '@type' + - '@type': '@type' + message: message + properties: + code: + format: int32 + type: integer + message: + type: string + details: + items: + $ref: '#/components/schemas/Any' + type: array + type: object + SubTask: + example: + currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + properties: + source: + allOf: + - $ref: '#/components/schemas/Source' + description: Source configuration for this subtask. + type: object + currentStep: + allOf: + - $ref: '#/components/schemas/SubTask.Step' + description: Current step of the subtask. + type: object + stage: + allOf: + - $ref: '#/components/schemas/SubTask.Stage' + description: Current stage of the subtask. + type: object + dumpDetail: + allOf: + - $ref: '#/components/schemas/DumpDetail' + description: "Detail of dump phase, if applicable." + type: object + loadDetail: + allOf: + - $ref: '#/components/schemas/LoadDetail' + description: "Detail of load phase, if applicable." + type: object + syncDetail: + allOf: + - $ref: '#/components/schemas/SyncDetail' + description: "Detail of sync phase, if applicable." + type: object + errorMsg: + description: Error message when the subtask fails. + nullable: true + readOnly: true + type: string + type: object + SubTask.Stage: + description: |- + The high-level lifecycle stage of a subtask. + + - STAGE_RUNNING: Subtask is running. + - STAGE_PAUSED: Subtask is paused. 
+ - STAGE_FAILED: Subtask failed. + - STAGE_FINISHED: Subtask finished successfully. + - STAGE_UNKNOWN: Subtask stage is unknown. + enum: + - STAGE_RUNNING + - STAGE_PAUSED + - STAGE_FAILED + - STAGE_FINISHED + - STAGE_UNKNOWN + type: string + SubTask.Step: + description: |- + The current step within a subtask. + + - STEP_DUMP: Dump/export data from source. + - STEP_LOAD: Load/import data into target. + - STEP_SYNC: Sync/replicate binlog changes. + enum: + - STEP_DUMP + - STEP_LOAD + - STEP_SYNC + type: string + SyncDetail: + properties: + rps: + description: Rows processed per second during sync. + format: int64 + readOnly: true + type: string + latency: + description: Replication latency in seconds. + format: int64 + readOnly: true + type: string + checkpoint: + description: Synchronization checkpoint. + readOnly: true + type: string + type: object + Target: + properties: + user: + description: Target database user. + type: string + password: + description: Target database password. + type: string + type: object + TaskMode: + description: |- + Migration task mode. + + - MODE_ALL: Full + incremental migration (all phases). + - MODE_INCREMENTAL: Incremental-only migration (replication). + enum: + - MODE_ALL + - MODE_INCREMENTAL + type: string +x-original-swagger-version: "2.0" diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go b/pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go new file mode 100644 index 00000000..bb42d49a --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go @@ -0,0 +1,1114 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package migration + +import ( + "bytes" + "context" + "io" + "net/http" + "net/url" + "strings" +) + +// MigrationAPIService MigrationAPI service +type MigrationAPIService service + +type ApiMigrationServiceCancelPrecheckRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + id string +} + +func (r ApiMigrationServiceCancelPrecheckRequest) Execute() (map[string]interface{}, *http.Response, error) { + return r.ApiService.MigrationServiceCancelPrecheckExecute(r) +} + +/* +MigrationServiceCancelPrecheck Cancel a migration precheck + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster. + @param id The ID of the precheck to cancel. + @return ApiMigrationServiceCancelPrecheckRequest +*/ +func (a *MigrationAPIService) MigrationServiceCancelPrecheck(ctx context.Context, clusterId string, id string) ApiMigrationServiceCancelPrecheckRequest { + return ApiMigrationServiceCancelPrecheckRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + id: id, + } +} + +// Execute executes the request +// +// @return map[string]interface{} +func (a *MigrationAPIService) MigrationServiceCancelPrecheckExecute(r ApiMigrationServiceCancelPrecheckRequest) (map[string]interface{}, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodDelete + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue map[string]interface{} + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCancelPrecheck") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrationPrechecks/{id}" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + localVarPath 
= strings.Replace(localVarPath, "{"+"id"+"}", url.PathEscape(parameterValueToString(r.id, "id")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err 
= a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServiceCancelTaskRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + id string +} + +func (r ApiMigrationServiceCancelTaskRequest) Execute() (*MigrationTask, *http.Response, error) { + return r.ApiService.MigrationServiceCancelTaskExecute(r) +} + +/* +MigrationServiceCancelTask Cancel a migration task + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster. + @param id The ID of the migration task to cancel. + @return ApiMigrationServiceCancelTaskRequest +*/ +func (a *MigrationAPIService) MigrationServiceCancelTask(ctx context.Context, clusterId string, id string) ApiMigrationServiceCancelTaskRequest { + return ApiMigrationServiceCancelTaskRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + id: id, + } +} + +// Execute executes the request +// +// @return MigrationTask +func (a *MigrationAPIService) MigrationServiceCancelTaskExecute(r ApiMigrationServiceCancelTaskRequest) (*MigrationTask, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodDelete + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue *MigrationTask + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCancelTask") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{id}" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", 
url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"id"+"}", url.PathEscape(parameterValueToString(r.id, "id")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + 
newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServiceCreateTaskRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + body *MigrationServiceCreateTaskBody +} + +func (r ApiMigrationServiceCreateTaskRequest) Body(body MigrationServiceCreateTaskBody) ApiMigrationServiceCreateTaskRequest { + r.body = &body + return r +} + +func (r ApiMigrationServiceCreateTaskRequest) Execute() (*MigrationTask, *http.Response, error) { + return r.ApiService.MigrationServiceCreateTaskExecute(r) +} + +/* +MigrationServiceCreateTask Create a migration task + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster to create the migration task in. 
+ @return ApiMigrationServiceCreateTaskRequest +*/ +func (a *MigrationAPIService) MigrationServiceCreateTask(ctx context.Context, clusterId string) ApiMigrationServiceCreateTaskRequest { + return ApiMigrationServiceCreateTaskRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + } +} + +// Execute executes the request +// +// @return MigrationTask +func (a *MigrationAPIService) MigrationServiceCreateTaskExecute(r ApiMigrationServiceCreateTaskRequest) (*MigrationTask, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodPost + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue *MigrationTask + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCreateTask") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + if r.body == nil { + return localVarReturnValue, nil, reportError("body is required and must be specified") + } + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{"application/json"} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + // body params + localVarPostBody = r.body + req, err := 
a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServiceGetPrecheckRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + id string +} + +func (r ApiMigrationServiceGetPrecheckRequest) Execute() (*MigrationPrecheck, *http.Response, error) { + return r.ApiService.MigrationServiceGetPrecheckExecute(r) +} + +/* +MigrationServiceGetPrecheck Get a migration precheck + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. 
Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster. + @param id The ID of the precheck. + @return ApiMigrationServiceGetPrecheckRequest +*/ +func (a *MigrationAPIService) MigrationServiceGetPrecheck(ctx context.Context, clusterId string, id string) ApiMigrationServiceGetPrecheckRequest { + return ApiMigrationServiceGetPrecheckRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + id: id, + } +} + +// Execute executes the request +// +// @return MigrationPrecheck +func (a *MigrationAPIService) MigrationServiceGetPrecheckExecute(r ApiMigrationServiceGetPrecheckRequest) (*MigrationPrecheck, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodGet + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue *MigrationPrecheck + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceGetPrecheck") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrationPrechecks/{id}" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"id"+"}", url.PathEscape(parameterValueToString(r.id, "id")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := 
selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServiceGetTaskRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + id string +} + +func (r ApiMigrationServiceGetTaskRequest) Execute() (*MigrationTask, *http.Response, error) { + return r.ApiService.MigrationServiceGetTaskExecute(r) +} + +/* +MigrationServiceGetTask Get a migration task + + 
@param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster. + @param id The ID of the migration task. + @return ApiMigrationServiceGetTaskRequest +*/ +func (a *MigrationAPIService) MigrationServiceGetTask(ctx context.Context, clusterId string, id string) ApiMigrationServiceGetTaskRequest { + return ApiMigrationServiceGetTaskRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + id: id, + } +} + +// Execute executes the request +// +// @return MigrationTask +func (a *MigrationAPIService) MigrationServiceGetTaskExecute(r ApiMigrationServiceGetTaskRequest) (*MigrationTask, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodGet + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue *MigrationTask + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceGetTask") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{id}" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"id"+"}", url.PathEscape(parameterValueToString(r.id, "id")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + 
localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServiceListTasksRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + pageToken *string + pageSize *int32 + orderBy *string +} + +// Optional. The page token, default is empty. 
+func (r ApiMigrationServiceListTasksRequest) PageToken(pageToken string) ApiMigrationServiceListTasksRequest { + r.pageToken = &pageToken + return r +} + +// Optional. The page size, default is 10. +func (r ApiMigrationServiceListTasksRequest) PageSize(pageSize int32) ApiMigrationServiceListTasksRequest { + r.pageSize = &pageSize + return r +} + +// Specifies the sorting order of results. Use a comma-separated list of field names, optionally appending `desc` for descending order. For example, `createTime desc`. By default, fields are sorted in ascending order. Supported field: `createTime`. +func (r ApiMigrationServiceListTasksRequest) OrderBy(orderBy string) ApiMigrationServiceListTasksRequest { + r.orderBy = &orderBy + return r +} + +func (r ApiMigrationServiceListTasksRequest) Execute() (*ListMigrationTasksResp, *http.Response, error) { + return r.ApiService.MigrationServiceListTasksExecute(r) +} + +/* +MigrationServiceListTasks List migration tasks + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster to list tasks for. 
+ @return ApiMigrationServiceListTasksRequest +*/ +func (a *MigrationAPIService) MigrationServiceListTasks(ctx context.Context, clusterId string) ApiMigrationServiceListTasksRequest { + return ApiMigrationServiceListTasksRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + } +} + +// Execute executes the request +// +// @return ListMigrationTasksResp +func (a *MigrationAPIService) MigrationServiceListTasksExecute(r ApiMigrationServiceListTasksRequest) (*ListMigrationTasksResp, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodGet + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue *ListMigrationTasksResp + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceListTasks") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + + if r.pageToken != nil { + parameterAddToHeaderOrQuery(localVarQueryParams, "pageToken", r.pageToken, "", "") + } + if r.pageSize != nil { + parameterAddToHeaderOrQuery(localVarQueryParams, "pageSize", r.pageSize, "", "") + } else { + var defaultValue int32 = 10 + r.pageSize = &defaultValue + } + if r.orderBy != nil { + parameterAddToHeaderOrQuery(localVarQueryParams, "orderBy", r.orderBy, "", "") + } + // to determine the Content-Type header + localVarHTTPContentTypes := []string{} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts 
:= []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServicePauseTaskRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + id string + body *map[string]interface{} +} + +func (r ApiMigrationServicePauseTaskRequest) Body(body map[string]interface{}) 
ApiMigrationServicePauseTaskRequest { + r.body = &body + return r +} + +func (r ApiMigrationServicePauseTaskRequest) Execute() (map[string]interface{}, *http.Response, error) { + return r.ApiService.MigrationServicePauseTaskExecute(r) +} + +/* +MigrationServicePauseTask Pause a running migration task + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster. + @param id The ID of the migration task to pause. + @return ApiMigrationServicePauseTaskRequest +*/ +func (a *MigrationAPIService) MigrationServicePauseTask(ctx context.Context, clusterId string, id string) ApiMigrationServicePauseTaskRequest { + return ApiMigrationServicePauseTaskRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + id: id, + } +} + +// Execute executes the request +// +// @return map[string]interface{} +func (a *MigrationAPIService) MigrationServicePauseTaskExecute(r ApiMigrationServicePauseTaskRequest) (map[string]interface{}, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodPost + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue map[string]interface{} + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServicePauseTask") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{id}:pause" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"id"+"}", url.PathEscape(parameterValueToString(r.id, "id")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + if r.body == nil { + return localVarReturnValue, nil, reportError("body is 
required and must be specified") + } + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{"application/json"} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + // body params + localVarPostBody = r.body + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr 
:= &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServicePrecheckRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + body *MigrationServicePrecheckBody +} + +func (r ApiMigrationServicePrecheckRequest) Body(body MigrationServicePrecheckBody) ApiMigrationServicePrecheckRequest { + r.body = &body + return r +} + +func (r ApiMigrationServicePrecheckRequest) Execute() (*CreateMigrationPrecheckResp, *http.Response, error) { + return r.ApiService.MigrationServicePrecheckExecute(r) +} + +/* +MigrationServicePrecheck Run a precheck for a migration task + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster to create the migration task in. + @return ApiMigrationServicePrecheckRequest +*/ +func (a *MigrationAPIService) MigrationServicePrecheck(ctx context.Context, clusterId string) ApiMigrationServicePrecheckRequest { + return ApiMigrationServicePrecheckRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + } +} + +// Execute executes the request +// +// @return CreateMigrationPrecheckResp +func (a *MigrationAPIService) MigrationServicePrecheckExecute(r ApiMigrationServicePrecheckRequest) (*CreateMigrationPrecheckResp, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodPost + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue *CreateMigrationPrecheckResp + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServicePrecheck") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrationPrechecks" + localVarPath = 
strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + if r.body == nil { + return localVarReturnValue, nil, reportError("body is required and must be specified") + } + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{"application/json"} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + // body params + localVarPostBody = r.body + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return 
localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} + +type ApiMigrationServiceResumeTaskRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + id string + body *map[string]interface{} +} + +func (r ApiMigrationServiceResumeTaskRequest) Body(body map[string]interface{}) ApiMigrationServiceResumeTaskRequest { + r.body = &body + return r +} + +func (r ApiMigrationServiceResumeTaskRequest) Execute() (map[string]interface{}, *http.Response, error) { + return r.ApiService.MigrationServiceResumeTaskExecute(r) +} + +/* +MigrationServiceResumeTask Resume a paused migration task + + @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). + @param clusterId The ID of the cluster. + @param id The ID of the migration task to resume. 
+ @return ApiMigrationServiceResumeTaskRequest +*/ +func (a *MigrationAPIService) MigrationServiceResumeTask(ctx context.Context, clusterId string, id string) ApiMigrationServiceResumeTaskRequest { + return ApiMigrationServiceResumeTaskRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + id: id, + } +} + +// Execute executes the request +// +// @return map[string]interface{} +func (a *MigrationAPIService) MigrationServiceResumeTaskExecute(r ApiMigrationServiceResumeTaskRequest) (map[string]interface{}, *http.Response, error) { + var ( + localVarHTTPMethod = http.MethodPost + localVarPostBody interface{} + formFiles []formFile + localVarReturnValue map[string]interface{} + ) + + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceResumeTask") + if err != nil { + return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} + } + + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{id}:resume" + localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"id"+"}", url.PathEscape(parameterValueToString(r.id, "id")), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := url.Values{} + if r.body == nil { + return localVarReturnValue, nil, reportError("body is required and must be specified") + } + + // to determine the Content-Type header + localVarHTTPContentTypes := []string{"application/json"} + + // set Content-Type header + localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) + if localVarHTTPContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHTTPContentType + } + + // to determine the Accept header + localVarHTTPHeaderAccepts := []string{"application/json"} + + // set Accept header + localVarHTTPHeaderAccept := 
selectHeaderAccept(localVarHTTPHeaderAccepts) + if localVarHTTPHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept + } + // body params + localVarPostBody = r.body + req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) + if err != nil { + return localVarReturnValue, nil, err + } + + localVarHTTPResponse, err := a.client.callAPI(req) + if err != nil || localVarHTTPResponse == nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + localVarBody, err := io.ReadAll(localVarHTTPResponse.Body) + localVarHTTPResponse.Body.Close() + localVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody)) + if err != nil { + return localVarReturnValue, localVarHTTPResponse, err + } + + if localVarHTTPResponse.StatusCode >= 300 { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: localVarHTTPResponse.Status, + } + var v Status + err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr.error = err.Error() + return localVarReturnValue, localVarHTTPResponse, newErr + } + newErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v) + newErr.model = v + return localVarReturnValue, localVarHTTPResponse, newErr + } + + err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type")) + if err != nil { + newErr := &GenericOpenAPIError{ + body: localVarBody, + error: err.Error(), + } + return localVarReturnValue, localVarHTTPResponse, newErr + } + + return localVarReturnValue, localVarHTTPResponse, nil +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/client.go b/pkg/tidbcloud/v1beta1/serverless/migration/client.go new file mode 100644 index 00000000..a3518159 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/client.go @@ -0,0 +1,655 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB 
Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io" + "log" + "mime/multipart" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path/filepath" + "reflect" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +var ( + JsonCheck = regexp.MustCompile(`(?i:(?:application|text)/(?:[^;]+\+)?json)`) + XmlCheck = regexp.MustCompile(`(?i:(?:application|text)/(?:[^;]+\+)?xml)`) + queryParamSplit = regexp.MustCompile(`(^|&)([^&]+)`) + queryDescape = strings.NewReplacer("%5B", "[", "%5D", "]") +) + +// APIClient manages communication with the TiDB Cloud Starter and Essential API API vv1beta1 +// In most cases there should be only one, shared, APIClient. +type APIClient struct { + cfg *Configuration + common service // Reuse a single struct instead of allocating one for each service on the heap. + + // API Services + + MigrationAPI *MigrationAPIService +} + +type service struct { + client *APIClient +} + +// NewAPIClient creates a new API client. Requires a userAgent string describing your application. +// optionally a custom http.Client to allow for advanced features such as caching. +func NewAPIClient(cfg *Configuration) *APIClient { + if cfg.HTTPClient == nil { + cfg.HTTPClient = http.DefaultClient + } + + c := &APIClient{} + c.cfg = cfg + c.common.client = c + + // API Services + c.MigrationAPI = (*MigrationAPIService)(&c.common) + + return c +} + +func atoi(in string) (int, error) { + return strconv.Atoi(in) +} + +// selectHeaderContentType select a content type from the available list. 
+func selectHeaderContentType(contentTypes []string) string { + if len(contentTypes) == 0 { + return "" + } + if contains(contentTypes, "application/json") { + return "application/json" + } + return contentTypes[0] // use the first content type specified in 'consumes' +} + +// selectHeaderAccept join all accept types and return +func selectHeaderAccept(accepts []string) string { + if len(accepts) == 0 { + return "" + } + + if contains(accepts, "application/json") { + return "application/json" + } + + return strings.Join(accepts, ",") +} + +// contains is a case insensitive match, finding needle in a haystack +func contains(haystack []string, needle string) bool { + for _, a := range haystack { + if strings.EqualFold(a, needle) { + return true + } + } + return false +} + +// Verify optional parameters are of the correct type. +func typeCheckParameter(obj interface{}, expected string, name string) error { + // Make sure there is an object. + if obj == nil { + return nil + } + + // Check the type is as expected. 
+ if reflect.TypeOf(obj).String() != expected { + return fmt.Errorf("expected %s to be of type %s but received %s", name, expected, reflect.TypeOf(obj).String()) + } + return nil +} + +func parameterValueToString(obj interface{}, key string) string { + if reflect.TypeOf(obj).Kind() != reflect.Ptr { + if actualObj, ok := obj.(interface{ GetActualInstanceValue() interface{} }); ok { + return fmt.Sprintf("%v", actualObj.GetActualInstanceValue()) + } + + return fmt.Sprintf("%v", obj) + } + var param, ok = obj.(MappedNullable) + if !ok { + return "" + } + dataMap, err := param.ToMap() + if err != nil { + return "" + } + return fmt.Sprintf("%v", dataMap[key]) +} + +// parameterAddToHeaderOrQuery adds the provided object to the request header or url query +// supporting deep object syntax +func parameterAddToHeaderOrQuery(headerOrQueryParams interface{}, keyPrefix string, obj interface{}, style string, collectionType string) { + var v = reflect.ValueOf(obj) + var value = "" + if v == reflect.ValueOf(nil) { + value = "null" + } else { + switch v.Kind() { + case reflect.Invalid: + value = "invalid" + + case reflect.Struct: + if t, ok := obj.(MappedNullable); ok { + dataMap, err := t.ToMap() + if err != nil { + return + } + parameterAddToHeaderOrQuery(headerOrQueryParams, keyPrefix, dataMap, style, collectionType) + return + } + if t, ok := obj.(time.Time); ok { + parameterAddToHeaderOrQuery(headerOrQueryParams, keyPrefix, t.Format(time.RFC3339Nano), style, collectionType) + return + } + value = v.Type().String() + " value" + case reflect.Slice: + var indValue = reflect.ValueOf(obj) + if indValue == reflect.ValueOf(nil) { + return + } + var lenIndValue = indValue.Len() + for i := 0; i < lenIndValue; i++ { + var arrayValue = indValue.Index(i) + var keyPrefixForCollectionType = keyPrefix + if style == "deepObject" { + keyPrefixForCollectionType = keyPrefix + "[" + strconv.Itoa(i) + "]" + } + parameterAddToHeaderOrQuery(headerOrQueryParams, keyPrefixForCollectionType, 
arrayValue.Interface(), style, collectionType) + } + return + + case reflect.Map: + var indValue = reflect.ValueOf(obj) + if indValue == reflect.ValueOf(nil) { + return + } + iter := indValue.MapRange() + for iter.Next() { + k, v := iter.Key(), iter.Value() + parameterAddToHeaderOrQuery(headerOrQueryParams, fmt.Sprintf("%s[%s]", keyPrefix, k.String()), v.Interface(), style, collectionType) + } + return + + case reflect.Interface: + fallthrough + case reflect.Ptr: + parameterAddToHeaderOrQuery(headerOrQueryParams, keyPrefix, v.Elem().Interface(), style, collectionType) + return + + case reflect.Int, reflect.Int8, reflect.Int16, + reflect.Int32, reflect.Int64: + value = strconv.FormatInt(v.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64, reflect.Uintptr: + value = strconv.FormatUint(v.Uint(), 10) + case reflect.Float32, reflect.Float64: + value = strconv.FormatFloat(v.Float(), 'g', -1, 32) + case reflect.Bool: + value = strconv.FormatBool(v.Bool()) + case reflect.String: + value = v.String() + default: + value = v.Type().String() + " value" + } + } + + switch valuesMap := headerOrQueryParams.(type) { + case url.Values: + if collectionType == "csv" && valuesMap.Get(keyPrefix) != "" { + valuesMap.Set(keyPrefix, valuesMap.Get(keyPrefix)+","+value) + } else { + valuesMap.Add(keyPrefix, value) + } + break + case map[string]string: + valuesMap[keyPrefix] = value + break + } +} + +// helper for converting interface{} parameters to json strings +func parameterToJson(obj interface{}) (string, error) { + jsonBuf, err := json.Marshal(obj) + if err != nil { + return "", err + } + return string(jsonBuf), err +} + +// callAPI do the request. 
+func (c *APIClient) callAPI(request *http.Request) (*http.Response, error) { + if c.cfg.Debug { + dump, err := httputil.DumpRequestOut(request, true) + if err != nil { + return nil, err + } + log.Printf("\n%s\n", string(dump)) + } + + resp, err := c.cfg.HTTPClient.Do(request) + if err != nil { + return resp, err + } + + if c.cfg.Debug { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return resp, err + } + log.Printf("\n%s\n", string(dump)) + } + return resp, err +} + +// Allow modification of underlying config for alternate implementations and testing +// Caution: modifying the configuration while live can cause data races and potentially unwanted behavior +func (c *APIClient) GetConfig() *Configuration { + return c.cfg +} + +type formFile struct { + fileBytes []byte + fileName string + formFileName string +} + +// prepareRequest build the request +func (c *APIClient) prepareRequest( + ctx context.Context, + path string, method string, + postBody interface{}, + headerParams map[string]string, + queryParams url.Values, + formParams url.Values, + formFiles []formFile) (localVarRequest *http.Request, err error) { + + var body *bytes.Buffer + + // Detect postBody type and post. + if postBody != nil { + contentType := headerParams["Content-Type"] + if contentType == "" { + contentType = detectContentType(postBody) + headerParams["Content-Type"] = contentType + } + + body, err = setBody(postBody, contentType) + if err != nil { + return nil, err + } + } + + // add form parameters and file if available. 
+ if strings.HasPrefix(headerParams["Content-Type"], "multipart/form-data") && len(formParams) > 0 || (len(formFiles) > 0) { + if body != nil { + return nil, errors.New("Cannot specify postBody and multipart form at the same time.") + } + body = &bytes.Buffer{} + w := multipart.NewWriter(body) + + for k, v := range formParams { + for _, iv := range v { + if strings.HasPrefix(k, "@") { // file + err = addFile(w, k[1:], iv) + if err != nil { + return nil, err + } + } else { // form value + w.WriteField(k, iv) + } + } + } + for _, formFile := range formFiles { + if len(formFile.fileBytes) > 0 && formFile.fileName != "" { + w.Boundary() + part, err := w.CreateFormFile(formFile.formFileName, filepath.Base(formFile.fileName)) + if err != nil { + return nil, err + } + _, err = part.Write(formFile.fileBytes) + if err != nil { + return nil, err + } + } + } + + // Set the Boundary in the Content-Type + headerParams["Content-Type"] = w.FormDataContentType() + + // Set Content-Length + headerParams["Content-Length"] = fmt.Sprintf("%d", body.Len()) + w.Close() + } + + if strings.HasPrefix(headerParams["Content-Type"], "application/x-www-form-urlencoded") && len(formParams) > 0 { + if body != nil { + return nil, errors.New("Cannot specify postBody and x-www-form-urlencoded form at the same time.") + } + body = &bytes.Buffer{} + body.WriteString(formParams.Encode()) + // Set Content-Length + headerParams["Content-Length"] = fmt.Sprintf("%d", body.Len()) + } + + // Setup path and query parameters + url, err := url.Parse(path) + if err != nil { + return nil, err + } + + // Override request host, if applicable + if c.cfg.Host != "" { + url.Host = c.cfg.Host + } + + // Override request scheme, if applicable + if c.cfg.Scheme != "" { + url.Scheme = c.cfg.Scheme + } + + // Adding Query Param + query := url.Query() + for k, v := range queryParams { + for _, iv := range v { + query.Add(k, iv) + } + } + + // Encode the parameters. 
+ url.RawQuery = queryParamSplit.ReplaceAllStringFunc(query.Encode(), func(s string) string { + pieces := strings.Split(s, "=") + pieces[0] = queryDescape.Replace(pieces[0]) + return strings.Join(pieces, "=") + }) + + // Generate a new request + if body != nil { + localVarRequest, err = http.NewRequest(method, url.String(), body) + } else { + localVarRequest, err = http.NewRequest(method, url.String(), nil) + } + if err != nil { + return nil, err + } + + // add header parameters, if any + if len(headerParams) > 0 { + headers := http.Header{} + for h, v := range headerParams { + headers[h] = []string{v} + } + localVarRequest.Header = headers + } + + // Add the user agent to the request. + localVarRequest.Header.Add("User-Agent", c.cfg.UserAgent) + + if ctx != nil { + // add context to the request + localVarRequest = localVarRequest.WithContext(ctx) + + // Walk through any authentication. + + } + + for header, value := range c.cfg.DefaultHeader { + localVarRequest.Header.Add(header, value) + } + return localVarRequest, nil +} + +func (c *APIClient) decode(v interface{}, b []byte, contentType string) (err error) { + if len(b) == 0 { + return nil + } + if s, ok := v.(*string); ok { + *s = string(b) + return nil + } + if f, ok := v.(*os.File); ok { + f, err = os.CreateTemp("", "HttpClientFile") + if err != nil { + return + } + _, err = f.Write(b) + if err != nil { + return + } + _, err = f.Seek(0, io.SeekStart) + return + } + if f, ok := v.(**os.File); ok { + *f, err = os.CreateTemp("", "HttpClientFile") + if err != nil { + return + } + _, err = (*f).Write(b) + if err != nil { + return + } + _, err = (*f).Seek(0, io.SeekStart) + return + } + if XmlCheck.MatchString(contentType) { + if err = xml.Unmarshal(b, v); err != nil { + return err + } + return nil + } + if JsonCheck.MatchString(contentType) { + if actualObj, ok := v.(interface{ GetActualInstance() interface{} }); ok { // oneOf, anyOf schemas + if unmarshalObj, ok := actualObj.(interface{ UnmarshalJSON([]byte) 
error }); ok { // make sure it has UnmarshalJSON defined + if err = unmarshalObj.UnmarshalJSON(b); err != nil { + return err + } + } else { + return errors.New("Unknown type with GetActualInstance but no unmarshalObj.UnmarshalJSON defined") + } + } else if err = json.Unmarshal(b, v); err != nil { // simple model + return err + } + return nil + } + return errors.New("undefined response type") +} + +// Add a file to the multipart request +func addFile(w *multipart.Writer, fieldName, path string) error { + file, err := os.Open(filepath.Clean(path)) + if err != nil { + return err + } + err = file.Close() + if err != nil { + return err + } + + part, err := w.CreateFormFile(fieldName, filepath.Base(path)) + if err != nil { + return err + } + _, err = io.Copy(part, file) + + return err +} + +// Set request body from an interface{} +func setBody(body interface{}, contentType string) (bodyBuf *bytes.Buffer, err error) { + if bodyBuf == nil { + bodyBuf = &bytes.Buffer{} + } + + if reader, ok := body.(io.Reader); ok { + _, err = bodyBuf.ReadFrom(reader) + } else if fp, ok := body.(*os.File); ok { + _, err = bodyBuf.ReadFrom(fp) + } else if b, ok := body.([]byte); ok { + _, err = bodyBuf.Write(b) + } else if s, ok := body.(string); ok { + _, err = bodyBuf.WriteString(s) + } else if s, ok := body.(*string); ok { + _, err = bodyBuf.WriteString(*s) + } else if JsonCheck.MatchString(contentType) { + err = json.NewEncoder(bodyBuf).Encode(body) + } else if XmlCheck.MatchString(contentType) { + var bs []byte + bs, err = xml.Marshal(body) + if err == nil { + bodyBuf.Write(bs) + } + } + + if err != nil { + return nil, err + } + + if bodyBuf.Len() == 0 { + err = fmt.Errorf("invalid body type %s\n", contentType) + return nil, err + } + return bodyBuf, nil +} + +// detectContentType method is used to figure out `Request.Body` content type for request header +func detectContentType(body interface{}) string { + contentType := "text/plain; charset=utf-8" + kind := reflect.TypeOf(body).Kind() 
+ + switch kind { + case reflect.Struct, reflect.Map, reflect.Ptr: + contentType = "application/json; charset=utf-8" + case reflect.String: + contentType = "text/plain; charset=utf-8" + default: + if b, ok := body.([]byte); ok { + contentType = http.DetectContentType(b) + } else if kind == reflect.Slice { + contentType = "application/json; charset=utf-8" + } + } + + return contentType +} + +// Ripped from https://github.com/gregjones/httpcache/blob/master/httpcache.go +type cacheControl map[string]string + +func parseCacheControl(headers http.Header) cacheControl { + cc := cacheControl{} + ccHeader := headers.Get("Cache-Control") + for _, part := range strings.Split(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + keyval := strings.Split(part, "=") + cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") + } else { + cc[part] = "" + } + } + return cc +} + +// CacheExpires helper function to determine remaining time before repeating a request. +func CacheExpires(r *http.Response) time.Time { + // Figure out when the cache expires. + var expires time.Time + now, err := time.Parse(time.RFC1123, r.Header.Get("date")) + if err != nil { + return time.Now() + } + respCacheControl := parseCacheControl(r.Header) + + if maxAge, ok := respCacheControl["max-age"]; ok { + lifetime, err := time.ParseDuration(maxAge + "s") + if err != nil { + expires = now + } else { + expires = now.Add(lifetime) + } + } else { + expiresHeader := r.Header.Get("Expires") + if expiresHeader != "" { + expires, err = time.Parse(time.RFC1123, expiresHeader) + if err != nil { + expires = now + } + } + } + return expires +} + +func strlen(s string) int { + return utf8.RuneCountInString(s) +} + +// GenericOpenAPIError Provides access to the body, error and model on returned errors. 
+type GenericOpenAPIError struct { + body []byte + error string + model interface{} +} + +// Error returns non-empty string if there was an error. +func (e GenericOpenAPIError) Error() string { + return e.error +} + +// Body returns the raw bytes of the response +func (e GenericOpenAPIError) Body() []byte { + return e.body +} + +// Model returns the unpacked model of the error +func (e GenericOpenAPIError) Model() interface{} { + return e.model +} + +// format error message using title and detail when model implements rfc7807 +func formatErrorMessage(status string, v interface{}) string { + str := "" + metaValue := reflect.ValueOf(v).Elem() + + if metaValue.Kind() == reflect.Struct { + field := metaValue.FieldByName("Title") + if field != (reflect.Value{}) { + str = fmt.Sprintf("%s", field.Interface()) + } + + field = metaValue.FieldByName("Detail") + if field != (reflect.Value{}) { + str = fmt.Sprintf("%s (%s)", str, field.Interface()) + } + } + + return strings.TrimSpace(fmt.Sprintf("%s %s", status, str)) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/configuration.go b/pkg/tidbcloud/v1beta1/serverless/migration/configuration.go new file mode 100644 index 00000000..8d803723 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/configuration.go @@ -0,0 +1,214 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "context" + "fmt" + "net/http" + "strings" +) + +// contextKeys are used to identify the type of value in the context. +// Since these are string, it is possible to get a short description of the +// context key for logging and debugging using key.String(). + +type contextKey string + +func (c contextKey) String() string { + return "auth " + string(c) +} + +var ( + // ContextServerIndex uses a server configuration from the index. 
+ ContextServerIndex = contextKey("serverIndex") + + // ContextOperationServerIndices uses a server configuration from the index mapping. + ContextOperationServerIndices = contextKey("serverOperationIndices") + + // ContextServerVariables overrides a server configuration variables. + ContextServerVariables = contextKey("serverVariables") + + // ContextOperationServerVariables overrides a server configuration variables using operation specific values. + ContextOperationServerVariables = contextKey("serverOperationVariables") +) + +// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth +type BasicAuth struct { + UserName string `json:"userName,omitempty"` + Password string `json:"password,omitempty"` +} + +// APIKey provides API key based authentication to a request passed via context using ContextAPIKey +type APIKey struct { + Key string + Prefix string +} + +// ServerVariable stores the information about a server variable +type ServerVariable struct { + Description string + DefaultValue string + EnumValues []string +} + +// ServerConfiguration stores the information about a server +type ServerConfiguration struct { + URL string + Description string + Variables map[string]ServerVariable +} + +// ServerConfigurations stores multiple ServerConfiguration items +type ServerConfigurations []ServerConfiguration + +// Configuration stores the configuration of the API client +type Configuration struct { + Host string `json:"host,omitempty"` + Scheme string `json:"scheme,omitempty"` + DefaultHeader map[string]string `json:"defaultHeader,omitempty"` + UserAgent string `json:"userAgent,omitempty"` + Debug bool `json:"debug,omitempty"` + Servers ServerConfigurations + OperationServers map[string]ServerConfigurations + HTTPClient *http.Client +} + +// NewConfiguration returns a new Configuration object +func NewConfiguration() *Configuration { + cfg := &Configuration{ + DefaultHeader: make(map[string]string), + UserAgent: 
"OpenAPI-Generator/1.0.0/go", + Debug: false, + Servers: ServerConfigurations{ + { + URL: "https://serverless.tidbapi.com", + Description: "No description provided", + }, + }, + OperationServers: map[string]ServerConfigurations{}, + } + return cfg +} + +// AddDefaultHeader adds a new HTTP header to the default header in the request +func (c *Configuration) AddDefaultHeader(key string, value string) { + c.DefaultHeader[key] = value +} + +// URL formats template on a index using given variables +func (sc ServerConfigurations) URL(index int, variables map[string]string) (string, error) { + if index < 0 || len(sc) <= index { + return "", fmt.Errorf("index %v out of range %v", index, len(sc)-1) + } + server := sc[index] + url := server.URL + + // go through variables and replace placeholders + for name, variable := range server.Variables { + if value, ok := variables[name]; ok { + found := bool(len(variable.EnumValues) == 0) + for _, enumValue := range variable.EnumValues { + if value == enumValue { + found = true + } + } + if !found { + return "", fmt.Errorf("the variable %s in the server URL has invalid value %v. 
Must be %v", name, value, variable.EnumValues) + } + url = strings.Replace(url, "{"+name+"}", value, -1) + } else { + url = strings.Replace(url, "{"+name+"}", variable.DefaultValue, -1) + } + } + return url, nil +} + +// ServerURL returns URL based on server settings +func (c *Configuration) ServerURL(index int, variables map[string]string) (string, error) { + return c.Servers.URL(index, variables) +} + +func getServerIndex(ctx context.Context) (int, error) { + si := ctx.Value(ContextServerIndex) + if si != nil { + if index, ok := si.(int); ok { + return index, nil + } + return 0, reportError("Invalid type %T should be int", si) + } + return 0, nil +} + +func getServerOperationIndex(ctx context.Context, endpoint string) (int, error) { + osi := ctx.Value(ContextOperationServerIndices) + if osi != nil { + if operationIndices, ok := osi.(map[string]int); !ok { + return 0, reportError("Invalid type %T should be map[string]int", osi) + } else { + index, ok := operationIndices[endpoint] + if ok { + return index, nil + } + } + } + return getServerIndex(ctx) +} + +func getServerVariables(ctx context.Context) (map[string]string, error) { + sv := ctx.Value(ContextServerVariables) + if sv != nil { + if variables, ok := sv.(map[string]string); ok { + return variables, nil + } + return nil, reportError("ctx value of ContextServerVariables has invalid type %T should be map[string]string", sv) + } + return nil, nil +} + +func getServerOperationVariables(ctx context.Context, endpoint string) (map[string]string, error) { + osv := ctx.Value(ContextOperationServerVariables) + if osv != nil { + if operationVariables, ok := osv.(map[string]map[string]string); !ok { + return nil, reportError("ctx value of ContextOperationServerVariables has invalid type %T should be map[string]map[string]string", osv) + } else { + variables, ok := operationVariables[endpoint] + if ok { + return variables, nil + } + } + } + return getServerVariables(ctx) +} + +// ServerURLWithContext returns a new server 
URL given an endpoint +func (c *Configuration) ServerURLWithContext(ctx context.Context, endpoint string) (string, error) { + sc, ok := c.OperationServers[endpoint] + if !ok { + sc = c.Servers + } + + if ctx == nil { + return sc.URL(0, nil) + } + + index, err := getServerOperationIndex(ctx, endpoint) + if err != nil { + return "", err + } + + variables, err := getServerOperationVariables(ctx, endpoint) + if err != nil { + return "", err + } + + return sc.URL(index, variables) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/git_push.sh b/pkg/tidbcloud/v1beta1/serverless/migration/git_push.sh new file mode 100644 index 00000000..f53a75d4 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/git_push.sh @@ -0,0 +1,57 @@ +#!/bin/sh +# ref: https://help.github.com/articles/adding-an-existing-project-to-github-using-the-command-line/ +# +# Usage example: /bin/sh ./git_push.sh wing328 openapi-petstore-perl "minor update" "gitlab.com" + +git_user_id=$1 +git_repo_id=$2 +release_note=$3 +git_host=$4 + +if [ "$git_host" = "" ]; then + git_host="github.com" + echo "[INFO] No command line input provided. Set \$git_host to $git_host" +fi + +if [ "$git_user_id" = "" ]; then + git_user_id="GIT_USER_ID" + echo "[INFO] No command line input provided. Set \$git_user_id to $git_user_id" +fi + +if [ "$git_repo_id" = "" ]; then + git_repo_id="GIT_REPO_ID" + echo "[INFO] No command line input provided. Set \$git_repo_id to $git_repo_id" +fi + +if [ "$release_note" = "" ]; then + release_note="Minor update" + echo "[INFO] No command line input provided. Set \$release_note to $release_note" +fi + +# Initialize the local directory as a Git repository +git init + +# Adds the files in the local repository and stages them for commit. +git add . + +# Commits the tracked changes and prepares them to be pushed to a remote repository. 
git commit -m "$release_note"

# Register a remote if this repository does not have one yet.
git_remote=$(git remote)
if [ -z "$git_remote" ]; then # git remote not defined

    if [ -z "$GIT_TOKEN" ]; then
        echo "[INFO] \$GIT_TOKEN (environment variable) is not set. Using the git credential in your environment."
        git remote add origin https://${git_host}/${git_user_id}/${git_repo_id}.git
    else
        git remote add origin https://${git_user_id}:"${GIT_TOKEN}"@${git_host}/${git_user_id}/${git_repo_id}.git
    fi

fi

git pull origin master

# Push the local changes up to the remote, filtering the noisy "To https" line.
echo "Git pushing to https://${git_host}/${git_user_id}/${git_repo_id}.git"
git push origin master 2>&1 | grep -v 'To https'
+ +package migration + +import ( + "encoding/json" +) + +// checks if the Any type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Any{} + +// Any struct for Any +type Any struct { + Type *string `json:"@type,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _Any Any + +// NewAny instantiates a new Any object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewAny() *Any { + this := Any{} + return &this +} + +// NewAnyWithDefaults instantiates a new Any object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewAnyWithDefaults() *Any { + this := Any{} + return &this +} + +// GetType returns the Type field value if set, zero value otherwise. +func (o *Any) GetType() string { + if o == nil || IsNil(o.Type) { + var ret string + return ret + } + return *o.Type +} + +// GetTypeOk returns a tuple with the Type field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Any) GetTypeOk() (*string, bool) { + if o == nil || IsNil(o.Type) { + return nil, false + } + return o.Type, true +} + +// HasType returns a boolean if a field has been set. +func (o *Any) HasType() bool { + if o != nil && !IsNil(o.Type) { + return true + } + + return false +} + +// SetType gets a reference to the given string and assigns it to the Type field. 
+func (o *Any) SetType(v string) { + o.Type = &v +} + +func (o Any) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Any) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Type) { + toSerialize["@type"] = o.Type + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *Any) UnmarshalJSON(data []byte) (err error) { + varAny := _Any{} + + err = json.Unmarshal(data, &varAny) + + if err != nil { + return err + } + + *o = Any(varAny) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "@type") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableAny struct { + value *Any + isSet bool +} + +func (v NullableAny) Get() *Any { + return v.value +} + +func (v *NullableAny) Set(val *Any) { + v.value = val + v.isSet = true +} + +func (v NullableAny) IsSet() bool { + return v.isSet +} + +func (v *NullableAny) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableAny(val *Any) *NullableAny { + return &NullableAny{value: val, isSet: true} +} + +func (v NullableAny) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableAny) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules.go new file mode 100644 index 00000000..d4234094 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules.go @@ -0,0 +1,192 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator 
(https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the BlockAllowRules type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BlockAllowRules{} + +// BlockAllowRules struct for BlockAllowRules +type BlockAllowRules struct { + // Database names to include in migration. + DoDbs []string `json:"doDbs,omitempty"` + // Table-level allow-list rules. + DoTables []BlockAllowRulesTable `json:"doTables,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _BlockAllowRules BlockAllowRules + +// NewBlockAllowRules instantiates a new BlockAllowRules object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBlockAllowRules() *BlockAllowRules { + this := BlockAllowRules{} + return &this +} + +// NewBlockAllowRulesWithDefaults instantiates a new BlockAllowRules object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewBlockAllowRulesWithDefaults() *BlockAllowRules { + this := BlockAllowRules{} + return &this +} + +// GetDoDbs returns the DoDbs field value if set, zero value otherwise. +func (o *BlockAllowRules) GetDoDbs() []string { + if o == nil || IsNil(o.DoDbs) { + var ret []string + return ret + } + return o.DoDbs +} + +// GetDoDbsOk returns a tuple with the DoDbs field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BlockAllowRules) GetDoDbsOk() ([]string, bool) { + if o == nil || IsNil(o.DoDbs) { + return nil, false + } + return o.DoDbs, true +} + +// HasDoDbs returns a boolean if a field has been set. 
+func (o *BlockAllowRules) HasDoDbs() bool { + if o != nil && !IsNil(o.DoDbs) { + return true + } + + return false +} + +// SetDoDbs gets a reference to the given []string and assigns it to the DoDbs field. +func (o *BlockAllowRules) SetDoDbs(v []string) { + o.DoDbs = v +} + +// GetDoTables returns the DoTables field value if set, zero value otherwise. +func (o *BlockAllowRules) GetDoTables() []BlockAllowRulesTable { + if o == nil || IsNil(o.DoTables) { + var ret []BlockAllowRulesTable + return ret + } + return o.DoTables +} + +// GetDoTablesOk returns a tuple with the DoTables field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BlockAllowRules) GetDoTablesOk() ([]BlockAllowRulesTable, bool) { + if o == nil || IsNil(o.DoTables) { + return nil, false + } + return o.DoTables, true +} + +// HasDoTables returns a boolean if a field has been set. +func (o *BlockAllowRules) HasDoTables() bool { + if o != nil && !IsNil(o.DoTables) { + return true + } + + return false +} + +// SetDoTables gets a reference to the given []BlockAllowRulesTable and assigns it to the DoTables field. 
+func (o *BlockAllowRules) SetDoTables(v []BlockAllowRulesTable) { + o.DoTables = v +} + +func (o BlockAllowRules) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BlockAllowRules) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.DoDbs) { + toSerialize["doDbs"] = o.DoDbs + } + if !IsNil(o.DoTables) { + toSerialize["doTables"] = o.DoTables + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *BlockAllowRules) UnmarshalJSON(data []byte) (err error) { + varBlockAllowRules := _BlockAllowRules{} + + err = json.Unmarshal(data, &varBlockAllowRules) + + if err != nil { + return err + } + + *o = BlockAllowRules(varBlockAllowRules) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "doDbs") + delete(additionalProperties, "doTables") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableBlockAllowRules struct { + value *BlockAllowRules + isSet bool +} + +func (v NullableBlockAllowRules) Get() *BlockAllowRules { + return v.value +} + +func (v *NullableBlockAllowRules) Set(val *BlockAllowRules) { + v.value = val + v.isSet = true +} + +func (v NullableBlockAllowRules) IsSet() bool { + return v.isSet +} + +func (v *NullableBlockAllowRules) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBlockAllowRules(val *BlockAllowRules) *NullableBlockAllowRules { + return &NullableBlockAllowRules{value: val, isSet: true} +} + +func (v NullableBlockAllowRules) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBlockAllowRules) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git 
a/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules_table.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules_table.go new file mode 100644 index 00000000..1bf1d7bf --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules_table.go @@ -0,0 +1,192 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the BlockAllowRulesTable type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BlockAllowRulesTable{} + +// BlockAllowRulesTable struct for BlockAllowRulesTable +type BlockAllowRulesTable struct { + // Schema name. + Schema *string `json:"schema,omitempty"` + // Table name. + Table *string `json:"table,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _BlockAllowRulesTable BlockAllowRulesTable + +// NewBlockAllowRulesTable instantiates a new BlockAllowRulesTable object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBlockAllowRulesTable() *BlockAllowRulesTable { + this := BlockAllowRulesTable{} + return &this +} + +// NewBlockAllowRulesTableWithDefaults instantiates a new BlockAllowRulesTable object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewBlockAllowRulesTableWithDefaults() *BlockAllowRulesTable { + this := BlockAllowRulesTable{} + return &this +} + +// GetSchema returns the Schema field value if set, zero value otherwise. 
+func (o *BlockAllowRulesTable) GetSchema() string { + if o == nil || IsNil(o.Schema) { + var ret string + return ret + } + return *o.Schema +} + +// GetSchemaOk returns a tuple with the Schema field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BlockAllowRulesTable) GetSchemaOk() (*string, bool) { + if o == nil || IsNil(o.Schema) { + return nil, false + } + return o.Schema, true +} + +// HasSchema returns a boolean if a field has been set. +func (o *BlockAllowRulesTable) HasSchema() bool { + if o != nil && !IsNil(o.Schema) { + return true + } + + return false +} + +// SetSchema gets a reference to the given string and assigns it to the Schema field. +func (o *BlockAllowRulesTable) SetSchema(v string) { + o.Schema = &v +} + +// GetTable returns the Table field value if set, zero value otherwise. +func (o *BlockAllowRulesTable) GetTable() string { + if o == nil || IsNil(o.Table) { + var ret string + return ret + } + return *o.Table +} + +// GetTableOk returns a tuple with the Table field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BlockAllowRulesTable) GetTableOk() (*string, bool) { + if o == nil || IsNil(o.Table) { + return nil, false + } + return o.Table, true +} + +// HasTable returns a boolean if a field has been set. +func (o *BlockAllowRulesTable) HasTable() bool { + if o != nil && !IsNil(o.Table) { + return true + } + + return false +} + +// SetTable gets a reference to the given string and assigns it to the Table field. 
+func (o *BlockAllowRulesTable) SetTable(v string) { + o.Table = &v +} + +func (o BlockAllowRulesTable) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BlockAllowRulesTable) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Schema) { + toSerialize["schema"] = o.Schema + } + if !IsNil(o.Table) { + toSerialize["table"] = o.Table + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *BlockAllowRulesTable) UnmarshalJSON(data []byte) (err error) { + varBlockAllowRulesTable := _BlockAllowRulesTable{} + + err = json.Unmarshal(data, &varBlockAllowRulesTable) + + if err != nil { + return err + } + + *o = BlockAllowRulesTable(varBlockAllowRulesTable) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "schema") + delete(additionalProperties, "table") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableBlockAllowRulesTable struct { + value *BlockAllowRulesTable + isSet bool +} + +func (v NullableBlockAllowRulesTable) Get() *BlockAllowRulesTable { + return v.value +} + +func (v *NullableBlockAllowRulesTable) Set(val *BlockAllowRulesTable) { + v.value = val + v.isSet = true +} + +func (v NullableBlockAllowRulesTable) IsSet() bool { + return v.isSet +} + +func (v *NullableBlockAllowRulesTable) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBlockAllowRulesTable(val *BlockAllowRulesTable) *NullableBlockAllowRulesTable { + return &NullableBlockAllowRulesTable{value: val, isSet: true} +} + +func (v NullableBlockAllowRulesTable) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBlockAllowRulesTable) UnmarshalJSON(src []byte) error { + v.isSet = true + return 
json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_profile.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_profile.go new file mode 100644 index 00000000..938afd7c --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_profile.go @@ -0,0 +1,379 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" + "fmt" +) + +// checks if the ConnProfile type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &ConnProfile{} + +// ConnProfile struct for ConnProfile +type ConnProfile struct { + // Connection type (e.g., PUBLIC, PRIVATE_LINK). + ConnType *ConnType `json:"connType,omitempty"` + // Private link endpoint ID. + EndpointId *string `json:"endpointId,omitempty"` + // Source host. + Host *string `json:"host,omitempty"` + // Source port. + Port int32 `json:"port"` + // Source user. + User string `json:"user"` + // Source password. + Password string `json:"password"` + // TLS/SSL settings; if not set, use defaults. 
+ Security *Security `json:"security,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _ConnProfile ConnProfile + +// NewConnProfile instantiates a new ConnProfile object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewConnProfile(port int32, user string, password string) *ConnProfile { + this := ConnProfile{} + this.Port = port + this.User = user + this.Password = password + return &this +} + +// NewConnProfileWithDefaults instantiates a new ConnProfile object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewConnProfileWithDefaults() *ConnProfile { + this := ConnProfile{} + return &this +} + +// GetConnType returns the ConnType field value if set, zero value otherwise. +func (o *ConnProfile) GetConnType() ConnType { + if o == nil || IsNil(o.ConnType) { + var ret ConnType + return ret + } + return *o.ConnType +} + +// GetConnTypeOk returns a tuple with the ConnType field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ConnProfile) GetConnTypeOk() (*ConnType, bool) { + if o == nil || IsNil(o.ConnType) { + return nil, false + } + return o.ConnType, true +} + +// HasConnType returns a boolean if a field has been set. +func (o *ConnProfile) HasConnType() bool { + if o != nil && !IsNil(o.ConnType) { + return true + } + + return false +} + +// SetConnType gets a reference to the given ConnType and assigns it to the ConnType field. +func (o *ConnProfile) SetConnType(v ConnType) { + o.ConnType = &v +} + +// GetEndpointId returns the EndpointId field value if set, zero value otherwise. 
+func (o *ConnProfile) GetEndpointId() string { + if o == nil || IsNil(o.EndpointId) { + var ret string + return ret + } + return *o.EndpointId +} + +// GetEndpointIdOk returns a tuple with the EndpointId field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ConnProfile) GetEndpointIdOk() (*string, bool) { + if o == nil || IsNil(o.EndpointId) { + return nil, false + } + return o.EndpointId, true +} + +// HasEndpointId returns a boolean if a field has been set. +func (o *ConnProfile) HasEndpointId() bool { + if o != nil && !IsNil(o.EndpointId) { + return true + } + + return false +} + +// SetEndpointId gets a reference to the given string and assigns it to the EndpointId field. +func (o *ConnProfile) SetEndpointId(v string) { + o.EndpointId = &v +} + +// GetHost returns the Host field value if set, zero value otherwise. +func (o *ConnProfile) GetHost() string { + if o == nil || IsNil(o.Host) { + var ret string + return ret + } + return *o.Host +} + +// GetHostOk returns a tuple with the Host field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ConnProfile) GetHostOk() (*string, bool) { + if o == nil || IsNil(o.Host) { + return nil, false + } + return o.Host, true +} + +// HasHost returns a boolean if a field has been set. +func (o *ConnProfile) HasHost() bool { + if o != nil && !IsNil(o.Host) { + return true + } + + return false +} + +// SetHost gets a reference to the given string and assigns it to the Host field. +func (o *ConnProfile) SetHost(v string) { + o.Host = &v +} + +// GetPort returns the Port field value +func (o *ConnProfile) GetPort() int32 { + if o == nil { + var ret int32 + return ret + } + + return o.Port +} + +// GetPortOk returns a tuple with the Port field value +// and a boolean to check if the value has been set. 
+func (o *ConnProfile) GetPortOk() (*int32, bool) { + if o == nil { + return nil, false + } + return &o.Port, true +} + +// SetPort sets field value +func (o *ConnProfile) SetPort(v int32) { + o.Port = v +} + +// GetUser returns the User field value +func (o *ConnProfile) GetUser() string { + if o == nil { + var ret string + return ret + } + + return o.User +} + +// GetUserOk returns a tuple with the User field value +// and a boolean to check if the value has been set. +func (o *ConnProfile) GetUserOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.User, true +} + +// SetUser sets field value +func (o *ConnProfile) SetUser(v string) { + o.User = v +} + +// GetPassword returns the Password field value +func (o *ConnProfile) GetPassword() string { + if o == nil { + var ret string + return ret + } + + return o.Password +} + +// GetPasswordOk returns a tuple with the Password field value +// and a boolean to check if the value has been set. +func (o *ConnProfile) GetPasswordOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Password, true +} + +// SetPassword sets field value +func (o *ConnProfile) SetPassword(v string) { + o.Password = v +} + +// GetSecurity returns the Security field value if set, zero value otherwise. +func (o *ConnProfile) GetSecurity() Security { + if o == nil || IsNil(o.Security) { + var ret Security + return ret + } + return *o.Security +} + +// GetSecurityOk returns a tuple with the Security field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ConnProfile) GetSecurityOk() (*Security, bool) { + if o == nil || IsNil(o.Security) { + return nil, false + } + return o.Security, true +} + +// HasSecurity returns a boolean if a field has been set. +func (o *ConnProfile) HasSecurity() bool { + if o != nil && !IsNil(o.Security) { + return true + } + + return false +} + +// SetSecurity gets a reference to the given Security and assigns it to the Security field. 
+func (o *ConnProfile) SetSecurity(v Security) { + o.Security = &v +} + +func (o ConnProfile) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o ConnProfile) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.ConnType) { + toSerialize["connType"] = o.ConnType + } + if !IsNil(o.EndpointId) { + toSerialize["endpointId"] = o.EndpointId + } + if !IsNil(o.Host) { + toSerialize["host"] = o.Host + } + toSerialize["port"] = o.Port + toSerialize["user"] = o.User + toSerialize["password"] = o.Password + if !IsNil(o.Security) { + toSerialize["security"] = o.Security + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *ConnProfile) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "port", + "user", + "password", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err + } + + for _, requiredProperty := range requiredProperties { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varConnProfile := _ConnProfile{} + + err = json.Unmarshal(data, &varConnProfile) + + if err != nil { + return err + } + + *o = ConnProfile(varConnProfile) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "connType") + delete(additionalProperties, "endpointId") + delete(additionalProperties, "host") + delete(additionalProperties, "port") + delete(additionalProperties, "user") + delete(additionalProperties, "password") + delete(additionalProperties, "security") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableConnProfile struct { + value *ConnProfile + isSet bool +} + +func (v NullableConnProfile) Get() *ConnProfile { + return v.value +} + +func (v *NullableConnProfile) Set(val *ConnProfile) { + v.value = val + v.isSet = true +} + +func (v NullableConnProfile) IsSet() bool { + return v.isSet +} + +func (v *NullableConnProfile) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableConnProfile(val *ConnProfile) *NullableConnProfile { + return &NullableConnProfile{value: val, isSet: true} +} + +func (v NullableConnProfile) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableConnProfile) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_type.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_type.go new file mode 100644 index 00000000..a0a2e39a --- 
/dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_type.go @@ -0,0 +1,105 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// ConnType The connection type used to connect to the source database. - PUBLIC: Connect over the public internet. - PRIVATE_LINK: Connect via Private Link/Private Endpoint. +type ConnType string + +// List of ConnType +const ( + CONNTYPE_PUBLIC ConnType = "PUBLIC" + CONNTYPE_PRIVATE_LINK ConnType = "PRIVATE_LINK" +) + +// All allowed values of ConnType enum +var AllowedConnTypeEnumValues = []ConnType{ + "PUBLIC", + "PRIVATE_LINK", +} + +func (v *ConnType) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := ConnType(value) + for _, existing := range AllowedConnTypeEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = ConnType(value) + return nil +} + +// NewConnTypeFromValue returns a pointer to a valid ConnType for the value passed as argument +func NewConnTypeFromValue(v string) *ConnType { + ev := ConnType(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v ConnType) IsValid() bool { + for _, existing := range AllowedConnTypeEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to ConnType value +func (v ConnType) Ptr() *ConnType { + return &v +} + +type NullableConnType struct { + value *ConnType + isSet bool +} + +func (v NullableConnType) Get() *ConnType { + return v.value +} + +func (v *NullableConnType) Set(val *ConnType) { + v.value = val + v.isSet = true +} + +func (v NullableConnType) IsSet() bool { + return v.isSet +} + +func (v *NullableConnType) Unset() { + v.value = nil + 
v.isSet = false +} + +func NewNullableConnType(val *ConnType) *NullableConnType { + return &NullableConnType{value: val, isSet: true} +} + +func (v NullableConnType) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableConnType) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_create_migration_precheck_resp.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_create_migration_precheck_resp.go new file mode 100644 index 00000000..f36abcdc --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_create_migration_precheck_resp.go @@ -0,0 +1,154 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the CreateMigrationPrecheckResp type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &CreateMigrationPrecheckResp{} + +// CreateMigrationPrecheckResp struct for CreateMigrationPrecheckResp +type CreateMigrationPrecheckResp struct { + // The ID of the created precheck. 
+ Id *string `json:"id,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _CreateMigrationPrecheckResp CreateMigrationPrecheckResp + +// NewCreateMigrationPrecheckResp instantiates a new CreateMigrationPrecheckResp object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewCreateMigrationPrecheckResp() *CreateMigrationPrecheckResp { + this := CreateMigrationPrecheckResp{} + return &this +} + +// NewCreateMigrationPrecheckRespWithDefaults instantiates a new CreateMigrationPrecheckResp object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewCreateMigrationPrecheckRespWithDefaults() *CreateMigrationPrecheckResp { + this := CreateMigrationPrecheckResp{} + return &this +} + +// GetId returns the Id field value if set, zero value otherwise. +func (o *CreateMigrationPrecheckResp) GetId() string { + if o == nil || IsNil(o.Id) { + var ret string + return ret + } + return *o.Id +} + +// GetIdOk returns a tuple with the Id field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *CreateMigrationPrecheckResp) GetIdOk() (*string, bool) { + if o == nil || IsNil(o.Id) { + return nil, false + } + return o.Id, true +} + +// HasId returns a boolean if a field has been set. +func (o *CreateMigrationPrecheckResp) HasId() bool { + if o != nil && !IsNil(o.Id) { + return true + } + + return false +} + +// SetId gets a reference to the given string and assigns it to the Id field. 
+func (o *CreateMigrationPrecheckResp) SetId(v string) { + o.Id = &v +} + +func (o CreateMigrationPrecheckResp) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o CreateMigrationPrecheckResp) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Id) { + toSerialize["id"] = o.Id + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *CreateMigrationPrecheckResp) UnmarshalJSON(data []byte) (err error) { + varCreateMigrationPrecheckResp := _CreateMigrationPrecheckResp{} + + err = json.Unmarshal(data, &varCreateMigrationPrecheckResp) + + if err != nil { + return err + } + + *o = CreateMigrationPrecheckResp(varCreateMigrationPrecheckResp) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "id") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableCreateMigrationPrecheckResp struct { + value *CreateMigrationPrecheckResp + isSet bool +} + +func (v NullableCreateMigrationPrecheckResp) Get() *CreateMigrationPrecheckResp { + return v.value +} + +func (v *NullableCreateMigrationPrecheckResp) Set(val *CreateMigrationPrecheckResp) { + v.value = val + v.isSet = true +} + +func (v NullableCreateMigrationPrecheckResp) IsSet() bool { + return v.isSet +} + +func (v *NullableCreateMigrationPrecheckResp) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableCreateMigrationPrecheckResp(val *CreateMigrationPrecheckResp) *NullableCreateMigrationPrecheckResp { + return &NullableCreateMigrationPrecheckResp{value: val, isSet: true} +} + +func (v NullableCreateMigrationPrecheckResp) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableCreateMigrationPrecheckResp) UnmarshalJSON(src 
[]byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_dump_detail.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_dump_detail.go new file mode 100644 index 00000000..54496692 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_dump_detail.go @@ -0,0 +1,344 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the DumpDetail type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &DumpDetail{} + +// DumpDetail struct for DumpDetail +type DumpDetail struct { + // Bytes per second processed during dump. + Bps *string `json:"bps,omitempty"` + // Progress of dump phase (0-100). + Progress *float64 `json:"progress,omitempty"` + // Total number of tables to dump. + TotalTables *string `json:"totalTables,omitempty"` + // Number of tables completed dumping. + CompletedTables *string `json:"completedTables,omitempty"` + // Total bytes finished dumping. + FinishedBytes *string `json:"finishedBytes,omitempty"` + // Total rows finished dumping. 
+ FinishedRows *string `json:"finishedRows,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _DumpDetail DumpDetail + +// NewDumpDetail instantiates a new DumpDetail object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewDumpDetail() *DumpDetail { + this := DumpDetail{} + return &this +} + +// NewDumpDetailWithDefaults instantiates a new DumpDetail object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewDumpDetailWithDefaults() *DumpDetail { + this := DumpDetail{} + return &this +} + +// GetBps returns the Bps field value if set, zero value otherwise. +func (o *DumpDetail) GetBps() string { + if o == nil || IsNil(o.Bps) { + var ret string + return ret + } + return *o.Bps +} + +// GetBpsOk returns a tuple with the Bps field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DumpDetail) GetBpsOk() (*string, bool) { + if o == nil || IsNil(o.Bps) { + return nil, false + } + return o.Bps, true +} + +// HasBps returns a boolean if a field has been set. +func (o *DumpDetail) HasBps() bool { + if o != nil && !IsNil(o.Bps) { + return true + } + + return false +} + +// SetBps gets a reference to the given string and assigns it to the Bps field. +func (o *DumpDetail) SetBps(v string) { + o.Bps = &v +} + +// GetProgress returns the Progress field value if set, zero value otherwise. +func (o *DumpDetail) GetProgress() float64 { + if o == nil || IsNil(o.Progress) { + var ret float64 + return ret + } + return *o.Progress +} + +// GetProgressOk returns a tuple with the Progress field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *DumpDetail) GetProgressOk() (*float64, bool) { + if o == nil || IsNil(o.Progress) { + return nil, false + } + return o.Progress, true +} + +// HasProgress returns a boolean if a field has been set. +func (o *DumpDetail) HasProgress() bool { + if o != nil && !IsNil(o.Progress) { + return true + } + + return false +} + +// SetProgress gets a reference to the given float64 and assigns it to the Progress field. +func (o *DumpDetail) SetProgress(v float64) { + o.Progress = &v +} + +// GetTotalTables returns the TotalTables field value if set, zero value otherwise. +func (o *DumpDetail) GetTotalTables() string { + if o == nil || IsNil(o.TotalTables) { + var ret string + return ret + } + return *o.TotalTables +} + +// GetTotalTablesOk returns a tuple with the TotalTables field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DumpDetail) GetTotalTablesOk() (*string, bool) { + if o == nil || IsNil(o.TotalTables) { + return nil, false + } + return o.TotalTables, true +} + +// HasTotalTables returns a boolean if a field has been set. +func (o *DumpDetail) HasTotalTables() bool { + if o != nil && !IsNil(o.TotalTables) { + return true + } + + return false +} + +// SetTotalTables gets a reference to the given string and assigns it to the TotalTables field. +func (o *DumpDetail) SetTotalTables(v string) { + o.TotalTables = &v +} + +// GetCompletedTables returns the CompletedTables field value if set, zero value otherwise. +func (o *DumpDetail) GetCompletedTables() string { + if o == nil || IsNil(o.CompletedTables) { + var ret string + return ret + } + return *o.CompletedTables +} + +// GetCompletedTablesOk returns a tuple with the CompletedTables field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *DumpDetail) GetCompletedTablesOk() (*string, bool) { + if o == nil || IsNil(o.CompletedTables) { + return nil, false + } + return o.CompletedTables, true +} + +// HasCompletedTables returns a boolean if a field has been set. +func (o *DumpDetail) HasCompletedTables() bool { + if o != nil && !IsNil(o.CompletedTables) { + return true + } + + return false +} + +// SetCompletedTables gets a reference to the given string and assigns it to the CompletedTables field. +func (o *DumpDetail) SetCompletedTables(v string) { + o.CompletedTables = &v +} + +// GetFinishedBytes returns the FinishedBytes field value if set, zero value otherwise. +func (o *DumpDetail) GetFinishedBytes() string { + if o == nil || IsNil(o.FinishedBytes) { + var ret string + return ret + } + return *o.FinishedBytes +} + +// GetFinishedBytesOk returns a tuple with the FinishedBytes field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DumpDetail) GetFinishedBytesOk() (*string, bool) { + if o == nil || IsNil(o.FinishedBytes) { + return nil, false + } + return o.FinishedBytes, true +} + +// HasFinishedBytes returns a boolean if a field has been set. +func (o *DumpDetail) HasFinishedBytes() bool { + if o != nil && !IsNil(o.FinishedBytes) { + return true + } + + return false +} + +// SetFinishedBytes gets a reference to the given string and assigns it to the FinishedBytes field. +func (o *DumpDetail) SetFinishedBytes(v string) { + o.FinishedBytes = &v +} + +// GetFinishedRows returns the FinishedRows field value if set, zero value otherwise. +func (o *DumpDetail) GetFinishedRows() string { + if o == nil || IsNil(o.FinishedRows) { + var ret string + return ret + } + return *o.FinishedRows +} + +// GetFinishedRowsOk returns a tuple with the FinishedRows field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *DumpDetail) GetFinishedRowsOk() (*string, bool) { + if o == nil || IsNil(o.FinishedRows) { + return nil, false + } + return o.FinishedRows, true +} + +// HasFinishedRows returns a boolean if a field has been set. +func (o *DumpDetail) HasFinishedRows() bool { + if o != nil && !IsNil(o.FinishedRows) { + return true + } + + return false +} + +// SetFinishedRows gets a reference to the given string and assigns it to the FinishedRows field. +func (o *DumpDetail) SetFinishedRows(v string) { + o.FinishedRows = &v +} + +func (o DumpDetail) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o DumpDetail) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Bps) { + toSerialize["bps"] = o.Bps + } + if !IsNil(o.Progress) { + toSerialize["progress"] = o.Progress + } + if !IsNil(o.TotalTables) { + toSerialize["totalTables"] = o.TotalTables + } + if !IsNil(o.CompletedTables) { + toSerialize["completedTables"] = o.CompletedTables + } + if !IsNil(o.FinishedBytes) { + toSerialize["finishedBytes"] = o.FinishedBytes + } + if !IsNil(o.FinishedRows) { + toSerialize["finishedRows"] = o.FinishedRows + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *DumpDetail) UnmarshalJSON(data []byte) (err error) { + varDumpDetail := _DumpDetail{} + + err = json.Unmarshal(data, &varDumpDetail) + + if err != nil { + return err + } + + *o = DumpDetail(varDumpDetail) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "bps") + delete(additionalProperties, "progress") + delete(additionalProperties, "totalTables") + delete(additionalProperties, "completedTables") + delete(additionalProperties, "finishedBytes") + delete(additionalProperties, "finishedRows") + 
o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableDumpDetail struct { + value *DumpDetail + isSet bool +} + +func (v NullableDumpDetail) Get() *DumpDetail { + return v.value +} + +func (v *NullableDumpDetail) Set(val *DumpDetail) { + v.value = val + v.isSet = true +} + +func (v NullableDumpDetail) IsSet() bool { + return v.isSet +} + +func (v *NullableDumpDetail) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableDumpDetail(val *DumpDetail) *NullableDumpDetail { + return &NullableDumpDetail{value: val, isSet: true} +} + +func (v NullableDumpDetail) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableDumpDetail) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_list_migration_tasks_resp.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_list_migration_tasks_resp.go new file mode 100644 index 00000000..361cfa37 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_list_migration_tasks_resp.go @@ -0,0 +1,230 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the ListMigrationTasksResp type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &ListMigrationTasksResp{} + +// ListMigrationTasksResp struct for ListMigrationTasksResp +type ListMigrationTasksResp struct { + // The list of migration tasks. + Tasks []MigrationTask `json:"tasks,omitempty"` + // The total number of tasks matching the query. + TotalSize *int64 `json:"totalSize,omitempty"` + // Token to retrieve the next page of results. 
+ NextPageToken *string `json:"nextPageToken,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _ListMigrationTasksResp ListMigrationTasksResp + +// NewListMigrationTasksResp instantiates a new ListMigrationTasksResp object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewListMigrationTasksResp() *ListMigrationTasksResp { + this := ListMigrationTasksResp{} + return &this +} + +// NewListMigrationTasksRespWithDefaults instantiates a new ListMigrationTasksResp object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewListMigrationTasksRespWithDefaults() *ListMigrationTasksResp { + this := ListMigrationTasksResp{} + return &this +} + +// GetTasks returns the Tasks field value if set, zero value otherwise. +func (o *ListMigrationTasksResp) GetTasks() []MigrationTask { + if o == nil || IsNil(o.Tasks) { + var ret []MigrationTask + return ret + } + return o.Tasks +} + +// GetTasksOk returns a tuple with the Tasks field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ListMigrationTasksResp) GetTasksOk() ([]MigrationTask, bool) { + if o == nil || IsNil(o.Tasks) { + return nil, false + } + return o.Tasks, true +} + +// HasTasks returns a boolean if a field has been set. +func (o *ListMigrationTasksResp) HasTasks() bool { + if o != nil && !IsNil(o.Tasks) { + return true + } + + return false +} + +// SetTasks gets a reference to the given []MigrationTask and assigns it to the Tasks field. +func (o *ListMigrationTasksResp) SetTasks(v []MigrationTask) { + o.Tasks = v +} + +// GetTotalSize returns the TotalSize field value if set, zero value otherwise. 
+func (o *ListMigrationTasksResp) GetTotalSize() int64 { + if o == nil || IsNil(o.TotalSize) { + var ret int64 + return ret + } + return *o.TotalSize +} + +// GetTotalSizeOk returns a tuple with the TotalSize field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ListMigrationTasksResp) GetTotalSizeOk() (*int64, bool) { + if o == nil || IsNil(o.TotalSize) { + return nil, false + } + return o.TotalSize, true +} + +// HasTotalSize returns a boolean if a field has been set. +func (o *ListMigrationTasksResp) HasTotalSize() bool { + if o != nil && !IsNil(o.TotalSize) { + return true + } + + return false +} + +// SetTotalSize gets a reference to the given int64 and assigns it to the TotalSize field. +func (o *ListMigrationTasksResp) SetTotalSize(v int64) { + o.TotalSize = &v +} + +// GetNextPageToken returns the NextPageToken field value if set, zero value otherwise. +func (o *ListMigrationTasksResp) GetNextPageToken() string { + if o == nil || IsNil(o.NextPageToken) { + var ret string + return ret + } + return *o.NextPageToken +} + +// GetNextPageTokenOk returns a tuple with the NextPageToken field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ListMigrationTasksResp) GetNextPageTokenOk() (*string, bool) { + if o == nil || IsNil(o.NextPageToken) { + return nil, false + } + return o.NextPageToken, true +} + +// HasNextPageToken returns a boolean if a field has been set. +func (o *ListMigrationTasksResp) HasNextPageToken() bool { + if o != nil && !IsNil(o.NextPageToken) { + return true + } + + return false +} + +// SetNextPageToken gets a reference to the given string and assigns it to the NextPageToken field. 
+func (o *ListMigrationTasksResp) SetNextPageToken(v string) { + o.NextPageToken = &v +} + +func (o ListMigrationTasksResp) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o ListMigrationTasksResp) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Tasks) { + toSerialize["tasks"] = o.Tasks + } + if !IsNil(o.TotalSize) { + toSerialize["totalSize"] = o.TotalSize + } + if !IsNil(o.NextPageToken) { + toSerialize["nextPageToken"] = o.NextPageToken + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *ListMigrationTasksResp) UnmarshalJSON(data []byte) (err error) { + varListMigrationTasksResp := _ListMigrationTasksResp{} + + err = json.Unmarshal(data, &varListMigrationTasksResp) + + if err != nil { + return err + } + + *o = ListMigrationTasksResp(varListMigrationTasksResp) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "tasks") + delete(additionalProperties, "totalSize") + delete(additionalProperties, "nextPageToken") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableListMigrationTasksResp struct { + value *ListMigrationTasksResp + isSet bool +} + +func (v NullableListMigrationTasksResp) Get() *ListMigrationTasksResp { + return v.value +} + +func (v *NullableListMigrationTasksResp) Set(val *ListMigrationTasksResp) { + v.value = val + v.isSet = true +} + +func (v NullableListMigrationTasksResp) IsSet() bool { + return v.isSet +} + +func (v *NullableListMigrationTasksResp) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableListMigrationTasksResp(val *ListMigrationTasksResp) *NullableListMigrationTasksResp { + return &NullableListMigrationTasksResp{value: val, isSet: true} +} + +func (v 
NullableListMigrationTasksResp) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableListMigrationTasksResp) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_load_detail.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_load_detail.go new file mode 100644 index 00000000..d0d3b7ba --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_load_detail.go @@ -0,0 +1,268 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the LoadDetail type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &LoadDetail{} + +// LoadDetail struct for LoadDetail +type LoadDetail struct { + // Bytes per second processed during load. + Bps *string `json:"bps,omitempty"` + // Progress of load phase (0-100). + Progress *float64 `json:"progress,omitempty"` + // Total bytes finished loading. + FinishedBytes *string `json:"finishedBytes,omitempty"` + // Total bytes to load. 
+ TotalBytes *string `json:"totalBytes,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _LoadDetail LoadDetail + +// NewLoadDetail instantiates a new LoadDetail object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewLoadDetail() *LoadDetail { + this := LoadDetail{} + return &this +} + +// NewLoadDetailWithDefaults instantiates a new LoadDetail object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewLoadDetailWithDefaults() *LoadDetail { + this := LoadDetail{} + return &this +} + +// GetBps returns the Bps field value if set, zero value otherwise. +func (o *LoadDetail) GetBps() string { + if o == nil || IsNil(o.Bps) { + var ret string + return ret + } + return *o.Bps +} + +// GetBpsOk returns a tuple with the Bps field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *LoadDetail) GetBpsOk() (*string, bool) { + if o == nil || IsNil(o.Bps) { + return nil, false + } + return o.Bps, true +} + +// HasBps returns a boolean if a field has been set. +func (o *LoadDetail) HasBps() bool { + if o != nil && !IsNil(o.Bps) { + return true + } + + return false +} + +// SetBps gets a reference to the given string and assigns it to the Bps field. +func (o *LoadDetail) SetBps(v string) { + o.Bps = &v +} + +// GetProgress returns the Progress field value if set, zero value otherwise. +func (o *LoadDetail) GetProgress() float64 { + if o == nil || IsNil(o.Progress) { + var ret float64 + return ret + } + return *o.Progress +} + +// GetProgressOk returns a tuple with the Progress field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *LoadDetail) GetProgressOk() (*float64, bool) { + if o == nil || IsNil(o.Progress) { + return nil, false + } + return o.Progress, true +} + +// HasProgress returns a boolean if a field has been set. +func (o *LoadDetail) HasProgress() bool { + if o != nil && !IsNil(o.Progress) { + return true + } + + return false +} + +// SetProgress gets a reference to the given float64 and assigns it to the Progress field. +func (o *LoadDetail) SetProgress(v float64) { + o.Progress = &v +} + +// GetFinishedBytes returns the FinishedBytes field value if set, zero value otherwise. +func (o *LoadDetail) GetFinishedBytes() string { + if o == nil || IsNil(o.FinishedBytes) { + var ret string + return ret + } + return *o.FinishedBytes +} + +// GetFinishedBytesOk returns a tuple with the FinishedBytes field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *LoadDetail) GetFinishedBytesOk() (*string, bool) { + if o == nil || IsNil(o.FinishedBytes) { + return nil, false + } + return o.FinishedBytes, true +} + +// HasFinishedBytes returns a boolean if a field has been set. +func (o *LoadDetail) HasFinishedBytes() bool { + if o != nil && !IsNil(o.FinishedBytes) { + return true + } + + return false +} + +// SetFinishedBytes gets a reference to the given string and assigns it to the FinishedBytes field. +func (o *LoadDetail) SetFinishedBytes(v string) { + o.FinishedBytes = &v +} + +// GetTotalBytes returns the TotalBytes field value if set, zero value otherwise. +func (o *LoadDetail) GetTotalBytes() string { + if o == nil || IsNil(o.TotalBytes) { + var ret string + return ret + } + return *o.TotalBytes +} + +// GetTotalBytesOk returns a tuple with the TotalBytes field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *LoadDetail) GetTotalBytesOk() (*string, bool) { + if o == nil || IsNil(o.TotalBytes) { + return nil, false + } + return o.TotalBytes, true +} + +// HasTotalBytes returns a boolean if a field has been set. +func (o *LoadDetail) HasTotalBytes() bool { + if o != nil && !IsNil(o.TotalBytes) { + return true + } + + return false +} + +// SetTotalBytes gets a reference to the given string and assigns it to the TotalBytes field. +func (o *LoadDetail) SetTotalBytes(v string) { + o.TotalBytes = &v +} + +func (o LoadDetail) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o LoadDetail) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Bps) { + toSerialize["bps"] = o.Bps + } + if !IsNil(o.Progress) { + toSerialize["progress"] = o.Progress + } + if !IsNil(o.FinishedBytes) { + toSerialize["finishedBytes"] = o.FinishedBytes + } + if !IsNil(o.TotalBytes) { + toSerialize["totalBytes"] = o.TotalBytes + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *LoadDetail) UnmarshalJSON(data []byte) (err error) { + varLoadDetail := _LoadDetail{} + + err = json.Unmarshal(data, &varLoadDetail) + + if err != nil { + return err + } + + *o = LoadDetail(varLoadDetail) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "bps") + delete(additionalProperties, "progress") + delete(additionalProperties, "finishedBytes") + delete(additionalProperties, "totalBytes") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableLoadDetail struct { + value *LoadDetail + isSet bool +} + +func (v NullableLoadDetail) Get() *LoadDetail { + return v.value +} + +func (v *NullableLoadDetail) Set(val *LoadDetail) { + v.value = val + v.isSet = true 
+} + +func (v NullableLoadDetail) IsSet() bool { + return v.isSet +} + +func (v *NullableLoadDetail) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableLoadDetail(val *LoadDetail) *NullableLoadDetail { + return &NullableLoadDetail{value: val, isSet: true} +} + +func (v NullableLoadDetail) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableLoadDetail) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go new file mode 100644 index 00000000..6b525bdd --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go @@ -0,0 +1,344 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the MigrationPrecheck type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &MigrationPrecheck{} + +// MigrationPrecheck struct for MigrationPrecheck +type MigrationPrecheck struct { + // Total number of precheck items. + Total *int32 `json:"total,omitempty"` + // Number of failed items. + FailedCnt *int32 `json:"failedCnt,omitempty"` + // Number of items with warnings. + WarnCnt *int32 `json:"warnCnt,omitempty"` + // Number of successful items. + SuccessCnt *int32 `json:"successCnt,omitempty"` + // Overall status of the precheck. + Status *string `json:"status,omitempty"` + // Details for each precheck item. 
+ Items []PrecheckItem `json:"items,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _MigrationPrecheck MigrationPrecheck + +// NewMigrationPrecheck instantiates a new MigrationPrecheck object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewMigrationPrecheck() *MigrationPrecheck { + this := MigrationPrecheck{} + return &this +} + +// NewMigrationPrecheckWithDefaults instantiates a new MigrationPrecheck object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewMigrationPrecheckWithDefaults() *MigrationPrecheck { + this := MigrationPrecheck{} + return &this +} + +// GetTotal returns the Total field value if set, zero value otherwise. +func (o *MigrationPrecheck) GetTotal() int32 { + if o == nil || IsNil(o.Total) { + var ret int32 + return ret + } + return *o.Total +} + +// GetTotalOk returns a tuple with the Total field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationPrecheck) GetTotalOk() (*int32, bool) { + if o == nil || IsNil(o.Total) { + return nil, false + } + return o.Total, true +} + +// HasTotal returns a boolean if a field has been set. +func (o *MigrationPrecheck) HasTotal() bool { + if o != nil && !IsNil(o.Total) { + return true + } + + return false +} + +// SetTotal gets a reference to the given int32 and assigns it to the Total field. +func (o *MigrationPrecheck) SetTotal(v int32) { + o.Total = &v +} + +// GetFailedCnt returns the FailedCnt field value if set, zero value otherwise. 
+func (o *MigrationPrecheck) GetFailedCnt() int32 { + if o == nil || IsNil(o.FailedCnt) { + var ret int32 + return ret + } + return *o.FailedCnt +} + +// GetFailedCntOk returns a tuple with the FailedCnt field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationPrecheck) GetFailedCntOk() (*int32, bool) { + if o == nil || IsNil(o.FailedCnt) { + return nil, false + } + return o.FailedCnt, true +} + +// HasFailedCnt returns a boolean if a field has been set. +func (o *MigrationPrecheck) HasFailedCnt() bool { + if o != nil && !IsNil(o.FailedCnt) { + return true + } + + return false +} + +// SetFailedCnt gets a reference to the given int32 and assigns it to the FailedCnt field. +func (o *MigrationPrecheck) SetFailedCnt(v int32) { + o.FailedCnt = &v +} + +// GetWarnCnt returns the WarnCnt field value if set, zero value otherwise. +func (o *MigrationPrecheck) GetWarnCnt() int32 { + if o == nil || IsNil(o.WarnCnt) { + var ret int32 + return ret + } + return *o.WarnCnt +} + +// GetWarnCntOk returns a tuple with the WarnCnt field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationPrecheck) GetWarnCntOk() (*int32, bool) { + if o == nil || IsNil(o.WarnCnt) { + return nil, false + } + return o.WarnCnt, true +} + +// HasWarnCnt returns a boolean if a field has been set. +func (o *MigrationPrecheck) HasWarnCnt() bool { + if o != nil && !IsNil(o.WarnCnt) { + return true + } + + return false +} + +// SetWarnCnt gets a reference to the given int32 and assigns it to the WarnCnt field. +func (o *MigrationPrecheck) SetWarnCnt(v int32) { + o.WarnCnt = &v +} + +// GetSuccessCnt returns the SuccessCnt field value if set, zero value otherwise. 
+func (o *MigrationPrecheck) GetSuccessCnt() int32 { + if o == nil || IsNil(o.SuccessCnt) { + var ret int32 + return ret + } + return *o.SuccessCnt +} + +// GetSuccessCntOk returns a tuple with the SuccessCnt field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationPrecheck) GetSuccessCntOk() (*int32, bool) { + if o == nil || IsNil(o.SuccessCnt) { + return nil, false + } + return o.SuccessCnt, true +} + +// HasSuccessCnt returns a boolean if a field has been set. +func (o *MigrationPrecheck) HasSuccessCnt() bool { + if o != nil && !IsNil(o.SuccessCnt) { + return true + } + + return false +} + +// SetSuccessCnt gets a reference to the given int32 and assigns it to the SuccessCnt field. +func (o *MigrationPrecheck) SetSuccessCnt(v int32) { + o.SuccessCnt = &v +} + +// GetStatus returns the Status field value if set, zero value otherwise. +func (o *MigrationPrecheck) GetStatus() string { + if o == nil || IsNil(o.Status) { + var ret string + return ret + } + return *o.Status +} + +// GetStatusOk returns a tuple with the Status field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationPrecheck) GetStatusOk() (*string, bool) { + if o == nil || IsNil(o.Status) { + return nil, false + } + return o.Status, true +} + +// HasStatus returns a boolean if a field has been set. +func (o *MigrationPrecheck) HasStatus() bool { + if o != nil && !IsNil(o.Status) { + return true + } + + return false +} + +// SetStatus gets a reference to the given string and assigns it to the Status field. +func (o *MigrationPrecheck) SetStatus(v string) { + o.Status = &v +} + +// GetItems returns the Items field value if set, zero value otherwise. 
+func (o *MigrationPrecheck) GetItems() []PrecheckItem { + if o == nil || IsNil(o.Items) { + var ret []PrecheckItem + return ret + } + return o.Items +} + +// GetItemsOk returns a tuple with the Items field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationPrecheck) GetItemsOk() ([]PrecheckItem, bool) { + if o == nil || IsNil(o.Items) { + return nil, false + } + return o.Items, true +} + +// HasItems returns a boolean if a field has been set. +func (o *MigrationPrecheck) HasItems() bool { + if o != nil && !IsNil(o.Items) { + return true + } + + return false +} + +// SetItems gets a reference to the given []PrecheckItem and assigns it to the Items field. +func (o *MigrationPrecheck) SetItems(v []PrecheckItem) { + o.Items = v +} + +func (o MigrationPrecheck) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o MigrationPrecheck) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Total) { + toSerialize["total"] = o.Total + } + if !IsNil(o.FailedCnt) { + toSerialize["failedCnt"] = o.FailedCnt + } + if !IsNil(o.WarnCnt) { + toSerialize["warnCnt"] = o.WarnCnt + } + if !IsNil(o.SuccessCnt) { + toSerialize["successCnt"] = o.SuccessCnt + } + if !IsNil(o.Status) { + toSerialize["status"] = o.Status + } + if !IsNil(o.Items) { + toSerialize["items"] = o.Items + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *MigrationPrecheck) UnmarshalJSON(data []byte) (err error) { + varMigrationPrecheck := _MigrationPrecheck{} + + err = json.Unmarshal(data, &varMigrationPrecheck) + + if err != nil { + return err + } + + *o = MigrationPrecheck(varMigrationPrecheck) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + 
delete(additionalProperties, "total") + delete(additionalProperties, "failedCnt") + delete(additionalProperties, "warnCnt") + delete(additionalProperties, "successCnt") + delete(additionalProperties, "status") + delete(additionalProperties, "items") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableMigrationPrecheck struct { + value *MigrationPrecheck + isSet bool +} + +func (v NullableMigrationPrecheck) Get() *MigrationPrecheck { + return v.value +} + +func (v *NullableMigrationPrecheck) Set(val *MigrationPrecheck) { + v.value = val + v.isSet = true +} + +func (v NullableMigrationPrecheck) IsSet() bool { + return v.isSet +} + +func (v *NullableMigrationPrecheck) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableMigrationPrecheck(val *MigrationPrecheck) *NullableMigrationPrecheck { + return &NullableMigrationPrecheck{value: val, isSet: true} +} + +func (v NullableMigrationPrecheck) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableMigrationPrecheck) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_task_body.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_task_body.go new file mode 100644 index 00000000..d936df5b --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_task_body.go @@ -0,0 +1,319 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package migration + +import ( + "encoding/json" + "fmt" +) + +// checks if the MigrationServiceCreateTaskBody type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &MigrationServiceCreateTaskBody{} + +// MigrationServiceCreateTaskBody struct for MigrationServiceCreateTaskBody +type MigrationServiceCreateTaskBody struct { + // The display name of the migration task. + Name *string `json:"name,omitempty"` + // The data sources to migrate from. + Sources []Source `json:"sources"` + // The target database credentials. + Target *Target `json:"target,omitempty"` + // The migration mode (full+incremental or incremental-only). + Mode *TaskMode `json:"mode,omitempty"` + // If true, migrate all user data (equivalent to enabling all non-system databases and tables). + FullDataMigration *bool `json:"fullDataMigration,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _MigrationServiceCreateTaskBody MigrationServiceCreateTaskBody + +// NewMigrationServiceCreateTaskBody instantiates a new MigrationServiceCreateTaskBody object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewMigrationServiceCreateTaskBody(sources []Source) *MigrationServiceCreateTaskBody { + this := MigrationServiceCreateTaskBody{} + this.Sources = sources + return &this +} + +// NewMigrationServiceCreateTaskBodyWithDefaults instantiates a new MigrationServiceCreateTaskBody object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewMigrationServiceCreateTaskBodyWithDefaults() *MigrationServiceCreateTaskBody { + this := MigrationServiceCreateTaskBody{} + return &this +} + +// GetName returns the Name field value if set, zero value otherwise. 
+func (o *MigrationServiceCreateTaskBody) GetName() string { + if o == nil || IsNil(o.Name) { + var ret string + return ret + } + return *o.Name +} + +// GetNameOk returns a tuple with the Name field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationServiceCreateTaskBody) GetNameOk() (*string, bool) { + if o == nil || IsNil(o.Name) { + return nil, false + } + return o.Name, true +} + +// HasName returns a boolean if a field has been set. +func (o *MigrationServiceCreateTaskBody) HasName() bool { + if o != nil && !IsNil(o.Name) { + return true + } + + return false +} + +// SetName gets a reference to the given string and assigns it to the Name field. +func (o *MigrationServiceCreateTaskBody) SetName(v string) { + o.Name = &v +} + +// GetSources returns the Sources field value +func (o *MigrationServiceCreateTaskBody) GetSources() []Source { + if o == nil { + var ret []Source + return ret + } + + return o.Sources +} + +// GetSourcesOk returns a tuple with the Sources field value +// and a boolean to check if the value has been set. +func (o *MigrationServiceCreateTaskBody) GetSourcesOk() ([]Source, bool) { + if o == nil { + return nil, false + } + return o.Sources, true +} + +// SetSources sets field value +func (o *MigrationServiceCreateTaskBody) SetSources(v []Source) { + o.Sources = v +} + +// GetTarget returns the Target field value if set, zero value otherwise. +func (o *MigrationServiceCreateTaskBody) GetTarget() Target { + if o == nil || IsNil(o.Target) { + var ret Target + return ret + } + return *o.Target +} + +// GetTargetOk returns a tuple with the Target field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationServiceCreateTaskBody) GetTargetOk() (*Target, bool) { + if o == nil || IsNil(o.Target) { + return nil, false + } + return o.Target, true +} + +// HasTarget returns a boolean if a field has been set. 
+func (o *MigrationServiceCreateTaskBody) HasTarget() bool { + if o != nil && !IsNil(o.Target) { + return true + } + + return false +} + +// SetTarget gets a reference to the given Target and assigns it to the Target field. +func (o *MigrationServiceCreateTaskBody) SetTarget(v Target) { + o.Target = &v +} + +// GetMode returns the Mode field value if set, zero value otherwise. +func (o *MigrationServiceCreateTaskBody) GetMode() TaskMode { + if o == nil || IsNil(o.Mode) { + var ret TaskMode + return ret + } + return *o.Mode +} + +// GetModeOk returns a tuple with the Mode field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationServiceCreateTaskBody) GetModeOk() (*TaskMode, bool) { + if o == nil || IsNil(o.Mode) { + return nil, false + } + return o.Mode, true +} + +// HasMode returns a boolean if a field has been set. +func (o *MigrationServiceCreateTaskBody) HasMode() bool { + if o != nil && !IsNil(o.Mode) { + return true + } + + return false +} + +// SetMode gets a reference to the given TaskMode and assigns it to the Mode field. +func (o *MigrationServiceCreateTaskBody) SetMode(v TaskMode) { + o.Mode = &v +} + +// GetFullDataMigration returns the FullDataMigration field value if set, zero value otherwise. +func (o *MigrationServiceCreateTaskBody) GetFullDataMigration() bool { + if o == nil || IsNil(o.FullDataMigration) { + var ret bool + return ret + } + return *o.FullDataMigration +} + +// GetFullDataMigrationOk returns a tuple with the FullDataMigration field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationServiceCreateTaskBody) GetFullDataMigrationOk() (*bool, bool) { + if o == nil || IsNil(o.FullDataMigration) { + return nil, false + } + return o.FullDataMigration, true +} + +// HasFullDataMigration returns a boolean if a field has been set. 
+func (o *MigrationServiceCreateTaskBody) HasFullDataMigration() bool { + if o != nil && !IsNil(o.FullDataMigration) { + return true + } + + return false +} + +// SetFullDataMigration gets a reference to the given bool and assigns it to the FullDataMigration field. +func (o *MigrationServiceCreateTaskBody) SetFullDataMigration(v bool) { + o.FullDataMigration = &v +} + +func (o MigrationServiceCreateTaskBody) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o MigrationServiceCreateTaskBody) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Name) { + toSerialize["name"] = o.Name + } + toSerialize["sources"] = o.Sources + if !IsNil(o.Target) { + toSerialize["target"] = o.Target + } + if !IsNil(o.Mode) { + toSerialize["mode"] = o.Mode + } + if !IsNil(o.FullDataMigration) { + toSerialize["fullDataMigration"] = o.FullDataMigration + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *MigrationServiceCreateTaskBody) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "sources", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err + } + + for _, requiredProperty := range requiredProperties { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varMigrationServiceCreateTaskBody := _MigrationServiceCreateTaskBody{} + + err = json.Unmarshal(data, &varMigrationServiceCreateTaskBody) + + if err != nil { + return err + } + + *o = MigrationServiceCreateTaskBody(varMigrationServiceCreateTaskBody) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "name") + delete(additionalProperties, "sources") + delete(additionalProperties, "target") + delete(additionalProperties, "mode") + delete(additionalProperties, "fullDataMigration") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableMigrationServiceCreateTaskBody struct { + value *MigrationServiceCreateTaskBody + isSet bool +} + +func (v NullableMigrationServiceCreateTaskBody) Get() *MigrationServiceCreateTaskBody { + return v.value +} + +func (v *NullableMigrationServiceCreateTaskBody) Set(val *MigrationServiceCreateTaskBody) { + v.value = val + v.isSet = true +} + +func (v NullableMigrationServiceCreateTaskBody) IsSet() bool { + return v.isSet +} + +func (v *NullableMigrationServiceCreateTaskBody) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableMigrationServiceCreateTaskBody(val *MigrationServiceCreateTaskBody) *NullableMigrationServiceCreateTaskBody { + return &NullableMigrationServiceCreateTaskBody{value: val, isSet: true} +} + +func (v NullableMigrationServiceCreateTaskBody) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableMigrationServiceCreateTaskBody) UnmarshalJSON(src []byte) 
error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_precheck_body.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_precheck_body.go new file mode 100644 index 00000000..49677590 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_precheck_body.go @@ -0,0 +1,319 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" + "fmt" +) + +// checks if the MigrationServicePrecheckBody type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &MigrationServicePrecheckBody{} + +// MigrationServicePrecheckBody struct for MigrationServicePrecheckBody +type MigrationServicePrecheckBody struct { + // The display name of the migration task. + Name *string `json:"name,omitempty"` + // The data sources to migrate from. + Sources []Source `json:"sources"` + // The target database credentials. + Target *Target `json:"target,omitempty"` + // The migration mode (full+incremental or incremental-only). + Mode *TaskMode `json:"mode,omitempty"` + // If true, migrate all user data (equivalent to enabling all non-system databases and tables). 
+ FullDataMigration *bool `json:"fullDataMigration,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _MigrationServicePrecheckBody MigrationServicePrecheckBody + +// NewMigrationServicePrecheckBody instantiates a new MigrationServicePrecheckBody object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewMigrationServicePrecheckBody(sources []Source) *MigrationServicePrecheckBody { + this := MigrationServicePrecheckBody{} + this.Sources = sources + return &this +} + +// NewMigrationServicePrecheckBodyWithDefaults instantiates a new MigrationServicePrecheckBody object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewMigrationServicePrecheckBodyWithDefaults() *MigrationServicePrecheckBody { + this := MigrationServicePrecheckBody{} + return &this +} + +// GetName returns the Name field value if set, zero value otherwise. +func (o *MigrationServicePrecheckBody) GetName() string { + if o == nil || IsNil(o.Name) { + var ret string + return ret + } + return *o.Name +} + +// GetNameOk returns a tuple with the Name field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationServicePrecheckBody) GetNameOk() (*string, bool) { + if o == nil || IsNil(o.Name) { + return nil, false + } + return o.Name, true +} + +// HasName returns a boolean if a field has been set. +func (o *MigrationServicePrecheckBody) HasName() bool { + if o != nil && !IsNil(o.Name) { + return true + } + + return false +} + +// SetName gets a reference to the given string and assigns it to the Name field. 
+func (o *MigrationServicePrecheckBody) SetName(v string) { + o.Name = &v +} + +// GetSources returns the Sources field value +func (o *MigrationServicePrecheckBody) GetSources() []Source { + if o == nil { + var ret []Source + return ret + } + + return o.Sources +} + +// GetSourcesOk returns a tuple with the Sources field value +// and a boolean to check if the value has been set. +func (o *MigrationServicePrecheckBody) GetSourcesOk() ([]Source, bool) { + if o == nil { + return nil, false + } + return o.Sources, true +} + +// SetSources sets field value +func (o *MigrationServicePrecheckBody) SetSources(v []Source) { + o.Sources = v +} + +// GetTarget returns the Target field value if set, zero value otherwise. +func (o *MigrationServicePrecheckBody) GetTarget() Target { + if o == nil || IsNil(o.Target) { + var ret Target + return ret + } + return *o.Target +} + +// GetTargetOk returns a tuple with the Target field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationServicePrecheckBody) GetTargetOk() (*Target, bool) { + if o == nil || IsNil(o.Target) { + return nil, false + } + return o.Target, true +} + +// HasTarget returns a boolean if a field has been set. +func (o *MigrationServicePrecheckBody) HasTarget() bool { + if o != nil && !IsNil(o.Target) { + return true + } + + return false +} + +// SetTarget gets a reference to the given Target and assigns it to the Target field. +func (o *MigrationServicePrecheckBody) SetTarget(v Target) { + o.Target = &v +} + +// GetMode returns the Mode field value if set, zero value otherwise. +func (o *MigrationServicePrecheckBody) GetMode() TaskMode { + if o == nil || IsNil(o.Mode) { + var ret TaskMode + return ret + } + return *o.Mode +} + +// GetModeOk returns a tuple with the Mode field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *MigrationServicePrecheckBody) GetModeOk() (*TaskMode, bool) { + if o == nil || IsNil(o.Mode) { + return nil, false + } + return o.Mode, true +} + +// HasMode returns a boolean if a field has been set. +func (o *MigrationServicePrecheckBody) HasMode() bool { + if o != nil && !IsNil(o.Mode) { + return true + } + + return false +} + +// SetMode gets a reference to the given TaskMode and assigns it to the Mode field. +func (o *MigrationServicePrecheckBody) SetMode(v TaskMode) { + o.Mode = &v +} + +// GetFullDataMigration returns the FullDataMigration field value if set, zero value otherwise. +func (o *MigrationServicePrecheckBody) GetFullDataMigration() bool { + if o == nil || IsNil(o.FullDataMigration) { + var ret bool + return ret + } + return *o.FullDataMigration +} + +// GetFullDataMigrationOk returns a tuple with the FullDataMigration field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationServicePrecheckBody) GetFullDataMigrationOk() (*bool, bool) { + if o == nil || IsNil(o.FullDataMigration) { + return nil, false + } + return o.FullDataMigration, true +} + +// HasFullDataMigration returns a boolean if a field has been set. +func (o *MigrationServicePrecheckBody) HasFullDataMigration() bool { + if o != nil && !IsNil(o.FullDataMigration) { + return true + } + + return false +} + +// SetFullDataMigration gets a reference to the given bool and assigns it to the FullDataMigration field. 
+func (o *MigrationServicePrecheckBody) SetFullDataMigration(v bool) { + o.FullDataMigration = &v +} + +func (o MigrationServicePrecheckBody) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o MigrationServicePrecheckBody) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Name) { + toSerialize["name"] = o.Name + } + toSerialize["sources"] = o.Sources + if !IsNil(o.Target) { + toSerialize["target"] = o.Target + } + if !IsNil(o.Mode) { + toSerialize["mode"] = o.Mode + } + if !IsNil(o.FullDataMigration) { + toSerialize["fullDataMigration"] = o.FullDataMigration + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *MigrationServicePrecheckBody) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "sources", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err + } + + for _, requiredProperty := range requiredProperties { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varMigrationServicePrecheckBody := _MigrationServicePrecheckBody{} + + err = json.Unmarshal(data, &varMigrationServicePrecheckBody) + + if err != nil { + return err + } + + *o = MigrationServicePrecheckBody(varMigrationServicePrecheckBody) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "name") + delete(additionalProperties, "sources") + delete(additionalProperties, "target") + delete(additionalProperties, "mode") + delete(additionalProperties, "fullDataMigration") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableMigrationServicePrecheckBody struct { + value *MigrationServicePrecheckBody + isSet bool +} + +func (v NullableMigrationServicePrecheckBody) Get() *MigrationServicePrecheckBody { + return v.value +} + +func (v *NullableMigrationServicePrecheckBody) Set(val *MigrationServicePrecheckBody) { + v.value = val + v.isSet = true +} + +func (v NullableMigrationServicePrecheckBody) IsSet() bool { + return v.isSet +} + +func (v *NullableMigrationServicePrecheckBody) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableMigrationServicePrecheckBody(val *MigrationServicePrecheckBody) *NullableMigrationServicePrecheckBody { + return &NullableMigrationServicePrecheckBody{value: val, isSet: true} +} + +func (v NullableMigrationServicePrecheckBody) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableMigrationServicePrecheckBody) UnmarshalJSON(src []byte) error { + v.isSet = true + return 
json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task.go new file mode 100644 index 00000000..2a430c08 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task.go @@ -0,0 +1,383 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" + "time" +) + +// checks if the MigrationTask type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &MigrationTask{} + +// MigrationTask struct for MigrationTask +type MigrationTask struct { + // The unique ID of the migration task. + Id *string `json:"id,omitempty"` + // The display name of the migration task. + Name *string `json:"name,omitempty"` + // The list of subtasks composing this migration. + SubTasks []SubTask `json:"subTasks,omitempty"` + // The target database username used by the task. + TargetUser *string `json:"targetUser,omitempty"` + // The timestamp when the task was created. + CreateTime *time.Time `json:"createTime,omitempty"` + // The migration mode of the task. + Mode *TaskMode `json:"mode,omitempty"` + // The current state of the task. 
+ State *MigrationTaskState `json:"state,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _MigrationTask MigrationTask + +// NewMigrationTask instantiates a new MigrationTask object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewMigrationTask() *MigrationTask { + this := MigrationTask{} + return &this +} + +// NewMigrationTaskWithDefaults instantiates a new MigrationTask object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewMigrationTaskWithDefaults() *MigrationTask { + this := MigrationTask{} + return &this +} + +// GetId returns the Id field value if set, zero value otherwise. +func (o *MigrationTask) GetId() string { + if o == nil || IsNil(o.Id) { + var ret string + return ret + } + return *o.Id +} + +// GetIdOk returns a tuple with the Id field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationTask) GetIdOk() (*string, bool) { + if o == nil || IsNil(o.Id) { + return nil, false + } + return o.Id, true +} + +// HasId returns a boolean if a field has been set. +func (o *MigrationTask) HasId() bool { + if o != nil && !IsNil(o.Id) { + return true + } + + return false +} + +// SetId gets a reference to the given string and assigns it to the Id field. +func (o *MigrationTask) SetId(v string) { + o.Id = &v +} + +// GetName returns the Name field value if set, zero value otherwise. +func (o *MigrationTask) GetName() string { + if o == nil || IsNil(o.Name) { + var ret string + return ret + } + return *o.Name +} + +// GetNameOk returns a tuple with the Name field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *MigrationTask) GetNameOk() (*string, bool) { + if o == nil || IsNil(o.Name) { + return nil, false + } + return o.Name, true +} + +// HasName returns a boolean if a field has been set. +func (o *MigrationTask) HasName() bool { + if o != nil && !IsNil(o.Name) { + return true + } + + return false +} + +// SetName gets a reference to the given string and assigns it to the Name field. +func (o *MigrationTask) SetName(v string) { + o.Name = &v +} + +// GetSubTasks returns the SubTasks field value if set, zero value otherwise. +func (o *MigrationTask) GetSubTasks() []SubTask { + if o == nil || IsNil(o.SubTasks) { + var ret []SubTask + return ret + } + return o.SubTasks +} + +// GetSubTasksOk returns a tuple with the SubTasks field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationTask) GetSubTasksOk() ([]SubTask, bool) { + if o == nil || IsNil(o.SubTasks) { + return nil, false + } + return o.SubTasks, true +} + +// HasSubTasks returns a boolean if a field has been set. +func (o *MigrationTask) HasSubTasks() bool { + if o != nil && !IsNil(o.SubTasks) { + return true + } + + return false +} + +// SetSubTasks gets a reference to the given []SubTask and assigns it to the SubTasks field. +func (o *MigrationTask) SetSubTasks(v []SubTask) { + o.SubTasks = v +} + +// GetTargetUser returns the TargetUser field value if set, zero value otherwise. +func (o *MigrationTask) GetTargetUser() string { + if o == nil || IsNil(o.TargetUser) { + var ret string + return ret + } + return *o.TargetUser +} + +// GetTargetUserOk returns a tuple with the TargetUser field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationTask) GetTargetUserOk() (*string, bool) { + if o == nil || IsNil(o.TargetUser) { + return nil, false + } + return o.TargetUser, true +} + +// HasTargetUser returns a boolean if a field has been set. 
+func (o *MigrationTask) HasTargetUser() bool { + if o != nil && !IsNil(o.TargetUser) { + return true + } + + return false +} + +// SetTargetUser gets a reference to the given string and assigns it to the TargetUser field. +func (o *MigrationTask) SetTargetUser(v string) { + o.TargetUser = &v +} + +// GetCreateTime returns the CreateTime field value if set, zero value otherwise. +func (o *MigrationTask) GetCreateTime() time.Time { + if o == nil || IsNil(o.CreateTime) { + var ret time.Time + return ret + } + return *o.CreateTime +} + +// GetCreateTimeOk returns a tuple with the CreateTime field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationTask) GetCreateTimeOk() (*time.Time, bool) { + if o == nil || IsNil(o.CreateTime) { + return nil, false + } + return o.CreateTime, true +} + +// HasCreateTime returns a boolean if a field has been set. +func (o *MigrationTask) HasCreateTime() bool { + if o != nil && !IsNil(o.CreateTime) { + return true + } + + return false +} + +// SetCreateTime gets a reference to the given time.Time and assigns it to the CreateTime field. +func (o *MigrationTask) SetCreateTime(v time.Time) { + o.CreateTime = &v +} + +// GetMode returns the Mode field value if set, zero value otherwise. +func (o *MigrationTask) GetMode() TaskMode { + if o == nil || IsNil(o.Mode) { + var ret TaskMode + return ret + } + return *o.Mode +} + +// GetModeOk returns a tuple with the Mode field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationTask) GetModeOk() (*TaskMode, bool) { + if o == nil || IsNil(o.Mode) { + return nil, false + } + return o.Mode, true +} + +// HasMode returns a boolean if a field has been set. +func (o *MigrationTask) HasMode() bool { + if o != nil && !IsNil(o.Mode) { + return true + } + + return false +} + +// SetMode gets a reference to the given TaskMode and assigns it to the Mode field. 
+func (o *MigrationTask) SetMode(v TaskMode) { + o.Mode = &v +} + +// GetState returns the State field value if set, zero value otherwise. +func (o *MigrationTask) GetState() MigrationTaskState { + if o == nil || IsNil(o.State) { + var ret MigrationTaskState + return ret + } + return *o.State +} + +// GetStateOk returns a tuple with the State field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationTask) GetStateOk() (*MigrationTaskState, bool) { + if o == nil || IsNil(o.State) { + return nil, false + } + return o.State, true +} + +// HasState returns a boolean if a field has been set. +func (o *MigrationTask) HasState() bool { + if o != nil && !IsNil(o.State) { + return true + } + + return false +} + +// SetState gets a reference to the given MigrationTaskState and assigns it to the State field. +func (o *MigrationTask) SetState(v MigrationTaskState) { + o.State = &v +} + +func (o MigrationTask) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o MigrationTask) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Id) { + toSerialize["id"] = o.Id + } + if !IsNil(o.Name) { + toSerialize["name"] = o.Name + } + if !IsNil(o.SubTasks) { + toSerialize["subTasks"] = o.SubTasks + } + if !IsNil(o.TargetUser) { + toSerialize["targetUser"] = o.TargetUser + } + if !IsNil(o.CreateTime) { + toSerialize["createTime"] = o.CreateTime + } + if !IsNil(o.Mode) { + toSerialize["mode"] = o.Mode + } + if !IsNil(o.State) { + toSerialize["state"] = o.State + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *MigrationTask) UnmarshalJSON(data []byte) (err error) { + varMigrationTask := _MigrationTask{} + + err = json.Unmarshal(data, &varMigrationTask) + + if err != nil { + return err + } + + *o = 
MigrationTask(varMigrationTask) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "id") + delete(additionalProperties, "name") + delete(additionalProperties, "subTasks") + delete(additionalProperties, "targetUser") + delete(additionalProperties, "createTime") + delete(additionalProperties, "mode") + delete(additionalProperties, "state") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableMigrationTask struct { + value *MigrationTask + isSet bool +} + +func (v NullableMigrationTask) Get() *MigrationTask { + return v.value +} + +func (v *NullableMigrationTask) Set(val *MigrationTask) { + v.value = val + v.isSet = true +} + +func (v NullableMigrationTask) IsSet() bool { + return v.isSet +} + +func (v *NullableMigrationTask) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableMigrationTask(val *MigrationTask) *NullableMigrationTask { + return &NullableMigrationTask{value: val, isSet: true} +} + +func (v NullableMigrationTask) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableMigrationTask) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task_state.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task_state.go new file mode 100644 index 00000000..479adf2e --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task_state.go @@ -0,0 +1,111 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// MigrationTaskState Overall state of a migration task. - CREATING: Task is being created. - RUNNING: Task is actively running. 
- PAUSED: Task is paused. - CANCELED: Task has been canceled. - FAILED: Task failed with error. +type MigrationTaskState string + +// List of MigrationTask.State +const ( + MIGRATIONTASKSTATE_CREATING MigrationTaskState = "CREATING" + MIGRATIONTASKSTATE_RUNNING MigrationTaskState = "RUNNING" + MIGRATIONTASKSTATE_PAUSED MigrationTaskState = "PAUSED" + MIGRATIONTASKSTATE_CANCELED MigrationTaskState = "CANCELED" + MIGRATIONTASKSTATE_FAILED MigrationTaskState = "FAILED" +) + +// All allowed values of MigrationTaskState enum +var AllowedMigrationTaskStateEnumValues = []MigrationTaskState{ + "CREATING", + "RUNNING", + "PAUSED", + "CANCELED", + "FAILED", +} + +func (v *MigrationTaskState) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := MigrationTaskState(value) + for _, existing := range AllowedMigrationTaskStateEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = MigrationTaskState(value) + return nil +} + +// NewMigrationTaskStateFromValue returns a pointer to a valid MigrationTaskState for the value passed as argument +func NewMigrationTaskStateFromValue(v string) *MigrationTaskState { + ev := MigrationTaskState(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v MigrationTaskState) IsValid() bool { + for _, existing := range AllowedMigrationTaskStateEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to MigrationTask.State value +func (v MigrationTaskState) Ptr() *MigrationTaskState { + return &v +} + +type NullableMigrationTaskState struct { + value *MigrationTaskState + isSet bool +} + +func (v NullableMigrationTaskState) Get() *MigrationTaskState { + return v.value +} + +func (v *NullableMigrationTaskState) Set(val *MigrationTaskState) { + v.value = val + v.isSet = true +} + +func (v NullableMigrationTaskState) IsSet() bool { 
+ return v.isSet +} + +func (v *NullableMigrationTaskState) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableMigrationTaskState(val *MigrationTaskState) *NullableMigrationTaskState { + return &NullableMigrationTaskState{value: val, isSet: true} +} + +func (v NullableMigrationTaskState) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableMigrationTaskState) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go new file mode 100644 index 00000000..51e5d39a --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go @@ -0,0 +1,344 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the PrecheckItem type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &PrecheckItem{} + +// PrecheckItem struct for PrecheckItem +type PrecheckItem struct { + // Human-readable description of the check. + Desc *string `json:"desc,omitempty"` + // Status of this check (e.g., SUCCESS, FAILED, WARN). + Status *string `json:"status,omitempty"` + // Suggested solution if the check failed or warned. + Solution *string `json:"solution,omitempty"` + // Reason for the failure or warning. + Reason *string `json:"reason,omitempty"` + // Documentation URL for the solution. + SolutionDocUrl *string `json:"solutionDocUrl,omitempty"` + // The type of precheck. 
+ Type *PrecheckItemType `json:"type,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _PrecheckItem PrecheckItem + +// NewPrecheckItem instantiates a new PrecheckItem object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewPrecheckItem() *PrecheckItem { + this := PrecheckItem{} + return &this +} + +// NewPrecheckItemWithDefaults instantiates a new PrecheckItem object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewPrecheckItemWithDefaults() *PrecheckItem { + this := PrecheckItem{} + return &this +} + +// GetDesc returns the Desc field value if set, zero value otherwise. +func (o *PrecheckItem) GetDesc() string { + if o == nil || IsNil(o.Desc) { + var ret string + return ret + } + return *o.Desc +} + +// GetDescOk returns a tuple with the Desc field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *PrecheckItem) GetDescOk() (*string, bool) { + if o == nil || IsNil(o.Desc) { + return nil, false + } + return o.Desc, true +} + +// HasDesc returns a boolean if a field has been set. +func (o *PrecheckItem) HasDesc() bool { + if o != nil && !IsNil(o.Desc) { + return true + } + + return false +} + +// SetDesc gets a reference to the given string and assigns it to the Desc field. +func (o *PrecheckItem) SetDesc(v string) { + o.Desc = &v +} + +// GetStatus returns the Status field value if set, zero value otherwise. +func (o *PrecheckItem) GetStatus() string { + if o == nil || IsNil(o.Status) { + var ret string + return ret + } + return *o.Status +} + +// GetStatusOk returns a tuple with the Status field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *PrecheckItem) GetStatusOk() (*string, bool) { + if o == nil || IsNil(o.Status) { + return nil, false + } + return o.Status, true +} + +// HasStatus returns a boolean if a field has been set. +func (o *PrecheckItem) HasStatus() bool { + if o != nil && !IsNil(o.Status) { + return true + } + + return false +} + +// SetStatus gets a reference to the given string and assigns it to the Status field. +func (o *PrecheckItem) SetStatus(v string) { + o.Status = &v +} + +// GetSolution returns the Solution field value if set, zero value otherwise. +func (o *PrecheckItem) GetSolution() string { + if o == nil || IsNil(o.Solution) { + var ret string + return ret + } + return *o.Solution +} + +// GetSolutionOk returns a tuple with the Solution field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *PrecheckItem) GetSolutionOk() (*string, bool) { + if o == nil || IsNil(o.Solution) { + return nil, false + } + return o.Solution, true +} + +// HasSolution returns a boolean if a field has been set. +func (o *PrecheckItem) HasSolution() bool { + if o != nil && !IsNil(o.Solution) { + return true + } + + return false +} + +// SetSolution gets a reference to the given string and assigns it to the Solution field. +func (o *PrecheckItem) SetSolution(v string) { + o.Solution = &v +} + +// GetReason returns the Reason field value if set, zero value otherwise. +func (o *PrecheckItem) GetReason() string { + if o == nil || IsNil(o.Reason) { + var ret string + return ret + } + return *o.Reason +} + +// GetReasonOk returns a tuple with the Reason field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *PrecheckItem) GetReasonOk() (*string, bool) { + if o == nil || IsNil(o.Reason) { + return nil, false + } + return o.Reason, true +} + +// HasReason returns a boolean if a field has been set. 
+func (o *PrecheckItem) HasReason() bool { + if o != nil && !IsNil(o.Reason) { + return true + } + + return false +} + +// SetReason gets a reference to the given string and assigns it to the Reason field. +func (o *PrecheckItem) SetReason(v string) { + o.Reason = &v +} + +// GetSolutionDocUrl returns the SolutionDocUrl field value if set, zero value otherwise. +func (o *PrecheckItem) GetSolutionDocUrl() string { + if o == nil || IsNil(o.SolutionDocUrl) { + var ret string + return ret + } + return *o.SolutionDocUrl +} + +// GetSolutionDocUrlOk returns a tuple with the SolutionDocUrl field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *PrecheckItem) GetSolutionDocUrlOk() (*string, bool) { + if o == nil || IsNil(o.SolutionDocUrl) { + return nil, false + } + return o.SolutionDocUrl, true +} + +// HasSolutionDocUrl returns a boolean if a field has been set. +func (o *PrecheckItem) HasSolutionDocUrl() bool { + if o != nil && !IsNil(o.SolutionDocUrl) { + return true + } + + return false +} + +// SetSolutionDocUrl gets a reference to the given string and assigns it to the SolutionDocUrl field. +func (o *PrecheckItem) SetSolutionDocUrl(v string) { + o.SolutionDocUrl = &v +} + +// GetType returns the Type field value if set, zero value otherwise. +func (o *PrecheckItem) GetType() PrecheckItemType { + if o == nil || IsNil(o.Type) { + var ret PrecheckItemType + return ret + } + return *o.Type +} + +// GetTypeOk returns a tuple with the Type field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *PrecheckItem) GetTypeOk() (*PrecheckItemType, bool) { + if o == nil || IsNil(o.Type) { + return nil, false + } + return o.Type, true +} + +// HasType returns a boolean if a field has been set. +func (o *PrecheckItem) HasType() bool { + if o != nil && !IsNil(o.Type) { + return true + } + + return false +} + +// SetType gets a reference to the given PrecheckItemType and assigns it to the Type field. 
+func (o *PrecheckItem) SetType(v PrecheckItemType) { + o.Type = &v +} + +func (o PrecheckItem) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o PrecheckItem) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Desc) { + toSerialize["desc"] = o.Desc + } + if !IsNil(o.Status) { + toSerialize["status"] = o.Status + } + if !IsNil(o.Solution) { + toSerialize["solution"] = o.Solution + } + if !IsNil(o.Reason) { + toSerialize["reason"] = o.Reason + } + if !IsNil(o.SolutionDocUrl) { + toSerialize["solutionDocUrl"] = o.SolutionDocUrl + } + if !IsNil(o.Type) { + toSerialize["type"] = o.Type + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *PrecheckItem) UnmarshalJSON(data []byte) (err error) { + varPrecheckItem := _PrecheckItem{} + + err = json.Unmarshal(data, &varPrecheckItem) + + if err != nil { + return err + } + + *o = PrecheckItem(varPrecheckItem) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "desc") + delete(additionalProperties, "status") + delete(additionalProperties, "solution") + delete(additionalProperties, "reason") + delete(additionalProperties, "solutionDocUrl") + delete(additionalProperties, "type") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullablePrecheckItem struct { + value *PrecheckItem + isSet bool +} + +func (v NullablePrecheckItem) Get() *PrecheckItem { + return v.value +} + +func (v *NullablePrecheckItem) Set(val *PrecheckItem) { + v.value = val + v.isSet = true +} + +func (v NullablePrecheckItem) IsSet() bool { + return v.isSet +} + +func (v *NullablePrecheckItem) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullablePrecheckItem(val *PrecheckItem) 
*NullablePrecheckItem { + return &NullablePrecheckItem{value: val, isSet: true} +} + +func (v NullablePrecheckItem) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullablePrecheckItem) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_type.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_type.go new file mode 100644 index 00000000..2967d7cc --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_type.go @@ -0,0 +1,129 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// PrecheckItemType Types of prechecks performed before starting a migration. - PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING: Check source has required dump/export privileges. - PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING: Check source has required replication privileges. - PRECHECK_ITEM_TYPE_VERSION_CHECKING: Check source database version compatibility. - PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING: Check source server_id configuration. - PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING: Check whether binlog is enabled on source. - PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING: Check binlog format (e.g., ROW) on source. - PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING: Check binlog row image setting. - PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING: Check table/schema compatibility with target. - PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING: Check binlog database-level filtering configuration. - PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING: Check concurrent connections limit/availability. - PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING: Check target database privileges. 
- PRECHECK_ITEM_TYPE_META_POSITION_CHECKING: Check saved meta/binlog position validity. - PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING: Check target tables are empty for Lightning load. - PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING: Check primary key settings on source tables. +type PrecheckItemType string + +// List of PrecheckItemType +const ( + PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING" + PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING" + PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_VERSION_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_VERSION_CHECKING" + PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING" + PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING" + PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING" + PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING" + PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING" + PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING" + PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING" + PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING" + PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_META_POSITION_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_META_POSITION_CHECKING" + PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING" + 
PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING" +) + +// All allowed values of PrecheckItemType enum +var AllowedPrecheckItemTypeEnumValues = []PrecheckItemType{ + "PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING", + "PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING", + "PRECHECK_ITEM_TYPE_VERSION_CHECKING", + "PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING", + "PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING", + "PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING", + "PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING", + "PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING", + "PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING", + "PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING", + "PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING", + "PRECHECK_ITEM_TYPE_META_POSITION_CHECKING", + "PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING", + "PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING", +} + +func (v *PrecheckItemType) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := PrecheckItemType(value) + for _, existing := range AllowedPrecheckItemTypeEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = PrecheckItemType(value) + return nil +} + +// NewPrecheckItemTypeFromValue returns a pointer to a valid PrecheckItemType for the value passed as argument +func NewPrecheckItemTypeFromValue(v string) *PrecheckItemType { + ev := PrecheckItemType(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v PrecheckItemType) IsValid() bool { + for _, existing := range AllowedPrecheckItemTypeEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to PrecheckItemType value +func (v PrecheckItemType) Ptr() *PrecheckItemType { + return &v +} + +type NullablePrecheckItemType struct { + value *PrecheckItemType + isSet bool +} + +func (v NullablePrecheckItemType) 
Get() *PrecheckItemType { + return v.value +} + +func (v *NullablePrecheckItemType) Set(val *PrecheckItemType) { + v.value = val + v.isSet = true +} + +func (v NullablePrecheckItemType) IsSet() bool { + return v.isSet +} + +func (v *NullablePrecheckItemType) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullablePrecheckItemType(val *PrecheckItemType) *NullablePrecheckItemType { + return &NullablePrecheckItemType{value: val, isSet: true} +} + +func (v NullablePrecheckItemType) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullablePrecheckItemType) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule.go new file mode 100644 index 00000000..efb21ed7 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule.go @@ -0,0 +1,192 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the RouteRule type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &RouteRule{} + +// RouteRule struct for RouteRule +type RouteRule struct { + // Source table pattern to match. + SourceTable *RouteRuleSource `json:"sourceTable,omitempty"` + // Target table to route to. 
+ TargetTable *RouteRuleTarget `json:"targetTable,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _RouteRule RouteRule + +// NewRouteRule instantiates a new RouteRule object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewRouteRule() *RouteRule { + this := RouteRule{} + return &this +} + +// NewRouteRuleWithDefaults instantiates a new RouteRule object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewRouteRuleWithDefaults() *RouteRule { + this := RouteRule{} + return &this +} + +// GetSourceTable returns the SourceTable field value if set, zero value otherwise. +func (o *RouteRule) GetSourceTable() RouteRuleSource { + if o == nil || IsNil(o.SourceTable) { + var ret RouteRuleSource + return ret + } + return *o.SourceTable +} + +// GetSourceTableOk returns a tuple with the SourceTable field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *RouteRule) GetSourceTableOk() (*RouteRuleSource, bool) { + if o == nil || IsNil(o.SourceTable) { + return nil, false + } + return o.SourceTable, true +} + +// HasSourceTable returns a boolean if a field has been set. +func (o *RouteRule) HasSourceTable() bool { + if o != nil && !IsNil(o.SourceTable) { + return true + } + + return false +} + +// SetSourceTable gets a reference to the given RouteRuleSource and assigns it to the SourceTable field. +func (o *RouteRule) SetSourceTable(v RouteRuleSource) { + o.SourceTable = &v +} + +// GetTargetTable returns the TargetTable field value if set, zero value otherwise. 
+func (o *RouteRule) GetTargetTable() RouteRuleTarget { + if o == nil || IsNil(o.TargetTable) { + var ret RouteRuleTarget + return ret + } + return *o.TargetTable +} + +// GetTargetTableOk returns a tuple with the TargetTable field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *RouteRule) GetTargetTableOk() (*RouteRuleTarget, bool) { + if o == nil || IsNil(o.TargetTable) { + return nil, false + } + return o.TargetTable, true +} + +// HasTargetTable returns a boolean if a field has been set. +func (o *RouteRule) HasTargetTable() bool { + if o != nil && !IsNil(o.TargetTable) { + return true + } + + return false +} + +// SetTargetTable gets a reference to the given RouteRuleTarget and assigns it to the TargetTable field. +func (o *RouteRule) SetTargetTable(v RouteRuleTarget) { + o.TargetTable = &v +} + +func (o RouteRule) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o RouteRule) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.SourceTable) { + toSerialize["sourceTable"] = o.SourceTable + } + if !IsNil(o.TargetTable) { + toSerialize["targetTable"] = o.TargetTable + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *RouteRule) UnmarshalJSON(data []byte) (err error) { + varRouteRule := _RouteRule{} + + err = json.Unmarshal(data, &varRouteRule) + + if err != nil { + return err + } + + *o = RouteRule(varRouteRule) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "sourceTable") + delete(additionalProperties, "targetTable") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableRouteRule struct { + value *RouteRule + isSet bool +} + +func (v 
NullableRouteRule) Get() *RouteRule { + return v.value +} + +func (v *NullableRouteRule) Set(val *RouteRule) { + v.value = val + v.isSet = true +} + +func (v NullableRouteRule) IsSet() bool { + return v.isSet +} + +func (v *NullableRouteRule) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableRouteRule(val *RouteRule) *NullableRouteRule { + return &NullableRouteRule{value: val, isSet: true} +} + +func (v NullableRouteRule) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableRouteRule) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule_source.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule_source.go new file mode 100644 index 00000000..32a57aa9 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule_source.go @@ -0,0 +1,192 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the RouteRuleSource type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &RouteRuleSource{} + +// RouteRuleSource struct for RouteRuleSource +type RouteRuleSource struct { + // Schema pattern of the source, supports wildcards. + SchemaPattern *string `json:"schemaPattern,omitempty"` + // Table pattern of the source, supports wildcards. 
+ TablePattern *string `json:"tablePattern,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _RouteRuleSource RouteRuleSource + +// NewRouteRuleSource instantiates a new RouteRuleSource object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewRouteRuleSource() *RouteRuleSource { + this := RouteRuleSource{} + return &this +} + +// NewRouteRuleSourceWithDefaults instantiates a new RouteRuleSource object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewRouteRuleSourceWithDefaults() *RouteRuleSource { + this := RouteRuleSource{} + return &this +} + +// GetSchemaPattern returns the SchemaPattern field value if set, zero value otherwise. +func (o *RouteRuleSource) GetSchemaPattern() string { + if o == nil || IsNil(o.SchemaPattern) { + var ret string + return ret + } + return *o.SchemaPattern +} + +// GetSchemaPatternOk returns a tuple with the SchemaPattern field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *RouteRuleSource) GetSchemaPatternOk() (*string, bool) { + if o == nil || IsNil(o.SchemaPattern) { + return nil, false + } + return o.SchemaPattern, true +} + +// HasSchemaPattern returns a boolean if a field has been set. +func (o *RouteRuleSource) HasSchemaPattern() bool { + if o != nil && !IsNil(o.SchemaPattern) { + return true + } + + return false +} + +// SetSchemaPattern gets a reference to the given string and assigns it to the SchemaPattern field. +func (o *RouteRuleSource) SetSchemaPattern(v string) { + o.SchemaPattern = &v +} + +// GetTablePattern returns the TablePattern field value if set, zero value otherwise. 
+func (o *RouteRuleSource) GetTablePattern() string { + if o == nil || IsNil(o.TablePattern) { + var ret string + return ret + } + return *o.TablePattern +} + +// GetTablePatternOk returns a tuple with the TablePattern field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *RouteRuleSource) GetTablePatternOk() (*string, bool) { + if o == nil || IsNil(o.TablePattern) { + return nil, false + } + return o.TablePattern, true +} + +// HasTablePattern returns a boolean if a field has been set. +func (o *RouteRuleSource) HasTablePattern() bool { + if o != nil && !IsNil(o.TablePattern) { + return true + } + + return false +} + +// SetTablePattern gets a reference to the given string and assigns it to the TablePattern field. +func (o *RouteRuleSource) SetTablePattern(v string) { + o.TablePattern = &v +} + +func (o RouteRuleSource) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o RouteRuleSource) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.SchemaPattern) { + toSerialize["schemaPattern"] = o.SchemaPattern + } + if !IsNil(o.TablePattern) { + toSerialize["tablePattern"] = o.TablePattern + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *RouteRuleSource) UnmarshalJSON(data []byte) (err error) { + varRouteRuleSource := _RouteRuleSource{} + + err = json.Unmarshal(data, &varRouteRuleSource) + + if err != nil { + return err + } + + *o = RouteRuleSource(varRouteRuleSource) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "schemaPattern") + delete(additionalProperties, "tablePattern") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableRouteRuleSource struct { + 
value *RouteRuleSource + isSet bool +} + +func (v NullableRouteRuleSource) Get() *RouteRuleSource { + return v.value +} + +func (v *NullableRouteRuleSource) Set(val *RouteRuleSource) { + v.value = val + v.isSet = true +} + +func (v NullableRouteRuleSource) IsSet() bool { + return v.isSet +} + +func (v *NullableRouteRuleSource) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableRouteRuleSource(val *RouteRuleSource) *NullableRouteRuleSource { + return &NullableRouteRuleSource{value: val, isSet: true} +} + +func (v NullableRouteRuleSource) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableRouteRuleSource) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule_target.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule_target.go new file mode 100644 index 00000000..5e45c109 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule_target.go @@ -0,0 +1,192 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the RouteRuleTarget type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &RouteRuleTarget{} + +// RouteRuleTarget struct for RouteRuleTarget +type RouteRuleTarget struct { + // Target schema name. + Schema *string `json:"schema,omitempty"` + // Target table name. 
+ Table *string `json:"table,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _RouteRuleTarget RouteRuleTarget + +// NewRouteRuleTarget instantiates a new RouteRuleTarget object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewRouteRuleTarget() *RouteRuleTarget { + this := RouteRuleTarget{} + return &this +} + +// NewRouteRuleTargetWithDefaults instantiates a new RouteRuleTarget object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewRouteRuleTargetWithDefaults() *RouteRuleTarget { + this := RouteRuleTarget{} + return &this +} + +// GetSchema returns the Schema field value if set, zero value otherwise. +func (o *RouteRuleTarget) GetSchema() string { + if o == nil || IsNil(o.Schema) { + var ret string + return ret + } + return *o.Schema +} + +// GetSchemaOk returns a tuple with the Schema field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *RouteRuleTarget) GetSchemaOk() (*string, bool) { + if o == nil || IsNil(o.Schema) { + return nil, false + } + return o.Schema, true +} + +// HasSchema returns a boolean if a field has been set. +func (o *RouteRuleTarget) HasSchema() bool { + if o != nil && !IsNil(o.Schema) { + return true + } + + return false +} + +// SetSchema gets a reference to the given string and assigns it to the Schema field. +func (o *RouteRuleTarget) SetSchema(v string) { + o.Schema = &v +} + +// GetTable returns the Table field value if set, zero value otherwise. 
+func (o *RouteRuleTarget) GetTable() string { + if o == nil || IsNil(o.Table) { + var ret string + return ret + } + return *o.Table +} + +// GetTableOk returns a tuple with the Table field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *RouteRuleTarget) GetTableOk() (*string, bool) { + if o == nil || IsNil(o.Table) { + return nil, false + } + return o.Table, true +} + +// HasTable returns a boolean if a field has been set. +func (o *RouteRuleTarget) HasTable() bool { + if o != nil && !IsNil(o.Table) { + return true + } + + return false +} + +// SetTable gets a reference to the given string and assigns it to the Table field. +func (o *RouteRuleTarget) SetTable(v string) { + o.Table = &v +} + +func (o RouteRuleTarget) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o RouteRuleTarget) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Schema) { + toSerialize["schema"] = o.Schema + } + if !IsNil(o.Table) { + toSerialize["table"] = o.Table + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *RouteRuleTarget) UnmarshalJSON(data []byte) (err error) { + varRouteRuleTarget := _RouteRuleTarget{} + + err = json.Unmarshal(data, &varRouteRuleTarget) + + if err != nil { + return err + } + + *o = RouteRuleTarget(varRouteRuleTarget) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "schema") + delete(additionalProperties, "table") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableRouteRuleTarget struct { + value *RouteRuleTarget + isSet bool +} + +func (v NullableRouteRuleTarget) Get() *RouteRuleTarget { + return v.value +} + +func (v *NullableRouteRuleTarget) 
Set(val *RouteRuleTarget) { + v.value = val + v.isSet = true +} + +func (v NullableRouteRuleTarget) IsSet() bool { + return v.isSet +} + +func (v *NullableRouteRuleTarget) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableRouteRuleTarget(val *RouteRuleTarget) *NullableRouteRuleTarget { + return &NullableRouteRuleTarget{value: val, isSet: true} +} + +func (v NullableRouteRuleTarget) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableRouteRuleTarget) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_security.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_security.go new file mode 100644 index 00000000..007edc54 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_security.go @@ -0,0 +1,268 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the Security type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Security{} + +// Security struct for Security +type Security struct { + // Allowed certificate Common Names. + CertAllowedCn []string `json:"certAllowedCn,omitempty"` + // CA certificate content in PEM. + SslCaContent *string `json:"sslCaContent,omitempty" validate:"regexp=^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$"` + // Client certificate content in PEM. + SslCertContent *string `json:"sslCertContent,omitempty" validate:"regexp=^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$"` + // Client private key in PEM. 
+ SslKeyContent *string `json:"sslKeyContent,omitempty" validate:"regexp=^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$"` + AdditionalProperties map[string]interface{} +} + +type _Security Security + +// NewSecurity instantiates a new Security object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewSecurity() *Security { + this := Security{} + return &this +} + +// NewSecurityWithDefaults instantiates a new Security object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewSecurityWithDefaults() *Security { + this := Security{} + return &this +} + +// GetCertAllowedCn returns the CertAllowedCn field value if set, zero value otherwise. +func (o *Security) GetCertAllowedCn() []string { + if o == nil || IsNil(o.CertAllowedCn) { + var ret []string + return ret + } + return o.CertAllowedCn +} + +// GetCertAllowedCnOk returns a tuple with the CertAllowedCn field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Security) GetCertAllowedCnOk() ([]string, bool) { + if o == nil || IsNil(o.CertAllowedCn) { + return nil, false + } + return o.CertAllowedCn, true +} + +// HasCertAllowedCn returns a boolean if a field has been set. +func (o *Security) HasCertAllowedCn() bool { + if o != nil && !IsNil(o.CertAllowedCn) { + return true + } + + return false +} + +// SetCertAllowedCn gets a reference to the given []string and assigns it to the CertAllowedCn field. +func (o *Security) SetCertAllowedCn(v []string) { + o.CertAllowedCn = v +} + +// GetSslCaContent returns the SslCaContent field value if set, zero value otherwise. 
+func (o *Security) GetSslCaContent() string { + if o == nil || IsNil(o.SslCaContent) { + var ret string + return ret + } + return *o.SslCaContent +} + +// GetSslCaContentOk returns a tuple with the SslCaContent field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Security) GetSslCaContentOk() (*string, bool) { + if o == nil || IsNil(o.SslCaContent) { + return nil, false + } + return o.SslCaContent, true +} + +// HasSslCaContent returns a boolean if a field has been set. +func (o *Security) HasSslCaContent() bool { + if o != nil && !IsNil(o.SslCaContent) { + return true + } + + return false +} + +// SetSslCaContent gets a reference to the given string and assigns it to the SslCaContent field. +func (o *Security) SetSslCaContent(v string) { + o.SslCaContent = &v +} + +// GetSslCertContent returns the SslCertContent field value if set, zero value otherwise. +func (o *Security) GetSslCertContent() string { + if o == nil || IsNil(o.SslCertContent) { + var ret string + return ret + } + return *o.SslCertContent +} + +// GetSslCertContentOk returns a tuple with the SslCertContent field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Security) GetSslCertContentOk() (*string, bool) { + if o == nil || IsNil(o.SslCertContent) { + return nil, false + } + return o.SslCertContent, true +} + +// HasSslCertContent returns a boolean if a field has been set. +func (o *Security) HasSslCertContent() bool { + if o != nil && !IsNil(o.SslCertContent) { + return true + } + + return false +} + +// SetSslCertContent gets a reference to the given string and assigns it to the SslCertContent field. +func (o *Security) SetSslCertContent(v string) { + o.SslCertContent = &v +} + +// GetSslKeyContent returns the SslKeyContent field value if set, zero value otherwise. 
+func (o *Security) GetSslKeyContent() string { + if o == nil || IsNil(o.SslKeyContent) { + var ret string + return ret + } + return *o.SslKeyContent +} + +// GetSslKeyContentOk returns a tuple with the SslKeyContent field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Security) GetSslKeyContentOk() (*string, bool) { + if o == nil || IsNil(o.SslKeyContent) { + return nil, false + } + return o.SslKeyContent, true +} + +// HasSslKeyContent returns a boolean if a field has been set. +func (o *Security) HasSslKeyContent() bool { + if o != nil && !IsNil(o.SslKeyContent) { + return true + } + + return false +} + +// SetSslKeyContent gets a reference to the given string and assigns it to the SslKeyContent field. +func (o *Security) SetSslKeyContent(v string) { + o.SslKeyContent = &v +} + +func (o Security) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Security) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.CertAllowedCn) { + toSerialize["certAllowedCn"] = o.CertAllowedCn + } + if !IsNil(o.SslCaContent) { + toSerialize["sslCaContent"] = o.SslCaContent + } + if !IsNil(o.SslCertContent) { + toSerialize["sslCertContent"] = o.SslCertContent + } + if !IsNil(o.SslKeyContent) { + toSerialize["sslKeyContent"] = o.SslKeyContent + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *Security) UnmarshalJSON(data []byte) (err error) { + varSecurity := _Security{} + + err = json.Unmarshal(data, &varSecurity) + + if err != nil { + return err + } + + *o = Security(varSecurity) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "certAllowedCn") + delete(additionalProperties, "sslCaContent") + 
delete(additionalProperties, "sslCertContent") + delete(additionalProperties, "sslKeyContent") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableSecurity struct { + value *Security + isSet bool +} + +func (v NullableSecurity) Get() *Security { + return v.value +} + +func (v *NullableSecurity) Set(val *Security) { + v.value = val + v.isSet = true +} + +func (v NullableSecurity) IsSet() bool { + return v.isSet +} + +func (v *NullableSecurity) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSecurity(val *Security) *NullableSecurity { + return &NullableSecurity{value: val, isSet: true} +} + +func (v NullableSecurity) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableSecurity) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_source.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_source.go new file mode 100644 index 00000000..99028000 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_source.go @@ -0,0 +1,420 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" + "fmt" +) + +// checks if the Source type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Source{} + +// Source struct for Source +type Source struct { + // Connection profile for the source database. + ConnProfile ConnProfile `json:"connProfile"` + // Block/allow rules for databases and tables, which is exclusive with route_rules. + BaRules *BlockAllowRules `json:"baRules,omitempty"` + // Table route rules,which is exclusive with ba_rules. + RouteRules []RouteRule `json:"routeRules,omitempty"` + // Starting binlog file name for incremental sync. 
+ BinlogName NullableString `json:"binlogName,omitempty"` + // Starting binlog position for incremental sync. + BinlogPos NullableInt32 `json:"binlogPos,omitempty"` + // Starting GTID set for incremental sync. + BinlogGtid NullableString `json:"binlogGtid,omitempty"` + // Source type (e.g., MySQL). + SourceType SourceSourceType `json:"sourceType"` + AdditionalProperties map[string]interface{} +} + +type _Source Source + +// NewSource instantiates a new Source object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewSource(connProfile ConnProfile, sourceType SourceSourceType) *Source { + this := Source{} + this.ConnProfile = connProfile + this.SourceType = sourceType + return &this +} + +// NewSourceWithDefaults instantiates a new Source object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewSourceWithDefaults() *Source { + this := Source{} + return &this +} + +// GetConnProfile returns the ConnProfile field value +func (o *Source) GetConnProfile() ConnProfile { + if o == nil { + var ret ConnProfile + return ret + } + + return o.ConnProfile +} + +// GetConnProfileOk returns a tuple with the ConnProfile field value +// and a boolean to check if the value has been set. +func (o *Source) GetConnProfileOk() (*ConnProfile, bool) { + if o == nil { + return nil, false + } + return &o.ConnProfile, true +} + +// SetConnProfile sets field value +func (o *Source) SetConnProfile(v ConnProfile) { + o.ConnProfile = v +} + +// GetBaRules returns the BaRules field value if set, zero value otherwise. 
+func (o *Source) GetBaRules() BlockAllowRules { + if o == nil || IsNil(o.BaRules) { + var ret BlockAllowRules + return ret + } + return *o.BaRules +} + +// GetBaRulesOk returns a tuple with the BaRules field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Source) GetBaRulesOk() (*BlockAllowRules, bool) { + if o == nil || IsNil(o.BaRules) { + return nil, false + } + return o.BaRules, true +} + +// HasBaRules returns a boolean if a field has been set. +func (o *Source) HasBaRules() bool { + if o != nil && !IsNil(o.BaRules) { + return true + } + + return false +} + +// SetBaRules gets a reference to the given BlockAllowRules and assigns it to the BaRules field. +func (o *Source) SetBaRules(v BlockAllowRules) { + o.BaRules = &v +} + +// GetRouteRules returns the RouteRules field value if set, zero value otherwise. +func (o *Source) GetRouteRules() []RouteRule { + if o == nil || IsNil(o.RouteRules) { + var ret []RouteRule + return ret + } + return o.RouteRules +} + +// GetRouteRulesOk returns a tuple with the RouteRules field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Source) GetRouteRulesOk() ([]RouteRule, bool) { + if o == nil || IsNil(o.RouteRules) { + return nil, false + } + return o.RouteRules, true +} + +// HasRouteRules returns a boolean if a field has been set. +func (o *Source) HasRouteRules() bool { + if o != nil && !IsNil(o.RouteRules) { + return true + } + + return false +} + +// SetRouteRules gets a reference to the given []RouteRule and assigns it to the RouteRules field. +func (o *Source) SetRouteRules(v []RouteRule) { + o.RouteRules = v +} + +// GetBinlogName returns the BinlogName field value if set, zero value otherwise (both if not set or set to explicit null). 
+func (o *Source) GetBinlogName() string { + if o == nil || IsNil(o.BinlogName.Get()) { + var ret string + return ret + } + return *o.BinlogName.Get() +} + +// GetBinlogNameOk returns a tuple with the BinlogName field value if set, nil otherwise +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *Source) GetBinlogNameOk() (*string, bool) { + if o == nil { + return nil, false + } + return o.BinlogName.Get(), o.BinlogName.IsSet() +} + +// HasBinlogName returns a boolean if a field has been set. +func (o *Source) HasBinlogName() bool { + if o != nil && o.BinlogName.IsSet() { + return true + } + + return false +} + +// SetBinlogName gets a reference to the given NullableString and assigns it to the BinlogName field. +func (o *Source) SetBinlogName(v string) { + o.BinlogName.Set(&v) +} + +// SetBinlogNameNil sets the value for BinlogName to be an explicit nil +func (o *Source) SetBinlogNameNil() { + o.BinlogName.Set(nil) +} + +// UnsetBinlogName ensures that no value is present for BinlogName, not even an explicit nil +func (o *Source) UnsetBinlogName() { + o.BinlogName.Unset() +} + +// GetBinlogPos returns the BinlogPos field value if set, zero value otherwise (both if not set or set to explicit null). +func (o *Source) GetBinlogPos() int32 { + if o == nil || IsNil(o.BinlogPos.Get()) { + var ret int32 + return ret + } + return *o.BinlogPos.Get() +} + +// GetBinlogPosOk returns a tuple with the BinlogPos field value if set, nil otherwise +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *Source) GetBinlogPosOk() (*int32, bool) { + if o == nil { + return nil, false + } + return o.BinlogPos.Get(), o.BinlogPos.IsSet() +} + +// HasBinlogPos returns a boolean if a field has been set. 
+func (o *Source) HasBinlogPos() bool { + if o != nil && o.BinlogPos.IsSet() { + return true + } + + return false +} + +// SetBinlogPos gets a reference to the given NullableInt32 and assigns it to the BinlogPos field. +func (o *Source) SetBinlogPos(v int32) { + o.BinlogPos.Set(&v) +} + +// SetBinlogPosNil sets the value for BinlogPos to be an explicit nil +func (o *Source) SetBinlogPosNil() { + o.BinlogPos.Set(nil) +} + +// UnsetBinlogPos ensures that no value is present for BinlogPos, not even an explicit nil +func (o *Source) UnsetBinlogPos() { + o.BinlogPos.Unset() +} + +// GetBinlogGtid returns the BinlogGtid field value if set, zero value otherwise (both if not set or set to explicit null). +func (o *Source) GetBinlogGtid() string { + if o == nil || IsNil(o.BinlogGtid.Get()) { + var ret string + return ret + } + return *o.BinlogGtid.Get() +} + +// GetBinlogGtidOk returns a tuple with the BinlogGtid field value if set, nil otherwise +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *Source) GetBinlogGtidOk() (*string, bool) { + if o == nil { + return nil, false + } + return o.BinlogGtid.Get(), o.BinlogGtid.IsSet() +} + +// HasBinlogGtid returns a boolean if a field has been set. +func (o *Source) HasBinlogGtid() bool { + if o != nil && o.BinlogGtid.IsSet() { + return true + } + + return false +} + +// SetBinlogGtid gets a reference to the given NullableString and assigns it to the BinlogGtid field. 
+func (o *Source) SetBinlogGtid(v string) { + o.BinlogGtid.Set(&v) +} + +// SetBinlogGtidNil sets the value for BinlogGtid to be an explicit nil +func (o *Source) SetBinlogGtidNil() { + o.BinlogGtid.Set(nil) +} + +// UnsetBinlogGtid ensures that no value is present for BinlogGtid, not even an explicit nil +func (o *Source) UnsetBinlogGtid() { + o.BinlogGtid.Unset() +} + +// GetSourceType returns the SourceType field value +func (o *Source) GetSourceType() SourceSourceType { + if o == nil { + var ret SourceSourceType + return ret + } + + return o.SourceType +} + +// GetSourceTypeOk returns a tuple with the SourceType field value +// and a boolean to check if the value has been set. +func (o *Source) GetSourceTypeOk() (*SourceSourceType, bool) { + if o == nil { + return nil, false + } + return &o.SourceType, true +} + +// SetSourceType sets field value +func (o *Source) SetSourceType(v SourceSourceType) { + o.SourceType = v +} + +func (o Source) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Source) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["connProfile"] = o.ConnProfile + if !IsNil(o.BaRules) { + toSerialize["baRules"] = o.BaRules + } + if !IsNil(o.RouteRules) { + toSerialize["routeRules"] = o.RouteRules + } + if o.BinlogName.IsSet() { + toSerialize["binlogName"] = o.BinlogName.Get() + } + if o.BinlogPos.IsSet() { + toSerialize["binlogPos"] = o.BinlogPos.Get() + } + if o.BinlogGtid.IsSet() { + toSerialize["binlogGtid"] = o.BinlogGtid.Get() + } + toSerialize["sourceType"] = o.SourceType + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *Source) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string 
keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "connProfile", + "sourceType", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err + } + + for _, requiredProperty := range requiredProperties { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varSource := _Source{} + + err = json.Unmarshal(data, &varSource) + + if err != nil { + return err + } + + *o = Source(varSource) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "connProfile") + delete(additionalProperties, "baRules") + delete(additionalProperties, "routeRules") + delete(additionalProperties, "binlogName") + delete(additionalProperties, "binlogPos") + delete(additionalProperties, "binlogGtid") + delete(additionalProperties, "sourceType") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableSource struct { + value *Source + isSet bool +} + +func (v NullableSource) Get() *Source { + return v.value +} + +func (v *NullableSource) Set(val *Source) { + v.value = val + v.isSet = true +} + +func (v NullableSource) IsSet() bool { + return v.isSet +} + +func (v *NullableSource) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSource(val *Source) *NullableSource { + return &NullableSource{value: val, isSet: true} +} + +func (v NullableSource) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableSource) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go new file mode 100644 index 
00000000..2148bcf3 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go @@ -0,0 +1,105 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// SourceSourceType The source database type. - SOURCE_TYPE_MYSQL: Self-managed MySQL. - SOURCE_TYPE_ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL. +type SourceSourceType string + +// List of Source.SourceType +const ( + SOURCESOURCETYPE_SOURCE_TYPE_MYSQL SourceSourceType = "SOURCE_TYPE_MYSQL" + SOURCESOURCETYPE_SOURCE_TYPE_ALICLOUD_RDS_MYSQL SourceSourceType = "SOURCE_TYPE_ALICLOUD_RDS_MYSQL" +) + +// All allowed values of SourceSourceType enum +var AllowedSourceSourceTypeEnumValues = []SourceSourceType{ + "SOURCE_TYPE_MYSQL", + "SOURCE_TYPE_ALICLOUD_RDS_MYSQL", +} + +func (v *SourceSourceType) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := SourceSourceType(value) + for _, existing := range AllowedSourceSourceTypeEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = SourceSourceType(value) + return nil +} + +// NewSourceSourceTypeFromValue returns a pointer to a valid SourceSourceType for the value passed as argument +func NewSourceSourceTypeFromValue(v string) *SourceSourceType { + ev := SourceSourceType(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v SourceSourceType) IsValid() bool { + for _, existing := range AllowedSourceSourceTypeEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to Source.SourceType value +func (v SourceSourceType) Ptr() *SourceSourceType { + return &v +} + +type NullableSourceSourceType struct { + value 
*SourceSourceType + isSet bool +} + +func (v NullableSourceSourceType) Get() *SourceSourceType { + return v.value +} + +func (v *NullableSourceSourceType) Set(val *SourceSourceType) { + v.value = val + v.isSet = true +} + +func (v NullableSourceSourceType) IsSet() bool { + return v.isSet +} + +func (v *NullableSourceSourceType) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSourceSourceType(val *SourceSourceType) *NullableSourceSourceType { + return &NullableSourceSourceType{value: val, isSet: true} +} + +func (v NullableSourceSourceType) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableSourceSourceType) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_status.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_status.go new file mode 100644 index 00000000..b46a9490 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_status.go @@ -0,0 +1,227 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package migration + +import ( + "encoding/json" +) + +// checks if the Status type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Status{} + +// Status struct for Status +type Status struct { + Code *int32 `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + Details []Any `json:"details,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _Status Status + +// NewStatus instantiates a new Status object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewStatus() *Status { + this := Status{} + return &this +} + +// NewStatusWithDefaults instantiates a new Status object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewStatusWithDefaults() *Status { + this := Status{} + return &this +} + +// GetCode returns the Code field value if set, zero value otherwise. +func (o *Status) GetCode() int32 { + if o == nil || IsNil(o.Code) { + var ret int32 + return ret + } + return *o.Code +} + +// GetCodeOk returns a tuple with the Code field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Status) GetCodeOk() (*int32, bool) { + if o == nil || IsNil(o.Code) { + return nil, false + } + return o.Code, true +} + +// HasCode returns a boolean if a field has been set. +func (o *Status) HasCode() bool { + if o != nil && !IsNil(o.Code) { + return true + } + + return false +} + +// SetCode gets a reference to the given int32 and assigns it to the Code field. +func (o *Status) SetCode(v int32) { + o.Code = &v +} + +// GetMessage returns the Message field value if set, zero value otherwise. 
+func (o *Status) GetMessage() string { + if o == nil || IsNil(o.Message) { + var ret string + return ret + } + return *o.Message +} + +// GetMessageOk returns a tuple with the Message field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Status) GetMessageOk() (*string, bool) { + if o == nil || IsNil(o.Message) { + return nil, false + } + return o.Message, true +} + +// HasMessage returns a boolean if a field has been set. +func (o *Status) HasMessage() bool { + if o != nil && !IsNil(o.Message) { + return true + } + + return false +} + +// SetMessage gets a reference to the given string and assigns it to the Message field. +func (o *Status) SetMessage(v string) { + o.Message = &v +} + +// GetDetails returns the Details field value if set, zero value otherwise. +func (o *Status) GetDetails() []Any { + if o == nil || IsNil(o.Details) { + var ret []Any + return ret + } + return o.Details +} + +// GetDetailsOk returns a tuple with the Details field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Status) GetDetailsOk() ([]Any, bool) { + if o == nil || IsNil(o.Details) { + return nil, false + } + return o.Details, true +} + +// HasDetails returns a boolean if a field has been set. +func (o *Status) HasDetails() bool { + if o != nil && !IsNil(o.Details) { + return true + } + + return false +} + +// SetDetails gets a reference to the given []Any and assigns it to the Details field. 
+func (o *Status) SetDetails(v []Any) { + o.Details = v +} + +func (o Status) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Status) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Code) { + toSerialize["code"] = o.Code + } + if !IsNil(o.Message) { + toSerialize["message"] = o.Message + } + if !IsNil(o.Details) { + toSerialize["details"] = o.Details + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *Status) UnmarshalJSON(data []byte) (err error) { + varStatus := _Status{} + + err = json.Unmarshal(data, &varStatus) + + if err != nil { + return err + } + + *o = Status(varStatus) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "code") + delete(additionalProperties, "message") + delete(additionalProperties, "details") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableStatus struct { + value *Status + isSet bool +} + +func (v NullableStatus) Get() *Status { + return v.value +} + +func (v *NullableStatus) Set(val *Status) { + v.value = val + v.isSet = true +} + +func (v NullableStatus) IsSet() bool { + return v.isSet +} + +func (v *NullableStatus) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableStatus(val *Status) *NullableStatus { + return &NullableStatus{value: val, isSet: true} +} + +func (v NullableStatus) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableStatus) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task.go new file mode 100644 index 00000000..b09a054c 
--- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task.go @@ -0,0 +1,393 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the SubTask type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &SubTask{} + +// SubTask struct for SubTask +type SubTask struct { + // Source configuration for this subtask. + Source *Source `json:"source,omitempty"` + // Current step of the subtask. + CurrentStep *SubTaskStep `json:"currentStep,omitempty"` + // Current stage of the subtask. + Stage *SubTaskStage `json:"stage,omitempty"` + // Detail of dump phase, if applicable. + DumpDetail *DumpDetail `json:"dumpDetail,omitempty"` + // Detail of load phase, if applicable. + LoadDetail *LoadDetail `json:"loadDetail,omitempty"` + // Detail of sync phase, if applicable. + SyncDetail *SyncDetail `json:"syncDetail,omitempty"` + // Error message when the subtask fails. + ErrorMsg NullableString `json:"errorMsg,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _SubTask SubTask + +// NewSubTask instantiates a new SubTask object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewSubTask() *SubTask { + this := SubTask{} + return &this +} + +// NewSubTaskWithDefaults instantiates a new SubTask object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewSubTaskWithDefaults() *SubTask { + this := SubTask{} + return &this +} + +// GetSource returns the Source field value if set, zero value otherwise. 
+func (o *SubTask) GetSource() Source { + if o == nil || IsNil(o.Source) { + var ret Source + return ret + } + return *o.Source +} + +// GetSourceOk returns a tuple with the Source field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *SubTask) GetSourceOk() (*Source, bool) { + if o == nil || IsNil(o.Source) { + return nil, false + } + return o.Source, true +} + +// HasSource returns a boolean if a field has been set. +func (o *SubTask) HasSource() bool { + if o != nil && !IsNil(o.Source) { + return true + } + + return false +} + +// SetSource gets a reference to the given Source and assigns it to the Source field. +func (o *SubTask) SetSource(v Source) { + o.Source = &v +} + +// GetCurrentStep returns the CurrentStep field value if set, zero value otherwise. +func (o *SubTask) GetCurrentStep() SubTaskStep { + if o == nil || IsNil(o.CurrentStep) { + var ret SubTaskStep + return ret + } + return *o.CurrentStep +} + +// GetCurrentStepOk returns a tuple with the CurrentStep field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *SubTask) GetCurrentStepOk() (*SubTaskStep, bool) { + if o == nil || IsNil(o.CurrentStep) { + return nil, false + } + return o.CurrentStep, true +} + +// HasCurrentStep returns a boolean if a field has been set. +func (o *SubTask) HasCurrentStep() bool { + if o != nil && !IsNil(o.CurrentStep) { + return true + } + + return false +} + +// SetCurrentStep gets a reference to the given SubTaskStep and assigns it to the CurrentStep field. +func (o *SubTask) SetCurrentStep(v SubTaskStep) { + o.CurrentStep = &v +} + +// GetStage returns the Stage field value if set, zero value otherwise. +func (o *SubTask) GetStage() SubTaskStage { + if o == nil || IsNil(o.Stage) { + var ret SubTaskStage + return ret + } + return *o.Stage +} + +// GetStageOk returns a tuple with the Stage field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *SubTask) GetStageOk() (*SubTaskStage, bool) { + if o == nil || IsNil(o.Stage) { + return nil, false + } + return o.Stage, true +} + +// HasStage returns a boolean if a field has been set. +func (o *SubTask) HasStage() bool { + if o != nil && !IsNil(o.Stage) { + return true + } + + return false +} + +// SetStage gets a reference to the given SubTaskStage and assigns it to the Stage field. +func (o *SubTask) SetStage(v SubTaskStage) { + o.Stage = &v +} + +// GetDumpDetail returns the DumpDetail field value if set, zero value otherwise. +func (o *SubTask) GetDumpDetail() DumpDetail { + if o == nil || IsNil(o.DumpDetail) { + var ret DumpDetail + return ret + } + return *o.DumpDetail +} + +// GetDumpDetailOk returns a tuple with the DumpDetail field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *SubTask) GetDumpDetailOk() (*DumpDetail, bool) { + if o == nil || IsNil(o.DumpDetail) { + return nil, false + } + return o.DumpDetail, true +} + +// HasDumpDetail returns a boolean if a field has been set. +func (o *SubTask) HasDumpDetail() bool { + if o != nil && !IsNil(o.DumpDetail) { + return true + } + + return false +} + +// SetDumpDetail gets a reference to the given DumpDetail and assigns it to the DumpDetail field. +func (o *SubTask) SetDumpDetail(v DumpDetail) { + o.DumpDetail = &v +} + +// GetLoadDetail returns the LoadDetail field value if set, zero value otherwise. +func (o *SubTask) GetLoadDetail() LoadDetail { + if o == nil || IsNil(o.LoadDetail) { + var ret LoadDetail + return ret + } + return *o.LoadDetail +} + +// GetLoadDetailOk returns a tuple with the LoadDetail field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *SubTask) GetLoadDetailOk() (*LoadDetail, bool) { + if o == nil || IsNil(o.LoadDetail) { + return nil, false + } + return o.LoadDetail, true +} + +// HasLoadDetail returns a boolean if a field has been set. 
+func (o *SubTask) HasLoadDetail() bool { + if o != nil && !IsNil(o.LoadDetail) { + return true + } + + return false +} + +// SetLoadDetail gets a reference to the given LoadDetail and assigns it to the LoadDetail field. +func (o *SubTask) SetLoadDetail(v LoadDetail) { + o.LoadDetail = &v +} + +// GetSyncDetail returns the SyncDetail field value if set, zero value otherwise. +func (o *SubTask) GetSyncDetail() SyncDetail { + if o == nil || IsNil(o.SyncDetail) { + var ret SyncDetail + return ret + } + return *o.SyncDetail +} + +// GetSyncDetailOk returns a tuple with the SyncDetail field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *SubTask) GetSyncDetailOk() (*SyncDetail, bool) { + if o == nil || IsNil(o.SyncDetail) { + return nil, false + } + return o.SyncDetail, true +} + +// HasSyncDetail returns a boolean if a field has been set. +func (o *SubTask) HasSyncDetail() bool { + if o != nil && !IsNil(o.SyncDetail) { + return true + } + + return false +} + +// SetSyncDetail gets a reference to the given SyncDetail and assigns it to the SyncDetail field. +func (o *SubTask) SetSyncDetail(v SyncDetail) { + o.SyncDetail = &v +} + +// GetErrorMsg returns the ErrorMsg field value if set, zero value otherwise (both if not set or set to explicit null). +func (o *SubTask) GetErrorMsg() string { + if o == nil || IsNil(o.ErrorMsg.Get()) { + var ret string + return ret + } + return *o.ErrorMsg.Get() +} + +// GetErrorMsgOk returns a tuple with the ErrorMsg field value if set, nil otherwise +// and a boolean to check if the value has been set. +// NOTE: If the value is an explicit nil, `nil, true` will be returned +func (o *SubTask) GetErrorMsgOk() (*string, bool) { + if o == nil { + return nil, false + } + return o.ErrorMsg.Get(), o.ErrorMsg.IsSet() +} + +// HasErrorMsg returns a boolean if a field has been set. 
+func (o *SubTask) HasErrorMsg() bool { + if o != nil && o.ErrorMsg.IsSet() { + return true + } + + return false +} + +// SetErrorMsg gets a reference to the given NullableString and assigns it to the ErrorMsg field. +func (o *SubTask) SetErrorMsg(v string) { + o.ErrorMsg.Set(&v) +} + +// SetErrorMsgNil sets the value for ErrorMsg to be an explicit nil +func (o *SubTask) SetErrorMsgNil() { + o.ErrorMsg.Set(nil) +} + +// UnsetErrorMsg ensures that no value is present for ErrorMsg, not even an explicit nil +func (o *SubTask) UnsetErrorMsg() { + o.ErrorMsg.Unset() +} + +func (o SubTask) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o SubTask) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Source) { + toSerialize["source"] = o.Source + } + if !IsNil(o.CurrentStep) { + toSerialize["currentStep"] = o.CurrentStep + } + if !IsNil(o.Stage) { + toSerialize["stage"] = o.Stage + } + if !IsNil(o.DumpDetail) { + toSerialize["dumpDetail"] = o.DumpDetail + } + if !IsNil(o.LoadDetail) { + toSerialize["loadDetail"] = o.LoadDetail + } + if !IsNil(o.SyncDetail) { + toSerialize["syncDetail"] = o.SyncDetail + } + if o.ErrorMsg.IsSet() { + toSerialize["errorMsg"] = o.ErrorMsg.Get() + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *SubTask) UnmarshalJSON(data []byte) (err error) { + varSubTask := _SubTask{} + + err = json.Unmarshal(data, &varSubTask) + + if err != nil { + return err + } + + *o = SubTask(varSubTask) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "source") + delete(additionalProperties, "currentStep") + delete(additionalProperties, "stage") + delete(additionalProperties, "dumpDetail") + delete(additionalProperties, 
"loadDetail") + delete(additionalProperties, "syncDetail") + delete(additionalProperties, "errorMsg") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableSubTask struct { + value *SubTask + isSet bool +} + +func (v NullableSubTask) Get() *SubTask { + return v.value +} + +func (v *NullableSubTask) Set(val *SubTask) { + v.value = val + v.isSet = true +} + +func (v NullableSubTask) IsSet() bool { + return v.isSet +} + +func (v *NullableSubTask) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSubTask(val *SubTask) *NullableSubTask { + return &NullableSubTask{value: val, isSet: true} +} + +func (v NullableSubTask) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableSubTask) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_stage.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_stage.go new file mode 100644 index 00000000..2bfba751 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_stage.go @@ -0,0 +1,111 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// SubTaskStage The high-level lifecycle stage of a subtask. - STAGE_RUNNING: Subtask is running. - STAGE_PAUSED: Subtask is paused. - STAGE_FAILED: Subtask failed. - STAGE_FINISHED: Subtask finished successfully. - STAGE_UNKNOWN: Subtask stage is unknown. 
+type SubTaskStage string + +// List of SubTask.Stage +const ( + SUBTASKSTAGE_STAGE_RUNNING SubTaskStage = "STAGE_RUNNING" + SUBTASKSTAGE_STAGE_PAUSED SubTaskStage = "STAGE_PAUSED" + SUBTASKSTAGE_STAGE_FAILED SubTaskStage = "STAGE_FAILED" + SUBTASKSTAGE_STAGE_FINISHED SubTaskStage = "STAGE_FINISHED" + SUBTASKSTAGE_STAGE_UNKNOWN SubTaskStage = "STAGE_UNKNOWN" +) + +// All allowed values of SubTaskStage enum +var AllowedSubTaskStageEnumValues = []SubTaskStage{ + "STAGE_RUNNING", + "STAGE_PAUSED", + "STAGE_FAILED", + "STAGE_FINISHED", + "STAGE_UNKNOWN", +} + +func (v *SubTaskStage) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := SubTaskStage(value) + for _, existing := range AllowedSubTaskStageEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = SubTaskStage(value) + return nil +} + +// NewSubTaskStageFromValue returns a pointer to a valid SubTaskStage for the value passed as argument +func NewSubTaskStageFromValue(v string) *SubTaskStage { + ev := SubTaskStage(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v SubTaskStage) IsValid() bool { + for _, existing := range AllowedSubTaskStageEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to SubTask.Stage value +func (v SubTaskStage) Ptr() *SubTaskStage { + return &v +} + +type NullableSubTaskStage struct { + value *SubTaskStage + isSet bool +} + +func (v NullableSubTaskStage) Get() *SubTaskStage { + return v.value +} + +func (v *NullableSubTaskStage) Set(val *SubTaskStage) { + v.value = val + v.isSet = true +} + +func (v NullableSubTaskStage) IsSet() bool { + return v.isSet +} + +func (v *NullableSubTaskStage) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSubTaskStage(val *SubTaskStage) *NullableSubTaskStage { + return &NullableSubTaskStage{value: val, 
isSet: true} +} + +func (v NullableSubTaskStage) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableSubTaskStage) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_step.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_step.go new file mode 100644 index 00000000..37baa0e1 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_step.go @@ -0,0 +1,107 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// SubTaskStep The current step within a subtask. - STEP_DUMP: Dump/export data from source. - STEP_LOAD: Load/import data into target. - STEP_SYNC: Sync/replicate binlog changes. +type SubTaskStep string + +// List of SubTask.Step +const ( + SUBTASKSTEP_STEP_DUMP SubTaskStep = "STEP_DUMP" + SUBTASKSTEP_STEP_LOAD SubTaskStep = "STEP_LOAD" + SUBTASKSTEP_STEP_SYNC SubTaskStep = "STEP_SYNC" +) + +// All allowed values of SubTaskStep enum +var AllowedSubTaskStepEnumValues = []SubTaskStep{ + "STEP_DUMP", + "STEP_LOAD", + "STEP_SYNC", +} + +func (v *SubTaskStep) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := SubTaskStep(value) + for _, existing := range AllowedSubTaskStepEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = SubTaskStep(value) + return nil +} + +// NewSubTaskStepFromValue returns a pointer to a valid SubTaskStep for the value passed as argument +func NewSubTaskStepFromValue(v string) *SubTaskStep { + ev := SubTaskStep(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v SubTaskStep) 
IsValid() bool { + for _, existing := range AllowedSubTaskStepEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to SubTask.Step value +func (v SubTaskStep) Ptr() *SubTaskStep { + return &v +} + +type NullableSubTaskStep struct { + value *SubTaskStep + isSet bool +} + +func (v NullableSubTaskStep) Get() *SubTaskStep { + return v.value +} + +func (v *NullableSubTaskStep) Set(val *SubTaskStep) { + v.value = val + v.isSet = true +} + +func (v NullableSubTaskStep) IsSet() bool { + return v.isSet +} + +func (v *NullableSubTaskStep) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSubTaskStep(val *SubTaskStep) *NullableSubTaskStep { + return &NullableSubTaskStep{value: val, isSet: true} +} + +func (v NullableSubTaskStep) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableSubTaskStep) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_sync_detail.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_sync_detail.go new file mode 100644 index 00000000..4ca0f618 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_sync_detail.go @@ -0,0 +1,230 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the SyncDetail type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &SyncDetail{} + +// SyncDetail struct for SyncDetail +type SyncDetail struct { + // Rows processed per second during sync. + Rps *string `json:"rps,omitempty"` + // Replication latency in seconds. + Latency *string `json:"latency,omitempty"` + // Synchronization checkpoint. 
+ Checkpoint *string `json:"checkpoint,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _SyncDetail SyncDetail + +// NewSyncDetail instantiates a new SyncDetail object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewSyncDetail() *SyncDetail { + this := SyncDetail{} + return &this +} + +// NewSyncDetailWithDefaults instantiates a new SyncDetail object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewSyncDetailWithDefaults() *SyncDetail { + this := SyncDetail{} + return &this +} + +// GetRps returns the Rps field value if set, zero value otherwise. +func (o *SyncDetail) GetRps() string { + if o == nil || IsNil(o.Rps) { + var ret string + return ret + } + return *o.Rps +} + +// GetRpsOk returns a tuple with the Rps field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *SyncDetail) GetRpsOk() (*string, bool) { + if o == nil || IsNil(o.Rps) { + return nil, false + } + return o.Rps, true +} + +// HasRps returns a boolean if a field has been set. +func (o *SyncDetail) HasRps() bool { + if o != nil && !IsNil(o.Rps) { + return true + } + + return false +} + +// SetRps gets a reference to the given string and assigns it to the Rps field. +func (o *SyncDetail) SetRps(v string) { + o.Rps = &v +} + +// GetLatency returns the Latency field value if set, zero value otherwise. +func (o *SyncDetail) GetLatency() string { + if o == nil || IsNil(o.Latency) { + var ret string + return ret + } + return *o.Latency +} + +// GetLatencyOk returns a tuple with the Latency field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *SyncDetail) GetLatencyOk() (*string, bool) { + if o == nil || IsNil(o.Latency) { + return nil, false + } + return o.Latency, true +} + +// HasLatency returns a boolean if a field has been set. +func (o *SyncDetail) HasLatency() bool { + if o != nil && !IsNil(o.Latency) { + return true + } + + return false +} + +// SetLatency gets a reference to the given string and assigns it to the Latency field. +func (o *SyncDetail) SetLatency(v string) { + o.Latency = &v +} + +// GetCheckpoint returns the Checkpoint field value if set, zero value otherwise. +func (o *SyncDetail) GetCheckpoint() string { + if o == nil || IsNil(o.Checkpoint) { + var ret string + return ret + } + return *o.Checkpoint +} + +// GetCheckpointOk returns a tuple with the Checkpoint field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *SyncDetail) GetCheckpointOk() (*string, bool) { + if o == nil || IsNil(o.Checkpoint) { + return nil, false + } + return o.Checkpoint, true +} + +// HasCheckpoint returns a boolean if a field has been set. +func (o *SyncDetail) HasCheckpoint() bool { + if o != nil && !IsNil(o.Checkpoint) { + return true + } + + return false +} + +// SetCheckpoint gets a reference to the given string and assigns it to the Checkpoint field. 
+func (o *SyncDetail) SetCheckpoint(v string) { + o.Checkpoint = &v +} + +func (o SyncDetail) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o SyncDetail) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Rps) { + toSerialize["rps"] = o.Rps + } + if !IsNil(o.Latency) { + toSerialize["latency"] = o.Latency + } + if !IsNil(o.Checkpoint) { + toSerialize["checkpoint"] = o.Checkpoint + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *SyncDetail) UnmarshalJSON(data []byte) (err error) { + varSyncDetail := _SyncDetail{} + + err = json.Unmarshal(data, &varSyncDetail) + + if err != nil { + return err + } + + *o = SyncDetail(varSyncDetail) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "rps") + delete(additionalProperties, "latency") + delete(additionalProperties, "checkpoint") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableSyncDetail struct { + value *SyncDetail + isSet bool +} + +func (v NullableSyncDetail) Get() *SyncDetail { + return v.value +} + +func (v *NullableSyncDetail) Set(val *SyncDetail) { + v.value = val + v.isSet = true +} + +func (v NullableSyncDetail) IsSet() bool { + return v.isSet +} + +func (v *NullableSyncDetail) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSyncDetail(val *SyncDetail) *NullableSyncDetail { + return &NullableSyncDetail{value: val, isSet: true} +} + +func (v NullableSyncDetail) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableSyncDetail) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_target.go 
b/pkg/tidbcloud/v1beta1/serverless/migration/model_target.go new file mode 100644 index 00000000..43ae7dc2 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_target.go @@ -0,0 +1,192 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// checks if the Target type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Target{} + +// Target struct for Target +type Target struct { + // Target database user. + User *string `json:"user,omitempty"` + // Target database password. + Password *string `json:"password,omitempty"` + AdditionalProperties map[string]interface{} +} + +type _Target Target + +// NewTarget instantiates a new Target object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewTarget() *Target { + this := Target{} + return &this +} + +// NewTargetWithDefaults instantiates a new Target object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewTargetWithDefaults() *Target { + this := Target{} + return &this +} + +// GetUser returns the User field value if set, zero value otherwise. +func (o *Target) GetUser() string { + if o == nil || IsNil(o.User) { + var ret string + return ret + } + return *o.User +} + +// GetUserOk returns a tuple with the User field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *Target) GetUserOk() (*string, bool) { + if o == nil || IsNil(o.User) { + return nil, false + } + return o.User, true +} + +// HasUser returns a boolean if a field has been set. +func (o *Target) HasUser() bool { + if o != nil && !IsNil(o.User) { + return true + } + + return false +} + +// SetUser gets a reference to the given string and assigns it to the User field. +func (o *Target) SetUser(v string) { + o.User = &v +} + +// GetPassword returns the Password field value if set, zero value otherwise. +func (o *Target) GetPassword() string { + if o == nil || IsNil(o.Password) { + var ret string + return ret + } + return *o.Password +} + +// GetPasswordOk returns a tuple with the Password field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Target) GetPasswordOk() (*string, bool) { + if o == nil || IsNil(o.Password) { + return nil, false + } + return o.Password, true +} + +// HasPassword returns a boolean if a field has been set. +func (o *Target) HasPassword() bool { + if o != nil && !IsNil(o.Password) { + return true + } + + return false +} + +// SetPassword gets a reference to the given string and assigns it to the Password field. 
+func (o *Target) SetPassword(v string) { + o.Password = &v +} + +func (o Target) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Target) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.User) { + toSerialize["user"] = o.User + } + if !IsNil(o.Password) { + toSerialize["password"] = o.Password + } + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *Target) UnmarshalJSON(data []byte) (err error) { + varTarget := _Target{} + + err = json.Unmarshal(data, &varTarget) + + if err != nil { + return err + } + + *o = Target(varTarget) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "user") + delete(additionalProperties, "password") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableTarget struct { + value *Target + isSet bool +} + +func (v NullableTarget) Get() *Target { + return v.value +} + +func (v *NullableTarget) Set(val *Target) { + v.value = val + v.isSet = true +} + +func (v NullableTarget) IsSet() bool { + return v.isSet +} + +func (v *NullableTarget) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableTarget(val *Target) *NullableTarget { + return &NullableTarget{value: val, isSet: true} +} + +func (v NullableTarget) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableTarget) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_task_mode.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_task_mode.go new file mode 100644 index 00000000..8dd44bf8 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_task_mode.go @@ -0,0 +1,105 
@@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// TaskMode Migration task mode. - MODE_ALL: Full + incremental migration (all phases). - MODE_INCREMENTAL: Incremental-only migration (replication). +type TaskMode string + +// List of TaskMode +const ( + TASKMODE_MODE_ALL TaskMode = "MODE_ALL" + TASKMODE_MODE_INCREMENTAL TaskMode = "MODE_INCREMENTAL" +) + +// All allowed values of TaskMode enum +var AllowedTaskModeEnumValues = []TaskMode{ + "MODE_ALL", + "MODE_INCREMENTAL", +} + +func (v *TaskMode) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := TaskMode(value) + for _, existing := range AllowedTaskModeEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = TaskMode(value) + return nil +} + +// NewTaskModeFromValue returns a pointer to a valid TaskMode for the value passed as argument +func NewTaskModeFromValue(v string) *TaskMode { + ev := TaskMode(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v TaskMode) IsValid() bool { + for _, existing := range AllowedTaskModeEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to TaskMode value +func (v TaskMode) Ptr() *TaskMode { + return &v +} + +type NullableTaskMode struct { + value *TaskMode + isSet bool +} + +func (v NullableTaskMode) Get() *TaskMode { + return v.value +} + +func (v *NullableTaskMode) Set(val *TaskMode) { + v.value = val + v.isSet = true +} + +func (v NullableTaskMode) IsSet() bool { + return v.isSet +} + +func (v *NullableTaskMode) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableTaskMode(val *TaskMode) *NullableTaskMode { + return 
&NullableTaskMode{value: val, isSet: true} +} + +func (v NullableTaskMode) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableTaskMode) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/response.go b/pkg/tidbcloud/v1beta1/serverless/migration/response.go new file mode 100644 index 00000000..0f0f014b --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/response.go @@ -0,0 +1,47 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "net/http" +) + +// APIResponse stores the API response returned by the server. +type APIResponse struct { + *http.Response `json:"-"` + Message string `json:"message,omitempty"` + // Operation is the name of the OpenAPI operation. + Operation string `json:"operation,omitempty"` + // RequestURL is the request URL. This value is always available, even if the + // embedded *http.Response is nil. + RequestURL string `json:"url,omitempty"` + // Method is the HTTP method used for the request. This value is always + // available, even if the embedded *http.Response is nil. + Method string `json:"method,omitempty"` + // Payload holds the contents of the response body (which may be nil or empty). + // This is provided here as the raw response.Body() reader will have already + // been drained. + Payload []byte `json:"-"` +} + +// NewAPIResponse returns a new APIResponse object. +func NewAPIResponse(r *http.Response) *APIResponse { + + response := &APIResponse{Response: r} + return response +} + +// NewAPIResponseWithError returns a new APIResponse object with the provided error message. 
+func NewAPIResponseWithError(errorMessage string) *APIResponse { + + response := &APIResponse{Message: errorMessage} + return response +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/utils.go b/pkg/tidbcloud/v1beta1/serverless/migration/utils.go new file mode 100644 index 00000000..4a26ea61 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/utils.go @@ -0,0 +1,361 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "time" +) + +// PtrBool is a helper routine that returns a pointer to given boolean value. +func PtrBool(v bool) *bool { return &v } + +// PtrInt is a helper routine that returns a pointer to given integer value. +func PtrInt(v int) *int { return &v } + +// PtrInt32 is a helper routine that returns a pointer to given integer value. +func PtrInt32(v int32) *int32 { return &v } + +// PtrInt64 is a helper routine that returns a pointer to given integer value. +func PtrInt64(v int64) *int64 { return &v } + +// PtrFloat32 is a helper routine that returns a pointer to given float value. +func PtrFloat32(v float32) *float32 { return &v } + +// PtrFloat64 is a helper routine that returns a pointer to given float value. +func PtrFloat64(v float64) *float64 { return &v } + +// PtrString is a helper routine that returns a pointer to given string value. +func PtrString(v string) *string { return &v } + +// PtrTime is helper routine that returns a pointer to given Time value. 
+func PtrTime(v time.Time) *time.Time { return &v } + +type NullableBool struct { + value *bool + isSet bool +} + +func (v NullableBool) Get() *bool { + return v.value +} + +func (v *NullableBool) Set(val *bool) { + v.value = val + v.isSet = true +} + +func (v NullableBool) IsSet() bool { + return v.isSet +} + +func (v *NullableBool) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBool(val *bool) *NullableBool { + return &NullableBool{value: val, isSet: true} +} + +func (v NullableBool) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBool) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableInt struct { + value *int + isSet bool +} + +func (v NullableInt) Get() *int { + return v.value +} + +func (v *NullableInt) Set(val *int) { + v.value = val + v.isSet = true +} + +func (v NullableInt) IsSet() bool { + return v.isSet +} + +func (v *NullableInt) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt(val *int) *NullableInt { + return &NullableInt{value: val, isSet: true} +} + +func (v NullableInt) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableInt32 struct { + value *int32 + isSet bool +} + +func (v NullableInt32) Get() *int32 { + return v.value +} + +func (v *NullableInt32) Set(val *int32) { + v.value = val + v.isSet = true +} + +func (v NullableInt32) IsSet() bool { + return v.isSet +} + +func (v *NullableInt32) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt32(val *int32) *NullableInt32 { + return &NullableInt32{value: val, isSet: true} +} + +func (v NullableInt32) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt32) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type 
NullableInt64 struct { + value *int64 + isSet bool +} + +func (v NullableInt64) Get() *int64 { + return v.value +} + +func (v *NullableInt64) Set(val *int64) { + v.value = val + v.isSet = true +} + +func (v NullableInt64) IsSet() bool { + return v.isSet +} + +func (v *NullableInt64) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt64(val *int64) *NullableInt64 { + return &NullableInt64{value: val, isSet: true} +} + +func (v NullableInt64) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt64) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableFloat32 struct { + value *float32 + isSet bool +} + +func (v NullableFloat32) Get() *float32 { + return v.value +} + +func (v *NullableFloat32) Set(val *float32) { + v.value = val + v.isSet = true +} + +func (v NullableFloat32) IsSet() bool { + return v.isSet +} + +func (v *NullableFloat32) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableFloat32(val *float32) *NullableFloat32 { + return &NullableFloat32{value: val, isSet: true} +} + +func (v NullableFloat32) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableFloat32) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableFloat64 struct { + value *float64 + isSet bool +} + +func (v NullableFloat64) Get() *float64 { + return v.value +} + +func (v *NullableFloat64) Set(val *float64) { + v.value = val + v.isSet = true +} + +func (v NullableFloat64) IsSet() bool { + return v.isSet +} + +func (v *NullableFloat64) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableFloat64(val *float64) *NullableFloat64 { + return &NullableFloat64{value: val, isSet: true} +} + +func (v NullableFloat64) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableFloat64) UnmarshalJSON(src []byte) error { + v.isSet = true + return 
json.Unmarshal(src, &v.value) +} + +type NullableString struct { + value *string + isSet bool +} + +func (v NullableString) Get() *string { + return v.value +} + +func (v *NullableString) Set(val *string) { + v.value = val + v.isSet = true +} + +func (v NullableString) IsSet() bool { + return v.isSet +} + +func (v *NullableString) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableString(val *string) *NullableString { + return &NullableString{value: val, isSet: true} +} + +func (v NullableString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableString) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableTime struct { + value *time.Time + isSet bool +} + +func (v NullableTime) Get() *time.Time { + return v.value +} + +func (v *NullableTime) Set(val *time.Time) { + v.value = val + v.isSet = true +} + +func (v NullableTime) IsSet() bool { + return v.isSet +} + +func (v *NullableTime) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableTime(val *time.Time) *NullableTime { + return &NullableTime{value: val, isSet: true} +} + +func (v NullableTime) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableTime) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +// IsNil checks if an input is nil +func IsNil(i interface{}) bool { + if i == nil { + return true + } + switch reflect.TypeOf(i).Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: + return reflect.ValueOf(i).IsNil() + case reflect.Array: + return reflect.ValueOf(i).IsZero() + } + return false +} + +type MappedNullable interface { + ToMap() (map[string]interface{}, error) +} + +// A wrapper for strict JSON decoding +func newStrictDecoder(data []byte) *json.Decoder { + dec := json.NewDecoder(bytes.NewBuffer(data)) + dec.DisallowUnknownFields() + 
return dec +} + +// Prevent trying to import "fmt" +func reportError(format string, a ...interface{}) error { + return fmt.Errorf(format, a...) +} diff --git a/tools/openapi-generator/openapitools.json b/tools/openapi-generator/openapitools.json index 6f7db3e8..dec90b31 100644 --- a/tools/openapi-generator/openapitools.json +++ b/tools/openapi-generator/openapitools.json @@ -2,6 +2,9 @@ "$schema": "./node_modules/@openapitools/openapi-generator-cli/config.schema.json", "spaces": 2, "generator-cli": { - "version": "7.12.0" + "version": "7.12.0", + "repository": { + "downloadUrl": "https://maven.aliyun.com/repository/public/org/openapitools/openapi-generator-cli/7.12.0/openapi-generator-cli-7.12.0.jar" + } } } From e815208713e494949a7fa8d2a5fd8d3022959558 Mon Sep 17 00:00:00 2001 From: yangxin Date: Mon, 24 Nov 2025 15:24:17 +0800 Subject: [PATCH 02/19] update json --- .../v1beta1/serverless/dm.swagger.json | 437 +++++++---------- .../migration/.openapi-generator/FILES | 11 +- .../v1beta1/serverless/migration/README.md | 29 +- .../serverless/migration/api/openapi.yaml | 455 +++++++++--------- .../serverless/migration/api_migration.go | 302 ++++++------ .../migration/model_block_allow_rules.go | 12 +- .../model_block_allow_rules_table.go | 192 -------- .../migration/model_conn_profile.go | 36 +- .../model_create_migration_precheck_resp.go | 36 +- ..._resp.go => model_list_migrations_resp.go} | 114 ++--- ...l_migration_task.go => model_migration.go} | 186 +++---- .../migration/model_migration_precheck.go | 38 ++ ...migration_service_create_migration_body.go | 257 ++++++++++ ...odel_migration_service_create_task_body.go | 319 ------------ .../model_migration_service_precheck_body.go | 152 ++---- .../migration/model_migration_state.go | 111 +++++ .../migration/model_migration_task_state.go | 111 ----- .../migration/model_precheck_item.go | 36 +- .../migration/model_precheck_item_type.go | 58 +-- .../serverless/migration/model_route_rule.go | 12 +- 
.../serverless/migration/model_source.go | 46 +- .../migration/model_source_source_type.go | 10 +- .../migration/model_sub_task_stage.go | 22 +- .../migration/model_sub_task_step.go | 14 +- ...el_route_rule_target.go => model_table.go} | 74 +-- .../serverless/migration/model_target.go | 91 ++-- .../serverless/migration/model_task_mode.go | 10 +- 27 files changed, 1406 insertions(+), 1765 deletions(-) delete mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules_table.go rename pkg/tidbcloud/v1beta1/serverless/migration/{model_list_migration_tasks_resp.go => model_list_migrations_resp.go} (51%) rename pkg/tidbcloud/v1beta1/serverless/migration/{model_migration_task.go => model_migration.go} (56%) create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_migration_body.go delete mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_task_body.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_migration_state.go delete mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task_state.go rename pkg/tidbcloud/v1beta1/serverless/migration/{model_route_rule_target.go => model_table.go} (61%) diff --git a/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json b/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json index 2c7fbfb2..1f15290a 100644 --- a/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json +++ b/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json @@ -11,25 +11,19 @@ } ], "host": "serverless.tidbapi.com", - "schemes": [ - "https" - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], + "schemes": ["https"], + "consumes": ["application/json"], + "produces": ["application/json"], "paths": { "/v1beta1/clusters/{clusterId}/migrations": { "get": { - "summary": "List migration tasks", - "operationId": "MigrationService_ListTasks", + "summary": "List migrations", + "operationId": "MigrationService_ListMigrations", "responses": 
{ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/ListMigrationTasksResp" + "$ref": "#/definitions/ListMigrationsResp" } }, "default": { @@ -42,7 +36,7 @@ "parameters": [ { "name": "clusterId", - "description": "The ID of the cluster to list tasks for.", + "description": "The ID of the cluster to list migrations for.", "in": "path", "required": true, "type": "string" @@ -71,9 +65,7 @@ "type": "string" } ], - "tags": [ - "Migration" - ], + "tags": ["Migration"], "x-codeSamples": [ { "label": "curl", @@ -83,13 +75,13 @@ ] }, "post": { - "summary": "Create a migration task", - "operationId": "MigrationService_CreateTask", + "summary": "Create a migration", + "operationId": "MigrationService_CreateMigration", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/MigrationTask" + "$ref": "#/definitions/Migration" } }, "default": { @@ -102,7 +94,7 @@ "parameters": [ { "name": "clusterId", - "description": "The ID of the cluster to create the migration task in.", + "description": "The ID of the cluster to create the migration in.", "in": "path", "required": true, "type": "string" @@ -112,31 +104,29 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/MigrationService.CreateTaskBody" + "$ref": "#/definitions/MigrationService.CreateMigrationBody" } } ], - "tags": [ - "Migration" - ], + "tags": ["Migration"], "x-codeSamples": [ { "label": "curl", "lang": "cURL", - "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json' \\\n+--data '{\\n \"name\": \"my-migration\",\\n \"sources\": [\\n {\\n \"connProfile\": {\\n \"connType\": \"PUBLIC\",\\n \"host\": \"1.2.3.4\",\\n \"port\": 3306,\\n \"user\": \"root\",\\n \"password\": \"secret\"\\n },\\n \"sourceType\": \"SOURCE_TYPE_MYSQL\"\\n }\\n 
],\\n \"target\": {\\n \"user\": \"tidb\",\\n \"password\": \"tidb_password\"\\n },\\n \"mode\": \"MODE_ALL\",\\n \"fullDataMigration\": true\\n}'" + "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json' \\\n+--data '{\\n \"name\": \"my-migration\",\\n \"sources\": [\\n {\\n \"connProfile\": {\\n \"connType\": \"PUBLIC\",\\n \"host\": \"1.2.3.4\",\\n \"port\": 3306,\\n \"user\": \"root\",\\n \"password\": \"secret\"\\n },\\n \"sourceType\": \"MYSQL\"\\n }\\n ],\\n \"target\": {\\n \"user\": \"tidb\",\\n \"password\": \"tidb_password\"\\n },\\n \"mode\": \"ALL\"\\n}'" } ] } }, - "/v1beta1/clusters/{clusterId}/migrations/{id}": { + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}": { "get": { - "summary": "Get a migration task", - "operationId": "MigrationService_GetTask", + "summary": "Get a migration", + "operationId": "MigrationService_GetMigration", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/MigrationTask" + "$ref": "#/definitions/Migration" } }, "default": { @@ -155,32 +145,30 @@ "type": "string" }, { - "name": "id", - "description": "The ID of the migration task.", + "name": "migrationId", + "description": "The ID of the migration.", "in": "path", "required": true, "type": "string" } ], - "tags": [ - "Migration" - ], + "tags": ["Migration"], "x-codeSamples": [ { "label": "curl", "lang": "cURL", - "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" + "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" 
} ] }, "delete": { - "summary": "Cancel a migration task", - "operationId": "MigrationService_CancelTask", + "summary": "Cancel a migration", + "operationId": "MigrationService_CancelMigration", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/MigrationTask" + "$ref": "#/definitions/Migration" } }, "default": { @@ -199,29 +187,27 @@ "type": "string" }, { - "name": "id", - "description": "The ID of the migration task to cancel.", + "name": "migrationId", + "description": "The ID of the migration to cancel.", "in": "path", "required": true, "type": "string" } ], - "tags": [ - "Migration" - ], + "tags": ["Migration"], "x-codeSamples": [ { "label": "curl", "lang": "cURL", - "source": "curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" + "source": "curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" } ] } }, - "/v1beta1/clusters/{clusterId}/migrations/{id}:pause": { + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}:pause": { "post": { - "summary": "Pause a running migration task", - "operationId": "MigrationService_PauseTask", + "summary": "Pause a running migration", + "operationId": "MigrationService_PauseMigration", "responses": { "200": { "description": "A successful response.", @@ -246,8 +232,8 @@ "type": "string" }, { - "name": "id", - "description": "The ID of the migration task to pause.", + "name": "migrationId", + "description": "The ID of the migration to pause.", "in": "path", "required": true, "type": "string" @@ -257,26 +243,24 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/MigrationService.PauseTaskBody" + "$ref": 
"#/definitions/MigrationService.PauseMigrationBody" } } ], - "tags": [ - "Migration" - ], + "tags": ["Migration"], "x-codeSamples": [ { "label": "curl", "lang": "cURL", - "source": "curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}:pause' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json' \\\n+--data '{}'" + "source": "curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}:pause' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json'" } ] } }, - "/v1beta1/clusters/{clusterId}/migrations/{id}:resume": { + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}:resume": { "post": { - "summary": "Resume a paused migration task", - "operationId": "MigrationService_ResumeTask", + "summary": "Resume a paused migration", + "operationId": "MigrationService_ResumeMigration", "responses": { "200": { "description": "A successful response.", @@ -301,8 +285,8 @@ "type": "string" }, { - "name": "id", - "description": "The ID of the migration task to resume.", + "name": "migrationId", + "description": "The ID of the migration to resume.", "in": "path", "required": true, "type": "string" @@ -312,25 +296,23 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/MigrationService.ResumeTaskBody" + "$ref": "#/definitions/MigrationService.ResumeMigrationBody" } } ], - "tags": [ - "Migration" - ], + "tags": ["Migration"], "x-codeSamples": [ { "label": "curl", "lang": "cURL", - "source": "curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}:resume' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json' \\\n+--data '{}'" + 
"source": "curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}:resume' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json'" } ] } }, "/v1beta1/clusters/{clusterId}/migrationPrechecks": { "post": { - "summary": "Run a precheck for a migration task", + "summary": "Run a precheck for a migration", "operationId": "MigrationService_Precheck", "responses": { "200": { @@ -349,7 +331,7 @@ "parameters": [ { "name": "clusterId", - "description": "The ID of the cluster to create the migration task in.", + "description": "The ID of the cluster to create the migration in.", "in": "path", "required": true, "type": "string" @@ -363,19 +345,17 @@ } } ], - "tags": [ - "Migration" - ], + "tags": ["Migration"], "x-codeSamples": [ { "label": "curl", "lang": "cURL", - "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json' \\\n+--data '{\\n \"name\": \"my-migration\",\\n \"sources\": [\\n {\\n \"connProfile\": {\\n \"connType\": \"PUBLIC\",\\n \"host\": \"1.2.3.4\",\\n \"port\": 3306,\\n \"user\": \"root\",\\n \"password\": \"secret\"\\n },\\n \"sourceType\": \"SOURCE_TYPE_MYSQL\"\\n }\\n ],\\n \"target\": {\\n \"user\": \"tidb\",\\n \"password\": \"tidb_password\"\\n },\\n \"mode\": \"MODE_ALL\",\\n \"fullDataMigration\": true\\n}'" + "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Content-Type: application/json' \\\n+--header 'Accept: application/json' \\\n+--data '{\\n \"name\": \"my-migration\",\\n \"sources\": [\\n {\\n \"connProfile\": {\\n \"connType\": \"PUBLIC\",\\n \"host\": \"1.2.3.4\",\\n 
\"port\": 3306,\\n \"user\": \"root\",\\n \"password\": \"secret\"\\n },\\n \"sourceType\": \"MYSQL\"\\n }\\n ],\\n \"target\": {\\n \"user\": \"tidb\",\\n \"password\": \"tidb_password\"\\n },\\n \"mode\": \"ALL\"\\n}'" } ] } }, - "/v1beta1/clusters/{clusterId}/migrationPrechecks/{id}": { + "/v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId}": { "get": { "summary": "Get a migration precheck", "operationId": "MigrationService_GetPrecheck", @@ -402,21 +382,19 @@ "type": "string" }, { - "name": "id", + "name": "precheckId", "description": "The ID of the precheck.", "in": "path", "required": true, "type": "string" } ], - "tags": [ - "Migration" - ], + "tags": ["Migration"], "x-codeSamples": [ { "label": "curl", "lang": "cURL", - "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" + "source": "curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{precheck_id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" } ] }, @@ -447,21 +425,19 @@ "type": "string" }, { - "name": "id", + "name": "precheckId", "description": "The ID of the precheck to cancel.", "in": "path", "required": true, "type": "string" } ], - "tags": [ - "Migration" - ], + "tags": ["Migration"], "x-codeSamples": [ { "label": "curl", "lang": "cURL", - "source": "curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" + "source": "curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{precheck_id}' \\\n+--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \\\n+--header 'Accept: application/json'" } ] } @@ -491,25 +467,12 @@ "type": 
"array", "items": { "type": "object", - "$ref": "#/definitions/BlockAllowRules.Table" + "$ref": "#/definitions/Table" }, "description": "Table-level allow-list rules." } } }, - "BlockAllowRules.Table": { - "type": "object", - "properties": { - "schema": { - "type": "string", - "description": "Schema name." - }, - "table": { - "type": "string", - "description": "Table name." - } - } - }, "ConnProfile": { "type": "object", "properties": { @@ -551,24 +514,17 @@ ] } }, - "required": [ - "port", - "user", - "password" - ] + "required": ["connType", "port", "user", "password"] }, "ConnType": { "type": "string", - "enum": [ - "PUBLIC", - "PRIVATE_LINK" - ], + "enum": ["PUBLIC", "PRIVATE_LINK"], "description": "The connection type used to connect to the source database.\n\n - PUBLIC: Connect over the public internet.\n - PRIVATE_LINK: Connect via Private Link/Private Endpoint." }, "CreateMigrationPrecheckResp": { "type": "object", "properties": { - "id": { + "precheckId": { "type": "string", "description": "The ID of the created precheck.", "readOnly": true @@ -616,22 +572,22 @@ } } }, - "ListMigrationTasksResp": { + "ListMigrationsResp": { "type": "object", "properties": { - "tasks": { + "migrations": { "type": "array", "items": { "type": "object", - "$ref": "#/definitions/MigrationTask" + "$ref": "#/definitions/Migration" }, - "description": "The list of migration tasks.", + "description": "The list of migrations.", "readOnly": true }, "totalSize": { "type": "integer", "format": "int64", - "description": "The total number of tasks matching the query.", + "description": "The total number of migrations matching the query.", "readOnly": true }, "nextPageToken": { @@ -670,9 +626,72 @@ } } }, + "Migration": { + "type": "object", + "properties": { + "migrationId": { + "type": "string", + "description": "The unique ID of the migration.", + "readOnly": true + }, + "displayName": { + "type": "string", + "description": "The display name of the migration.", + "readOnly": true + }, 
+ "subTasks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/SubTask" + }, + "description": "The list of subtasks composing this migration.", + "readOnly": true + }, + "targetUser": { + "type": "string", + "description": "The target database username used by the migration.", + "readOnly": true + }, + "createTime": { + "type": "string", + "format": "date-time", + "description": "The timestamp when the migration was created.", + "readOnly": true + }, + "mode": { + "description": "The migration mode of the migration.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/TaskMode" + } + ] + }, + "state": { + "description": "The current state of the migration.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/Migration.State" + } + ] + } + } + }, + "Migration.State": { + "type": "string", + "enum": ["CREATING", "RUNNING", "PAUSED", "CANCELED", "FAILED"], + "description": "Overall state of a migration.\n\n - CREATING: Task is being created.\n - RUNNING: Task is actively running.\n - PAUSED: Task is paused.\n - CANCELED: Task has been canceled.\n - FAILED: Task failed with error." + }, "MigrationPrecheck": { "type": "object", "properties": { + "precheckId": { + "type": "string", + "description": "The ID of the precheck.", + "readOnly": true + }, "total": { "type": "integer", "format": "int32", @@ -713,12 +732,12 @@ } } }, - "MigrationService.CreateTaskBody": { + "MigrationService.CreateMigrationBody": { "type": "object", "properties": { - "name": { + "displayName": { "type": "string", - "description": "The display name of the migration task." + "description": "The display name of the migration." }, "sources": { "type": "array", @@ -743,26 +762,20 @@ "$ref": "#/definitions/TaskMode" } ] - }, - "fullDataMigration": { - "type": "boolean", - "description": "If true, migrate all user data (equivalent to enabling all non-system databases and tables)." 
} }, - "required": [ - "sources" - ] + "required": ["displayName", "sources", "target", "mode"] }, - "MigrationService.PauseTaskBody": { + "MigrationService.PauseMigrationBody": { "type": "object", - "title": "PauseMigrationTaskReq is used to pause a migration task" + "title": "PauseMigrationReq is used to pause a migration" }, "MigrationService.PrecheckBody": { "type": "object", "properties": { - "name": { + "displayName": { "type": "string", - "description": "The display name of the migration task." + "description": "The display name of the migration." }, "sources": { "type": "array", @@ -787,86 +800,18 @@ "$ref": "#/definitions/TaskMode" } ] - }, - "fullDataMigration": { - "type": "boolean", - "description": "If true, migrate all user data (equivalent to enabling all non-system databases and tables)." } }, - "required": [ - "sources" - ] + "required": ["displayName", "sources", "target", "mode"] }, - "MigrationService.ResumeTaskBody": { + "MigrationService.ResumeMigrationBody": { "type": "object", - "title": "ResumeMigrationTaskReq is used to resume a paused migration task" - }, - "MigrationTask": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "The unique ID of the migration task.", - "readOnly": true - }, - "name": { - "type": "string", - "description": "The display name of the migration task." 
- }, - "subTasks": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/SubTask" - }, - "description": "The list of subtasks composing this migration.", - "readOnly": true - }, - "targetUser": { - "type": "string", - "description": "The target database username used by the task.", - "readOnly": true - }, - "createTime": { - "type": "string", - "format": "date-time", - "description": "The timestamp when the task was created.", - "readOnly": true - }, - "mode": { - "description": "The migration mode of the task.", - "allOf": [ - { - "$ref": "#/definitions/TaskMode" - } - ] - }, - "state": { - "description": "The current state of the task.", - "readOnly": true, - "allOf": [ - { - "$ref": "#/definitions/MigrationTask.State" - } - ] - } - } - }, - "MigrationTask.State": { - "type": "string", - "enum": [ - "CREATING", - "RUNNING", - "PAUSED", - "CANCELED", - "FAILED" - ], - "description": "Overall state of a migration task.\n\n - CREATING: Task is being created.\n - RUNNING: Task is actively running.\n - PAUSED: Task is paused.\n - CANCELED: Task has been canceled.\n - FAILED: Task failed with error." 
+ "title": "ResumeMigrationReq is used to resume a paused migration" }, "PrecheckItem": { "type": "object", "properties": { - "desc": { + "description": { "type": "string", "description": "Human-readable description of the check.", "readOnly": true @@ -905,22 +850,22 @@ "PrecheckItemType": { "type": "string", "enum": [ - "PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING", - "PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING", - "PRECHECK_ITEM_TYPE_VERSION_CHECKING", - "PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING", - "PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING", - "PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING", - "PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING", - "PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING", - "PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING", - "PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING", - "PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING", - "PRECHECK_ITEM_TYPE_META_POSITION_CHECKING", - "PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING", - "PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING" + "DUMP_PRIVILEGE_CHECKING", + "REPLICATION_PRIVILEGE_CHECKING", + "VERSION_CHECKING", + "SERVER_ID_CHECKING", + "BINLOG_ENABLE_CHECKING", + "BINLOG_FORMAT_CHECKING", + "BINLOG_ROW_IMAGE_CHECKING", + "TABLE_SCHEMA_CHECKING", + "BINLOG_DB_CHECKING", + "CONN_NUMBER_CHECKING", + "TARGET_DB_PRIVILEGE_CHECKING", + "META_POSITION_CHECKING", + "LIGHTNING_TABLE_EMPTY_CHECKING", + "PRIMARY_KEY_CHECKING" ], - "description": "Types of prechecks performed before starting a migration.\n\n - PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING: Check source has required dump/export privileges.\n - PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING: Check source has required replication privileges.\n - PRECHECK_ITEM_TYPE_VERSION_CHECKING: Check source database version compatibility.\n - PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING: Check source server_id configuration.\n - PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING: Check whether binlog is enabled on source.\n - PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING: Check binlog format (e.g., 
ROW) on source.\n - PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING: Check binlog row image setting.\n - PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING: Check table/schema compatibility with target.\n - PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING: Check binlog database-level filtering configuration.\n - PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING: Check concurrent connections limit/availability.\n - PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING: Check target database privileges.\n - PRECHECK_ITEM_TYPE_META_POSITION_CHECKING: Check saved meta/binlog position validity.\n - PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING: Check target tables are empty for Lightning load.\n - PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING: Check primary key settings on source tables." + "description": "Types of prechecks performed before starting a migration.\n\n - DUMP_PRIVILEGE_CHECKING: Check source has required dump/export privileges.\n - REPLICATION_PRIVILEGE_CHECKING: Check source has required replication privileges.\n - VERSION_CHECKING: Check source database version compatibility.\n - SERVER_ID_CHECKING: Check source server_id configuration.\n - BINLOG_ENABLE_CHECKING: Check whether binlog is enabled on source.\n - BINLOG_FORMAT_CHECKING: Check binlog format (e.g., ROW) on source.\n - BINLOG_ROW_IMAGE_CHECKING: Check binlog row image setting.\n - TABLE_SCHEMA_CHECKING: Check table/schema compatibility with target.\n - BINLOG_DB_CHECKING: Check binlog database-level filtering configuration.\n - CONN_NUMBER_CHECKING: Check concurrent connections limit/availability.\n - TARGET_DB_PRIVILEGE_CHECKING: Check target database privileges.\n - META_POSITION_CHECKING: Check saved meta/binlog position validity.\n - LIGHTNING_TABLE_EMPTY_CHECKING: Check target tables are empty for Lightning load.\n - PRIMARY_KEY_CHECKING: Check primary key settings on source tables." 
}, "RouteRule": { "type": "object", @@ -937,7 +882,7 @@ "description": "Target table to route to.", "allOf": [ { - "$ref": "#/definitions/RouteRule.Target" + "$ref": "#/definitions/Table" } ] } @@ -956,19 +901,6 @@ } } }, - "RouteRule.Target": { - "type": "object", - "properties": { - "schema": { - "type": "string", - "description": "Target schema name." - }, - "table": { - "type": "string", - "description": "Target table name." - } - } - }, "Security": { "type": "object", "properties": { @@ -1028,7 +960,7 @@ "x-nullable": true, "description": "Starting binlog file name for incremental sync." }, - "binlogPos": { + "binlogPosition": { "type": "integer", "format": "int32", "x-nullable": true, @@ -1048,18 +980,12 @@ ] } }, - "required": [ - "connProfile", - "sourceType" - ] + "required": ["connProfile", "sourceType"] }, "Source.SourceType": { "type": "string", - "enum": [ - "SOURCE_TYPE_MYSQL", - "SOURCE_TYPE_ALICLOUD_RDS_MYSQL" - ], - "description": "The source database type.\n\n - SOURCE_TYPE_MYSQL: Self-managed MySQL.\n - SOURCE_TYPE_ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL." + "enum": ["MYSQL", "ALICLOUD_RDS_MYSQL"], + "description": "The source database type.\n\n - MYSQL: Self-managed MySQL.\n - ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL." }, "Status": { "type": "object", @@ -1147,23 +1073,13 @@ }, "SubTask.Stage": { "type": "string", - "enum": [ - "STAGE_RUNNING", - "STAGE_PAUSED", - "STAGE_FAILED", - "STAGE_FINISHED", - "STAGE_UNKNOWN" - ], - "description": "The high-level lifecycle stage of a subtask.\n\n - STAGE_RUNNING: Subtask is running.\n - STAGE_PAUSED: Subtask is paused.\n - STAGE_FAILED: Subtask failed.\n - STAGE_FINISHED: Subtask finished successfully.\n - STAGE_UNKNOWN: Subtask stage is unknown." 
+ "enum": ["RUNNING", "PAUSED", "FAILED", "FINISHED", "UNKNOWN"], + "description": "The high-level lifecycle stage of a subtask.\n\n - RUNNING: Subtask is running.\n - PAUSED: Subtask is paused.\n - FAILED: Subtask failed.\n - FINISHED: Subtask finished successfully.\n - UNKNOWN: Subtask stage is unknown." }, "SubTask.Step": { "type": "string", - "enum": [ - "STEP_DUMP", - "STEP_LOAD", - "STEP_SYNC" - ], - "description": "The current step within a subtask.\n\n - STEP_DUMP: Dump/export data from source.\n - STEP_LOAD: Load/import data into target.\n - STEP_SYNC: Sync/replicate binlog changes." + "enum": ["DUMP", "LOAD", "SYNC"], + "description": "The current step within a subtask.\n\n - DUMP: Dump/export data from source.\n - LOAD: Load/import data into target.\n - SYNC: Sync/replicate binlog changes." }, "SyncDetail": { "type": "object", @@ -1187,6 +1103,19 @@ } } }, + "Table": { + "type": "object", + "properties": { + "schema": { + "type": "string", + "description": "Schema name." + }, + "table": { + "type": "string", + "description": "Table name." + } + } + }, "Target": { "type": "object", "properties": { @@ -1198,15 +1127,13 @@ "type": "string", "description": "Target database password." } - } + }, + "required": ["user", "password"] }, "TaskMode": { "type": "string", - "enum": [ - "MODE_ALL", - "MODE_INCREMENTAL" - ], - "description": "Migration task mode.\n\n - MODE_ALL: Full + incremental migration (all phases).\n - MODE_INCREMENTAL: Incremental-only migration (replication)." + "enum": ["ALL", "INCREMENTAL"], + "description": "Migration task mode.\n\n - ALL: Full + incremental migration (all phases).\n - INCREMENTAL: Incremental-only migration (replication)." 
} } } diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES index de7f3a50..e7969c2b 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES +++ b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES @@ -9,23 +9,21 @@ configuration.go git_push.sh model_any.go model_block_allow_rules.go -model_block_allow_rules_table.go model_conn_profile.go model_conn_type.go model_create_migration_precheck_resp.go model_dump_detail.go -model_list_migration_tasks_resp.go +model_list_migrations_resp.go model_load_detail.go +model_migration.go model_migration_precheck.go -model_migration_service_create_task_body.go +model_migration_service_create_migration_body.go model_migration_service_precheck_body.go -model_migration_task.go -model_migration_task_state.go +model_migration_state.go model_precheck_item.go model_precheck_item_type.go model_route_rule.go model_route_rule_source.go -model_route_rule_target.go model_security.go model_source.go model_source_source_type.go @@ -34,6 +32,7 @@ model_sub_task.go model_sub_task_stage.go model_sub_task_step.go model_sync_detail.go +model_table.go model_target.go model_task_mode.go response.go diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/README.md b/pkg/tidbcloud/v1beta1/serverless/migration/README.md index 05493d37..967c2723 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/README.md +++ b/pkg/tidbcloud/v1beta1/serverless/migration/README.md @@ -78,38 +78,36 @@ All URIs are relative to *https://serverless.tidbapi.com* Class | Method | HTTP request | Description ------------ | ------------- | ------------- | ------------- -*MigrationAPI* | [**MigrationServiceCancelPrecheck**](docs/MigrationAPI.md#migrationservicecancelprecheck) | **Delete** /v1beta1/clusters/{clusterId}/migrationPrechecks/{id} | Cancel a migration precheck -*MigrationAPI* | 
[**MigrationServiceCancelTask**](docs/MigrationAPI.md#migrationservicecanceltask) | **Delete** /v1beta1/clusters/{clusterId}/migrations/{id} | Cancel a migration task -*MigrationAPI* | [**MigrationServiceCreateTask**](docs/MigrationAPI.md#migrationservicecreatetask) | **Post** /v1beta1/clusters/{clusterId}/migrations | Create a migration task -*MigrationAPI* | [**MigrationServiceGetPrecheck**](docs/MigrationAPI.md#migrationservicegetprecheck) | **Get** /v1beta1/clusters/{clusterId}/migrationPrechecks/{id} | Get a migration precheck -*MigrationAPI* | [**MigrationServiceGetTask**](docs/MigrationAPI.md#migrationservicegettask) | **Get** /v1beta1/clusters/{clusterId}/migrations/{id} | Get a migration task -*MigrationAPI* | [**MigrationServiceListTasks**](docs/MigrationAPI.md#migrationservicelisttasks) | **Get** /v1beta1/clusters/{clusterId}/migrations | List migration tasks -*MigrationAPI* | [**MigrationServicePauseTask**](docs/MigrationAPI.md#migrationservicepausetask) | **Post** /v1beta1/clusters/{clusterId}/migrations/{id}:pause | Pause a running migration task -*MigrationAPI* | [**MigrationServicePrecheck**](docs/MigrationAPI.md#migrationserviceprecheck) | **Post** /v1beta1/clusters/{clusterId}/migrationPrechecks | Run a precheck for a migration task -*MigrationAPI* | [**MigrationServiceResumeTask**](docs/MigrationAPI.md#migrationserviceresumetask) | **Post** /v1beta1/clusters/{clusterId}/migrations/{id}:resume | Resume a paused migration task +*MigrationAPI* | [**MigrationServiceCancelMigration**](docs/MigrationAPI.md#migrationservicecancelmigration) | **Delete** /v1beta1/clusters/{clusterId}/migrations/{migrationId} | Cancel a migration +*MigrationAPI* | [**MigrationServiceCancelPrecheck**](docs/MigrationAPI.md#migrationservicecancelprecheck) | **Delete** /v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId} | Cancel a migration precheck +*MigrationAPI* | [**MigrationServiceCreateMigration**](docs/MigrationAPI.md#migrationservicecreatemigration) | 
**Post** /v1beta1/clusters/{clusterId}/migrations | Create a migration +*MigrationAPI* | [**MigrationServiceGetMigration**](docs/MigrationAPI.md#migrationservicegetmigration) | **Get** /v1beta1/clusters/{clusterId}/migrations/{migrationId} | Get a migration +*MigrationAPI* | [**MigrationServiceGetPrecheck**](docs/MigrationAPI.md#migrationservicegetprecheck) | **Get** /v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId} | Get a migration precheck +*MigrationAPI* | [**MigrationServiceListMigrations**](docs/MigrationAPI.md#migrationservicelistmigrations) | **Get** /v1beta1/clusters/{clusterId}/migrations | List migrations +*MigrationAPI* | [**MigrationServicePauseMigration**](docs/MigrationAPI.md#migrationservicepausemigration) | **Post** /v1beta1/clusters/{clusterId}/migrations/{migrationId}:pause | Pause a running migration +*MigrationAPI* | [**MigrationServicePrecheck**](docs/MigrationAPI.md#migrationserviceprecheck) | **Post** /v1beta1/clusters/{clusterId}/migrationPrechecks | Run a precheck for a migration +*MigrationAPI* | [**MigrationServiceResumeMigration**](docs/MigrationAPI.md#migrationserviceresumemigration) | **Post** /v1beta1/clusters/{clusterId}/migrations/{migrationId}:resume | Resume a paused migration ## Documentation For Models - [Any](docs/Any.md) - [BlockAllowRules](docs/BlockAllowRules.md) - - [BlockAllowRulesTable](docs/BlockAllowRulesTable.md) - [ConnProfile](docs/ConnProfile.md) - [ConnType](docs/ConnType.md) - [CreateMigrationPrecheckResp](docs/CreateMigrationPrecheckResp.md) - [DumpDetail](docs/DumpDetail.md) - - [ListMigrationTasksResp](docs/ListMigrationTasksResp.md) + - [ListMigrationsResp](docs/ListMigrationsResp.md) - [LoadDetail](docs/LoadDetail.md) + - [Migration](docs/Migration.md) - [MigrationPrecheck](docs/MigrationPrecheck.md) - - [MigrationServiceCreateTaskBody](docs/MigrationServiceCreateTaskBody.md) + - [MigrationServiceCreateMigrationBody](docs/MigrationServiceCreateMigrationBody.md) - 
[MigrationServicePrecheckBody](docs/MigrationServicePrecheckBody.md) - - [MigrationTask](docs/MigrationTask.md) - - [MigrationTaskState](docs/MigrationTaskState.md) + - [MigrationState](docs/MigrationState.md) - [PrecheckItem](docs/PrecheckItem.md) - [PrecheckItemType](docs/PrecheckItemType.md) - [RouteRule](docs/RouteRule.md) - [RouteRuleSource](docs/RouteRuleSource.md) - - [RouteRuleTarget](docs/RouteRuleTarget.md) - [Security](docs/Security.md) - [Source](docs/Source.md) - [SourceSourceType](docs/SourceSourceType.md) @@ -118,6 +116,7 @@ Class | Method | HTTP request | Description - [SubTaskStage](docs/SubTaskStage.md) - [SubTaskStep](docs/SubTaskStep.md) - [SyncDetail](docs/SyncDetail.md) + - [Table](docs/Table.md) - [Target](docs/Target.md) - [TaskMode](docs/TaskMode.md) diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml b/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml index a2b1404c..269b75e5 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml +++ b/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml @@ -10,9 +10,9 @@ tags: paths: /v1beta1/clusters/{clusterId}/migrations: get: - operationId: MigrationService_ListTasks + operationId: MigrationService_ListMigrations parameters: - - description: The ID of the cluster to list tasks for. + - description: The ID of the cluster to list migrations for. in: path name: clusterId required: true @@ -43,7 +43,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/ListMigrationTasksResp' + $ref: '#/components/schemas/ListMigrationsResp' description: A successful response. default: content: @@ -51,7 +51,7 @@ paths: schema: $ref: '#/components/schemas/Status' description: An unexpected error response. 
- summary: List migration tasks + summary: List migrations tags: - Migration x-codeSamples: @@ -62,9 +62,9 @@ paths: +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ +--header 'Accept: application/json' post: - operationId: MigrationService_CreateTask + operationId: MigrationService_CreateMigration parameters: - - description: The ID of the cluster to create the migration task in. + - description: The ID of the cluster to create the migration in. in: path name: clusterId required: true @@ -74,14 +74,14 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/MigrationService.CreateTaskBody' + $ref: '#/components/schemas/MigrationService.CreateMigrationBody' required: true responses: "200": content: application/json: schema: - $ref: '#/components/schemas/MigrationTask' + $ref: '#/components/schemas/Migration' description: A successful response. default: content: @@ -89,7 +89,7 @@ paths: schema: $ref: '#/components/schemas/Status' description: An unexpected error response. 
- summary: Create a migration task + summary: Create a migration tags: - Migration x-codeSamples: @@ -100,11 +100,11 @@ paths: +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ +--header 'Content-Type: application/json' \ +--header 'Accept: application/json' \ - +--data '{\n "name": "my-migration",\n "sources": [\n {\n "connProfile": {\n "connType": "PUBLIC",\n "host": "1.2.3.4",\n "port": 3306,\n "user": "root",\n "password": "secret"\n },\n "sourceType": "SOURCE_TYPE_MYSQL"\n }\n ],\n "target": {\n "user": "tidb",\n "password": "tidb_password"\n },\n "mode": "MODE_ALL",\n "fullDataMigration": true\n}' + +--data '{\n "name": "my-migration",\n "sources": [\n {\n "connProfile": {\n "connType": "PUBLIC",\n "host": "1.2.3.4",\n "port": 3306,\n "user": "root",\n "password": "secret"\n },\n "sourceType": "MYSQL"\n }\n ],\n "target": {\n "user": "tidb",\n "password": "tidb_password"\n },\n "mode": "ALL"\n}' x-codegen-request-body-name: body - /v1beta1/clusters/{clusterId}/migrations/{id}: + /v1beta1/clusters/{clusterId}/migrations/{migrationId}: delete: - operationId: MigrationService_CancelTask + operationId: MigrationService_CancelMigration parameters: - description: The ID of the cluster. in: path @@ -112,9 +112,9 @@ paths: required: true schema: type: string - - description: The ID of the migration task to cancel. + - description: The ID of the migration to cancel. in: path - name: id + name: migrationId required: true schema: type: string @@ -123,7 +123,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/MigrationTask' + $ref: '#/components/schemas/Migration' description: A successful response. default: content: @@ -131,18 +131,18 @@ paths: schema: $ref: '#/components/schemas/Status' description: An unexpected error response. 
- summary: Cancel a migration task + summary: Cancel a migration tags: - Migration x-codeSamples: - label: curl lang: cURL source: |- - curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}' \ + curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}' \ +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ +--header 'Accept: application/json' get: - operationId: MigrationService_GetTask + operationId: MigrationService_GetMigration parameters: - description: The ID of the cluster. in: path @@ -150,9 +150,9 @@ paths: required: true schema: type: string - - description: The ID of the migration task. + - description: The ID of the migration. in: path - name: id + name: migrationId required: true schema: type: string @@ -161,7 +161,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/MigrationTask' + $ref: '#/components/schemas/Migration' description: A successful response. default: content: @@ -169,19 +169,19 @@ paths: schema: $ref: '#/components/schemas/Status' description: An unexpected error response. - summary: Get a migration task + summary: Get a migration tags: - Migration x-codeSamples: - label: curl lang: cURL source: |- - curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}' \ + curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}' \ +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ +--header 'Accept: application/json' - /v1beta1/clusters/{clusterId}/migrations/{id}:pause: + /v1beta1/clusters/{clusterId}/migrations/{migrationId}:pause: post: - operationId: MigrationService_PauseTask + operationId: MigrationService_PauseMigration parameters: - description: The ID of the cluster. in: path @@ -189,9 +189,9 @@ paths: required: true schema: type: string - - description: The ID of the migration task to pause. 
+ - description: The ID of the migration to pause. in: path - name: id + name: migrationId required: true schema: type: string @@ -199,7 +199,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/MigrationService.PauseTaskBody' + $ref: '#/components/schemas/MigrationService.PauseMigrationBody' required: true responses: "200": @@ -214,22 +214,21 @@ paths: schema: $ref: '#/components/schemas/Status' description: An unexpected error response. - summary: Pause a running migration task + summary: Pause a running migration tags: - Migration x-codeSamples: - label: curl lang: cURL source: |- - curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}:pause' \ + curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}:pause' \ +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ +--header 'Content-Type: application/json' \ - +--header 'Accept: application/json' \ - +--data '{}' + +--header 'Accept: application/json' x-codegen-request-body-name: body - /v1beta1/clusters/{clusterId}/migrations/{id}:resume: + /v1beta1/clusters/{clusterId}/migrations/{migrationId}:resume: post: - operationId: MigrationService_ResumeTask + operationId: MigrationService_ResumeMigration parameters: - description: The ID of the cluster. in: path @@ -237,9 +236,9 @@ paths: required: true schema: type: string - - description: The ID of the migration task to resume. + - description: The ID of the migration to resume. in: path - name: id + name: migrationId required: true schema: type: string @@ -247,7 +246,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/MigrationService.ResumeTaskBody' + $ref: '#/components/schemas/MigrationService.ResumeMigrationBody' required: true responses: "200": @@ -262,24 +261,23 @@ paths: schema: $ref: '#/components/schemas/Status' description: An unexpected error response. 
- summary: Resume a paused migration task + summary: Resume a paused migration tags: - Migration x-codeSamples: - label: curl lang: cURL source: |- - curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{id}:resume' \ + curl --location --request POST 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrations/{migration_id}:resume' \ +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ +--header 'Content-Type: application/json' \ - +--header 'Accept: application/json' \ - +--data '{}' + +--header 'Accept: application/json' x-codegen-request-body-name: body /v1beta1/clusters/{clusterId}/migrationPrechecks: post: operationId: MigrationService_Precheck parameters: - - description: The ID of the cluster to create the migration task in. + - description: The ID of the cluster to create the migration in. in: path name: clusterId required: true @@ -304,7 +302,7 @@ paths: schema: $ref: '#/components/schemas/Status' description: An unexpected error response. 
- summary: Run a precheck for a migration task + summary: Run a precheck for a migration tags: - Migration x-codeSamples: @@ -315,9 +313,9 @@ paths: +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ +--header 'Content-Type: application/json' \ +--header 'Accept: application/json' \ - +--data '{\n "name": "my-migration",\n "sources": [\n {\n "connProfile": {\n "connType": "PUBLIC",\n "host": "1.2.3.4",\n "port": 3306,\n "user": "root",\n "password": "secret"\n },\n "sourceType": "SOURCE_TYPE_MYSQL"\n }\n ],\n "target": {\n "user": "tidb",\n "password": "tidb_password"\n },\n "mode": "MODE_ALL",\n "fullDataMigration": true\n}' + +--data '{\n "name": "my-migration",\n "sources": [\n {\n "connProfile": {\n "connType": "PUBLIC",\n "host": "1.2.3.4",\n "port": 3306,\n "user": "root",\n "password": "secret"\n },\n "sourceType": "MYSQL"\n }\n ],\n "target": {\n "user": "tidb",\n "password": "tidb_password"\n },\n "mode": "ALL"\n}' x-codegen-request-body-name: body - /v1beta1/clusters/{clusterId}/migrationPrechecks/{id}: + /v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId}: delete: operationId: MigrationService_CancelPrecheck parameters: @@ -329,7 +327,7 @@ paths: type: string - description: The ID of the precheck to cancel. in: path - name: id + name: precheckId required: true schema: type: string @@ -353,7 +351,7 @@ paths: - label: curl lang: cURL source: |- - curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{id}' \ + curl --location --request DELETE 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{precheck_id}' \ +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ +--header 'Accept: application/json' get: @@ -367,7 +365,7 @@ paths: type: string - description: The ID of the precheck. 
in: path - name: id + name: precheckId required: true schema: type: string @@ -391,7 +389,7 @@ paths: - label: curl lang: cURL source: |- - curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{id}' \ + curl --location 'https://serverless.tidbapi.com/v1beta1/clusters/{cluster_id}/migrationPrechecks/{precheck_id}' \ +--digest --user 'YOUR_PUBLIC_KEY:YOUR_PRIVATE_KEY' \ +--header 'Accept: application/json' components: @@ -415,18 +413,9 @@ components: doTables: description: Table-level allow-list rules. items: - $ref: '#/components/schemas/BlockAllowRules.Table' + $ref: '#/components/schemas/Table' type: array type: object - BlockAllowRules.Table: - properties: - schema: - description: Schema name. - type: string - table: - description: Table name. - type: string - type: object ConnProfile: properties: connType: @@ -456,6 +445,7 @@ components: description: "TLS/SSL settings; if not set, use defaults." type: object required: + - connType - password - port - user @@ -472,9 +462,9 @@ components: type: string CreateMigrationPrecheckResp: example: - id: id + precheckId: precheckId properties: - id: + precheckId: description: The ID of the created precheck. 
readOnly: true type: string @@ -512,13 +502,14 @@ components: readOnly: true type: string type: object - ListMigrationTasksResp: + ListMigrationsResp: example: totalSize: 0 - nextPageToken: nextPageToken - tasks: + migrations: - mode: "{}" + migrationId: migrationId createTime: 2000-01-23T04:56:07.000+00:00 + displayName: displayName subTasks: - currentStep: "{}" syncDetail: "{}" @@ -534,12 +525,12 @@ components: source: "{}" loadDetail: "{}" errorMsg: errorMsg - name: name - id: id state: "{}" targetUser: targetUser - mode: "{}" + migrationId: migrationId createTime: 2000-01-23T04:56:07.000+00:00 + displayName: displayName subTasks: - currentStep: "{}" syncDetail: "{}" @@ -555,19 +546,18 @@ components: source: "{}" loadDetail: "{}" errorMsg: errorMsg - name: name - id: id state: "{}" targetUser: targetUser + nextPageToken: nextPageToken properties: - tasks: - description: The list of migration tasks. + migrations: + description: The list of migrations. items: - $ref: '#/components/schemas/MigrationTask' + $ref: '#/components/schemas/Migration' readOnly: true type: array totalSize: - description: The total number of tasks matching the query. + description: The total number of migrations matching the query. format: int64 readOnly: true type: integer @@ -599,8 +589,83 @@ components: readOnly: true type: string type: object + Migration: + example: + mode: "{}" + migrationId: migrationId + createTime: 2000-01-23T04:56:07.000+00:00 + displayName: displayName + subTasks: + - currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + - currentStep: "{}" + syncDetail: "{}" + stage: "{}" + dumpDetail: "{}" + source: "{}" + loadDetail: "{}" + errorMsg: errorMsg + state: "{}" + targetUser: targetUser + properties: + migrationId: + description: The unique ID of the migration. + readOnly: true + type: string + displayName: + description: The display name of the migration. 
+ readOnly: true + type: string + subTasks: + description: The list of subtasks composing this migration. + items: + $ref: '#/components/schemas/SubTask' + readOnly: true + type: array + targetUser: + description: The target database username used by the migration. + readOnly: true + type: string + createTime: + description: The timestamp when the migration was created. + format: date-time + readOnly: true + type: string + mode: + allOf: + - $ref: '#/components/schemas/TaskMode' + description: The migration mode of the migration. + type: object + state: + allOf: + - $ref: '#/components/schemas/Migration.State' + description: The current state of the migration. + type: object + type: object + Migration.State: + description: |- + Overall state of a migration. + + - CREATING: Task is being created. + - RUNNING: Task is actively running. + - PAUSED: Task is paused. + - CANCELED: Task has been canceled. + - FAILED: Task failed with error. + enum: + - CREATING + - RUNNING + - PAUSED + - CANCELED + - FAILED + type: string MigrationPrecheck: example: + precheckId: precheckId failedCnt: 6 total: 0 successCnt: 5 @@ -609,17 +674,21 @@ components: - reason: reason solution: solution solutionDocUrl: solutionDocUrl + description: description type: "{}" - desc: desc status: status - reason: reason solution: solution solutionDocUrl: solutionDocUrl + description: description type: "{}" - desc: desc status: status status: status properties: + precheckId: + description: The ID of the precheck. + readOnly: true + type: string total: description: Total number of precheck items. format: int32 @@ -651,10 +720,10 @@ components: readOnly: true type: array type: object - MigrationService.CreateTaskBody: + MigrationService.CreateMigrationBody: properties: - name: - description: The display name of the migration task. + displayName: + description: The display name of the migration. type: string sources: description: The data sources to migrate from. 
@@ -671,20 +740,19 @@ components: - $ref: '#/components/schemas/TaskMode' description: The migration mode (full+incremental or incremental-only). type: object - fullDataMigration: - description: "If true, migrate all user data (equivalent to enabling all\ - \ non-system databases and tables)." - type: boolean required: + - displayName + - mode - sources + - target type: object - MigrationService.PauseTaskBody: - title: PauseMigrationTaskReq is used to pause a migration task + MigrationService.PauseMigrationBody: + title: PauseMigrationReq is used to pause a migration type: object MigrationService.PrecheckBody: properties: - name: - description: The display name of the migration task. + displayName: + description: The display name of the migration. type: string sources: description: The data sources to migrate from. @@ -701,99 +769,25 @@ components: - $ref: '#/components/schemas/TaskMode' description: The migration mode (full+incremental or incremental-only). type: object - fullDataMigration: - description: "If true, migrate all user data (equivalent to enabling all\ - \ non-system databases and tables)." - type: boolean required: + - displayName + - mode - sources + - target type: object - MigrationService.ResumeTaskBody: - title: ResumeMigrationTaskReq is used to resume a paused migration task + MigrationService.ResumeMigrationBody: + title: ResumeMigrationReq is used to resume a paused migration type: object - MigrationTask: - example: - mode: "{}" - createTime: 2000-01-23T04:56:07.000+00:00 - subTasks: - - currentStep: "{}" - syncDetail: "{}" - stage: "{}" - dumpDetail: "{}" - source: "{}" - loadDetail: "{}" - errorMsg: errorMsg - - currentStep: "{}" - syncDetail: "{}" - stage: "{}" - dumpDetail: "{}" - source: "{}" - loadDetail: "{}" - errorMsg: errorMsg - name: name - id: id - state: "{}" - targetUser: targetUser - properties: - id: - description: The unique ID of the migration task. 
- readOnly: true - type: string - name: - description: The display name of the migration task. - type: string - subTasks: - description: The list of subtasks composing this migration. - items: - $ref: '#/components/schemas/SubTask' - readOnly: true - type: array - targetUser: - description: The target database username used by the task. - readOnly: true - type: string - createTime: - description: The timestamp when the task was created. - format: date-time - readOnly: true - type: string - mode: - allOf: - - $ref: '#/components/schemas/TaskMode' - description: The migration mode of the task. - type: object - state: - allOf: - - $ref: '#/components/schemas/MigrationTask.State' - description: The current state of the task. - type: object - type: object - MigrationTask.State: - description: |- - Overall state of a migration task. - - - CREATING: Task is being created. - - RUNNING: Task is actively running. - - PAUSED: Task is paused. - - CANCELED: Task has been canceled. - - FAILED: Task failed with error. - enum: - - CREATING - - RUNNING - - PAUSED - - CANCELED - - FAILED - type: string PrecheckItem: example: reason: reason solution: solution solutionDocUrl: solutionDocUrl + description: description type: "{}" - desc: desc status: status properties: - desc: + description: description: Human-readable description of the check. readOnly: true type: string @@ -823,35 +817,35 @@ components: description: |- Types of prechecks performed before starting a migration. - - PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING: Check source has required dump/export privileges. - - PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING: Check source has required replication privileges. - - PRECHECK_ITEM_TYPE_VERSION_CHECKING: Check source database version compatibility. - - PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING: Check source server_id configuration. - - PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING: Check whether binlog is enabled on source. 
- - PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING: Check binlog format (e.g., ROW) on source. - - PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING: Check binlog row image setting. - - PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING: Check table/schema compatibility with target. - - PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING: Check binlog database-level filtering configuration. - - PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING: Check concurrent connections limit/availability. - - PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING: Check target database privileges. - - PRECHECK_ITEM_TYPE_META_POSITION_CHECKING: Check saved meta/binlog position validity. - - PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING: Check target tables are empty for Lightning load. - - PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING: Check primary key settings on source tables. + - DUMP_PRIVILEGE_CHECKING: Check source has required dump/export privileges. + - REPLICATION_PRIVILEGE_CHECKING: Check source has required replication privileges. + - VERSION_CHECKING: Check source database version compatibility. + - SERVER_ID_CHECKING: Check source server_id configuration. + - BINLOG_ENABLE_CHECKING: Check whether binlog is enabled on source. + - BINLOG_FORMAT_CHECKING: Check binlog format (e.g., ROW) on source. + - BINLOG_ROW_IMAGE_CHECKING: Check binlog row image setting. + - TABLE_SCHEMA_CHECKING: Check table/schema compatibility with target. + - BINLOG_DB_CHECKING: Check binlog database-level filtering configuration. + - CONN_NUMBER_CHECKING: Check concurrent connections limit/availability. + - TARGET_DB_PRIVILEGE_CHECKING: Check target database privileges. + - META_POSITION_CHECKING: Check saved meta/binlog position validity. + - LIGHTNING_TABLE_EMPTY_CHECKING: Check target tables are empty for Lightning load. + - PRIMARY_KEY_CHECKING: Check primary key settings on source tables. 
enum: - - PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING - - PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING - - PRECHECK_ITEM_TYPE_VERSION_CHECKING - - PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING - - PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING - - PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING - - PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING - - PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING - - PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING - - PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING - - PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING - - PRECHECK_ITEM_TYPE_META_POSITION_CHECKING - - PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING - - PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING + - DUMP_PRIVILEGE_CHECKING + - REPLICATION_PRIVILEGE_CHECKING + - VERSION_CHECKING + - SERVER_ID_CHECKING + - BINLOG_ENABLE_CHECKING + - BINLOG_FORMAT_CHECKING + - BINLOG_ROW_IMAGE_CHECKING + - TABLE_SCHEMA_CHECKING + - BINLOG_DB_CHECKING + - CONN_NUMBER_CHECKING + - TARGET_DB_PRIVILEGE_CHECKING + - META_POSITION_CHECKING + - LIGHTNING_TABLE_EMPTY_CHECKING + - PRIMARY_KEY_CHECKING type: string RouteRule: properties: @@ -862,7 +856,7 @@ components: type: object targetTable: allOf: - - $ref: '#/components/schemas/RouteRule.Target' + - $ref: '#/components/schemas/Table' description: Target table to route to. type: object type: object @@ -875,15 +869,6 @@ components: description: "Table pattern of the source, supports wildcards." type: string type: object - RouteRule.Target: - properties: - schema: - description: Target schema name. - type: string - table: - description: Target table name. - type: string - type: object Security: properties: certAllowedCn: @@ -929,7 +914,7 @@ components: description: Starting binlog file name for incremental sync. nullable: true type: string - binlogPos: + binlogPosition: description: Starting binlog position for incremental sync. format: int32 nullable: true @@ -951,11 +936,11 @@ components: description: |- The source database type. - - SOURCE_TYPE_MYSQL: Self-managed MySQL. 
- - SOURCE_TYPE_ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL. + - MYSQL: Self-managed MySQL. + - ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL. enum: - - SOURCE_TYPE_MYSQL - - SOURCE_TYPE_ALICLOUD_RDS_MYSQL + - MYSQL + - ALICLOUD_RDS_MYSQL type: string Status: example: @@ -1025,29 +1010,29 @@ components: description: |- The high-level lifecycle stage of a subtask. - - STAGE_RUNNING: Subtask is running. - - STAGE_PAUSED: Subtask is paused. - - STAGE_FAILED: Subtask failed. - - STAGE_FINISHED: Subtask finished successfully. - - STAGE_UNKNOWN: Subtask stage is unknown. + - RUNNING: Subtask is running. + - PAUSED: Subtask is paused. + - FAILED: Subtask failed. + - FINISHED: Subtask finished successfully. + - UNKNOWN: Subtask stage is unknown. enum: - - STAGE_RUNNING - - STAGE_PAUSED - - STAGE_FAILED - - STAGE_FINISHED - - STAGE_UNKNOWN + - RUNNING + - PAUSED + - FAILED + - FINISHED + - UNKNOWN type: string SubTask.Step: description: |- The current step within a subtask. - - STEP_DUMP: Dump/export data from source. - - STEP_LOAD: Load/import data into target. - - STEP_SYNC: Sync/replicate binlog changes. + - DUMP: Dump/export data from source. + - LOAD: Load/import data into target. + - SYNC: Sync/replicate binlog changes. enum: - - STEP_DUMP - - STEP_LOAD - - STEP_SYNC + - DUMP + - LOAD + - SYNC type: string SyncDetail: properties: @@ -1066,6 +1051,15 @@ components: readOnly: true type: string type: object + Table: + properties: + schema: + description: Schema name. + type: string + table: + description: Table name. + type: string + type: object Target: properties: user: @@ -1074,15 +1068,18 @@ components: password: description: Target database password. type: string + required: + - password + - user type: object TaskMode: description: |- Migration task mode. - - MODE_ALL: Full + incremental migration (all phases). - - MODE_INCREMENTAL: Incremental-only migration (replication). + - ALL: Full + incremental migration (all phases). 
+ - INCREMENTAL: Incremental-only migration (replication). enum: - - MODE_ALL - - MODE_INCREMENTAL + - ALL + - INCREMENTAL type: string x-original-swagger-version: "2.0" diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go b/pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go index bb42d49a..57549068 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go @@ -22,53 +22,53 @@ import ( // MigrationAPIService MigrationAPI service type MigrationAPIService service -type ApiMigrationServiceCancelPrecheckRequest struct { - ctx context.Context - ApiService *MigrationAPIService - clusterId string - id string +type ApiMigrationServiceCancelMigrationRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + migrationId string } -func (r ApiMigrationServiceCancelPrecheckRequest) Execute() (map[string]interface{}, *http.Response, error) { - return r.ApiService.MigrationServiceCancelPrecheckExecute(r) +func (r ApiMigrationServiceCancelMigrationRequest) Execute() (*Migration, *http.Response, error) { + return r.ApiService.MigrationServiceCancelMigrationExecute(r) } /* -MigrationServiceCancelPrecheck Cancel a migration precheck +MigrationServiceCancelMigration Cancel a migration @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). @param clusterId The ID of the cluster. - @param id The ID of the precheck to cancel. - @return ApiMigrationServiceCancelPrecheckRequest + @param migrationId The ID of the migration to cancel. 
+ @return ApiMigrationServiceCancelMigrationRequest */ -func (a *MigrationAPIService) MigrationServiceCancelPrecheck(ctx context.Context, clusterId string, id string) ApiMigrationServiceCancelPrecheckRequest { - return ApiMigrationServiceCancelPrecheckRequest{ - ApiService: a, - ctx: ctx, - clusterId: clusterId, - id: id, +func (a *MigrationAPIService) MigrationServiceCancelMigration(ctx context.Context, clusterId string, migrationId string) ApiMigrationServiceCancelMigrationRequest { + return ApiMigrationServiceCancelMigrationRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + migrationId: migrationId, } } // Execute executes the request // -// @return map[string]interface{} -func (a *MigrationAPIService) MigrationServiceCancelPrecheckExecute(r ApiMigrationServiceCancelPrecheckRequest) (map[string]interface{}, *http.Response, error) { +// @return Migration +func (a *MigrationAPIService) MigrationServiceCancelMigrationExecute(r ApiMigrationServiceCancelMigrationRequest) (*Migration, *http.Response, error) { var ( localVarHTTPMethod = http.MethodDelete localVarPostBody interface{} formFiles []formFile - localVarReturnValue map[string]interface{} + localVarReturnValue *Migration ) - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCancelPrecheck") + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCancelMigration") if err != nil { return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} } - localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrationPrechecks/{id}" + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}" localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) - localVarPath = strings.Replace(localVarPath, "{"+"id"+"}", url.PathEscape(parameterValueToString(r.id, "id")), -1) + localVarPath = 
strings.Replace(localVarPath, "{"+"migrationId"+"}", url.PathEscape(parameterValueToString(r.migrationId, "migrationId")), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} @@ -136,53 +136,53 @@ func (a *MigrationAPIService) MigrationServiceCancelPrecheckExecute(r ApiMigrati return localVarReturnValue, localVarHTTPResponse, nil } -type ApiMigrationServiceCancelTaskRequest struct { +type ApiMigrationServiceCancelPrecheckRequest struct { ctx context.Context ApiService *MigrationAPIService clusterId string - id string + precheckId string } -func (r ApiMigrationServiceCancelTaskRequest) Execute() (*MigrationTask, *http.Response, error) { - return r.ApiService.MigrationServiceCancelTaskExecute(r) +func (r ApiMigrationServiceCancelPrecheckRequest) Execute() (map[string]interface{}, *http.Response, error) { + return r.ApiService.MigrationServiceCancelPrecheckExecute(r) } /* -MigrationServiceCancelTask Cancel a migration task +MigrationServiceCancelPrecheck Cancel a migration precheck @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). @param clusterId The ID of the cluster. - @param id The ID of the migration task to cancel. - @return ApiMigrationServiceCancelTaskRequest + @param precheckId The ID of the precheck to cancel. 
+ @return ApiMigrationServiceCancelPrecheckRequest */ -func (a *MigrationAPIService) MigrationServiceCancelTask(ctx context.Context, clusterId string, id string) ApiMigrationServiceCancelTaskRequest { - return ApiMigrationServiceCancelTaskRequest{ +func (a *MigrationAPIService) MigrationServiceCancelPrecheck(ctx context.Context, clusterId string, precheckId string) ApiMigrationServiceCancelPrecheckRequest { + return ApiMigrationServiceCancelPrecheckRequest{ ApiService: a, ctx: ctx, clusterId: clusterId, - id: id, + precheckId: precheckId, } } // Execute executes the request // -// @return MigrationTask -func (a *MigrationAPIService) MigrationServiceCancelTaskExecute(r ApiMigrationServiceCancelTaskRequest) (*MigrationTask, *http.Response, error) { +// @return map[string]interface{} +func (a *MigrationAPIService) MigrationServiceCancelPrecheckExecute(r ApiMigrationServiceCancelPrecheckRequest) (map[string]interface{}, *http.Response, error) { var ( localVarHTTPMethod = http.MethodDelete localVarPostBody interface{} formFiles []formFile - localVarReturnValue *MigrationTask + localVarReturnValue map[string]interface{} ) - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCancelTask") + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCancelPrecheck") if err != nil { return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} } - localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{id}" + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId}" localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) - localVarPath = strings.Replace(localVarPath, "{"+"id"+"}", url.PathEscape(parameterValueToString(r.id, "id")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"precheckId"+"}", 
url.PathEscape(parameterValueToString(r.precheckId, "precheckId")), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} @@ -250,31 +250,31 @@ func (a *MigrationAPIService) MigrationServiceCancelTaskExecute(r ApiMigrationSe return localVarReturnValue, localVarHTTPResponse, nil } -type ApiMigrationServiceCreateTaskRequest struct { +type ApiMigrationServiceCreateMigrationRequest struct { ctx context.Context ApiService *MigrationAPIService clusterId string - body *MigrationServiceCreateTaskBody + body *MigrationServiceCreateMigrationBody } -func (r ApiMigrationServiceCreateTaskRequest) Body(body MigrationServiceCreateTaskBody) ApiMigrationServiceCreateTaskRequest { +func (r ApiMigrationServiceCreateMigrationRequest) Body(body MigrationServiceCreateMigrationBody) ApiMigrationServiceCreateMigrationRequest { r.body = &body return r } -func (r ApiMigrationServiceCreateTaskRequest) Execute() (*MigrationTask, *http.Response, error) { - return r.ApiService.MigrationServiceCreateTaskExecute(r) +func (r ApiMigrationServiceCreateMigrationRequest) Execute() (*Migration, *http.Response, error) { + return r.ApiService.MigrationServiceCreateMigrationExecute(r) } /* -MigrationServiceCreateTask Create a migration task +MigrationServiceCreateMigration Create a migration @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - @param clusterId The ID of the cluster to create the migration task in. - @return ApiMigrationServiceCreateTaskRequest + @param clusterId The ID of the cluster to create the migration in. 
+ @return ApiMigrationServiceCreateMigrationRequest */ -func (a *MigrationAPIService) MigrationServiceCreateTask(ctx context.Context, clusterId string) ApiMigrationServiceCreateTaskRequest { - return ApiMigrationServiceCreateTaskRequest{ +func (a *MigrationAPIService) MigrationServiceCreateMigration(ctx context.Context, clusterId string) ApiMigrationServiceCreateMigrationRequest { + return ApiMigrationServiceCreateMigrationRequest{ ApiService: a, ctx: ctx, clusterId: clusterId, @@ -283,16 +283,16 @@ func (a *MigrationAPIService) MigrationServiceCreateTask(ctx context.Context, cl // Execute executes the request // -// @return MigrationTask -func (a *MigrationAPIService) MigrationServiceCreateTaskExecute(r ApiMigrationServiceCreateTaskRequest) (*MigrationTask, *http.Response, error) { +// @return Migration +func (a *MigrationAPIService) MigrationServiceCreateMigrationExecute(r ApiMigrationServiceCreateMigrationRequest) (*Migration, *http.Response, error) { var ( localVarHTTPMethod = http.MethodPost localVarPostBody interface{} formFiles []formFile - localVarReturnValue *MigrationTask + localVarReturnValue *Migration ) - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCreateTask") + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCreateMigration") if err != nil { return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} } @@ -371,53 +371,53 @@ func (a *MigrationAPIService) MigrationServiceCreateTaskExecute(r ApiMigrationSe return localVarReturnValue, localVarHTTPResponse, nil } -type ApiMigrationServiceGetPrecheckRequest struct { - ctx context.Context - ApiService *MigrationAPIService - clusterId string - id string +type ApiMigrationServiceGetMigrationRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + migrationId string } -func (r ApiMigrationServiceGetPrecheckRequest) Execute() (*MigrationPrecheck, 
*http.Response, error) { - return r.ApiService.MigrationServiceGetPrecheckExecute(r) +func (r ApiMigrationServiceGetMigrationRequest) Execute() (*Migration, *http.Response, error) { + return r.ApiService.MigrationServiceGetMigrationExecute(r) } /* -MigrationServiceGetPrecheck Get a migration precheck +MigrationServiceGetMigration Get a migration @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). @param clusterId The ID of the cluster. - @param id The ID of the precheck. - @return ApiMigrationServiceGetPrecheckRequest + @param migrationId The ID of the migration. + @return ApiMigrationServiceGetMigrationRequest */ -func (a *MigrationAPIService) MigrationServiceGetPrecheck(ctx context.Context, clusterId string, id string) ApiMigrationServiceGetPrecheckRequest { - return ApiMigrationServiceGetPrecheckRequest{ - ApiService: a, - ctx: ctx, - clusterId: clusterId, - id: id, +func (a *MigrationAPIService) MigrationServiceGetMigration(ctx context.Context, clusterId string, migrationId string) ApiMigrationServiceGetMigrationRequest { + return ApiMigrationServiceGetMigrationRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + migrationId: migrationId, } } // Execute executes the request // -// @return MigrationPrecheck -func (a *MigrationAPIService) MigrationServiceGetPrecheckExecute(r ApiMigrationServiceGetPrecheckRequest) (*MigrationPrecheck, *http.Response, error) { +// @return Migration +func (a *MigrationAPIService) MigrationServiceGetMigrationExecute(r ApiMigrationServiceGetMigrationRequest) (*Migration, *http.Response, error) { var ( localVarHTTPMethod = http.MethodGet localVarPostBody interface{} formFiles []formFile - localVarReturnValue *MigrationPrecheck + localVarReturnValue *Migration ) - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceGetPrecheck") + localBasePath, err := 
a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceGetMigration") if err != nil { return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} } - localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrationPrechecks/{id}" + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}" localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) - localVarPath = strings.Replace(localVarPath, "{"+"id"+"}", url.PathEscape(parameterValueToString(r.id, "id")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"migrationId"+"}", url.PathEscape(parameterValueToString(r.migrationId, "migrationId")), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} @@ -485,53 +485,53 @@ func (a *MigrationAPIService) MigrationServiceGetPrecheckExecute(r ApiMigrationS return localVarReturnValue, localVarHTTPResponse, nil } -type ApiMigrationServiceGetTaskRequest struct { +type ApiMigrationServiceGetPrecheckRequest struct { ctx context.Context ApiService *MigrationAPIService clusterId string - id string + precheckId string } -func (r ApiMigrationServiceGetTaskRequest) Execute() (*MigrationTask, *http.Response, error) { - return r.ApiService.MigrationServiceGetTaskExecute(r) +func (r ApiMigrationServiceGetPrecheckRequest) Execute() (*MigrationPrecheck, *http.Response, error) { + return r.ApiService.MigrationServiceGetPrecheckExecute(r) } /* -MigrationServiceGetTask Get a migration task +MigrationServiceGetPrecheck Get a migration precheck @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). @param clusterId The ID of the cluster. - @param id The ID of the migration task. - @return ApiMigrationServiceGetTaskRequest + @param precheckId The ID of the precheck. 
+ @return ApiMigrationServiceGetPrecheckRequest */ -func (a *MigrationAPIService) MigrationServiceGetTask(ctx context.Context, clusterId string, id string) ApiMigrationServiceGetTaskRequest { - return ApiMigrationServiceGetTaskRequest{ +func (a *MigrationAPIService) MigrationServiceGetPrecheck(ctx context.Context, clusterId string, precheckId string) ApiMigrationServiceGetPrecheckRequest { + return ApiMigrationServiceGetPrecheckRequest{ ApiService: a, ctx: ctx, clusterId: clusterId, - id: id, + precheckId: precheckId, } } // Execute executes the request // -// @return MigrationTask -func (a *MigrationAPIService) MigrationServiceGetTaskExecute(r ApiMigrationServiceGetTaskRequest) (*MigrationTask, *http.Response, error) { +// @return MigrationPrecheck +func (a *MigrationAPIService) MigrationServiceGetPrecheckExecute(r ApiMigrationServiceGetPrecheckRequest) (*MigrationPrecheck, *http.Response, error) { var ( localVarHTTPMethod = http.MethodGet localVarPostBody interface{} formFiles []formFile - localVarReturnValue *MigrationTask + localVarReturnValue *MigrationPrecheck ) - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceGetTask") + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceGetPrecheck") if err != nil { return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} } - localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{id}" + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId}" localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) - localVarPath = strings.Replace(localVarPath, "{"+"id"+"}", url.PathEscape(parameterValueToString(r.id, "id")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"precheckId"+"}", url.PathEscape(parameterValueToString(r.precheckId, "precheckId")), -1) localVarHeaderParams 
:= make(map[string]string) localVarQueryParams := url.Values{} @@ -599,7 +599,7 @@ func (a *MigrationAPIService) MigrationServiceGetTaskExecute(r ApiMigrationServi return localVarReturnValue, localVarHTTPResponse, nil } -type ApiMigrationServiceListTasksRequest struct { +type ApiMigrationServiceListMigrationsRequest struct { ctx context.Context ApiService *MigrationAPIService clusterId string @@ -609,36 +609,36 @@ type ApiMigrationServiceListTasksRequest struct { } // Optional. The page token, default is empty. -func (r ApiMigrationServiceListTasksRequest) PageToken(pageToken string) ApiMigrationServiceListTasksRequest { +func (r ApiMigrationServiceListMigrationsRequest) PageToken(pageToken string) ApiMigrationServiceListMigrationsRequest { r.pageToken = &pageToken return r } // Optional. The page size, default is 10. -func (r ApiMigrationServiceListTasksRequest) PageSize(pageSize int32) ApiMigrationServiceListTasksRequest { +func (r ApiMigrationServiceListMigrationsRequest) PageSize(pageSize int32) ApiMigrationServiceListMigrationsRequest { r.pageSize = &pageSize return r } // Specifies the sorting order of results. Use a comma-separated list of field names, optionally appending `desc` for descending order. For example, `createTime desc`. By default, fields are sorted in ascending order. Supported field: `createTime`. 
-func (r ApiMigrationServiceListTasksRequest) OrderBy(orderBy string) ApiMigrationServiceListTasksRequest { +func (r ApiMigrationServiceListMigrationsRequest) OrderBy(orderBy string) ApiMigrationServiceListMigrationsRequest { r.orderBy = &orderBy return r } -func (r ApiMigrationServiceListTasksRequest) Execute() (*ListMigrationTasksResp, *http.Response, error) { - return r.ApiService.MigrationServiceListTasksExecute(r) +func (r ApiMigrationServiceListMigrationsRequest) Execute() (*ListMigrationsResp, *http.Response, error) { + return r.ApiService.MigrationServiceListMigrationsExecute(r) } /* -MigrationServiceListTasks List migration tasks +MigrationServiceListMigrations List migrations @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - @param clusterId The ID of the cluster to list tasks for. - @return ApiMigrationServiceListTasksRequest + @param clusterId The ID of the cluster to list migrations for. 
+ @return ApiMigrationServiceListMigrationsRequest */ -func (a *MigrationAPIService) MigrationServiceListTasks(ctx context.Context, clusterId string) ApiMigrationServiceListTasksRequest { - return ApiMigrationServiceListTasksRequest{ +func (a *MigrationAPIService) MigrationServiceListMigrations(ctx context.Context, clusterId string) ApiMigrationServiceListMigrationsRequest { + return ApiMigrationServiceListMigrationsRequest{ ApiService: a, ctx: ctx, clusterId: clusterId, @@ -647,16 +647,16 @@ func (a *MigrationAPIService) MigrationServiceListTasks(ctx context.Context, clu // Execute executes the request // -// @return ListMigrationTasksResp -func (a *MigrationAPIService) MigrationServiceListTasksExecute(r ApiMigrationServiceListTasksRequest) (*ListMigrationTasksResp, *http.Response, error) { +// @return ListMigrationsResp +func (a *MigrationAPIService) MigrationServiceListMigrationsExecute(r ApiMigrationServiceListMigrationsRequest) (*ListMigrationsResp, *http.Response, error) { var ( localVarHTTPMethod = http.MethodGet localVarPostBody interface{} formFiles []formFile - localVarReturnValue *ListMigrationTasksResp + localVarReturnValue *ListMigrationsResp ) - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceListTasks") + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceListMigrations") if err != nil { return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} } @@ -742,44 +742,44 @@ func (a *MigrationAPIService) MigrationServiceListTasksExecute(r ApiMigrationSer return localVarReturnValue, localVarHTTPResponse, nil } -type ApiMigrationServicePauseTaskRequest struct { - ctx context.Context - ApiService *MigrationAPIService - clusterId string - id string - body *map[string]interface{} +type ApiMigrationServicePauseMigrationRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + migrationId string + body 
*map[string]interface{} } -func (r ApiMigrationServicePauseTaskRequest) Body(body map[string]interface{}) ApiMigrationServicePauseTaskRequest { +func (r ApiMigrationServicePauseMigrationRequest) Body(body map[string]interface{}) ApiMigrationServicePauseMigrationRequest { r.body = &body return r } -func (r ApiMigrationServicePauseTaskRequest) Execute() (map[string]interface{}, *http.Response, error) { - return r.ApiService.MigrationServicePauseTaskExecute(r) +func (r ApiMigrationServicePauseMigrationRequest) Execute() (map[string]interface{}, *http.Response, error) { + return r.ApiService.MigrationServicePauseMigrationExecute(r) } /* -MigrationServicePauseTask Pause a running migration task +MigrationServicePauseMigration Pause a running migration @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). @param clusterId The ID of the cluster. - @param id The ID of the migration task to pause. - @return ApiMigrationServicePauseTaskRequest + @param migrationId The ID of the migration to pause. 
+ @return ApiMigrationServicePauseMigrationRequest */ -func (a *MigrationAPIService) MigrationServicePauseTask(ctx context.Context, clusterId string, id string) ApiMigrationServicePauseTaskRequest { - return ApiMigrationServicePauseTaskRequest{ - ApiService: a, - ctx: ctx, - clusterId: clusterId, - id: id, +func (a *MigrationAPIService) MigrationServicePauseMigration(ctx context.Context, clusterId string, migrationId string) ApiMigrationServicePauseMigrationRequest { + return ApiMigrationServicePauseMigrationRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + migrationId: migrationId, } } // Execute executes the request // // @return map[string]interface{} -func (a *MigrationAPIService) MigrationServicePauseTaskExecute(r ApiMigrationServicePauseTaskRequest) (map[string]interface{}, *http.Response, error) { +func (a *MigrationAPIService) MigrationServicePauseMigrationExecute(r ApiMigrationServicePauseMigrationRequest) (map[string]interface{}, *http.Response, error) { var ( localVarHTTPMethod = http.MethodPost localVarPostBody interface{} @@ -787,14 +787,14 @@ func (a *MigrationAPIService) MigrationServicePauseTaskExecute(r ApiMigrationSer localVarReturnValue map[string]interface{} ) - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServicePauseTask") + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServicePauseMigration") if err != nil { return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} } - localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{id}:pause" + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}:pause" localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) - localVarPath = strings.Replace(localVarPath, "{"+"id"+"}", url.PathEscape(parameterValueToString(r.id, "id")), -1) + localVarPath = 
strings.Replace(localVarPath, "{"+"migrationId"+"}", url.PathEscape(parameterValueToString(r.migrationId, "migrationId")), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} @@ -884,10 +884,10 @@ func (r ApiMigrationServicePrecheckRequest) Execute() (*CreateMigrationPrecheckR } /* -MigrationServicePrecheck Run a precheck for a migration task +MigrationServicePrecheck Run a precheck for a migration @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - @param clusterId The ID of the cluster to create the migration task in. + @param clusterId The ID of the cluster to create the migration in. @return ApiMigrationServicePrecheckRequest */ func (a *MigrationAPIService) MigrationServicePrecheck(ctx context.Context, clusterId string) ApiMigrationServicePrecheckRequest { @@ -988,44 +988,44 @@ func (a *MigrationAPIService) MigrationServicePrecheckExecute(r ApiMigrationServ return localVarReturnValue, localVarHTTPResponse, nil } -type ApiMigrationServiceResumeTaskRequest struct { - ctx context.Context - ApiService *MigrationAPIService - clusterId string - id string - body *map[string]interface{} +type ApiMigrationServiceResumeMigrationRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + migrationId string + body *map[string]interface{} } -func (r ApiMigrationServiceResumeTaskRequest) Body(body map[string]interface{}) ApiMigrationServiceResumeTaskRequest { +func (r ApiMigrationServiceResumeMigrationRequest) Body(body map[string]interface{}) ApiMigrationServiceResumeMigrationRequest { r.body = &body return r } -func (r ApiMigrationServiceResumeTaskRequest) Execute() (map[string]interface{}, *http.Response, error) { - return r.ApiService.MigrationServiceResumeTaskExecute(r) +func (r ApiMigrationServiceResumeMigrationRequest) Execute() (map[string]interface{}, *http.Response, error) { + return 
r.ApiService.MigrationServiceResumeMigrationExecute(r) } /* -MigrationServiceResumeTask Resume a paused migration task +MigrationServiceResumeMigration Resume a paused migration @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). @param clusterId The ID of the cluster. - @param id The ID of the migration task to resume. - @return ApiMigrationServiceResumeTaskRequest + @param migrationId The ID of the migration to resume. + @return ApiMigrationServiceResumeMigrationRequest */ -func (a *MigrationAPIService) MigrationServiceResumeTask(ctx context.Context, clusterId string, id string) ApiMigrationServiceResumeTaskRequest { - return ApiMigrationServiceResumeTaskRequest{ - ApiService: a, - ctx: ctx, - clusterId: clusterId, - id: id, +func (a *MigrationAPIService) MigrationServiceResumeMigration(ctx context.Context, clusterId string, migrationId string) ApiMigrationServiceResumeMigrationRequest { + return ApiMigrationServiceResumeMigrationRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + migrationId: migrationId, } } // Execute executes the request // // @return map[string]interface{} -func (a *MigrationAPIService) MigrationServiceResumeTaskExecute(r ApiMigrationServiceResumeTaskRequest) (map[string]interface{}, *http.Response, error) { +func (a *MigrationAPIService) MigrationServiceResumeMigrationExecute(r ApiMigrationServiceResumeMigrationRequest) (map[string]interface{}, *http.Response, error) { var ( localVarHTTPMethod = http.MethodPost localVarPostBody interface{} @@ -1033,14 +1033,14 @@ func (a *MigrationAPIService) MigrationServiceResumeTaskExecute(r ApiMigrationSe localVarReturnValue map[string]interface{} ) - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceResumeTask") + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceResumeMigration") if err != nil { 
return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} } - localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{id}:resume" + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}:resume" localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) - localVarPath = strings.Replace(localVarPath, "{"+"id"+"}", url.PathEscape(parameterValueToString(r.id, "id")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"migrationId"+"}", url.PathEscape(parameterValueToString(r.migrationId, "migrationId")), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules.go index d4234094..3ddf2549 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules.go @@ -22,7 +22,7 @@ type BlockAllowRules struct { // Database names to include in migration. DoDbs []string `json:"doDbs,omitempty"` // Table-level allow-list rules. - DoTables []BlockAllowRulesTable `json:"doTables,omitempty"` + DoTables []Table `json:"doTables,omitempty"` AdditionalProperties map[string]interface{} } @@ -78,9 +78,9 @@ func (o *BlockAllowRules) SetDoDbs(v []string) { } // GetDoTables returns the DoTables field value if set, zero value otherwise. 
-func (o *BlockAllowRules) GetDoTables() []BlockAllowRulesTable { +func (o *BlockAllowRules) GetDoTables() []Table { if o == nil || IsNil(o.DoTables) { - var ret []BlockAllowRulesTable + var ret []Table return ret } return o.DoTables @@ -88,7 +88,7 @@ func (o *BlockAllowRules) GetDoTables() []BlockAllowRulesTable { // GetDoTablesOk returns a tuple with the DoTables field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *BlockAllowRules) GetDoTablesOk() ([]BlockAllowRulesTable, bool) { +func (o *BlockAllowRules) GetDoTablesOk() ([]Table, bool) { if o == nil || IsNil(o.DoTables) { return nil, false } @@ -104,8 +104,8 @@ func (o *BlockAllowRules) HasDoTables() bool { return false } -// SetDoTables gets a reference to the given []BlockAllowRulesTable and assigns it to the DoTables field. -func (o *BlockAllowRules) SetDoTables(v []BlockAllowRulesTable) { +// SetDoTables gets a reference to the given []Table and assigns it to the DoTables field. +func (o *BlockAllowRules) SetDoTables(v []Table) { o.DoTables = v } diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules_table.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules_table.go deleted file mode 100644 index 1bf1d7bf..00000000 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_block_allow_rules_table.go +++ /dev/null @@ -1,192 +0,0 @@ -/* -TiDB Cloud Starter and Essential API - -TiDB Cloud Starter and Essential API - -API version: v1beta1 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package migration - -import ( - "encoding/json" -) - -// checks if the BlockAllowRulesTable type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &BlockAllowRulesTable{} - -// BlockAllowRulesTable struct for BlockAllowRulesTable -type BlockAllowRulesTable struct { - // Schema name. - Schema *string `json:"schema,omitempty"` - // Table name. 
- Table *string `json:"table,omitempty"` - AdditionalProperties map[string]interface{} -} - -type _BlockAllowRulesTable BlockAllowRulesTable - -// NewBlockAllowRulesTable instantiates a new BlockAllowRulesTable object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewBlockAllowRulesTable() *BlockAllowRulesTable { - this := BlockAllowRulesTable{} - return &this -} - -// NewBlockAllowRulesTableWithDefaults instantiates a new BlockAllowRulesTable object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewBlockAllowRulesTableWithDefaults() *BlockAllowRulesTable { - this := BlockAllowRulesTable{} - return &this -} - -// GetSchema returns the Schema field value if set, zero value otherwise. -func (o *BlockAllowRulesTable) GetSchema() string { - if o == nil || IsNil(o.Schema) { - var ret string - return ret - } - return *o.Schema -} - -// GetSchemaOk returns a tuple with the Schema field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BlockAllowRulesTable) GetSchemaOk() (*string, bool) { - if o == nil || IsNil(o.Schema) { - return nil, false - } - return o.Schema, true -} - -// HasSchema returns a boolean if a field has been set. -func (o *BlockAllowRulesTable) HasSchema() bool { - if o != nil && !IsNil(o.Schema) { - return true - } - - return false -} - -// SetSchema gets a reference to the given string and assigns it to the Schema field. -func (o *BlockAllowRulesTable) SetSchema(v string) { - o.Schema = &v -} - -// GetTable returns the Table field value if set, zero value otherwise. 
-func (o *BlockAllowRulesTable) GetTable() string { - if o == nil || IsNil(o.Table) { - var ret string - return ret - } - return *o.Table -} - -// GetTableOk returns a tuple with the Table field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BlockAllowRulesTable) GetTableOk() (*string, bool) { - if o == nil || IsNil(o.Table) { - return nil, false - } - return o.Table, true -} - -// HasTable returns a boolean if a field has been set. -func (o *BlockAllowRulesTable) HasTable() bool { - if o != nil && !IsNil(o.Table) { - return true - } - - return false -} - -// SetTable gets a reference to the given string and assigns it to the Table field. -func (o *BlockAllowRulesTable) SetTable(v string) { - o.Table = &v -} - -func (o BlockAllowRulesTable) MarshalJSON() ([]byte, error) { - toSerialize, err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o BlockAllowRulesTable) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Schema) { - toSerialize["schema"] = o.Schema - } - if !IsNil(o.Table) { - toSerialize["table"] = o.Table - } - - for key, value := range o.AdditionalProperties { - toSerialize[key] = value - } - - return toSerialize, nil -} - -func (o *BlockAllowRulesTable) UnmarshalJSON(data []byte) (err error) { - varBlockAllowRulesTable := _BlockAllowRulesTable{} - - err = json.Unmarshal(data, &varBlockAllowRulesTable) - - if err != nil { - return err - } - - *o = BlockAllowRulesTable(varBlockAllowRulesTable) - - additionalProperties := make(map[string]interface{}) - - if err = json.Unmarshal(data, &additionalProperties); err == nil { - delete(additionalProperties, "schema") - delete(additionalProperties, "table") - o.AdditionalProperties = additionalProperties - } - - return err -} - -type NullableBlockAllowRulesTable struct { - value *BlockAllowRulesTable - isSet bool -} - -func (v NullableBlockAllowRulesTable) Get() 
*BlockAllowRulesTable { - return v.value -} - -func (v *NullableBlockAllowRulesTable) Set(val *BlockAllowRulesTable) { - v.value = val - v.isSet = true -} - -func (v NullableBlockAllowRulesTable) IsSet() bool { - return v.isSet -} - -func (v *NullableBlockAllowRulesTable) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBlockAllowRulesTable(val *BlockAllowRulesTable) *NullableBlockAllowRulesTable { - return &NullableBlockAllowRulesTable{value: val, isSet: true} -} - -func (v NullableBlockAllowRulesTable) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBlockAllowRulesTable) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_profile.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_profile.go index 938afd7c..e6103377 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_profile.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_conn_profile.go @@ -21,7 +21,7 @@ var _ MappedNullable = &ConnProfile{} // ConnProfile struct for ConnProfile type ConnProfile struct { // Connection type (e.g., PUBLIC, PRIVATE_LINK). - ConnType *ConnType `json:"connType,omitempty"` + ConnType ConnType `json:"connType"` // Private link endpoint ID. EndpointId *string `json:"endpointId,omitempty"` // Source host. 
@@ -43,8 +43,9 @@ type _ConnProfile ConnProfile // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewConnProfile(port int32, user string, password string) *ConnProfile { +func NewConnProfile(connType ConnType, port int32, user string, password string) *ConnProfile { this := ConnProfile{} + this.ConnType = connType this.Port = port this.User = user this.Password = password @@ -59,36 +60,28 @@ func NewConnProfileWithDefaults() *ConnProfile { return &this } -// GetConnType returns the ConnType field value if set, zero value otherwise. +// GetConnType returns the ConnType field value func (o *ConnProfile) GetConnType() ConnType { - if o == nil || IsNil(o.ConnType) { + if o == nil { var ret ConnType return ret } - return *o.ConnType + + return o.ConnType } -// GetConnTypeOk returns a tuple with the ConnType field value if set, nil otherwise +// GetConnTypeOk returns a tuple with the ConnType field value // and a boolean to check if the value has been set. func (o *ConnProfile) GetConnTypeOk() (*ConnType, bool) { - if o == nil || IsNil(o.ConnType) { + if o == nil { return nil, false } - return o.ConnType, true -} - -// HasConnType returns a boolean if a field has been set. -func (o *ConnProfile) HasConnType() bool { - if o != nil && !IsNil(o.ConnType) { - return true - } - - return false + return &o.ConnType, true } -// SetConnType gets a reference to the given ConnType and assigns it to the ConnType field. +// SetConnType sets field value func (o *ConnProfile) SetConnType(v ConnType) { - o.ConnType = &v + o.ConnType = v } // GetEndpointId returns the EndpointId field value if set, zero value otherwise. 
@@ -269,9 +262,7 @@ func (o ConnProfile) MarshalJSON() ([]byte, error) { func (o ConnProfile) ToMap() (map[string]interface{}, error) { toSerialize := map[string]interface{}{} - if !IsNil(o.ConnType) { - toSerialize["connType"] = o.ConnType - } + toSerialize["connType"] = o.ConnType if !IsNil(o.EndpointId) { toSerialize["endpointId"] = o.EndpointId } @@ -297,6 +288,7 @@ func (o *ConnProfile) UnmarshalJSON(data []byte) (err error) { // by unmarshalling the object into a generic map with string keys and checking // that every required field exists as a key in the generic map. requiredProperties := []string{ + "connType", "port", "user", "password", diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_create_migration_precheck_resp.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_create_migration_precheck_resp.go index f36abcdc..70d4f4ca 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_create_migration_precheck_resp.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_create_migration_precheck_resp.go @@ -20,7 +20,7 @@ var _ MappedNullable = &CreateMigrationPrecheckResp{} // CreateMigrationPrecheckResp struct for CreateMigrationPrecheckResp type CreateMigrationPrecheckResp struct { // The ID of the created precheck. - Id *string `json:"id,omitempty"` + PrecheckId *string `json:"precheckId,omitempty"` AdditionalProperties map[string]interface{} } @@ -43,36 +43,36 @@ func NewCreateMigrationPrecheckRespWithDefaults() *CreateMigrationPrecheckResp { return &this } -// GetId returns the Id field value if set, zero value otherwise. -func (o *CreateMigrationPrecheckResp) GetId() string { - if o == nil || IsNil(o.Id) { +// GetPrecheckId returns the PrecheckId field value if set, zero value otherwise. 
+func (o *CreateMigrationPrecheckResp) GetPrecheckId() string { + if o == nil || IsNil(o.PrecheckId) { var ret string return ret } - return *o.Id + return *o.PrecheckId } -// GetIdOk returns a tuple with the Id field value if set, nil otherwise +// GetPrecheckIdOk returns a tuple with the PrecheckId field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *CreateMigrationPrecheckResp) GetIdOk() (*string, bool) { - if o == nil || IsNil(o.Id) { +func (o *CreateMigrationPrecheckResp) GetPrecheckIdOk() (*string, bool) { + if o == nil || IsNil(o.PrecheckId) { return nil, false } - return o.Id, true + return o.PrecheckId, true } -// HasId returns a boolean if a field has been set. -func (o *CreateMigrationPrecheckResp) HasId() bool { - if o != nil && !IsNil(o.Id) { +// HasPrecheckId returns a boolean if a field has been set. +func (o *CreateMigrationPrecheckResp) HasPrecheckId() bool { + if o != nil && !IsNil(o.PrecheckId) { return true } return false } -// SetId gets a reference to the given string and assigns it to the Id field. -func (o *CreateMigrationPrecheckResp) SetId(v string) { - o.Id = &v +// SetPrecheckId gets a reference to the given string and assigns it to the PrecheckId field. 
+func (o *CreateMigrationPrecheckResp) SetPrecheckId(v string) { + o.PrecheckId = &v } func (o CreateMigrationPrecheckResp) MarshalJSON() ([]byte, error) { @@ -85,8 +85,8 @@ func (o CreateMigrationPrecheckResp) MarshalJSON() ([]byte, error) { func (o CreateMigrationPrecheckResp) ToMap() (map[string]interface{}, error) { toSerialize := map[string]interface{}{} - if !IsNil(o.Id) { - toSerialize["id"] = o.Id + if !IsNil(o.PrecheckId) { + toSerialize["precheckId"] = o.PrecheckId } for key, value := range o.AdditionalProperties { @@ -110,7 +110,7 @@ func (o *CreateMigrationPrecheckResp) UnmarshalJSON(data []byte) (err error) { additionalProperties := make(map[string]interface{}) if err = json.Unmarshal(data, &additionalProperties); err == nil { - delete(additionalProperties, "id") + delete(additionalProperties, "precheckId") o.AdditionalProperties = additionalProperties } diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_list_migration_tasks_resp.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_list_migrations_resp.go similarity index 51% rename from pkg/tidbcloud/v1beta1/serverless/migration/model_list_migration_tasks_resp.go rename to pkg/tidbcloud/v1beta1/serverless/migration/model_list_migrations_resp.go index 361cfa37..4fa97dc3 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_list_migration_tasks_resp.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_list_migrations_resp.go @@ -14,73 +14,73 @@ import ( "encoding/json" ) -// checks if the ListMigrationTasksResp type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &ListMigrationTasksResp{} - -// ListMigrationTasksResp struct for ListMigrationTasksResp -type ListMigrationTasksResp struct { - // The list of migration tasks. - Tasks []MigrationTask `json:"tasks,omitempty"` - // The total number of tasks matching the query. 
+// checks if the ListMigrationsResp type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &ListMigrationsResp{} + +// ListMigrationsResp struct for ListMigrationsResp +type ListMigrationsResp struct { + // The list of migrations. + Migrations []Migration `json:"migrations,omitempty"` + // The total number of migrations matching the query. TotalSize *int64 `json:"totalSize,omitempty"` // Token to retrieve the next page of results. NextPageToken *string `json:"nextPageToken,omitempty"` AdditionalProperties map[string]interface{} } -type _ListMigrationTasksResp ListMigrationTasksResp +type _ListMigrationsResp ListMigrationsResp -// NewListMigrationTasksResp instantiates a new ListMigrationTasksResp object +// NewListMigrationsResp instantiates a new ListMigrationsResp object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewListMigrationTasksResp() *ListMigrationTasksResp { - this := ListMigrationTasksResp{} +func NewListMigrationsResp() *ListMigrationsResp { + this := ListMigrationsResp{} return &this } -// NewListMigrationTasksRespWithDefaults instantiates a new ListMigrationTasksResp object +// NewListMigrationsRespWithDefaults instantiates a new ListMigrationsResp object // This constructor will only assign default values to properties that have it defined, // but it doesn't guarantee that properties required by API are set -func NewListMigrationTasksRespWithDefaults() *ListMigrationTasksResp { - this := ListMigrationTasksResp{} +func NewListMigrationsRespWithDefaults() *ListMigrationsResp { + this := ListMigrationsResp{} return &this } -// GetTasks returns the Tasks field value if set, zero value otherwise. 
-func (o *ListMigrationTasksResp) GetTasks() []MigrationTask { - if o == nil || IsNil(o.Tasks) { - var ret []MigrationTask +// GetMigrations returns the Migrations field value if set, zero value otherwise. +func (o *ListMigrationsResp) GetMigrations() []Migration { + if o == nil || IsNil(o.Migrations) { + var ret []Migration return ret } - return o.Tasks + return o.Migrations } -// GetTasksOk returns a tuple with the Tasks field value if set, nil otherwise +// GetMigrationsOk returns a tuple with the Migrations field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *ListMigrationTasksResp) GetTasksOk() ([]MigrationTask, bool) { - if o == nil || IsNil(o.Tasks) { +func (o *ListMigrationsResp) GetMigrationsOk() ([]Migration, bool) { + if o == nil || IsNil(o.Migrations) { return nil, false } - return o.Tasks, true + return o.Migrations, true } -// HasTasks returns a boolean if a field has been set. -func (o *ListMigrationTasksResp) HasTasks() bool { - if o != nil && !IsNil(o.Tasks) { +// HasMigrations returns a boolean if a field has been set. +func (o *ListMigrationsResp) HasMigrations() bool { + if o != nil && !IsNil(o.Migrations) { return true } return false } -// SetTasks gets a reference to the given []MigrationTask and assigns it to the Tasks field. -func (o *ListMigrationTasksResp) SetTasks(v []MigrationTask) { - o.Tasks = v +// SetMigrations gets a reference to the given []Migration and assigns it to the Migrations field. +func (o *ListMigrationsResp) SetMigrations(v []Migration) { + o.Migrations = v } // GetTotalSize returns the TotalSize field value if set, zero value otherwise. 
-func (o *ListMigrationTasksResp) GetTotalSize() int64 { +func (o *ListMigrationsResp) GetTotalSize() int64 { if o == nil || IsNil(o.TotalSize) { var ret int64 return ret @@ -90,7 +90,7 @@ func (o *ListMigrationTasksResp) GetTotalSize() int64 { // GetTotalSizeOk returns a tuple with the TotalSize field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *ListMigrationTasksResp) GetTotalSizeOk() (*int64, bool) { +func (o *ListMigrationsResp) GetTotalSizeOk() (*int64, bool) { if o == nil || IsNil(o.TotalSize) { return nil, false } @@ -98,7 +98,7 @@ func (o *ListMigrationTasksResp) GetTotalSizeOk() (*int64, bool) { } // HasTotalSize returns a boolean if a field has been set. -func (o *ListMigrationTasksResp) HasTotalSize() bool { +func (o *ListMigrationsResp) HasTotalSize() bool { if o != nil && !IsNil(o.TotalSize) { return true } @@ -107,12 +107,12 @@ func (o *ListMigrationTasksResp) HasTotalSize() bool { } // SetTotalSize gets a reference to the given int64 and assigns it to the TotalSize field. -func (o *ListMigrationTasksResp) SetTotalSize(v int64) { +func (o *ListMigrationsResp) SetTotalSize(v int64) { o.TotalSize = &v } // GetNextPageToken returns the NextPageToken field value if set, zero value otherwise. -func (o *ListMigrationTasksResp) GetNextPageToken() string { +func (o *ListMigrationsResp) GetNextPageToken() string { if o == nil || IsNil(o.NextPageToken) { var ret string return ret @@ -122,7 +122,7 @@ func (o *ListMigrationTasksResp) GetNextPageToken() string { // GetNextPageTokenOk returns a tuple with the NextPageToken field value if set, nil otherwise // and a boolean to check if the value has been set. 
-func (o *ListMigrationTasksResp) GetNextPageTokenOk() (*string, bool) { +func (o *ListMigrationsResp) GetNextPageTokenOk() (*string, bool) { if o == nil || IsNil(o.NextPageToken) { return nil, false } @@ -130,7 +130,7 @@ func (o *ListMigrationTasksResp) GetNextPageTokenOk() (*string, bool) { } // HasNextPageToken returns a boolean if a field has been set. -func (o *ListMigrationTasksResp) HasNextPageToken() bool { +func (o *ListMigrationsResp) HasNextPageToken() bool { if o != nil && !IsNil(o.NextPageToken) { return true } @@ -139,11 +139,11 @@ func (o *ListMigrationTasksResp) HasNextPageToken() bool { } // SetNextPageToken gets a reference to the given string and assigns it to the NextPageToken field. -func (o *ListMigrationTasksResp) SetNextPageToken(v string) { +func (o *ListMigrationsResp) SetNextPageToken(v string) { o.NextPageToken = &v } -func (o ListMigrationTasksResp) MarshalJSON() ([]byte, error) { +func (o ListMigrationsResp) MarshalJSON() ([]byte, error) { toSerialize, err := o.ToMap() if err != nil { return []byte{}, err @@ -151,10 +151,10 @@ func (o ListMigrationTasksResp) MarshalJSON() ([]byte, error) { return json.Marshal(toSerialize) } -func (o ListMigrationTasksResp) ToMap() (map[string]interface{}, error) { +func (o ListMigrationsResp) ToMap() (map[string]interface{}, error) { toSerialize := map[string]interface{}{} - if !IsNil(o.Tasks) { - toSerialize["tasks"] = o.Tasks + if !IsNil(o.Migrations) { + toSerialize["migrations"] = o.Migrations } if !IsNil(o.TotalSize) { toSerialize["totalSize"] = o.TotalSize @@ -170,21 +170,21 @@ func (o ListMigrationTasksResp) ToMap() (map[string]interface{}, error) { return toSerialize, nil } -func (o *ListMigrationTasksResp) UnmarshalJSON(data []byte) (err error) { - varListMigrationTasksResp := _ListMigrationTasksResp{} +func (o *ListMigrationsResp) UnmarshalJSON(data []byte) (err error) { + varListMigrationsResp := _ListMigrationsResp{} - err = json.Unmarshal(data, &varListMigrationTasksResp) + err = 
json.Unmarshal(data, &varListMigrationsResp) if err != nil { return err } - *o = ListMigrationTasksResp(varListMigrationTasksResp) + *o = ListMigrationsResp(varListMigrationsResp) additionalProperties := make(map[string]interface{}) if err = json.Unmarshal(data, &additionalProperties); err == nil { - delete(additionalProperties, "tasks") + delete(additionalProperties, "migrations") delete(additionalProperties, "totalSize") delete(additionalProperties, "nextPageToken") o.AdditionalProperties = additionalProperties @@ -193,38 +193,38 @@ func (o *ListMigrationTasksResp) UnmarshalJSON(data []byte) (err error) { return err } -type NullableListMigrationTasksResp struct { - value *ListMigrationTasksResp +type NullableListMigrationsResp struct { + value *ListMigrationsResp isSet bool } -func (v NullableListMigrationTasksResp) Get() *ListMigrationTasksResp { +func (v NullableListMigrationsResp) Get() *ListMigrationsResp { return v.value } -func (v *NullableListMigrationTasksResp) Set(val *ListMigrationTasksResp) { +func (v *NullableListMigrationsResp) Set(val *ListMigrationsResp) { v.value = val v.isSet = true } -func (v NullableListMigrationTasksResp) IsSet() bool { +func (v NullableListMigrationsResp) IsSet() bool { return v.isSet } -func (v *NullableListMigrationTasksResp) Unset() { +func (v *NullableListMigrationsResp) Unset() { v.value = nil v.isSet = false } -func NewNullableListMigrationTasksResp(val *ListMigrationTasksResp) *NullableListMigrationTasksResp { - return &NullableListMigrationTasksResp{value: val, isSet: true} +func NewNullableListMigrationsResp(val *ListMigrationsResp) *NullableListMigrationsResp { + return &NullableListMigrationsResp{value: val, isSet: true} } -func (v NullableListMigrationTasksResp) MarshalJSON() ([]byte, error) { +func (v NullableListMigrationsResp) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } -func (v *NullableListMigrationTasksResp) UnmarshalJSON(src []byte) error { +func (v *NullableListMigrationsResp) 
UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) } diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration.go similarity index 56% rename from pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task.go rename to pkg/tidbcloud/v1beta1/serverless/migration/model_migration.go index 2a430c08..1012d2ec 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration.go @@ -15,113 +15,113 @@ import ( "time" ) -// checks if the MigrationTask type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &MigrationTask{} - -// MigrationTask struct for MigrationTask -type MigrationTask struct { - // The unique ID of the migration task. - Id *string `json:"id,omitempty"` - // The display name of the migration task. - Name *string `json:"name,omitempty"` +// checks if the Migration type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Migration{} + +// Migration struct for Migration +type Migration struct { + // The unique ID of the migration. + MigrationId *string `json:"migrationId,omitempty"` + // The display name of the migration. + DisplayName *string `json:"displayName,omitempty"` // The list of subtasks composing this migration. SubTasks []SubTask `json:"subTasks,omitempty"` - // The target database username used by the task. + // The target database username used by the migration. TargetUser *string `json:"targetUser,omitempty"` - // The timestamp when the task was created. + // The timestamp when the migration was created. CreateTime *time.Time `json:"createTime,omitempty"` - // The migration mode of the task. + // The migration mode of the migration. Mode *TaskMode `json:"mode,omitempty"` - // The current state of the task. 
- State *MigrationTaskState `json:"state,omitempty"` + // The current state of the migration. + State *MigrationState `json:"state,omitempty"` AdditionalProperties map[string]interface{} } -type _MigrationTask MigrationTask +type _Migration Migration -// NewMigrationTask instantiates a new MigrationTask object +// NewMigration instantiates a new Migration object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewMigrationTask() *MigrationTask { - this := MigrationTask{} +func NewMigration() *Migration { + this := Migration{} return &this } -// NewMigrationTaskWithDefaults instantiates a new MigrationTask object +// NewMigrationWithDefaults instantiates a new Migration object // This constructor will only assign default values to properties that have it defined, // but it doesn't guarantee that properties required by API are set -func NewMigrationTaskWithDefaults() *MigrationTask { - this := MigrationTask{} +func NewMigrationWithDefaults() *Migration { + this := Migration{} return &this } -// GetId returns the Id field value if set, zero value otherwise. -func (o *MigrationTask) GetId() string { - if o == nil || IsNil(o.Id) { +// GetMigrationId returns the MigrationId field value if set, zero value otherwise. +func (o *Migration) GetMigrationId() string { + if o == nil || IsNil(o.MigrationId) { var ret string return ret } - return *o.Id + return *o.MigrationId } -// GetIdOk returns a tuple with the Id field value if set, nil otherwise +// GetMigrationIdOk returns a tuple with the MigrationId field value if set, nil otherwise // and a boolean to check if the value has been set. 
-func (o *MigrationTask) GetIdOk() (*string, bool) { - if o == nil || IsNil(o.Id) { +func (o *Migration) GetMigrationIdOk() (*string, bool) { + if o == nil || IsNil(o.MigrationId) { return nil, false } - return o.Id, true + return o.MigrationId, true } -// HasId returns a boolean if a field has been set. -func (o *MigrationTask) HasId() bool { - if o != nil && !IsNil(o.Id) { +// HasMigrationId returns a boolean if a field has been set. +func (o *Migration) HasMigrationId() bool { + if o != nil && !IsNil(o.MigrationId) { return true } return false } -// SetId gets a reference to the given string and assigns it to the Id field. -func (o *MigrationTask) SetId(v string) { - o.Id = &v +// SetMigrationId gets a reference to the given string and assigns it to the MigrationId field. +func (o *Migration) SetMigrationId(v string) { + o.MigrationId = &v } -// GetName returns the Name field value if set, zero value otherwise. -func (o *MigrationTask) GetName() string { - if o == nil || IsNil(o.Name) { +// GetDisplayName returns the DisplayName field value if set, zero value otherwise. +func (o *Migration) GetDisplayName() string { + if o == nil || IsNil(o.DisplayName) { var ret string return ret } - return *o.Name + return *o.DisplayName } -// GetNameOk returns a tuple with the Name field value if set, nil otherwise +// GetDisplayNameOk returns a tuple with the DisplayName field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *MigrationTask) GetNameOk() (*string, bool) { - if o == nil || IsNil(o.Name) { +func (o *Migration) GetDisplayNameOk() (*string, bool) { + if o == nil || IsNil(o.DisplayName) { return nil, false } - return o.Name, true + return o.DisplayName, true } -// HasName returns a boolean if a field has been set. -func (o *MigrationTask) HasName() bool { - if o != nil && !IsNil(o.Name) { +// HasDisplayName returns a boolean if a field has been set. 
+func (o *Migration) HasDisplayName() bool { + if o != nil && !IsNil(o.DisplayName) { return true } return false } -// SetName gets a reference to the given string and assigns it to the Name field. -func (o *MigrationTask) SetName(v string) { - o.Name = &v +// SetDisplayName gets a reference to the given string and assigns it to the DisplayName field. +func (o *Migration) SetDisplayName(v string) { + o.DisplayName = &v } // GetSubTasks returns the SubTasks field value if set, zero value otherwise. -func (o *MigrationTask) GetSubTasks() []SubTask { +func (o *Migration) GetSubTasks() []SubTask { if o == nil || IsNil(o.SubTasks) { var ret []SubTask return ret @@ -131,7 +131,7 @@ func (o *MigrationTask) GetSubTasks() []SubTask { // GetSubTasksOk returns a tuple with the SubTasks field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *MigrationTask) GetSubTasksOk() ([]SubTask, bool) { +func (o *Migration) GetSubTasksOk() ([]SubTask, bool) { if o == nil || IsNil(o.SubTasks) { return nil, false } @@ -139,7 +139,7 @@ func (o *MigrationTask) GetSubTasksOk() ([]SubTask, bool) { } // HasSubTasks returns a boolean if a field has been set. -func (o *MigrationTask) HasSubTasks() bool { +func (o *Migration) HasSubTasks() bool { if o != nil && !IsNil(o.SubTasks) { return true } @@ -148,12 +148,12 @@ func (o *MigrationTask) HasSubTasks() bool { } // SetSubTasks gets a reference to the given []SubTask and assigns it to the SubTasks field. -func (o *MigrationTask) SetSubTasks(v []SubTask) { +func (o *Migration) SetSubTasks(v []SubTask) { o.SubTasks = v } // GetTargetUser returns the TargetUser field value if set, zero value otherwise. 
-func (o *MigrationTask) GetTargetUser() string { +func (o *Migration) GetTargetUser() string { if o == nil || IsNil(o.TargetUser) { var ret string return ret @@ -163,7 +163,7 @@ func (o *MigrationTask) GetTargetUser() string { // GetTargetUserOk returns a tuple with the TargetUser field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *MigrationTask) GetTargetUserOk() (*string, bool) { +func (o *Migration) GetTargetUserOk() (*string, bool) { if o == nil || IsNil(o.TargetUser) { return nil, false } @@ -171,7 +171,7 @@ func (o *MigrationTask) GetTargetUserOk() (*string, bool) { } // HasTargetUser returns a boolean if a field has been set. -func (o *MigrationTask) HasTargetUser() bool { +func (o *Migration) HasTargetUser() bool { if o != nil && !IsNil(o.TargetUser) { return true } @@ -180,12 +180,12 @@ func (o *MigrationTask) HasTargetUser() bool { } // SetTargetUser gets a reference to the given string and assigns it to the TargetUser field. -func (o *MigrationTask) SetTargetUser(v string) { +func (o *Migration) SetTargetUser(v string) { o.TargetUser = &v } // GetCreateTime returns the CreateTime field value if set, zero value otherwise. -func (o *MigrationTask) GetCreateTime() time.Time { +func (o *Migration) GetCreateTime() time.Time { if o == nil || IsNil(o.CreateTime) { var ret time.Time return ret @@ -195,7 +195,7 @@ func (o *MigrationTask) GetCreateTime() time.Time { // GetCreateTimeOk returns a tuple with the CreateTime field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *MigrationTask) GetCreateTimeOk() (*time.Time, bool) { +func (o *Migration) GetCreateTimeOk() (*time.Time, bool) { if o == nil || IsNil(o.CreateTime) { return nil, false } @@ -203,7 +203,7 @@ func (o *MigrationTask) GetCreateTimeOk() (*time.Time, bool) { } // HasCreateTime returns a boolean if a field has been set. 
-func (o *MigrationTask) HasCreateTime() bool { +func (o *Migration) HasCreateTime() bool { if o != nil && !IsNil(o.CreateTime) { return true } @@ -212,12 +212,12 @@ func (o *MigrationTask) HasCreateTime() bool { } // SetCreateTime gets a reference to the given time.Time and assigns it to the CreateTime field. -func (o *MigrationTask) SetCreateTime(v time.Time) { +func (o *Migration) SetCreateTime(v time.Time) { o.CreateTime = &v } // GetMode returns the Mode field value if set, zero value otherwise. -func (o *MigrationTask) GetMode() TaskMode { +func (o *Migration) GetMode() TaskMode { if o == nil || IsNil(o.Mode) { var ret TaskMode return ret @@ -227,7 +227,7 @@ func (o *MigrationTask) GetMode() TaskMode { // GetModeOk returns a tuple with the Mode field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *MigrationTask) GetModeOk() (*TaskMode, bool) { +func (o *Migration) GetModeOk() (*TaskMode, bool) { if o == nil || IsNil(o.Mode) { return nil, false } @@ -235,7 +235,7 @@ func (o *MigrationTask) GetModeOk() (*TaskMode, bool) { } // HasMode returns a boolean if a field has been set. -func (o *MigrationTask) HasMode() bool { +func (o *Migration) HasMode() bool { if o != nil && !IsNil(o.Mode) { return true } @@ -244,14 +244,14 @@ func (o *MigrationTask) HasMode() bool { } // SetMode gets a reference to the given TaskMode and assigns it to the Mode field. -func (o *MigrationTask) SetMode(v TaskMode) { +func (o *Migration) SetMode(v TaskMode) { o.Mode = &v } // GetState returns the State field value if set, zero value otherwise. 
-func (o *MigrationTask) GetState() MigrationTaskState { +func (o *Migration) GetState() MigrationState { if o == nil || IsNil(o.State) { - var ret MigrationTaskState + var ret MigrationState return ret } return *o.State @@ -259,7 +259,7 @@ func (o *MigrationTask) GetState() MigrationTaskState { // GetStateOk returns a tuple with the State field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *MigrationTask) GetStateOk() (*MigrationTaskState, bool) { +func (o *Migration) GetStateOk() (*MigrationState, bool) { if o == nil || IsNil(o.State) { return nil, false } @@ -267,7 +267,7 @@ func (o *MigrationTask) GetStateOk() (*MigrationTaskState, bool) { } // HasState returns a boolean if a field has been set. -func (o *MigrationTask) HasState() bool { +func (o *Migration) HasState() bool { if o != nil && !IsNil(o.State) { return true } @@ -275,12 +275,12 @@ func (o *MigrationTask) HasState() bool { return false } -// SetState gets a reference to the given MigrationTaskState and assigns it to the State field. -func (o *MigrationTask) SetState(v MigrationTaskState) { +// SetState gets a reference to the given MigrationState and assigns it to the State field. 
+func (o *Migration) SetState(v MigrationState) { o.State = &v } -func (o MigrationTask) MarshalJSON() ([]byte, error) { +func (o Migration) MarshalJSON() ([]byte, error) { toSerialize, err := o.ToMap() if err != nil { return []byte{}, err @@ -288,13 +288,13 @@ func (o MigrationTask) MarshalJSON() ([]byte, error) { return json.Marshal(toSerialize) } -func (o MigrationTask) ToMap() (map[string]interface{}, error) { +func (o Migration) ToMap() (map[string]interface{}, error) { toSerialize := map[string]interface{}{} - if !IsNil(o.Id) { - toSerialize["id"] = o.Id + if !IsNil(o.MigrationId) { + toSerialize["migrationId"] = o.MigrationId } - if !IsNil(o.Name) { - toSerialize["name"] = o.Name + if !IsNil(o.DisplayName) { + toSerialize["displayName"] = o.DisplayName } if !IsNil(o.SubTasks) { toSerialize["subTasks"] = o.SubTasks @@ -319,22 +319,22 @@ func (o MigrationTask) ToMap() (map[string]interface{}, error) { return toSerialize, nil } -func (o *MigrationTask) UnmarshalJSON(data []byte) (err error) { - varMigrationTask := _MigrationTask{} +func (o *Migration) UnmarshalJSON(data []byte) (err error) { + varMigration := _Migration{} - err = json.Unmarshal(data, &varMigrationTask) + err = json.Unmarshal(data, &varMigration) if err != nil { return err } - *o = MigrationTask(varMigrationTask) + *o = Migration(varMigration) additionalProperties := make(map[string]interface{}) if err = json.Unmarshal(data, &additionalProperties); err == nil { - delete(additionalProperties, "id") - delete(additionalProperties, "name") + delete(additionalProperties, "migrationId") + delete(additionalProperties, "displayName") delete(additionalProperties, "subTasks") delete(additionalProperties, "targetUser") delete(additionalProperties, "createTime") @@ -346,38 +346,38 @@ func (o *MigrationTask) UnmarshalJSON(data []byte) (err error) { return err } -type NullableMigrationTask struct { - value *MigrationTask +type NullableMigration struct { + value *Migration isSet bool } -func (v 
NullableMigrationTask) Get() *MigrationTask { +func (v NullableMigration) Get() *Migration { return v.value } -func (v *NullableMigrationTask) Set(val *MigrationTask) { +func (v *NullableMigration) Set(val *Migration) { v.value = val v.isSet = true } -func (v NullableMigrationTask) IsSet() bool { +func (v NullableMigration) IsSet() bool { return v.isSet } -func (v *NullableMigrationTask) Unset() { +func (v *NullableMigration) Unset() { v.value = nil v.isSet = false } -func NewNullableMigrationTask(val *MigrationTask) *NullableMigrationTask { - return &NullableMigrationTask{value: val, isSet: true} +func NewNullableMigration(val *Migration) *NullableMigration { + return &NullableMigration{value: val, isSet: true} } -func (v NullableMigrationTask) MarshalJSON() ([]byte, error) { +func (v NullableMigration) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } -func (v *NullableMigrationTask) UnmarshalJSON(src []byte) error { +func (v *NullableMigration) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) } diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go index 6b525bdd..ace07dc2 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go @@ -19,6 +19,8 @@ var _ MappedNullable = &MigrationPrecheck{} // MigrationPrecheck struct for MigrationPrecheck type MigrationPrecheck struct { + // The ID of the precheck. + PrecheckId *string `json:"precheckId,omitempty"` // Total number of precheck items. Total *int32 `json:"total,omitempty"` // Number of failed items. @@ -53,6 +55,38 @@ func NewMigrationPrecheckWithDefaults() *MigrationPrecheck { return &this } +// GetPrecheckId returns the PrecheckId field value if set, zero value otherwise. 
+func (o *MigrationPrecheck) GetPrecheckId() string { + if o == nil || IsNil(o.PrecheckId) { + var ret string + return ret + } + return *o.PrecheckId +} + +// GetPrecheckIdOk returns a tuple with the PrecheckId field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *MigrationPrecheck) GetPrecheckIdOk() (*string, bool) { + if o == nil || IsNil(o.PrecheckId) { + return nil, false + } + return o.PrecheckId, true +} + +// HasPrecheckId returns a boolean if a field has been set. +func (o *MigrationPrecheck) HasPrecheckId() bool { + if o != nil && !IsNil(o.PrecheckId) { + return true + } + + return false +} + +// SetPrecheckId gets a reference to the given string and assigns it to the PrecheckId field. +func (o *MigrationPrecheck) SetPrecheckId(v string) { + o.PrecheckId = &v +} + // GetTotal returns the Total field value if set, zero value otherwise. func (o *MigrationPrecheck) GetTotal() int32 { if o == nil || IsNil(o.Total) { @@ -255,6 +289,9 @@ func (o MigrationPrecheck) MarshalJSON() ([]byte, error) { func (o MigrationPrecheck) ToMap() (map[string]interface{}, error) { toSerialize := map[string]interface{}{} + if !IsNil(o.PrecheckId) { + toSerialize["precheckId"] = o.PrecheckId + } if !IsNil(o.Total) { toSerialize["total"] = o.Total } @@ -295,6 +332,7 @@ func (o *MigrationPrecheck) UnmarshalJSON(data []byte) (err error) { additionalProperties := make(map[string]interface{}) if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "precheckId") delete(additionalProperties, "total") delete(additionalProperties, "failedCnt") delete(additionalProperties, "warnCnt") diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_migration_body.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_migration_body.go new file mode 100644 index 00000000..1c5f1ab0 --- /dev/null +++ 
b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_migration_body.go @@ -0,0 +1,257 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" + "fmt" +) + +// checks if the MigrationServiceCreateMigrationBody type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &MigrationServiceCreateMigrationBody{} + +// MigrationServiceCreateMigrationBody struct for MigrationServiceCreateMigrationBody +type MigrationServiceCreateMigrationBody struct { + // The display name of the migration. + DisplayName string `json:"displayName"` + // The data sources to migrate from. + Sources []Source `json:"sources"` + // The target database credentials. + Target Target `json:"target"` + // The migration mode (full+incremental or incremental-only). + Mode TaskMode `json:"mode"` + AdditionalProperties map[string]interface{} +} + +type _MigrationServiceCreateMigrationBody MigrationServiceCreateMigrationBody + +// NewMigrationServiceCreateMigrationBody instantiates a new MigrationServiceCreateMigrationBody object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewMigrationServiceCreateMigrationBody(displayName string, sources []Source, target Target, mode TaskMode) *MigrationServiceCreateMigrationBody { + this := MigrationServiceCreateMigrationBody{} + this.DisplayName = displayName + this.Sources = sources + this.Target = target + this.Mode = mode + return &this +} + +// NewMigrationServiceCreateMigrationBodyWithDefaults instantiates a new MigrationServiceCreateMigrationBody object +// This constructor will only assign default values to properties that have it 
defined, +// but it doesn't guarantee that properties required by API are set +func NewMigrationServiceCreateMigrationBodyWithDefaults() *MigrationServiceCreateMigrationBody { + this := MigrationServiceCreateMigrationBody{} + return &this +} + +// GetDisplayName returns the DisplayName field value +func (o *MigrationServiceCreateMigrationBody) GetDisplayName() string { + if o == nil { + var ret string + return ret + } + + return o.DisplayName +} + +// GetDisplayNameOk returns a tuple with the DisplayName field value +// and a boolean to check if the value has been set. +func (o *MigrationServiceCreateMigrationBody) GetDisplayNameOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.DisplayName, true +} + +// SetDisplayName sets field value +func (o *MigrationServiceCreateMigrationBody) SetDisplayName(v string) { + o.DisplayName = v +} + +// GetSources returns the Sources field value +func (o *MigrationServiceCreateMigrationBody) GetSources() []Source { + if o == nil { + var ret []Source + return ret + } + + return o.Sources +} + +// GetSourcesOk returns a tuple with the Sources field value +// and a boolean to check if the value has been set. +func (o *MigrationServiceCreateMigrationBody) GetSourcesOk() ([]Source, bool) { + if o == nil { + return nil, false + } + return o.Sources, true +} + +// SetSources sets field value +func (o *MigrationServiceCreateMigrationBody) SetSources(v []Source) { + o.Sources = v +} + +// GetTarget returns the Target field value +func (o *MigrationServiceCreateMigrationBody) GetTarget() Target { + if o == nil { + var ret Target + return ret + } + + return o.Target +} + +// GetTargetOk returns a tuple with the Target field value +// and a boolean to check if the value has been set. 
+func (o *MigrationServiceCreateMigrationBody) GetTargetOk() (*Target, bool) { + if o == nil { + return nil, false + } + return &o.Target, true +} + +// SetTarget sets field value +func (o *MigrationServiceCreateMigrationBody) SetTarget(v Target) { + o.Target = v +} + +// GetMode returns the Mode field value +func (o *MigrationServiceCreateMigrationBody) GetMode() TaskMode { + if o == nil { + var ret TaskMode + return ret + } + + return o.Mode +} + +// GetModeOk returns a tuple with the Mode field value +// and a boolean to check if the value has been set. +func (o *MigrationServiceCreateMigrationBody) GetModeOk() (*TaskMode, bool) { + if o == nil { + return nil, false + } + return &o.Mode, true +} + +// SetMode sets field value +func (o *MigrationServiceCreateMigrationBody) SetMode(v TaskMode) { + o.Mode = v +} + +func (o MigrationServiceCreateMigrationBody) MarshalJSON() ([]byte, error) { + toSerialize, err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o MigrationServiceCreateMigrationBody) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["displayName"] = o.DisplayName + toSerialize["sources"] = o.Sources + toSerialize["target"] = o.Target + toSerialize["mode"] = o.Mode + + for key, value := range o.AdditionalProperties { + toSerialize[key] = value + } + + return toSerialize, nil +} + +func (o *MigrationServiceCreateMigrationBody) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "displayName", + "sources", + "target", + "mode", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err + } + + for _, requiredProperty := range requiredProperties { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varMigrationServiceCreateMigrationBody := _MigrationServiceCreateMigrationBody{} + + err = json.Unmarshal(data, &varMigrationServiceCreateMigrationBody) + + if err != nil { + return err + } + + *o = MigrationServiceCreateMigrationBody(varMigrationServiceCreateMigrationBody) + + additionalProperties := make(map[string]interface{}) + + if err = json.Unmarshal(data, &additionalProperties); err == nil { + delete(additionalProperties, "displayName") + delete(additionalProperties, "sources") + delete(additionalProperties, "target") + delete(additionalProperties, "mode") + o.AdditionalProperties = additionalProperties + } + + return err +} + +type NullableMigrationServiceCreateMigrationBody struct { + value *MigrationServiceCreateMigrationBody + isSet bool +} + +func (v NullableMigrationServiceCreateMigrationBody) Get() *MigrationServiceCreateMigrationBody { + return v.value +} + +func (v *NullableMigrationServiceCreateMigrationBody) Set(val *MigrationServiceCreateMigrationBody) { + v.value = val + v.isSet = true +} + +func (v NullableMigrationServiceCreateMigrationBody) IsSet() bool { + return v.isSet +} + +func (v *NullableMigrationServiceCreateMigrationBody) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableMigrationServiceCreateMigrationBody(val *MigrationServiceCreateMigrationBody) *NullableMigrationServiceCreateMigrationBody { + return &NullableMigrationServiceCreateMigrationBody{value: val, isSet: true} +} + +func (v NullableMigrationServiceCreateMigrationBody) MarshalJSON() ([]byte, error) { + return 
json.Marshal(v.value) +} + +func (v *NullableMigrationServiceCreateMigrationBody) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_task_body.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_task_body.go deleted file mode 100644 index d936df5b..00000000 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_create_task_body.go +++ /dev/null @@ -1,319 +0,0 @@ -/* -TiDB Cloud Starter and Essential API - -TiDB Cloud Starter and Essential API - -API version: v1beta1 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package migration - -import ( - "encoding/json" - "fmt" -) - -// checks if the MigrationServiceCreateTaskBody type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &MigrationServiceCreateTaskBody{} - -// MigrationServiceCreateTaskBody struct for MigrationServiceCreateTaskBody -type MigrationServiceCreateTaskBody struct { - // The display name of the migration task. - Name *string `json:"name,omitempty"` - // The data sources to migrate from. - Sources []Source `json:"sources"` - // The target database credentials. - Target *Target `json:"target,omitempty"` - // The migration mode (full+incremental or incremental-only). - Mode *TaskMode `json:"mode,omitempty"` - // If true, migrate all user data (equivalent to enabling all non-system databases and tables). 
- FullDataMigration *bool `json:"fullDataMigration,omitempty"` - AdditionalProperties map[string]interface{} -} - -type _MigrationServiceCreateTaskBody MigrationServiceCreateTaskBody - -// NewMigrationServiceCreateTaskBody instantiates a new MigrationServiceCreateTaskBody object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewMigrationServiceCreateTaskBody(sources []Source) *MigrationServiceCreateTaskBody { - this := MigrationServiceCreateTaskBody{} - this.Sources = sources - return &this -} - -// NewMigrationServiceCreateTaskBodyWithDefaults instantiates a new MigrationServiceCreateTaskBody object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewMigrationServiceCreateTaskBodyWithDefaults() *MigrationServiceCreateTaskBody { - this := MigrationServiceCreateTaskBody{} - return &this -} - -// GetName returns the Name field value if set, zero value otherwise. -func (o *MigrationServiceCreateTaskBody) GetName() string { - if o == nil || IsNil(o.Name) { - var ret string - return ret - } - return *o.Name -} - -// GetNameOk returns a tuple with the Name field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *MigrationServiceCreateTaskBody) GetNameOk() (*string, bool) { - if o == nil || IsNil(o.Name) { - return nil, false - } - return o.Name, true -} - -// HasName returns a boolean if a field has been set. -func (o *MigrationServiceCreateTaskBody) HasName() bool { - if o != nil && !IsNil(o.Name) { - return true - } - - return false -} - -// SetName gets a reference to the given string and assigns it to the Name field. 
-func (o *MigrationServiceCreateTaskBody) SetName(v string) { - o.Name = &v -} - -// GetSources returns the Sources field value -func (o *MigrationServiceCreateTaskBody) GetSources() []Source { - if o == nil { - var ret []Source - return ret - } - - return o.Sources -} - -// GetSourcesOk returns a tuple with the Sources field value -// and a boolean to check if the value has been set. -func (o *MigrationServiceCreateTaskBody) GetSourcesOk() ([]Source, bool) { - if o == nil { - return nil, false - } - return o.Sources, true -} - -// SetSources sets field value -func (o *MigrationServiceCreateTaskBody) SetSources(v []Source) { - o.Sources = v -} - -// GetTarget returns the Target field value if set, zero value otherwise. -func (o *MigrationServiceCreateTaskBody) GetTarget() Target { - if o == nil || IsNil(o.Target) { - var ret Target - return ret - } - return *o.Target -} - -// GetTargetOk returns a tuple with the Target field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *MigrationServiceCreateTaskBody) GetTargetOk() (*Target, bool) { - if o == nil || IsNil(o.Target) { - return nil, false - } - return o.Target, true -} - -// HasTarget returns a boolean if a field has been set. -func (o *MigrationServiceCreateTaskBody) HasTarget() bool { - if o != nil && !IsNil(o.Target) { - return true - } - - return false -} - -// SetTarget gets a reference to the given Target and assigns it to the Target field. -func (o *MigrationServiceCreateTaskBody) SetTarget(v Target) { - o.Target = &v -} - -// GetMode returns the Mode field value if set, zero value otherwise. -func (o *MigrationServiceCreateTaskBody) GetMode() TaskMode { - if o == nil || IsNil(o.Mode) { - var ret TaskMode - return ret - } - return *o.Mode -} - -// GetModeOk returns a tuple with the Mode field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *MigrationServiceCreateTaskBody) GetModeOk() (*TaskMode, bool) { - if o == nil || IsNil(o.Mode) { - return nil, false - } - return o.Mode, true -} - -// HasMode returns a boolean if a field has been set. -func (o *MigrationServiceCreateTaskBody) HasMode() bool { - if o != nil && !IsNil(o.Mode) { - return true - } - - return false -} - -// SetMode gets a reference to the given TaskMode and assigns it to the Mode field. -func (o *MigrationServiceCreateTaskBody) SetMode(v TaskMode) { - o.Mode = &v -} - -// GetFullDataMigration returns the FullDataMigration field value if set, zero value otherwise. -func (o *MigrationServiceCreateTaskBody) GetFullDataMigration() bool { - if o == nil || IsNil(o.FullDataMigration) { - var ret bool - return ret - } - return *o.FullDataMigration -} - -// GetFullDataMigrationOk returns a tuple with the FullDataMigration field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *MigrationServiceCreateTaskBody) GetFullDataMigrationOk() (*bool, bool) { - if o == nil || IsNil(o.FullDataMigration) { - return nil, false - } - return o.FullDataMigration, true -} - -// HasFullDataMigration returns a boolean if a field has been set. -func (o *MigrationServiceCreateTaskBody) HasFullDataMigration() bool { - if o != nil && !IsNil(o.FullDataMigration) { - return true - } - - return false -} - -// SetFullDataMigration gets a reference to the given bool and assigns it to the FullDataMigration field. 
-func (o *MigrationServiceCreateTaskBody) SetFullDataMigration(v bool) { - o.FullDataMigration = &v -} - -func (o MigrationServiceCreateTaskBody) MarshalJSON() ([]byte, error) { - toSerialize, err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o MigrationServiceCreateTaskBody) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Name) { - toSerialize["name"] = o.Name - } - toSerialize["sources"] = o.Sources - if !IsNil(o.Target) { - toSerialize["target"] = o.Target - } - if !IsNil(o.Mode) { - toSerialize["mode"] = o.Mode - } - if !IsNil(o.FullDataMigration) { - toSerialize["fullDataMigration"] = o.FullDataMigration - } - - for key, value := range o.AdditionalProperties { - toSerialize[key] = value - } - - return toSerialize, nil -} - -func (o *MigrationServiceCreateTaskBody) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "sources", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err - } - - for _, requiredProperty := range requiredProperties { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varMigrationServiceCreateTaskBody := _MigrationServiceCreateTaskBody{} - - err = json.Unmarshal(data, &varMigrationServiceCreateTaskBody) - - if err != nil { - return err - } - - *o = MigrationServiceCreateTaskBody(varMigrationServiceCreateTaskBody) - - additionalProperties := make(map[string]interface{}) - - if err = json.Unmarshal(data, &additionalProperties); err == nil { - delete(additionalProperties, "name") - delete(additionalProperties, "sources") - delete(additionalProperties, "target") - delete(additionalProperties, "mode") - delete(additionalProperties, "fullDataMigration") - o.AdditionalProperties = additionalProperties - } - - return err -} - -type NullableMigrationServiceCreateTaskBody struct { - value *MigrationServiceCreateTaskBody - isSet bool -} - -func (v NullableMigrationServiceCreateTaskBody) Get() *MigrationServiceCreateTaskBody { - return v.value -} - -func (v *NullableMigrationServiceCreateTaskBody) Set(val *MigrationServiceCreateTaskBody) { - v.value = val - v.isSet = true -} - -func (v NullableMigrationServiceCreateTaskBody) IsSet() bool { - return v.isSet -} - -func (v *NullableMigrationServiceCreateTaskBody) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableMigrationServiceCreateTaskBody(val *MigrationServiceCreateTaskBody) *NullableMigrationServiceCreateTaskBody { - return &NullableMigrationServiceCreateTaskBody{value: val, isSet: true} -} - -func (v NullableMigrationServiceCreateTaskBody) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableMigrationServiceCreateTaskBody) UnmarshalJSON(src []byte) 
error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_precheck_body.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_precheck_body.go index 49677590..047968a8 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_precheck_body.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_service_precheck_body.go @@ -20,16 +20,14 @@ var _ MappedNullable = &MigrationServicePrecheckBody{} // MigrationServicePrecheckBody struct for MigrationServicePrecheckBody type MigrationServicePrecheckBody struct { - // The display name of the migration task. - Name *string `json:"name,omitempty"` + // The display name of the migration. + DisplayName string `json:"displayName"` // The data sources to migrate from. Sources []Source `json:"sources"` // The target database credentials. - Target *Target `json:"target,omitempty"` + Target Target `json:"target"` // The migration mode (full+incremental or incremental-only). - Mode *TaskMode `json:"mode,omitempty"` - // If true, migrate all user data (equivalent to enabling all non-system databases and tables). 
- FullDataMigration *bool `json:"fullDataMigration,omitempty"` + Mode TaskMode `json:"mode"` AdditionalProperties map[string]interface{} } @@ -39,9 +37,12 @@ type _MigrationServicePrecheckBody MigrationServicePrecheckBody // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewMigrationServicePrecheckBody(sources []Source) *MigrationServicePrecheckBody { +func NewMigrationServicePrecheckBody(displayName string, sources []Source, target Target, mode TaskMode) *MigrationServicePrecheckBody { this := MigrationServicePrecheckBody{} + this.DisplayName = displayName this.Sources = sources + this.Target = target + this.Mode = mode return &this } @@ -53,36 +54,28 @@ func NewMigrationServicePrecheckBodyWithDefaults() *MigrationServicePrecheckBody return &this } -// GetName returns the Name field value if set, zero value otherwise. -func (o *MigrationServicePrecheckBody) GetName() string { - if o == nil || IsNil(o.Name) { +// GetDisplayName returns the DisplayName field value +func (o *MigrationServicePrecheckBody) GetDisplayName() string { + if o == nil { var ret string return ret } - return *o.Name + + return o.DisplayName } -// GetNameOk returns a tuple with the Name field value if set, nil otherwise +// GetDisplayNameOk returns a tuple with the DisplayName field value // and a boolean to check if the value has been set. -func (o *MigrationServicePrecheckBody) GetNameOk() (*string, bool) { - if o == nil || IsNil(o.Name) { +func (o *MigrationServicePrecheckBody) GetDisplayNameOk() (*string, bool) { + if o == nil { return nil, false } - return o.Name, true -} - -// HasName returns a boolean if a field has been set. 
-func (o *MigrationServicePrecheckBody) HasName() bool { - if o != nil && !IsNil(o.Name) { - return true - } - - return false + return &o.DisplayName, true } -// SetName gets a reference to the given string and assigns it to the Name field. -func (o *MigrationServicePrecheckBody) SetName(v string) { - o.Name = &v +// SetDisplayName sets field value +func (o *MigrationServicePrecheckBody) SetDisplayName(v string) { + o.DisplayName = v } // GetSources returns the Sources field value @@ -109,100 +102,52 @@ func (o *MigrationServicePrecheckBody) SetSources(v []Source) { o.Sources = v } -// GetTarget returns the Target field value if set, zero value otherwise. +// GetTarget returns the Target field value func (o *MigrationServicePrecheckBody) GetTarget() Target { - if o == nil || IsNil(o.Target) { + if o == nil { var ret Target return ret } - return *o.Target + + return o.Target } -// GetTargetOk returns a tuple with the Target field value if set, nil otherwise +// GetTargetOk returns a tuple with the Target field value // and a boolean to check if the value has been set. func (o *MigrationServicePrecheckBody) GetTargetOk() (*Target, bool) { - if o == nil || IsNil(o.Target) { + if o == nil { return nil, false } - return o.Target, true + return &o.Target, true } -// HasTarget returns a boolean if a field has been set. -func (o *MigrationServicePrecheckBody) HasTarget() bool { - if o != nil && !IsNil(o.Target) { - return true - } - - return false -} - -// SetTarget gets a reference to the given Target and assigns it to the Target field. +// SetTarget sets field value func (o *MigrationServicePrecheckBody) SetTarget(v Target) { - o.Target = &v + o.Target = v } -// GetMode returns the Mode field value if set, zero value otherwise. 
+// GetMode returns the Mode field value func (o *MigrationServicePrecheckBody) GetMode() TaskMode { - if o == nil || IsNil(o.Mode) { + if o == nil { var ret TaskMode return ret } - return *o.Mode + + return o.Mode } -// GetModeOk returns a tuple with the Mode field value if set, nil otherwise +// GetModeOk returns a tuple with the Mode field value // and a boolean to check if the value has been set. func (o *MigrationServicePrecheckBody) GetModeOk() (*TaskMode, bool) { - if o == nil || IsNil(o.Mode) { + if o == nil { return nil, false } - return o.Mode, true -} - -// HasMode returns a boolean if a field has been set. -func (o *MigrationServicePrecheckBody) HasMode() bool { - if o != nil && !IsNil(o.Mode) { - return true - } - - return false + return &o.Mode, true } -// SetMode gets a reference to the given TaskMode and assigns it to the Mode field. +// SetMode sets field value func (o *MigrationServicePrecheckBody) SetMode(v TaskMode) { - o.Mode = &v -} - -// GetFullDataMigration returns the FullDataMigration field value if set, zero value otherwise. -func (o *MigrationServicePrecheckBody) GetFullDataMigration() bool { - if o == nil || IsNil(o.FullDataMigration) { - var ret bool - return ret - } - return *o.FullDataMigration -} - -// GetFullDataMigrationOk returns a tuple with the FullDataMigration field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *MigrationServicePrecheckBody) GetFullDataMigrationOk() (*bool, bool) { - if o == nil || IsNil(o.FullDataMigration) { - return nil, false - } - return o.FullDataMigration, true -} - -// HasFullDataMigration returns a boolean if a field has been set. -func (o *MigrationServicePrecheckBody) HasFullDataMigration() bool { - if o != nil && !IsNil(o.FullDataMigration) { - return true - } - - return false -} - -// SetFullDataMigration gets a reference to the given bool and assigns it to the FullDataMigration field. 
-func (o *MigrationServicePrecheckBody) SetFullDataMigration(v bool) { - o.FullDataMigration = &v + o.Mode = v } func (o MigrationServicePrecheckBody) MarshalJSON() ([]byte, error) { @@ -215,19 +160,10 @@ func (o MigrationServicePrecheckBody) MarshalJSON() ([]byte, error) { func (o MigrationServicePrecheckBody) ToMap() (map[string]interface{}, error) { toSerialize := map[string]interface{}{} - if !IsNil(o.Name) { - toSerialize["name"] = o.Name - } + toSerialize["displayName"] = o.DisplayName toSerialize["sources"] = o.Sources - if !IsNil(o.Target) { - toSerialize["target"] = o.Target - } - if !IsNil(o.Mode) { - toSerialize["mode"] = o.Mode - } - if !IsNil(o.FullDataMigration) { - toSerialize["fullDataMigration"] = o.FullDataMigration - } + toSerialize["target"] = o.Target + toSerialize["mode"] = o.Mode for key, value := range o.AdditionalProperties { toSerialize[key] = value @@ -241,7 +177,10 @@ func (o *MigrationServicePrecheckBody) UnmarshalJSON(data []byte) (err error) { // by unmarshalling the object into a generic map with string keys and checking // that every required field exists as a key in the generic map. 
requiredProperties := []string{ + "displayName", "sources", + "target", + "mode", } allProperties := make(map[string]interface{}) @@ -271,11 +210,10 @@ func (o *MigrationServicePrecheckBody) UnmarshalJSON(data []byte) (err error) { additionalProperties := make(map[string]interface{}) if err = json.Unmarshal(data, &additionalProperties); err == nil { - delete(additionalProperties, "name") + delete(additionalProperties, "displayName") delete(additionalProperties, "sources") delete(additionalProperties, "target") delete(additionalProperties, "mode") - delete(additionalProperties, "fullDataMigration") o.AdditionalProperties = additionalProperties } diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_state.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_state.go new file mode 100644 index 00000000..56ed947e --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_state.go @@ -0,0 +1,111 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// MigrationState Overall state of a migration. - CREATING: Task is being created. - RUNNING: Task is actively running. - PAUSED: Task is paused. - CANCELED: Task has been canceled. - FAILED: Task failed with error. 
+type MigrationState string + +// List of Migration.State +const ( + MIGRATIONSTATE_CREATING MigrationState = "CREATING" + MIGRATIONSTATE_RUNNING MigrationState = "RUNNING" + MIGRATIONSTATE_PAUSED MigrationState = "PAUSED" + MIGRATIONSTATE_CANCELED MigrationState = "CANCELED" + MIGRATIONSTATE_FAILED MigrationState = "FAILED" +) + +// All allowed values of MigrationState enum +var AllowedMigrationStateEnumValues = []MigrationState{ + "CREATING", + "RUNNING", + "PAUSED", + "CANCELED", + "FAILED", +} + +func (v *MigrationState) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := MigrationState(value) + for _, existing := range AllowedMigrationStateEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = MigrationState(value) + return nil +} + +// NewMigrationStateFromValue returns a pointer to a valid MigrationState for the value passed as argument +func NewMigrationStateFromValue(v string) *MigrationState { + ev := MigrationState(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v MigrationState) IsValid() bool { + for _, existing := range AllowedMigrationStateEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to Migration.State value +func (v MigrationState) Ptr() *MigrationState { + return &v +} + +type NullableMigrationState struct { + value *MigrationState + isSet bool +} + +func (v NullableMigrationState) Get() *MigrationState { + return v.value +} + +func (v *NullableMigrationState) Set(val *MigrationState) { + v.value = val + v.isSet = true +} + +func (v NullableMigrationState) IsSet() bool { + return v.isSet +} + +func (v *NullableMigrationState) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableMigrationState(val *MigrationState) *NullableMigrationState { + return &NullableMigrationState{value: val, isSet: 
true} +} + +func (v NullableMigrationState) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableMigrationState) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task_state.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task_state.go deleted file mode 100644 index 479adf2e..00000000 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_task_state.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -TiDB Cloud Starter and Essential API - -TiDB Cloud Starter and Essential API - -API version: v1beta1 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package migration - -import ( - "encoding/json" -) - -// MigrationTaskState Overall state of a migration task. - CREATING: Task is being created. - RUNNING: Task is actively running. - PAUSED: Task is paused. - CANCELED: Task has been canceled. - FAILED: Task failed with error. 
-type MigrationTaskState string - -// List of MigrationTask.State -const ( - MIGRATIONTASKSTATE_CREATING MigrationTaskState = "CREATING" - MIGRATIONTASKSTATE_RUNNING MigrationTaskState = "RUNNING" - MIGRATIONTASKSTATE_PAUSED MigrationTaskState = "PAUSED" - MIGRATIONTASKSTATE_CANCELED MigrationTaskState = "CANCELED" - MIGRATIONTASKSTATE_FAILED MigrationTaskState = "FAILED" -) - -// All allowed values of MigrationTaskState enum -var AllowedMigrationTaskStateEnumValues = []MigrationTaskState{ - "CREATING", - "RUNNING", - "PAUSED", - "CANCELED", - "FAILED", -} - -func (v *MigrationTaskState) UnmarshalJSON(src []byte) error { - var value string - err := json.Unmarshal(src, &value) - if err != nil { - return err - } - enumTypeValue := MigrationTaskState(value) - for _, existing := range AllowedMigrationTaskStateEnumValues { - if existing == enumTypeValue { - *v = enumTypeValue - return nil - } - } - - *v = MigrationTaskState(value) - return nil -} - -// NewMigrationTaskStateFromValue returns a pointer to a valid MigrationTaskState for the value passed as argument -func NewMigrationTaskStateFromValue(v string) *MigrationTaskState { - ev := MigrationTaskState(v) - return &ev -} - -// IsValid return true if the value is valid for the enum, false otherwise -func (v MigrationTaskState) IsValid() bool { - for _, existing := range AllowedMigrationTaskStateEnumValues { - if existing == v { - return true - } - } - return false -} - -// Ptr returns reference to MigrationTask.State value -func (v MigrationTaskState) Ptr() *MigrationTaskState { - return &v -} - -type NullableMigrationTaskState struct { - value *MigrationTaskState - isSet bool -} - -func (v NullableMigrationTaskState) Get() *MigrationTaskState { - return v.value -} - -func (v *NullableMigrationTaskState) Set(val *MigrationTaskState) { - v.value = val - v.isSet = true -} - -func (v NullableMigrationTaskState) IsSet() bool { - return v.isSet -} - -func (v *NullableMigrationTaskState) Unset() { - v.value = nil - v.isSet 
= false -} - -func NewNullableMigrationTaskState(val *MigrationTaskState) *NullableMigrationTaskState { - return &NullableMigrationTaskState{value: val, isSet: true} -} - -func (v NullableMigrationTaskState) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableMigrationTaskState) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go index 51e5d39a..813ea9ab 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go @@ -20,7 +20,7 @@ var _ MappedNullable = &PrecheckItem{} // PrecheckItem struct for PrecheckItem type PrecheckItem struct { // Human-readable description of the check. - Desc *string `json:"desc,omitempty"` + Description *string `json:"description,omitempty"` // Status of this check (e.g., SUCCESS, FAILED, WARN). Status *string `json:"status,omitempty"` // Suggested solution if the check failed or warned. @@ -53,36 +53,36 @@ func NewPrecheckItemWithDefaults() *PrecheckItem { return &this } -// GetDesc returns the Desc field value if set, zero value otherwise. -func (o *PrecheckItem) GetDesc() string { - if o == nil || IsNil(o.Desc) { +// GetDescription returns the Description field value if set, zero value otherwise. +func (o *PrecheckItem) GetDescription() string { + if o == nil || IsNil(o.Description) { var ret string return ret } - return *o.Desc + return *o.Description } -// GetDescOk returns a tuple with the Desc field value if set, nil otherwise +// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise // and a boolean to check if the value has been set. 
-func (o *PrecheckItem) GetDescOk() (*string, bool) { - if o == nil || IsNil(o.Desc) { +func (o *PrecheckItem) GetDescriptionOk() (*string, bool) { + if o == nil || IsNil(o.Description) { return nil, false } - return o.Desc, true + return o.Description, true } -// HasDesc returns a boolean if a field has been set. -func (o *PrecheckItem) HasDesc() bool { - if o != nil && !IsNil(o.Desc) { +// HasDescription returns a boolean if a field has been set. +func (o *PrecheckItem) HasDescription() bool { + if o != nil && !IsNil(o.Description) { return true } return false } -// SetDesc gets a reference to the given string and assigns it to the Desc field. -func (o *PrecheckItem) SetDesc(v string) { - o.Desc = &v +// SetDescription gets a reference to the given string and assigns it to the Description field. +func (o *PrecheckItem) SetDescription(v string) { + o.Description = &v } // GetStatus returns the Status field value if set, zero value otherwise. @@ -255,8 +255,8 @@ func (o PrecheckItem) MarshalJSON() ([]byte, error) { func (o PrecheckItem) ToMap() (map[string]interface{}, error) { toSerialize := map[string]interface{}{} - if !IsNil(o.Desc) { - toSerialize["desc"] = o.Desc + if !IsNil(o.Description) { + toSerialize["description"] = o.Description } if !IsNil(o.Status) { toSerialize["status"] = o.Status @@ -295,7 +295,7 @@ func (o *PrecheckItem) UnmarshalJSON(data []byte) (err error) { additionalProperties := make(map[string]interface{}) if err = json.Unmarshal(data, &additionalProperties); err == nil { - delete(additionalProperties, "desc") + delete(additionalProperties, "description") delete(additionalProperties, "status") delete(additionalProperties, "solution") delete(additionalProperties, "reason") diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_type.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_type.go index 2967d7cc..04749b6c 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_type.go +++ 
b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_type.go @@ -14,43 +14,43 @@ import ( "encoding/json" ) -// PrecheckItemType Types of prechecks performed before starting a migration. - PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING: Check source has required dump/export privileges. - PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING: Check source has required replication privileges. - PRECHECK_ITEM_TYPE_VERSION_CHECKING: Check source database version compatibility. - PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING: Check source server_id configuration. - PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING: Check whether binlog is enabled on source. - PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING: Check binlog format (e.g., ROW) on source. - PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING: Check binlog row image setting. - PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING: Check table/schema compatibility with target. - PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING: Check binlog database-level filtering configuration. - PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING: Check concurrent connections limit/availability. - PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING: Check target database privileges. - PRECHECK_ITEM_TYPE_META_POSITION_CHECKING: Check saved meta/binlog position validity. - PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING: Check target tables are empty for Lightning load. - PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING: Check primary key settings on source tables. +// PrecheckItemType Types of prechecks performed before starting a migration. - DUMP_PRIVILEGE_CHECKING: Check source has required dump/export privileges. - REPLICATION_PRIVILEGE_CHECKING: Check source has required replication privileges. - VERSION_CHECKING: Check source database version compatibility. - SERVER_ID_CHECKING: Check source server_id configuration. - BINLOG_ENABLE_CHECKING: Check whether binlog is enabled on source. - BINLOG_FORMAT_CHECKING: Check binlog format (e.g., ROW) on source. 
- BINLOG_ROW_IMAGE_CHECKING: Check binlog row image setting. - TABLE_SCHEMA_CHECKING: Check table/schema compatibility with target. - BINLOG_DB_CHECKING: Check binlog database-level filtering configuration. - CONN_NUMBER_CHECKING: Check concurrent connections limit/availability. - TARGET_DB_PRIVILEGE_CHECKING: Check target database privileges. - META_POSITION_CHECKING: Check saved meta/binlog position validity. - LIGHTNING_TABLE_EMPTY_CHECKING: Check target tables are empty for Lightning load. - PRIMARY_KEY_CHECKING: Check primary key settings on source tables. type PrecheckItemType string // List of PrecheckItemType const ( - PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING" - PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING" - PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_VERSION_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_VERSION_CHECKING" - PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING" - PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING" - PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING" - PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING" - PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING" - PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING" - PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING" - PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING" 
- PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_META_POSITION_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_META_POSITION_CHECKING" - PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING" - PRECHECKITEMTYPE_PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING PrecheckItemType = "PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING" + PRECHECKITEMTYPE_DUMP_PRIVILEGE_CHECKING PrecheckItemType = "DUMP_PRIVILEGE_CHECKING" + PRECHECKITEMTYPE_REPLICATION_PRIVILEGE_CHECKING PrecheckItemType = "REPLICATION_PRIVILEGE_CHECKING" + PRECHECKITEMTYPE_VERSION_CHECKING PrecheckItemType = "VERSION_CHECKING" + PRECHECKITEMTYPE_SERVER_ID_CHECKING PrecheckItemType = "SERVER_ID_CHECKING" + PRECHECKITEMTYPE_BINLOG_ENABLE_CHECKING PrecheckItemType = "BINLOG_ENABLE_CHECKING" + PRECHECKITEMTYPE_BINLOG_FORMAT_CHECKING PrecheckItemType = "BINLOG_FORMAT_CHECKING" + PRECHECKITEMTYPE_BINLOG_ROW_IMAGE_CHECKING PrecheckItemType = "BINLOG_ROW_IMAGE_CHECKING" + PRECHECKITEMTYPE_TABLE_SCHEMA_CHECKING PrecheckItemType = "TABLE_SCHEMA_CHECKING" + PRECHECKITEMTYPE_BINLOG_DB_CHECKING PrecheckItemType = "BINLOG_DB_CHECKING" + PRECHECKITEMTYPE_CONN_NUMBER_CHECKING PrecheckItemType = "CONN_NUMBER_CHECKING" + PRECHECKITEMTYPE_TARGET_DB_PRIVILEGE_CHECKING PrecheckItemType = "TARGET_DB_PRIVILEGE_CHECKING" + PRECHECKITEMTYPE_META_POSITION_CHECKING PrecheckItemType = "META_POSITION_CHECKING" + PRECHECKITEMTYPE_LIGHTNING_TABLE_EMPTY_CHECKING PrecheckItemType = "LIGHTNING_TABLE_EMPTY_CHECKING" + PRECHECKITEMTYPE_PRIMARY_KEY_CHECKING PrecheckItemType = "PRIMARY_KEY_CHECKING" ) // All allowed values of PrecheckItemType enum var AllowedPrecheckItemTypeEnumValues = []PrecheckItemType{ - "PRECHECK_ITEM_TYPE_DUMP_PRIVILEGE_CHECKING", - "PRECHECK_ITEM_TYPE_REPLICATION_PRIVILEGE_CHECKING", - "PRECHECK_ITEM_TYPE_VERSION_CHECKING", - "PRECHECK_ITEM_TYPE_SERVER_ID_CHECKING", - "PRECHECK_ITEM_TYPE_BINLOG_ENABLE_CHECKING", - "PRECHECK_ITEM_TYPE_BINLOG_FORMAT_CHECKING", - 
"PRECHECK_ITEM_TYPE_BINLOG_ROW_IMAGE_CHECKING", - "PRECHECK_ITEM_TYPE_TABLE_SCHEMA_CHECKING", - "PRECHECK_ITEM_TYPE_BINLOG_DB_CHECKING", - "PRECHECK_ITEM_TYPE_CONN_NUMBER_CHECKING", - "PRECHECK_ITEM_TYPE_TARGET_DB_PRIVILEGE_CHECKING", - "PRECHECK_ITEM_TYPE_META_POSITION_CHECKING", - "PRECHECK_ITEM_TYPE_LIGHTNING_TABLE_EMPTY_CHECKING", - "PRECHECK_ITEM_TYPE_PRIMARY_KEY_CHECKING", + "DUMP_PRIVILEGE_CHECKING", + "REPLICATION_PRIVILEGE_CHECKING", + "VERSION_CHECKING", + "SERVER_ID_CHECKING", + "BINLOG_ENABLE_CHECKING", + "BINLOG_FORMAT_CHECKING", + "BINLOG_ROW_IMAGE_CHECKING", + "TABLE_SCHEMA_CHECKING", + "BINLOG_DB_CHECKING", + "CONN_NUMBER_CHECKING", + "TARGET_DB_PRIVILEGE_CHECKING", + "META_POSITION_CHECKING", + "LIGHTNING_TABLE_EMPTY_CHECKING", + "PRIMARY_KEY_CHECKING", } func (v *PrecheckItemType) UnmarshalJSON(src []byte) error { diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule.go index efb21ed7..8b0dc20c 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule.go @@ -22,7 +22,7 @@ type RouteRule struct { // Source table pattern to match. SourceTable *RouteRuleSource `json:"sourceTable,omitempty"` // Target table to route to. - TargetTable *RouteRuleTarget `json:"targetTable,omitempty"` + TargetTable *Table `json:"targetTable,omitempty"` AdditionalProperties map[string]interface{} } @@ -78,9 +78,9 @@ func (o *RouteRule) SetSourceTable(v RouteRuleSource) { } // GetTargetTable returns the TargetTable field value if set, zero value otherwise. 
-func (o *RouteRule) GetTargetTable() RouteRuleTarget { +func (o *RouteRule) GetTargetTable() Table { if o == nil || IsNil(o.TargetTable) { - var ret RouteRuleTarget + var ret Table return ret } return *o.TargetTable @@ -88,7 +88,7 @@ func (o *RouteRule) GetTargetTable() RouteRuleTarget { // GetTargetTableOk returns a tuple with the TargetTable field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *RouteRule) GetTargetTableOk() (*RouteRuleTarget, bool) { +func (o *RouteRule) GetTargetTableOk() (*Table, bool) { if o == nil || IsNil(o.TargetTable) { return nil, false } @@ -104,8 +104,8 @@ func (o *RouteRule) HasTargetTable() bool { return false } -// SetTargetTable gets a reference to the given RouteRuleTarget and assigns it to the TargetTable field. -func (o *RouteRule) SetTargetTable(v RouteRuleTarget) { +// SetTargetTable gets a reference to the given Table and assigns it to the TargetTable field. +func (o *RouteRule) SetTargetTable(v Table) { o.TargetTable = &v } diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_source.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_source.go index 99028000..bad6131c 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_source.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_source.go @@ -29,7 +29,7 @@ type Source struct { // Starting binlog file name for incremental sync. BinlogName NullableString `json:"binlogName,omitempty"` // Starting binlog position for incremental sync. - BinlogPos NullableInt32 `json:"binlogPos,omitempty"` + BinlogPosition NullableInt32 `json:"binlogPosition,omitempty"` // Starting GTID set for incremental sync. BinlogGtid NullableString `json:"binlogGtid,omitempty"` // Source type (e.g., MySQL). @@ -189,47 +189,47 @@ func (o *Source) UnsetBinlogName() { o.BinlogName.Unset() } -// GetBinlogPos returns the BinlogPos field value if set, zero value otherwise (both if not set or set to explicit null). 
-func (o *Source) GetBinlogPos() int32 { - if o == nil || IsNil(o.BinlogPos.Get()) { +// GetBinlogPosition returns the BinlogPosition field value if set, zero value otherwise (both if not set or set to explicit null). +func (o *Source) GetBinlogPosition() int32 { + if o == nil || IsNil(o.BinlogPosition.Get()) { var ret int32 return ret } - return *o.BinlogPos.Get() + return *o.BinlogPosition.Get() } -// GetBinlogPosOk returns a tuple with the BinlogPos field value if set, nil otherwise +// GetBinlogPositionOk returns a tuple with the BinlogPosition field value if set, nil otherwise // and a boolean to check if the value has been set. // NOTE: If the value is an explicit nil, `nil, true` will be returned -func (o *Source) GetBinlogPosOk() (*int32, bool) { +func (o *Source) GetBinlogPositionOk() (*int32, bool) { if o == nil { return nil, false } - return o.BinlogPos.Get(), o.BinlogPos.IsSet() + return o.BinlogPosition.Get(), o.BinlogPosition.IsSet() } -// HasBinlogPos returns a boolean if a field has been set. -func (o *Source) HasBinlogPos() bool { - if o != nil && o.BinlogPos.IsSet() { +// HasBinlogPosition returns a boolean if a field has been set. +func (o *Source) HasBinlogPosition() bool { + if o != nil && o.BinlogPosition.IsSet() { return true } return false } -// SetBinlogPos gets a reference to the given NullableInt32 and assigns it to the BinlogPos field. -func (o *Source) SetBinlogPos(v int32) { - o.BinlogPos.Set(&v) +// SetBinlogPosition gets a reference to the given NullableInt32 and assigns it to the BinlogPosition field. 
+func (o *Source) SetBinlogPosition(v int32) { + o.BinlogPosition.Set(&v) } -// SetBinlogPosNil sets the value for BinlogPos to be an explicit nil -func (o *Source) SetBinlogPosNil() { - o.BinlogPos.Set(nil) +// SetBinlogPositionNil sets the value for BinlogPosition to be an explicit nil +func (o *Source) SetBinlogPositionNil() { + o.BinlogPosition.Set(nil) } -// UnsetBinlogPos ensures that no value is present for BinlogPos, not even an explicit nil -func (o *Source) UnsetBinlogPos() { - o.BinlogPos.Unset() +// UnsetBinlogPosition ensures that no value is present for BinlogPosition, not even an explicit nil +func (o *Source) UnsetBinlogPosition() { + o.BinlogPosition.Unset() } // GetBinlogGtid returns the BinlogGtid field value if set, zero value otherwise (both if not set or set to explicit null). @@ -319,8 +319,8 @@ func (o Source) ToMap() (map[string]interface{}, error) { if o.BinlogName.IsSet() { toSerialize["binlogName"] = o.BinlogName.Get() } - if o.BinlogPos.IsSet() { - toSerialize["binlogPos"] = o.BinlogPos.Get() + if o.BinlogPosition.IsSet() { + toSerialize["binlogPosition"] = o.BinlogPosition.Get() } if o.BinlogGtid.IsSet() { toSerialize["binlogGtid"] = o.BinlogGtid.Get() @@ -374,7 +374,7 @@ func (o *Source) UnmarshalJSON(data []byte) (err error) { delete(additionalProperties, "baRules") delete(additionalProperties, "routeRules") delete(additionalProperties, "binlogName") - delete(additionalProperties, "binlogPos") + delete(additionalProperties, "binlogPosition") delete(additionalProperties, "binlogGtid") delete(additionalProperties, "sourceType") o.AdditionalProperties = additionalProperties diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go index 2148bcf3..cb1feb4e 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go @@ -14,19 +14,19 @@ import 
( "encoding/json" ) -// SourceSourceType The source database type. - SOURCE_TYPE_MYSQL: Self-managed MySQL. - SOURCE_TYPE_ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL. +// SourceSourceType The source database type. - MYSQL: Self-managed MySQL. - ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL. type SourceSourceType string // List of Source.SourceType const ( - SOURCESOURCETYPE_SOURCE_TYPE_MYSQL SourceSourceType = "SOURCE_TYPE_MYSQL" - SOURCESOURCETYPE_SOURCE_TYPE_ALICLOUD_RDS_MYSQL SourceSourceType = "SOURCE_TYPE_ALICLOUD_RDS_MYSQL" + SOURCESOURCETYPE_MYSQL SourceSourceType = "MYSQL" + SOURCESOURCETYPE_ALICLOUD_RDS_MYSQL SourceSourceType = "ALICLOUD_RDS_MYSQL" ) // All allowed values of SourceSourceType enum var AllowedSourceSourceTypeEnumValues = []SourceSourceType{ - "SOURCE_TYPE_MYSQL", - "SOURCE_TYPE_ALICLOUD_RDS_MYSQL", + "MYSQL", + "ALICLOUD_RDS_MYSQL", } func (v *SourceSourceType) UnmarshalJSON(src []byte) error { diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_stage.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_stage.go index 2bfba751..0c3ab74b 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_stage.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_stage.go @@ -14,25 +14,25 @@ import ( "encoding/json" ) -// SubTaskStage The high-level lifecycle stage of a subtask. - STAGE_RUNNING: Subtask is running. - STAGE_PAUSED: Subtask is paused. - STAGE_FAILED: Subtask failed. - STAGE_FINISHED: Subtask finished successfully. - STAGE_UNKNOWN: Subtask stage is unknown. +// SubTaskStage The high-level lifecycle stage of a subtask. - RUNNING: Subtask is running. - PAUSED: Subtask is paused. - FAILED: Subtask failed. - FINISHED: Subtask finished successfully. - UNKNOWN: Subtask stage is unknown. 
type SubTaskStage string // List of SubTask.Stage const ( - SUBTASKSTAGE_STAGE_RUNNING SubTaskStage = "STAGE_RUNNING" - SUBTASKSTAGE_STAGE_PAUSED SubTaskStage = "STAGE_PAUSED" - SUBTASKSTAGE_STAGE_FAILED SubTaskStage = "STAGE_FAILED" - SUBTASKSTAGE_STAGE_FINISHED SubTaskStage = "STAGE_FINISHED" - SUBTASKSTAGE_STAGE_UNKNOWN SubTaskStage = "STAGE_UNKNOWN" + SUBTASKSTAGE_RUNNING SubTaskStage = "RUNNING" + SUBTASKSTAGE_PAUSED SubTaskStage = "PAUSED" + SUBTASKSTAGE_FAILED SubTaskStage = "FAILED" + SUBTASKSTAGE_FINISHED SubTaskStage = "FINISHED" + SUBTASKSTAGE_UNKNOWN SubTaskStage = "UNKNOWN" ) // All allowed values of SubTaskStage enum var AllowedSubTaskStageEnumValues = []SubTaskStage{ - "STAGE_RUNNING", - "STAGE_PAUSED", - "STAGE_FAILED", - "STAGE_FINISHED", - "STAGE_UNKNOWN", + "RUNNING", + "PAUSED", + "FAILED", + "FINISHED", + "UNKNOWN", } func (v *SubTaskStage) UnmarshalJSON(src []byte) error { diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_step.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_step.go index 37baa0e1..006e9d99 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_step.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_sub_task_step.go @@ -14,21 +14,21 @@ import ( "encoding/json" ) -// SubTaskStep The current step within a subtask. - STEP_DUMP: Dump/export data from source. - STEP_LOAD: Load/import data into target. - STEP_SYNC: Sync/replicate binlog changes. +// SubTaskStep The current step within a subtask. - DUMP: Dump/export data from source. - LOAD: Load/import data into target. - SYNC: Sync/replicate binlog changes. 
type SubTaskStep string // List of SubTask.Step const ( - SUBTASKSTEP_STEP_DUMP SubTaskStep = "STEP_DUMP" - SUBTASKSTEP_STEP_LOAD SubTaskStep = "STEP_LOAD" - SUBTASKSTEP_STEP_SYNC SubTaskStep = "STEP_SYNC" + SUBTASKSTEP_DUMP SubTaskStep = "DUMP" + SUBTASKSTEP_LOAD SubTaskStep = "LOAD" + SUBTASKSTEP_SYNC SubTaskStep = "SYNC" ) // All allowed values of SubTaskStep enum var AllowedSubTaskStepEnumValues = []SubTaskStep{ - "STEP_DUMP", - "STEP_LOAD", - "STEP_SYNC", + "DUMP", + "LOAD", + "SYNC", } func (v *SubTaskStep) UnmarshalJSON(src []byte) error { diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule_target.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_table.go similarity index 61% rename from pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule_target.go rename to pkg/tidbcloud/v1beta1/serverless/migration/model_table.go index 5e45c109..5e5baa59 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_route_rule_target.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_table.go @@ -14,39 +14,39 @@ import ( "encoding/json" ) -// checks if the RouteRuleTarget type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &RouteRuleTarget{} +// checks if the Table type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Table{} -// RouteRuleTarget struct for RouteRuleTarget -type RouteRuleTarget struct { - // Target schema name. +// Table struct for Table +type Table struct { + // Schema name. Schema *string `json:"schema,omitempty"` - // Target table name. + // Table name. 
Table *string `json:"table,omitempty"` AdditionalProperties map[string]interface{} } -type _RouteRuleTarget RouteRuleTarget +type _Table Table -// NewRouteRuleTarget instantiates a new RouteRuleTarget object +// NewTable instantiates a new Table object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewRouteRuleTarget() *RouteRuleTarget { - this := RouteRuleTarget{} +func NewTable() *Table { + this := Table{} return &this } -// NewRouteRuleTargetWithDefaults instantiates a new RouteRuleTarget object +// NewTableWithDefaults instantiates a new Table object // This constructor will only assign default values to properties that have it defined, // but it doesn't guarantee that properties required by API are set -func NewRouteRuleTargetWithDefaults() *RouteRuleTarget { - this := RouteRuleTarget{} +func NewTableWithDefaults() *Table { + this := Table{} return &this } // GetSchema returns the Schema field value if set, zero value otherwise. -func (o *RouteRuleTarget) GetSchema() string { +func (o *Table) GetSchema() string { if o == nil || IsNil(o.Schema) { var ret string return ret @@ -56,7 +56,7 @@ func (o *RouteRuleTarget) GetSchema() string { // GetSchemaOk returns a tuple with the Schema field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *RouteRuleTarget) GetSchemaOk() (*string, bool) { +func (o *Table) GetSchemaOk() (*string, bool) { if o == nil || IsNil(o.Schema) { return nil, false } @@ -64,7 +64,7 @@ func (o *RouteRuleTarget) GetSchemaOk() (*string, bool) { } // HasSchema returns a boolean if a field has been set. 
-func (o *RouteRuleTarget) HasSchema() bool { +func (o *Table) HasSchema() bool { if o != nil && !IsNil(o.Schema) { return true } @@ -73,12 +73,12 @@ func (o *RouteRuleTarget) HasSchema() bool { } // SetSchema gets a reference to the given string and assigns it to the Schema field. -func (o *RouteRuleTarget) SetSchema(v string) { +func (o *Table) SetSchema(v string) { o.Schema = &v } // GetTable returns the Table field value if set, zero value otherwise. -func (o *RouteRuleTarget) GetTable() string { +func (o *Table) GetTable() string { if o == nil || IsNil(o.Table) { var ret string return ret @@ -88,7 +88,7 @@ func (o *RouteRuleTarget) GetTable() string { // GetTableOk returns a tuple with the Table field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *RouteRuleTarget) GetTableOk() (*string, bool) { +func (o *Table) GetTableOk() (*string, bool) { if o == nil || IsNil(o.Table) { return nil, false } @@ -96,7 +96,7 @@ func (o *RouteRuleTarget) GetTableOk() (*string, bool) { } // HasTable returns a boolean if a field has been set. -func (o *RouteRuleTarget) HasTable() bool { +func (o *Table) HasTable() bool { if o != nil && !IsNil(o.Table) { return true } @@ -105,11 +105,11 @@ func (o *RouteRuleTarget) HasTable() bool { } // SetTable gets a reference to the given string and assigns it to the Table field. 
-func (o *RouteRuleTarget) SetTable(v string) { +func (o *Table) SetTable(v string) { o.Table = &v } -func (o RouteRuleTarget) MarshalJSON() ([]byte, error) { +func (o Table) MarshalJSON() ([]byte, error) { toSerialize, err := o.ToMap() if err != nil { return []byte{}, err @@ -117,7 +117,7 @@ func (o RouteRuleTarget) MarshalJSON() ([]byte, error) { return json.Marshal(toSerialize) } -func (o RouteRuleTarget) ToMap() (map[string]interface{}, error) { +func (o Table) ToMap() (map[string]interface{}, error) { toSerialize := map[string]interface{}{} if !IsNil(o.Schema) { toSerialize["schema"] = o.Schema @@ -133,16 +133,16 @@ func (o RouteRuleTarget) ToMap() (map[string]interface{}, error) { return toSerialize, nil } -func (o *RouteRuleTarget) UnmarshalJSON(data []byte) (err error) { - varRouteRuleTarget := _RouteRuleTarget{} +func (o *Table) UnmarshalJSON(data []byte) (err error) { + varTable := _Table{} - err = json.Unmarshal(data, &varRouteRuleTarget) + err = json.Unmarshal(data, &varTable) if err != nil { return err } - *o = RouteRuleTarget(varRouteRuleTarget) + *o = Table(varTable) additionalProperties := make(map[string]interface{}) @@ -155,38 +155,38 @@ func (o *RouteRuleTarget) UnmarshalJSON(data []byte) (err error) { return err } -type NullableRouteRuleTarget struct { - value *RouteRuleTarget +type NullableTable struct { + value *Table isSet bool } -func (v NullableRouteRuleTarget) Get() *RouteRuleTarget { +func (v NullableTable) Get() *Table { return v.value } -func (v *NullableRouteRuleTarget) Set(val *RouteRuleTarget) { +func (v *NullableTable) Set(val *Table) { v.value = val v.isSet = true } -func (v NullableRouteRuleTarget) IsSet() bool { +func (v NullableTable) IsSet() bool { return v.isSet } -func (v *NullableRouteRuleTarget) Unset() { +func (v *NullableTable) Unset() { v.value = nil v.isSet = false } -func NewNullableRouteRuleTarget(val *RouteRuleTarget) *NullableRouteRuleTarget { - return &NullableRouteRuleTarget{value: val, isSet: true} +func 
NewNullableTable(val *Table) *NullableTable { + return &NullableTable{value: val, isSet: true} } -func (v NullableRouteRuleTarget) MarshalJSON() ([]byte, error) { +func (v NullableTable) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } -func (v *NullableRouteRuleTarget) UnmarshalJSON(src []byte) error { +func (v *NullableTable) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) } diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_target.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_target.go index 43ae7dc2..5b5458d1 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_target.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_target.go @@ -12,6 +12,7 @@ package migration import ( "encoding/json" + "fmt" ) // checks if the Target type satisfies the MappedNullable interface at compile time @@ -20,9 +21,9 @@ var _ MappedNullable = &Target{} // Target struct for Target type Target struct { // Target database user. - User *string `json:"user,omitempty"` + User string `json:"user"` // Target database password. - Password *string `json:"password,omitempty"` + Password string `json:"password"` AdditionalProperties map[string]interface{} } @@ -32,8 +33,10 @@ type _Target Target // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed -func NewTarget() *Target { +func NewTarget(user string, password string) *Target { this := Target{} + this.User = user + this.Password = password return &this } @@ -45,68 +48,52 @@ func NewTargetWithDefaults() *Target { return &this } -// GetUser returns the User field value if set, zero value otherwise. 
+// GetUser returns the User field value func (o *Target) GetUser() string { - if o == nil || IsNil(o.User) { + if o == nil { var ret string return ret } - return *o.User + + return o.User } -// GetUserOk returns a tuple with the User field value if set, nil otherwise +// GetUserOk returns a tuple with the User field value // and a boolean to check if the value has been set. func (o *Target) GetUserOk() (*string, bool) { - if o == nil || IsNil(o.User) { + if o == nil { return nil, false } - return o.User, true -} - -// HasUser returns a boolean if a field has been set. -func (o *Target) HasUser() bool { - if o != nil && !IsNil(o.User) { - return true - } - - return false + return &o.User, true } -// SetUser gets a reference to the given string and assigns it to the User field. +// SetUser sets field value func (o *Target) SetUser(v string) { - o.User = &v + o.User = v } -// GetPassword returns the Password field value if set, zero value otherwise. +// GetPassword returns the Password field value func (o *Target) GetPassword() string { - if o == nil || IsNil(o.Password) { + if o == nil { var ret string return ret } - return *o.Password + + return o.Password } -// GetPasswordOk returns a tuple with the Password field value if set, nil otherwise +// GetPasswordOk returns a tuple with the Password field value // and a boolean to check if the value has been set. func (o *Target) GetPasswordOk() (*string, bool) { - if o == nil || IsNil(o.Password) { + if o == nil { return nil, false } - return o.Password, true + return &o.Password, true } -// HasPassword returns a boolean if a field has been set. -func (o *Target) HasPassword() bool { - if o != nil && !IsNil(o.Password) { - return true - } - - return false -} - -// SetPassword gets a reference to the given string and assigns it to the Password field. 
+// SetPassword sets field value func (o *Target) SetPassword(v string) { - o.Password = &v + o.Password = v } func (o Target) MarshalJSON() ([]byte, error) { @@ -119,12 +106,8 @@ func (o Target) MarshalJSON() ([]byte, error) { func (o Target) ToMap() (map[string]interface{}, error) { toSerialize := map[string]interface{}{} - if !IsNil(o.User) { - toSerialize["user"] = o.User - } - if !IsNil(o.Password) { - toSerialize["password"] = o.Password - } + toSerialize["user"] = o.User + toSerialize["password"] = o.Password for key, value := range o.AdditionalProperties { toSerialize[key] = value @@ -134,6 +117,28 @@ func (o Target) ToMap() (map[string]interface{}, error) { } func (o *Target) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "user", + "password", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err + } + + for _, requiredProperty := range requiredProperties { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + varTarget := _Target{} err = json.Unmarshal(data, &varTarget) diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_task_mode.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_task_mode.go index 8dd44bf8..ba5c04bf 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_task_mode.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_task_mode.go @@ -14,19 +14,19 @@ import ( "encoding/json" ) -// TaskMode Migration task mode. - MODE_ALL: Full + incremental migration (all phases). - MODE_INCREMENTAL: Incremental-only migration (replication). +// TaskMode Migration task mode. 
- ALL: Full + incremental migration (all phases). - INCREMENTAL: Incremental-only migration (replication). type TaskMode string // List of TaskMode const ( - TASKMODE_MODE_ALL TaskMode = "MODE_ALL" - TASKMODE_MODE_INCREMENTAL TaskMode = "MODE_INCREMENTAL" + TASKMODE_ALL TaskMode = "ALL" + TASKMODE_INCREMENTAL TaskMode = "INCREMENTAL" ) // All allowed values of TaskMode enum var AllowedTaskModeEnumValues = []TaskMode{ - "MODE_ALL", - "MODE_INCREMENTAL", + "ALL", + "INCREMENTAL", } func (v *TaskMode) UnmarshalJSON(src []byte) error { From a2218a2115eea80b2f83f9803c6e036180b6dd19 Mon Sep 17 00:00:00 2001 From: yangxin Date: Mon, 24 Nov 2025 16:25:36 +0800 Subject: [PATCH 03/19] fix build --- internal/cli/serverless/migration/create.go | 77 +++++++------------- internal/cli/serverless/migration/helpers.go | 38 +++------- internal/cli/serverless/migration/list.go | 8 +- internal/mock/api_client.go | 42 +++++------ internal/service/cloud/api_client.go | 28 +++---- internal/service/cloud/logic.go | 16 ++-- 6 files changed, 87 insertions(+), 122 deletions(-) diff --git a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go index b33c372d..52c3227b 100644 --- a/internal/cli/serverless/migration/create.go +++ b/internal/cli/serverless/migration/create.go @@ -30,7 +30,6 @@ func (c CreateOpts) NonInteractiveFlags() []string { flag.MigrationSources, flag.MigrationTarget, flag.MigrationMode, - flag.MigrationFullData, } } @@ -38,6 +37,9 @@ func (c CreateOpts) RequiredFlags() []string { return []string{ flag.ClusterID, flag.MigrationSources, + flag.DisplayName, + flag.MigrationTarget, + flag.MigrationMode, } } @@ -65,12 +67,12 @@ func CreateCmd(h *internal.Helper) *cobra.Command { var cmd = &cobra.Command{ Use: "create", - Short: "Create a migration task", + Short: "Create a migration", Args: cobra.NoArgs, - Example: fmt.Sprintf(` Create a migration task in interactive mode: + Example: fmt.Sprintf(` Create a migration in interactive mode: $ 
%[1]s serverless migration create - Create a migration task in non-interactive mode: + Create a migration in non-interactive mode: $ %[1]s serverless migration create -c --sources '' --target '' Run migration precheck only with shared inputs: @@ -85,8 +87,7 @@ func CreateCmd(h *internal.Helper) *cobra.Command { } ctx := cmd.Context() - var clusterID, name, sourcesStr, targetStr, modeStr, fullDataStr string - var fullDataPtr *bool + var clusterID, name, sourcesStr, targetStr, modeStr string precheckOnly, err := cmd.Flags().GetBool(flag.MigrationPrecheckOnly) if err != nil { return errors.Trace(err) @@ -105,7 +106,7 @@ func CreateCmd(h *internal.Helper) *cobra.Command { } clusterID = cluster.ID - inputs := []string{flag.DisplayName, flag.MigrationSources, flag.MigrationTarget, flag.MigrationMode, flag.MigrationFullData} + inputs := []string{flag.DisplayName, flag.MigrationSources, flag.MigrationTarget, flag.MigrationMode} textInput, err := ui.InitialInputModel(inputs, migrationInputDescription) if err != nil { return err @@ -114,11 +115,6 @@ func CreateCmd(h *internal.Helper) *cobra.Command { sourcesStr = textInput.Inputs[1].Value() targetStr = textInput.Inputs[2].Value() modeStr = textInput.Inputs[3].Value() - fullDataStr = textInput.Inputs[4].Value() - fullDataPtr, err = parseFullData(fullDataStr) - if err != nil { - return err - } } else { var err error clusterID, err = cmd.Flags().GetString(flag.ClusterID) @@ -141,15 +137,11 @@ func CreateCmd(h *internal.Helper) *cobra.Command { if err != nil { return errors.Trace(err) } - if cmd.Flags().Changed(flag.MigrationFullData) { - fullDataVal, err := cmd.Flags().GetBool(flag.MigrationFullData) - if err != nil { - return errors.Trace(err) - } - fullDataPtr = &fullDataVal - } } + if strings.TrimSpace(name) == "" { + return errors.New("display name is required") + } sources, err := parseMigrationSources(sourcesStr) if err != nil { return err @@ -163,27 +155,17 @@ func CreateCmd(h *internal.Helper) *cobra.Command { return 
err } - createBody := &pkgmigration.MigrationServiceCreateTaskBody{ - Sources: sources, + createBody := &pkgmigration.MigrationServiceCreateMigrationBody{ + DisplayName: name, + Sources: sources, + Target: target, + Mode: mode, } precheckBody := &pkgmigration.MigrationServicePrecheckBody{ - Sources: sources, - } - if name != "" { - createBody.Name = &name - precheckBody.Name = &name - } - if target != nil { - createBody.Target = target - precheckBody.Target = target - } - if mode != nil { - createBody.Mode = mode - precheckBody.Mode = mode - } - if fullDataPtr != nil { - createBody.FullDataMigration = fullDataPtr - precheckBody.FullDataMigration = fullDataPtr + DisplayName: name, + Sources: sources, + Target: target, + Mode: mode, } if precheckOnly { @@ -195,26 +177,23 @@ func CreateCmd(h *internal.Helper) *cobra.Command { return errors.Trace(err) } - taskID := "" - if resp.Id != nil { - taskID = *resp.Id - } else if resp.Name != nil { - taskID = *resp.Name + taskID := ptrString(resp.MigrationId) + if taskID == "" { + taskID = ptrString(resp.DisplayName) } if taskID == "" { taskID = "" } - fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration task %s created", taskID)) + fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration %s created", taskID)) return nil }, } cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "The ID of the target cluster.") - cmd.Flags().StringP(flag.DisplayName, flag.DisplayNameShort, "", "Display name for the migration task.") + cmd.Flags().StringP(flag.DisplayName, flag.DisplayNameShort, "", "Display name for the migration.") cmd.Flags().String(flag.MigrationSources, "", "Sources definition in JSON. Use \"ticloud serverless migration template --type sources\" for a template.") cmd.Flags().String(flag.MigrationTarget, "", "Target definition in JSON. 
Use \"ticloud serverless migration template --type target\" for a template.") cmd.Flags().String(flag.MigrationMode, "", fmt.Sprintf("Migration mode, one of %v.", taskModeValues())) - cmd.Flags().Bool(flag.MigrationFullData, false, "Migrate all user data (equivalent to enabling every non-system database).") cmd.Flags().Bool(flag.MigrationPrecheckOnly, false, "Run a migration precheck with the provided inputs and exit without creating a task.") return cmd @@ -227,10 +206,10 @@ func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clu if err != nil { return errors.Trace(err) } - if resp.Id == nil || *resp.Id == "" { + if resp.PrecheckId == nil || *resp.PrecheckId == "" { return errors.New("precheck created but ID is empty") } - precheckID := *resp.Id + precheckID := *resp.PrecheckId fmt.Fprintf(h.IOStreams.Out, "migration precheck %s created, polling results...\n", precheckID) ticker := time.NewTicker(precheckPollInterval) @@ -290,7 +269,7 @@ func printPrecheckSummary(id, status string, result *pkgmigration.MigrationPrech rows = append(rows, output.Row{ precheckItemType(item.Type), ptrString(item.Status), - ptrString(item.Desc), + ptrString(item.Description), ptrString(item.Reason), ptrString(item.Solution), }) diff --git a/internal/cli/serverless/migration/helpers.go b/internal/cli/serverless/migration/helpers.go index 61738db1..65875825 100644 --- a/internal/cli/serverless/migration/helpers.go +++ b/internal/cli/serverless/migration/helpers.go @@ -17,7 +17,6 @@ package migration import ( "encoding/json" "fmt" - "strconv" "strings" "github.com/juju/errors" @@ -27,11 +26,10 @@ import ( ) var migrationInputDescription = map[string]string{ - flag.DisplayName: "Optional display name for the migration task/precheck.", - flag.MigrationSources: "Sources definition in JSON. Use \"ticloud serverless migration template --type sources\" as a reference.", - flag.MigrationTarget: "Target definition in JSON. 
Use \"ticloud serverless migration template --type target\" as a reference.", - flag.MigrationMode: "Migration mode, one of MODE_ALL or MODE_INCREMENTAL. Leave blank to use the server default.", - flag.MigrationFullData: "Whether to migrate all user data. Enter true, false, or leave blank to use the server default.", + flag.DisplayName: "Display name for the migration.", + flag.MigrationSources: "Sources definition in JSON. Use \"ticloud serverless migration template --type sources\" as a reference.", + flag.MigrationTarget: "Target definition in JSON. Use \"ticloud serverless migration template --type target\" as a reference.", + flag.MigrationMode: "Migration mode, one of MODE_ALL or MODE_INCREMENTAL.", } func parseMigrationSources(value string) ([]pkgmigration.Source, error) { @@ -49,22 +47,22 @@ func parseMigrationSources(value string) ([]pkgmigration.Source, error) { return sources, nil } -func parseMigrationTarget(value string) (*pkgmigration.Target, error) { +func parseMigrationTarget(value string) (pkgmigration.Target, error) { trimmed := strings.TrimSpace(value) if trimmed == "" { - return nil, nil + return pkgmigration.Target{}, errors.New("target is required, use --target or provide it in interactive mode") } var target pkgmigration.Target if err := json.Unmarshal([]byte(trimmed), &target); err != nil { - return nil, errors.Annotate(err, "invalid target JSON") + return pkgmigration.Target{}, errors.Annotate(err, "invalid target JSON") } - return &target, nil + return target, nil } -func parseMigrationMode(value string) (*pkgmigration.TaskMode, error) { +func parseMigrationMode(value string) (pkgmigration.TaskMode, error) { trimmed := strings.TrimSpace(value) if trimmed == "" { - return nil, nil + return "", errors.New("mode is required, use --mode or provide it in interactive mode") } normalized := strings.ToUpper(trimmed) if !strings.HasPrefix(normalized, "MODE_") { @@ -73,22 +71,10 @@ func parseMigrationMode(value string) (*pkgmigration.TaskMode, 
error) { mode := pkgmigration.TaskMode(normalized) for _, allowed := range pkgmigration.AllowedTaskModeEnumValues { if mode == allowed { - return &mode, nil + return mode, nil } } - return nil, errors.Errorf("invalid mode %q, allowed values: %s", value, strings.Join(taskModeValues(), ", ")) -} - -func parseFullData(value string) (*bool, error) { - trimmed := strings.TrimSpace(value) - if trimmed == "" { - return nil, nil - } - boolValue, err := strconv.ParseBool(trimmed) - if err != nil { - return nil, errors.Annotate(err, "invalid boolean value for full-data") - } - return &boolValue, nil + return "", errors.Errorf("invalid mode %q, allowed values: %s", value, strings.Join(taskModeValues(), ", ")) } func taskModeValues() []string { diff --git a/internal/cli/serverless/migration/list.go b/internal/cli/serverless/migration/list.go index 6d4da7f7..c883a333 100644 --- a/internal/cli/serverless/migration/list.go +++ b/internal/cli/serverless/migration/list.go @@ -45,7 +45,7 @@ func ListCmd(h *internal.Helper) *cobra.Command { var cmd = &cobra.Command{ Use: "list", - Short: "List migration tasks", + Short: "List migrations", Aliases: []string{"ls"}, Args: cobra.NoArgs, Example: fmt.Sprintf(` List migrations in interactive mode: @@ -106,9 +106,9 @@ func ListCmd(h *internal.Helper) *cobra.Command { columns := []output.Column{"ID", "Name", "Mode", "State", "CreatedAt"} var rows []output.Row - for _, task := range resp.Tasks { - id := safeString(task.Id) - name := safeString(task.Name) + for _, task := range resp.Migrations { + id := safeString(task.MigrationId) + name := safeString(task.DisplayName) if name == "" { name = id } diff --git a/internal/mock/api_client.go b/internal/mock/api_client.go index b80c8e29..de2232f9 100644 --- a/internal/mock/api_client.go +++ b/internal/mock/api_client.go @@ -109,23 +109,23 @@ func (_m *TiDBCloudClient) CancelMigrationPrecheck(ctx context.Context, clusterI } // CancelMigrationTask provides a mock function with given fields: ctx, 
clusterId, taskId -func (_m *TiDBCloudClient) CancelMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.MigrationTask, error) { +func (_m *TiDBCloudClient) CancelMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { ret := _m.Called(ctx, clusterId, taskId) if len(ret) == 0 { panic("no return value specified for CancelMigrationTask") } - var r0 *migration.MigrationTask + var r0 *migration.Migration var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.MigrationTask, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.Migration, error)); ok { return rf(ctx, clusterId, taskId) } - if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.MigrationTask); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.Migration); ok { r0 = rf(ctx, clusterId, taskId) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*migration.MigrationTask) + r0 = ret.Get(0).(*migration.Migration) } } @@ -385,27 +385,27 @@ func (_m *TiDBCloudClient) CreateMigrationPrecheck(ctx context.Context, clusterI } // CreateMigrationTask provides a mock function with given fields: ctx, clusterId, body -func (_m *TiDBCloudClient) CreateMigrationTask(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateTaskBody) (*migration.MigrationTask, error) { +func (_m *TiDBCloudClient) CreateMigrationTask(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error) { ret := _m.Called(ctx, clusterId, body) if len(ret) == 0 { panic("no return value specified for CreateMigrationTask") } - var r0 *migration.MigrationTask + var r0 *migration.Migration var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServiceCreateTaskBody) (*migration.MigrationTask, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, 
string, *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error)); ok { return rf(ctx, clusterId, body) } - if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServiceCreateTaskBody) *migration.MigrationTask); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServiceCreateMigrationBody) *migration.Migration); ok { r0 = rf(ctx, clusterId, body) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*migration.MigrationTask) + r0 = ret.Get(0).(*migration.Migration) } } - if rf, ok := ret.Get(1).(func(context.Context, string, *migration.MigrationServiceCreateTaskBody) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, string, *migration.MigrationServiceCreateMigrationBody) error); ok { r1 = rf(ctx, clusterId, body) } else { r1 = ret.Error(1) @@ -1045,23 +1045,23 @@ func (_m *TiDBCloudClient) GetMigrationPrecheck(ctx context.Context, clusterId s } // GetMigrationTask provides a mock function with given fields: ctx, clusterId, taskId -func (_m *TiDBCloudClient) GetMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.MigrationTask, error) { +func (_m *TiDBCloudClient) GetMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { ret := _m.Called(ctx, clusterId, taskId) if len(ret) == 0 { panic("no return value specified for GetMigrationTask") } - var r0 *migration.MigrationTask + var r0 *migration.Migration var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.MigrationTask, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.Migration, error)); ok { return rf(ctx, clusterId, taskId) } - if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.MigrationTask); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.Migration); ok { r0 = rf(ctx, clusterId, taskId) } else { if ret.Get(0) != nil { - r0 = 
ret.Get(0).(*migration.MigrationTask) + r0 = ret.Get(0).(*migration.Migration) } } @@ -1375,23 +1375,23 @@ func (_m *TiDBCloudClient) ListImports(ctx context.Context, clusterId string, pa } // ListMigrationTasks provides a mock function with given fields: ctx, clusterId, pageSize, pageToken, orderBy -func (_m *TiDBCloudClient) ListMigrationTasks(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationTasksResp, error) { +func (_m *TiDBCloudClient) ListMigrationTasks(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationsResp, error) { ret := _m.Called(ctx, clusterId, pageSize, pageToken, orderBy) if len(ret) == 0 { panic("no return value specified for ListMigrationTasks") } - var r0 *migration.ListMigrationTasksResp + var r0 *migration.ListMigrationsResp var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, *int32, *string, *string) (*migration.ListMigrationTasksResp, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, *int32, *string, *string) (*migration.ListMigrationsResp, error)); ok { return rf(ctx, clusterId, pageSize, pageToken, orderBy) } - if rf, ok := ret.Get(0).(func(context.Context, string, *int32, *string, *string) *migration.ListMigrationTasksResp); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, *int32, *string, *string) *migration.ListMigrationsResp); ok { r0 = rf(ctx, clusterId, pageSize, pageToken, orderBy) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*migration.ListMigrationTasksResp) + r0 = ret.Get(0).(*migration.ListMigrationsResp) } } diff --git a/internal/service/cloud/api_client.go b/internal/service/cloud/api_client.go index a9c5abc8..afdccf21 100644 --- a/internal/service/cloud/api_client.go +++ b/internal/service/cloud/api_client.go @@ -107,17 +107,17 @@ type TiDBCloudClient interface { CancelMigrationPrecheck(ctx context.Context, clusterId string, precheckId 
string) (map[string]interface{}, error) - CancelMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.MigrationTask, error) + CancelMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) CreateMigrationPrecheck(ctx context.Context, clusterId string, body *migration.MigrationServicePrecheckBody) (*migration.CreateMigrationPrecheckResp, error) - CreateMigrationTask(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateTaskBody) (*migration.MigrationTask, error) + CreateMigrationTask(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error) GetMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (*migration.MigrationPrecheck, error) - GetMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.MigrationTask, error) + GetMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) - ListMigrationTasks(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationTasksResp, error) + ListMigrationTasks(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationsResp, error) PauseMigrationTask(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) @@ -495,8 +495,8 @@ func (d *ClientDelegate) CancelMigrationPrecheck(ctx context.Context, clusterId return res, parseError(err, h) } -func (d *ClientDelegate) CancelMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.MigrationTask, error) { - res, h, err := d.mc.MigrationAPI.MigrationServiceCancelTask(ctx, clusterId, taskId).Execute() +func (d *ClientDelegate) CancelMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { + res, h, err := 
d.mc.MigrationAPI.MigrationServiceCancelMigration(ctx, clusterId, taskId).Execute() return res, parseError(err, h) } @@ -509,8 +509,8 @@ func (d *ClientDelegate) CreateMigrationPrecheck(ctx context.Context, clusterId return res, parseError(err, h) } -func (d *ClientDelegate) CreateMigrationTask(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateTaskBody) (*migration.MigrationTask, error) { - r := d.mc.MigrationAPI.MigrationServiceCreateTask(ctx, clusterId) +func (d *ClientDelegate) CreateMigrationTask(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error) { + r := d.mc.MigrationAPI.MigrationServiceCreateMigration(ctx, clusterId) if body != nil { r = r.Body(*body) } @@ -523,13 +523,13 @@ func (d *ClientDelegate) GetMigrationPrecheck(ctx context.Context, clusterId str return res, parseError(err, h) } -func (d *ClientDelegate) GetMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.MigrationTask, error) { - res, h, err := d.mc.MigrationAPI.MigrationServiceGetTask(ctx, clusterId, taskId).Execute() +func (d *ClientDelegate) GetMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { + res, h, err := d.mc.MigrationAPI.MigrationServiceGetMigration(ctx, clusterId, taskId).Execute() return res, parseError(err, h) } -func (d *ClientDelegate) ListMigrationTasks(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationTasksResp, error) { - r := d.mc.MigrationAPI.MigrationServiceListTasks(ctx, clusterId) +func (d *ClientDelegate) ListMigrationTasks(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationsResp, error) { + r := d.mc.MigrationAPI.MigrationServiceListMigrations(ctx, clusterId) if pageToken != nil { r = r.PageToken(*pageToken) } @@ -548,7 +548,7 @@ func (d *ClientDelegate) 
PauseMigrationTask(ctx context.Context, clusterId strin if body != nil { payload = *body } - res, h, err := d.mc.MigrationAPI.MigrationServicePauseTask(ctx, clusterId, taskId).Body(payload).Execute() + res, h, err := d.mc.MigrationAPI.MigrationServicePauseMigration(ctx, clusterId, taskId).Body(payload).Execute() return res, parseError(err, h) } @@ -557,7 +557,7 @@ func (d *ClientDelegate) ResumeMigrationTask(ctx context.Context, clusterId stri if body != nil { payload = *body } - res, h, err := d.mc.MigrationAPI.MigrationServiceResumeTask(ctx, clusterId, taskId).Body(payload).Execute() + res, h, err := d.mc.MigrationAPI.MigrationServiceResumeMigration(ctx, clusterId, taskId).Body(payload).Execute() return res, parseError(err, h) } diff --git a/internal/service/cloud/logic.go b/internal/service/cloud/logic.go index 5c5bc584..390e6e06 100644 --- a/internal/service/cloud/logic.go +++ b/internal/service/cloud/logic.go @@ -1052,33 +1052,33 @@ func GetSelectedMigrationTask(ctx context.Context, clusterID string, pageSize in if err != nil { return nil, errors.Trace(err) } - appendMigrationTaskItems := func(tasks []migration.MigrationTask) { + appendMigrationTaskItems := func(tasks []migration.Migration) { for _, item := range tasks { - if item.Id == nil { + if item.MigrationId == nil { continue } - name := *item.Id - if item.Name != nil && *item.Name != "" { - name = *item.Name + name := *item.MigrationId + if item.DisplayName != nil && *item.DisplayName != "" { + name = *item.DisplayName } state := "" if item.State != nil { state = string(*item.State) } items = append(items, &MigrationTask{ - ID: *item.Id, + ID: *item.MigrationId, Name: name, State: state, }) } } - appendMigrationTaskItems(resp.Tasks) + appendMigrationTaskItems(resp.Migrations) for resp.NextPageToken != nil && *resp.NextPageToken != "" { resp, err = client.ListMigrationTasks(ctx, clusterID, &pageSizeInt32, resp.NextPageToken, nil) if err != nil { return nil, errors.Trace(err) } - 
appendMigrationTaskItems(resp.Tasks) + appendMigrationTaskItems(resp.Migrations) } if len(items) == 0 { From 1ace5c3180057bf800f8080b014b6dd2ce55c0fd Mon Sep 17 00:00:00 2001 From: yangxin Date: Mon, 24 Nov 2025 16:30:45 +0800 Subject: [PATCH 04/19] fix lint --- internal/cli/serverless/migration/create.go | 2 +- internal/service/cloud/logic.go | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go index 52c3227b..dc3c60d5 100644 --- a/internal/cli/serverless/migration/create.go +++ b/internal/cli/serverless/migration/create.go @@ -264,7 +264,7 @@ func printPrecheckSummary(id, status string, result *pkgmigration.MigrationPrech return nil } columns := []output.Column{"Type", "Status", "Description", "Reason", "Solution"} - var rows []output.Row + rows := make([]output.Row, 0, len(result.Items)) for _, item := range result.Items { rows = append(rows, output.Row{ precheckItemType(item.Type), diff --git a/internal/service/cloud/logic.go b/internal/service/cloud/logic.go index 390e6e06..a943226b 100644 --- a/internal/service/cloud/logic.go +++ b/internal/service/cloud/logic.go @@ -1054,31 +1054,31 @@ func GetSelectedMigrationTask(ctx context.Context, clusterID string, pageSize in } appendMigrationTaskItems := func(tasks []migration.Migration) { for _, item := range tasks { - if item.MigrationId == nil { + if item.MigrationId == nil { continue } - name := *item.MigrationId - if item.DisplayName != nil && *item.DisplayName != "" { - name = *item.DisplayName + name := *item.MigrationId + if item.DisplayName != nil && *item.DisplayName != "" { + name = *item.DisplayName } state := "" if item.State != nil { state = string(*item.State) } items = append(items, &MigrationTask{ - ID: *item.MigrationId, + ID: *item.MigrationId, Name: name, State: state, }) } } - appendMigrationTaskItems(resp.Migrations) + appendMigrationTaskItems(resp.Migrations) for 
resp.NextPageToken != nil && *resp.NextPageToken != "" { resp, err = client.ListMigrationTasks(ctx, clusterID, &pageSizeInt32, resp.NextPageToken, nil) if err != nil { return nil, errors.Trace(err) } - appendMigrationTaskItems(resp.Migrations) + appendMigrationTaskItems(resp.Migrations) } if len(items) == 0 { From 521e3f4f7b4c83afcd268448ef2cca9322aade99 Mon Sep 17 00:00:00 2001 From: yangxin Date: Mon, 24 Nov 2025 16:56:26 +0800 Subject: [PATCH 05/19] use aws.String --- internal/cli/serverless/migration/create.go | 164 +++++-------------- internal/cli/serverless/migration/helpers.go | 14 +- 2 files changed, 45 insertions(+), 133 deletions(-) diff --git a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go index dc3c60d5..a8cc98a4 100644 --- a/internal/cli/serverless/migration/create.go +++ b/internal/cli/serverless/migration/create.go @@ -6,6 +6,7 @@ import ( "strings" "time" + aws "github.com/aws/aws-sdk-go-v2/aws" "github.com/fatih/color" "github.com/juju/errors" "github.com/spf13/cobra" @@ -15,70 +16,21 @@ import ( "github.com/tidbcloud/tidbcloud-cli/internal/flag" "github.com/tidbcloud/tidbcloud-cli/internal/output" "github.com/tidbcloud/tidbcloud-cli/internal/service/cloud" - "github.com/tidbcloud/tidbcloud-cli/internal/ui" pkgmigration "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" ) -type CreateOpts struct { - interactive bool -} - -func (c CreateOpts) NonInteractiveFlags() []string { - return []string{ - flag.ClusterID, - flag.DisplayName, - flag.MigrationSources, - flag.MigrationTarget, - flag.MigrationMode, - } -} - -func (c CreateOpts) RequiredFlags() []string { - return []string{ - flag.ClusterID, - flag.MigrationSources, - flag.DisplayName, - flag.MigrationTarget, - flag.MigrationMode, - } -} - -func (c *CreateOpts) MarkInteractive(cmd *cobra.Command) error { - flags := c.NonInteractiveFlags() - for _, fn := range flags { - f := cmd.Flags().Lookup(fn) - if f != nil && f.Changed { - 
c.interactive = false - break - } - } - if !c.interactive { - for _, fn := range c.RequiredFlags() { - if err := cmd.MarkFlagRequired(fn); err != nil { - return err - } - } - } - return nil -} - func CreateCmd(h *internal.Helper) *cobra.Command { - opts := CreateOpts{interactive: true} - var cmd = &cobra.Command{ Use: "create", Short: "Create a migration", Args: cobra.NoArgs, - Example: fmt.Sprintf(` Create a migration in interactive mode: - $ %[1]s serverless migration create + Example: fmt.Sprintf(` Create a migration: + $ %[1]s serverless migration create -c --display-name --sources '' --target '' --mode - Create a migration in non-interactive mode: - $ %[1]s serverless migration create -c --sources '' --target '' - - Run migration precheck only with shared inputs: + Run migration precheck only with shared inputs: $ %[1]s serverless migration create --precheck-only`, config.CliName), PreRunE: func(cmd *cobra.Command, args []string) error { - return opts.MarkInteractive(cmd) + return markCreateMigrationRequiredFlags(cmd) }, RunE: func(cmd *cobra.Command, args []string) error { d, err := h.Client() @@ -87,56 +39,29 @@ func CreateCmd(h *internal.Helper) *cobra.Command { } ctx := cmd.Context() - var clusterID, name, sourcesStr, targetStr, modeStr string precheckOnly, err := cmd.Flags().GetBool(flag.MigrationPrecheckOnly) if err != nil { return errors.Trace(err) } - if opts.interactive { - if !h.IOStreams.CanPrompt { - return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") - } - project, err := cloud.GetSelectedProject(ctx, h.QueryPageSize, d) - if err != nil { - return err - } - cluster, err := cloud.GetSelectedCluster(ctx, project.ID, h.QueryPageSize, d) - if err != nil { - return err - } - clusterID = cluster.ID - - inputs := []string{flag.DisplayName, flag.MigrationSources, flag.MigrationTarget, flag.MigrationMode} - textInput, err := ui.InitialInputModel(inputs, migrationInputDescription) - if err != nil { - return 
err - } - name = textInput.Inputs[0].Value() - sourcesStr = textInput.Inputs[1].Value() - targetStr = textInput.Inputs[2].Value() - modeStr = textInput.Inputs[3].Value() - } else { - var err error - clusterID, err = cmd.Flags().GetString(flag.ClusterID) - if err != nil { - return errors.Trace(err) - } - name, err = cmd.Flags().GetString(flag.DisplayName) - if err != nil { - return errors.Trace(err) - } - sourcesStr, err = cmd.Flags().GetString(flag.MigrationSources) - if err != nil { - return errors.Trace(err) - } - targetStr, err = cmd.Flags().GetString(flag.MigrationTarget) - if err != nil { - return errors.Trace(err) - } - modeStr, err = cmd.Flags().GetString(flag.MigrationMode) - if err != nil { - return errors.Trace(err) - } + clusterID, err := cmd.Flags().GetString(flag.ClusterID) + if err != nil { + return errors.Trace(err) + } + name, err := cmd.Flags().GetString(flag.DisplayName) + if err != nil { + return errors.Trace(err) + } + sourcesStr, err := cmd.Flags().GetString(flag.MigrationSources) + if err != nil { + return errors.Trace(err) + } + targetStr, err := cmd.Flags().GetString(flag.MigrationTarget) + if err != nil { + return errors.Trace(err) + } + modeStr, err := cmd.Flags().GetString(flag.MigrationMode) + if err != nil { + return errors.Trace(err) } if strings.TrimSpace(name) == "" { @@ -177,9 +102,9 @@ func CreateCmd(h *internal.Helper) *cobra.Command { return errors.Trace(err) } - taskID := ptrString(resp.MigrationId) + taskID := aws.ToString(resp.MigrationId) if taskID == "" { - taskID = ptrString(resp.DisplayName) + taskID = aws.ToString(resp.DisplayName) } if taskID == "" { taskID = "" @@ -199,6 +124,15 @@ func CreateCmd(h *internal.Helper) *cobra.Command { return cmd } +func markCreateMigrationRequiredFlags(cmd *cobra.Command) error { + for _, fn := range []string{flag.ClusterID, flag.DisplayName, flag.MigrationSources, flag.MigrationTarget, flag.MigrationMode} { + if err := cmd.MarkFlagRequired(fn); err != nil { + return err + } + } + return 
nil +} + const precheckPollInterval = 5 * time.Second func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clusterID string, body *pkgmigration.MigrationServicePrecheckBody, h *internal.Helper) error { @@ -224,7 +158,7 @@ func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clu if err != nil { return errors.Trace(err) } - status := strings.ToUpper(ptrString(result.Status)) + status := strings.ToUpper(aws.ToString(result.Status)) if status == "" { status = "PENDING" } @@ -238,7 +172,7 @@ func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clu if err := printPrecheckSummary(precheckID, status, result, h); err != nil { return err } - if strings.EqualFold(status, "FAILED") || (result.FailedCnt != nil && *result.FailedCnt > 0) { + if strings.EqualFold(status, "FAILED") || aws.ToInt32(result.FailedCnt) > 0 { return errors.New("migration precheck failed") } fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration precheck %s passed", precheckID)) @@ -259,7 +193,7 @@ func isPrecheckPending(status string) bool { func printPrecheckSummary(id, status string, result *pkgmigration.MigrationPrecheck, h *internal.Helper) error { fmt.Fprintf(h.IOStreams.Out, "precheck %s finished with status %s\n", id, status) fmt.Fprintf(h.IOStreams.Out, "Total: %d, Success: %d, Warn: %d, Failed: %d\n", - ptrInt32(result.Total), ptrInt32(result.SuccessCnt), ptrInt32(result.WarnCnt), ptrInt32(result.FailedCnt)) + aws.ToInt32(result.Total), aws.ToInt32(result.SuccessCnt), aws.ToInt32(result.WarnCnt), aws.ToInt32(result.FailedCnt)) if len(result.Items) == 0 { return nil } @@ -268,29 +202,15 @@ func printPrecheckSummary(id, status string, result *pkgmigration.MigrationPrech for _, item := range result.Items { rows = append(rows, output.Row{ precheckItemType(item.Type), - ptrString(item.Status), - ptrString(item.Description), - ptrString(item.Reason), - ptrString(item.Solution), + aws.ToString(item.Status), + 
aws.ToString(item.Description), + aws.ToString(item.Reason), + aws.ToString(item.Solution), }) } return output.PrintHumanTable(h.IOStreams.Out, columns, rows) } -func ptrString(value *string) string { - if value == nil { - return "" - } - return *value -} - -func ptrInt32(value *int32) int32 { - if value == nil { - return 0 - } - return *value -} - func precheckItemType(value *pkgmigration.PrecheckItemType) string { if value == nil { return "" diff --git a/internal/cli/serverless/migration/helpers.go b/internal/cli/serverless/migration/helpers.go index 65875825..21b533c8 100644 --- a/internal/cli/serverless/migration/helpers.go +++ b/internal/cli/serverless/migration/helpers.go @@ -21,21 +21,13 @@ import ( "github.com/juju/errors" - "github.com/tidbcloud/tidbcloud-cli/internal/flag" pkgmigration "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" ) -var migrationInputDescription = map[string]string{ - flag.DisplayName: "Display name for the migration.", - flag.MigrationSources: "Sources definition in JSON. Use \"ticloud serverless migration template --type sources\" as a reference.", - flag.MigrationTarget: "Target definition in JSON. 
Use \"ticloud serverless migration template --type target\" as a reference.", - flag.MigrationMode: "Migration mode, one of MODE_ALL or MODE_INCREMENTAL.", -} - func parseMigrationSources(value string) ([]pkgmigration.Source, error) { trimmed := strings.TrimSpace(value) if trimmed == "" { - return nil, errors.New("sources is required, use --sources or provide it in interactive mode") + return nil, errors.New("sources is required; use --sources") } var sources []pkgmigration.Source if err := json.Unmarshal([]byte(trimmed), &sources); err != nil { @@ -50,7 +42,7 @@ func parseMigrationSources(value string) ([]pkgmigration.Source, error) { func parseMigrationTarget(value string) (pkgmigration.Target, error) { trimmed := strings.TrimSpace(value) if trimmed == "" { - return pkgmigration.Target{}, errors.New("target is required, use --target or provide it in interactive mode") + return pkgmigration.Target{}, errors.New("target is required; use --target") } var target pkgmigration.Target if err := json.Unmarshal([]byte(trimmed), &target); err != nil { @@ -62,7 +54,7 @@ func parseMigrationTarget(value string) (pkgmigration.Target, error) { func parseMigrationMode(value string) (pkgmigration.TaskMode, error) { trimmed := strings.TrimSpace(value) if trimmed == "" { - return "", errors.New("mode is required, use --mode or provide it in interactive mode") + return "", errors.New("mode is required; use --mode") } normalized := strings.ToUpper(trimmed) if !strings.HasPrefix(normalized, "MODE_") { From f18f340cafa7fdf5c59bd286ea67236f69b73a50 Mon Sep 17 00:00:00 2001 From: yangxin Date: Tue, 25 Nov 2025 10:25:34 +0800 Subject: [PATCH 06/19] use enum --- internal/cli/serverless/migration/create.go | 55 +++++++-- internal/flag/flag.go | 1 - .../v1beta1/serverless/dm.swagger.json | 28 ++++- .../migration/.openapi-generator/FILES | 2 + .../v1beta1/serverless/migration/README.md | 2 + .../serverless/migration/api/openapi.yaml | 44 +++++-- .../migration/model_migration_precheck.go | 12 
+- .../model_migration_precheck_status.go | 111 ++++++++++++++++++ .../migration/model_precheck_item.go | 14 +-- .../migration/model_precheck_item_status.go | 107 +++++++++++++++++ 10 files changed, 337 insertions(+), 39 deletions(-) create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck_status.go create mode 100644 pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_status.go diff --git a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go index a8cc98a4..f4dd4b04 100644 --- a/internal/cli/serverless/migration/create.go +++ b/internal/cli/serverless/migration/create.go @@ -148,7 +148,7 @@ func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clu ticker := time.NewTicker(precheckPollInterval) defer ticker.Stop() - var lastStatus string + var lastStatus pkgmigration.MigrationPrecheckStatus for { select { case <-ctx.Done(): @@ -158,21 +158,19 @@ func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clu if err != nil { return errors.Trace(err) } - status := strings.ToUpper(aws.ToString(result.Status)) - if status == "" { - status = "PENDING" - } + status := precheckStatusOrDefault(result.Status) if status != lastStatus { fmt.Fprintf(h.IOStreams.Out, "precheck %s status: %s\n", precheckID, status) lastStatus = status } - if isPrecheckPending(status) { + if isPrecheckUnfinished(status) { continue } if err := printPrecheckSummary(precheckID, status, result, h); err != nil { return err } - if strings.EqualFold(status, "FAILED") || aws.ToInt32(result.FailedCnt) > 0 { + if status == pkgmigration.MIGRATIONPRECHECKSTATUS_FAILED { + fmt.Fprintln(h.IOStreams.Out, color.RedString("migration precheck %s failed", precheckID)) return errors.New("migration precheck failed") } fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration precheck %s passed", precheckID)) @@ -181,16 +179,24 @@ func runMigrationPrecheck(ctx context.Context, client 
cloud.TiDBCloudClient, clu } } -func isPrecheckPending(status string) bool { +func precheckStatusOrDefault(value *pkgmigration.MigrationPrecheckStatus) pkgmigration.MigrationPrecheckStatus { + if value == nil || *value == "" { + return pkgmigration.MIGRATIONPRECHECKSTATUS_PENDING + } + return *value +} + +func isPrecheckUnfinished(status pkgmigration.MigrationPrecheckStatus) bool { switch status { - case "PENDING", "RUNNING", "PROCESSING", "IN_PROGRESS", "": + case pkgmigration.MIGRATIONPRECHECKSTATUS_PENDING, + pkgmigration.MIGRATIONPRECHECKSTATUS_RUNNING: return true default: return false } } -func printPrecheckSummary(id, status string, result *pkgmigration.MigrationPrecheck, h *internal.Helper) error { +func printPrecheckSummary(id string, status pkgmigration.MigrationPrecheckStatus, result *pkgmigration.MigrationPrecheck, h *internal.Helper) error { fmt.Fprintf(h.IOStreams.Out, "precheck %s finished with status %s\n", id, status) fmt.Fprintf(h.IOStreams.Out, "Total: %d, Success: %d, Warn: %d, Failed: %d\n", aws.ToInt32(result.Total), aws.ToInt32(result.SuccessCnt), aws.ToInt32(result.WarnCnt), aws.ToInt32(result.FailedCnt)) @@ -200,14 +206,25 @@ func printPrecheckSummary(id, status string, result *pkgmigration.MigrationPrech columns := []output.Column{"Type", "Status", "Description", "Reason", "Solution"} rows := make([]output.Row, 0, len(result.Items)) for _, item := range result.Items { + if !shouldPrintPrecheckItem(item.Status) { + continue + } + var status string + if item.Status != nil { + status = string(*item.Status) + } rows = append(rows, output.Row{ precheckItemType(item.Type), - aws.ToString(item.Status), + status, aws.ToString(item.Description), aws.ToString(item.Reason), aws.ToString(item.Solution), }) } + if len(rows) == 0 { + fmt.Fprintln(h.IOStreams.Out, "No warning or failure details returned.") + return nil + } return output.PrintHumanTable(h.IOStreams.Out, columns, rows) } @@ -217,3 +234,19 @@ func precheckItemType(value 
*pkgmigration.PrecheckItemType) string { } return string(*value) } + +// shouldPrintPrecheckItem reports whether a precheck item should be shown to users. +// Currently only WARNING and FAILED statuses surface because SUCCESS does not +// provide actionable information. +func shouldPrintPrecheckItem(status *pkgmigration.PrecheckItemStatus) bool { + if status == nil { + return false + } + switch *status { + case pkgmigration.PRECHECKITEMSTATUS_WARNING, + pkgmigration.PRECHECKITEMSTATUS_FAILED: + return true + default: + return false + } +} diff --git a/internal/flag/flag.go b/internal/flag/flag.go index 82c2ed8a..a37a161f 100644 --- a/internal/flag/flag.go +++ b/internal/flag/flag.go @@ -124,7 +124,6 @@ const ( MigrationSources string = "sources" MigrationTarget string = "target" MigrationMode string = "mode" - MigrationFullData string = "full-data" MigrationTemplateType string = "type" MigrationPrecheckOnly string = "precheck-only" ) diff --git a/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json b/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json index 1f15290a..25a1e68f 100644 --- a/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json +++ b/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json @@ -717,9 +717,13 @@ "readOnly": true }, "status": { - "type": "string", "description": "Overall status of the precheck.", - "readOnly": true + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/MigrationPrecheck.Status" + } + ] }, "items": { "type": "array", @@ -732,6 +736,11 @@ } } }, + "MigrationPrecheck.Status": { + "type": "string", + "enum": ["RUNNING", "FINISHED", "PENDING", "FAILED", "CANCELED"], + "description": " - RUNNING: Precheck is in progress.\n - FINISHED: Precheck finished successfully.\n - PENDING: Precheck is pending.\n - FAILED: Precheck failed.\n - CANCELED: Precheck is canceled." 
+ }, "MigrationService.CreateMigrationBody": { "type": "object", "properties": { @@ -817,9 +826,13 @@ "readOnly": true }, "status": { - "type": "string", - "description": "Status of this check (e.g., SUCCESS, FAILED, WARN).", - "readOnly": true + "description": "Status of this check.", + "readOnly": true, + "allOf": [ + { + "$ref": "#/definitions/PrecheckItem.Status" + } + ] }, "solution": { "type": "string", @@ -847,6 +860,11 @@ } } }, + "PrecheckItem.Status": { + "type": "string", + "enum": ["SUCCESS", "WARNING", "FAILED"], + "description": " - SUCCESS: Check passed successfully.\n - WARNING: Check resulted in a warning.\n - FAILED: Check failed." + }, "PrecheckItemType": { "type": "string", "enum": [ diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES index e7969c2b..78f02545 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES +++ b/pkg/tidbcloud/v1beta1/serverless/migration/.openapi-generator/FILES @@ -17,10 +17,12 @@ model_list_migrations_resp.go model_load_detail.go model_migration.go model_migration_precheck.go +model_migration_precheck_status.go model_migration_service_create_migration_body.go model_migration_service_precheck_body.go model_migration_state.go model_precheck_item.go +model_precheck_item_status.go model_precheck_item_type.go model_route_rule.go model_route_rule_source.go diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/README.md b/pkg/tidbcloud/v1beta1/serverless/migration/README.md index 967c2723..1c2c746b 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/README.md +++ b/pkg/tidbcloud/v1beta1/serverless/migration/README.md @@ -101,10 +101,12 @@ Class | Method | HTTP request | Description - [LoadDetail](docs/LoadDetail.md) - [Migration](docs/Migration.md) - [MigrationPrecheck](docs/MigrationPrecheck.md) + - [MigrationPrecheckStatus](docs/MigrationPrecheckStatus.md) - 
[MigrationServiceCreateMigrationBody](docs/MigrationServiceCreateMigrationBody.md) - [MigrationServicePrecheckBody](docs/MigrationServicePrecheckBody.md) - [MigrationState](docs/MigrationState.md) - [PrecheckItem](docs/PrecheckItem.md) + - [PrecheckItemStatus](docs/PrecheckItemStatus.md) - [PrecheckItemType](docs/PrecheckItemType.md) - [RouteRule](docs/RouteRule.md) - [RouteRuleSource](docs/RouteRuleSource.md) diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml b/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml index 269b75e5..3e13e322 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml +++ b/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml @@ -676,14 +676,14 @@ components: solutionDocUrl: solutionDocUrl description: description type: "{}" - status: status + status: "{}" - reason: reason solution: solution solutionDocUrl: solutionDocUrl description: description type: "{}" - status: status - status: status + status: "{}" + status: "{}" properties: precheckId: description: The ID of the precheck. @@ -710,9 +710,10 @@ components: readOnly: true type: integer status: + allOf: + - $ref: '#/components/schemas/MigrationPrecheck.Status' description: Overall status of the precheck. - readOnly: true - type: string + type: object items: description: Details for each precheck item. items: @@ -720,6 +721,20 @@ components: readOnly: true type: array type: object + MigrationPrecheck.Status: + description: |2- + - RUNNING: Precheck is in progress. + - FINISHED: Precheck finished successfully. + - PENDING: Precheck is pending. + - FAILED: Precheck failed. + - CANCELED: Precheck is canceled. 
+ enum: + - RUNNING + - FINISHED + - PENDING + - FAILED + - CANCELED + type: string MigrationService.CreateMigrationBody: properties: displayName: @@ -785,16 +800,17 @@ components: solutionDocUrl: solutionDocUrl description: description type: "{}" - status: status + status: "{}" properties: description: description: Human-readable description of the check. readOnly: true type: string status: - description: "Status of this check (e.g., SUCCESS, FAILED, WARN)." - readOnly: true - type: string + allOf: + - $ref: '#/components/schemas/PrecheckItem.Status' + description: Status of this check. + type: object solution: description: Suggested solution if the check failed or warned. readOnly: true @@ -813,6 +829,16 @@ components: description: The type of precheck. type: object type: object + PrecheckItem.Status: + description: |2- + - SUCCESS: Check passed successfully. + - WARNING: Check resulted in a warning. + - FAILED: Check failed. + enum: + - SUCCESS + - WARNING + - FAILED + type: string PrecheckItemType: description: |- Types of prechecks performed before starting a migration. diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go index ace07dc2..6dc9411b 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck.go @@ -30,7 +30,7 @@ type MigrationPrecheck struct { // Number of successful items. SuccessCnt *int32 `json:"successCnt,omitempty"` // Overall status of the precheck. - Status *string `json:"status,omitempty"` + Status *MigrationPrecheckStatus `json:"status,omitempty"` // Details for each precheck item. Items []PrecheckItem `json:"items,omitempty"` AdditionalProperties map[string]interface{} @@ -216,9 +216,9 @@ func (o *MigrationPrecheck) SetSuccessCnt(v int32) { } // GetStatus returns the Status field value if set, zero value otherwise. 
-func (o *MigrationPrecheck) GetStatus() string { +func (o *MigrationPrecheck) GetStatus() MigrationPrecheckStatus { if o == nil || IsNil(o.Status) { - var ret string + var ret MigrationPrecheckStatus return ret } return *o.Status @@ -226,7 +226,7 @@ func (o *MigrationPrecheck) GetStatus() string { // GetStatusOk returns a tuple with the Status field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *MigrationPrecheck) GetStatusOk() (*string, bool) { +func (o *MigrationPrecheck) GetStatusOk() (*MigrationPrecheckStatus, bool) { if o == nil || IsNil(o.Status) { return nil, false } @@ -242,8 +242,8 @@ func (o *MigrationPrecheck) HasStatus() bool { return false } -// SetStatus gets a reference to the given string and assigns it to the Status field. -func (o *MigrationPrecheck) SetStatus(v string) { +// SetStatus gets a reference to the given MigrationPrecheckStatus and assigns it to the Status field. +func (o *MigrationPrecheck) SetStatus(v MigrationPrecheckStatus) { o.Status = &v } diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck_status.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck_status.go new file mode 100644 index 00000000..12ec29f0 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_precheck_status.go @@ -0,0 +1,111 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// MigrationPrecheckStatus - RUNNING: Precheck is in progress. - FINISHED: Precheck finished successfully. - PENDING: Precheck is pending. - FAILED: Precheck failed. - CANCELED: Precheck is canceled. 
+type MigrationPrecheckStatus string + +// List of MigrationPrecheck.Status +const ( + MIGRATIONPRECHECKSTATUS_RUNNING MigrationPrecheckStatus = "RUNNING" + MIGRATIONPRECHECKSTATUS_FINISHED MigrationPrecheckStatus = "FINISHED" + MIGRATIONPRECHECKSTATUS_PENDING MigrationPrecheckStatus = "PENDING" + MIGRATIONPRECHECKSTATUS_FAILED MigrationPrecheckStatus = "FAILED" + MIGRATIONPRECHECKSTATUS_CANCELED MigrationPrecheckStatus = "CANCELED" +) + +// All allowed values of MigrationPrecheckStatus enum +var AllowedMigrationPrecheckStatusEnumValues = []MigrationPrecheckStatus{ + "RUNNING", + "FINISHED", + "PENDING", + "FAILED", + "CANCELED", +} + +func (v *MigrationPrecheckStatus) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := MigrationPrecheckStatus(value) + for _, existing := range AllowedMigrationPrecheckStatusEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = MigrationPrecheckStatus(value) + return nil +} + +// NewMigrationPrecheckStatusFromValue returns a pointer to a valid MigrationPrecheckStatus for the value passed as argument +func NewMigrationPrecheckStatusFromValue(v string) *MigrationPrecheckStatus { + ev := MigrationPrecheckStatus(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v MigrationPrecheckStatus) IsValid() bool { + for _, existing := range AllowedMigrationPrecheckStatusEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to MigrationPrecheck.Status value +func (v MigrationPrecheckStatus) Ptr() *MigrationPrecheckStatus { + return &v +} + +type NullableMigrationPrecheckStatus struct { + value *MigrationPrecheckStatus + isSet bool +} + +func (v NullableMigrationPrecheckStatus) Get() *MigrationPrecheckStatus { + return v.value +} + +func (v *NullableMigrationPrecheckStatus) Set(val *MigrationPrecheckStatus) { + 
v.value = val + v.isSet = true +} + +func (v NullableMigrationPrecheckStatus) IsSet() bool { + return v.isSet +} + +func (v *NullableMigrationPrecheckStatus) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableMigrationPrecheckStatus(val *MigrationPrecheckStatus) *NullableMigrationPrecheckStatus { + return &NullableMigrationPrecheckStatus{value: val, isSet: true} +} + +func (v NullableMigrationPrecheckStatus) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableMigrationPrecheckStatus) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go index 813ea9ab..f3a93501 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item.go @@ -21,8 +21,8 @@ var _ MappedNullable = &PrecheckItem{} type PrecheckItem struct { // Human-readable description of the check. Description *string `json:"description,omitempty"` - // Status of this check (e.g., SUCCESS, FAILED, WARN). - Status *string `json:"status,omitempty"` + // Status of this check. + Status *PrecheckItemStatus `json:"status,omitempty"` // Suggested solution if the check failed or warned. Solution *string `json:"solution,omitempty"` // Reason for the failure or warning. @@ -86,9 +86,9 @@ func (o *PrecheckItem) SetDescription(v string) { } // GetStatus returns the Status field value if set, zero value otherwise. 
-func (o *PrecheckItem) GetStatus() string { +func (o *PrecheckItem) GetStatus() PrecheckItemStatus { if o == nil || IsNil(o.Status) { - var ret string + var ret PrecheckItemStatus return ret } return *o.Status @@ -96,7 +96,7 @@ func (o *PrecheckItem) GetStatus() string { // GetStatusOk returns a tuple with the Status field value if set, nil otherwise // and a boolean to check if the value has been set. -func (o *PrecheckItem) GetStatusOk() (*string, bool) { +func (o *PrecheckItem) GetStatusOk() (*PrecheckItemStatus, bool) { if o == nil || IsNil(o.Status) { return nil, false } @@ -112,8 +112,8 @@ func (o *PrecheckItem) HasStatus() bool { return false } -// SetStatus gets a reference to the given string and assigns it to the Status field. -func (o *PrecheckItem) SetStatus(v string) { +// SetStatus gets a reference to the given PrecheckItemStatus and assigns it to the Status field. +func (o *PrecheckItem) SetStatus(v PrecheckItemStatus) { o.Status = &v } diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_status.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_status.go new file mode 100644 index 00000000..f17d7d00 --- /dev/null +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_precheck_item_status.go @@ -0,0 +1,107 @@ +/* +TiDB Cloud Starter and Essential API + +TiDB Cloud Starter and Essential API + +API version: v1beta1 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package migration + +import ( + "encoding/json" +) + +// PrecheckItemStatus - SUCCESS: Check passed successfully. - WARNING: Check resulted in a warning. - FAILED: Check failed. 
+type PrecheckItemStatus string + +// List of PrecheckItem.Status +const ( + PRECHECKITEMSTATUS_SUCCESS PrecheckItemStatus = "SUCCESS" + PRECHECKITEMSTATUS_WARNING PrecheckItemStatus = "WARNING" + PRECHECKITEMSTATUS_FAILED PrecheckItemStatus = "FAILED" +) + +// All allowed values of PrecheckItemStatus enum +var AllowedPrecheckItemStatusEnumValues = []PrecheckItemStatus{ + "SUCCESS", + "WARNING", + "FAILED", +} + +func (v *PrecheckItemStatus) UnmarshalJSON(src []byte) error { + var value string + err := json.Unmarshal(src, &value) + if err != nil { + return err + } + enumTypeValue := PrecheckItemStatus(value) + for _, existing := range AllowedPrecheckItemStatusEnumValues { + if existing == enumTypeValue { + *v = enumTypeValue + return nil + } + } + + *v = PrecheckItemStatus(value) + return nil +} + +// NewPrecheckItemStatusFromValue returns a pointer to a valid PrecheckItemStatus for the value passed as argument +func NewPrecheckItemStatusFromValue(v string) *PrecheckItemStatus { + ev := PrecheckItemStatus(v) + return &ev +} + +// IsValid return true if the value is valid for the enum, false otherwise +func (v PrecheckItemStatus) IsValid() bool { + for _, existing := range AllowedPrecheckItemStatusEnumValues { + if existing == v { + return true + } + } + return false +} + +// Ptr returns reference to PrecheckItem.Status value +func (v PrecheckItemStatus) Ptr() *PrecheckItemStatus { + return &v +} + +type NullablePrecheckItemStatus struct { + value *PrecheckItemStatus + isSet bool +} + +func (v NullablePrecheckItemStatus) Get() *PrecheckItemStatus { + return v.value +} + +func (v *NullablePrecheckItemStatus) Set(val *PrecheckItemStatus) { + v.value = val + v.isSet = true +} + +func (v NullablePrecheckItemStatus) IsSet() bool { + return v.isSet +} + +func (v *NullablePrecheckItemStatus) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullablePrecheckItemStatus(val *PrecheckItemStatus) *NullablePrecheckItemStatus { + return &NullablePrecheckItemStatus{value: 
val, isSet: true} +} + +func (v NullablePrecheckItemStatus) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullablePrecheckItemStatus) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} From 41046b2264fa9c11bb9fc60728bc6e601616a194 Mon Sep 17 00:00:00 2001 From: yangxin Date: Tue, 25 Nov 2025 17:21:49 +0800 Subject: [PATCH 07/19] use one file --- internal/cli/serverless/migration/create.go | 28 +- internal/cli/serverless/migration/helpers.go | 37 ++- internal/cli/serverless/migration/template.go | 241 +++++++++--------- internal/flag/flag.go | 6 +- 4 files changed, 140 insertions(+), 172 deletions(-) diff --git a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go index f4dd4b04..33b87106 100644 --- a/internal/cli/serverless/migration/create.go +++ b/internal/cli/serverless/migration/create.go @@ -25,7 +25,7 @@ func CreateCmd(h *internal.Helper) *cobra.Command { Short: "Create a migration", Args: cobra.NoArgs, Example: fmt.Sprintf(` Create a migration: - $ %[1]s serverless migration create -c --display-name --sources '' --target '' --mode + $ %[1]s serverless migration create -c --display-name --definition '' Run migration precheck only with shared inputs: $ %[1]s serverless migration create --precheck-only`, config.CliName), @@ -51,15 +51,7 @@ func CreateCmd(h *internal.Helper) *cobra.Command { if err != nil { return errors.Trace(err) } - sourcesStr, err := cmd.Flags().GetString(flag.MigrationSources) - if err != nil { - return errors.Trace(err) - } - targetStr, err := cmd.Flags().GetString(flag.MigrationTarget) - if err != nil { - return errors.Trace(err) - } - modeStr, err := cmd.Flags().GetString(flag.MigrationMode) + definitionStr, err := cmd.Flags().GetString(flag.MigrationDefinition) if err != nil { return errors.Trace(err) } @@ -67,15 +59,7 @@ func CreateCmd(h *internal.Helper) *cobra.Command { if strings.TrimSpace(name) == "" { return 
errors.New("display name is required") } - sources, err := parseMigrationSources(sourcesStr) - if err != nil { - return err - } - target, err := parseMigrationTarget(targetStr) - if err != nil { - return err - } - mode, err := parseMigrationMode(modeStr) + sources, target, mode, err := parseMigrationDefinition(definitionStr) if err != nil { return err } @@ -116,16 +100,14 @@ func CreateCmd(h *internal.Helper) *cobra.Command { cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "The ID of the target cluster.") cmd.Flags().StringP(flag.DisplayName, flag.DisplayNameShort, "", "Display name for the migration.") - cmd.Flags().String(flag.MigrationSources, "", "Sources definition in JSON. Use \"ticloud serverless migration template --type sources\" for a template.") - cmd.Flags().String(flag.MigrationTarget, "", "Target definition in JSON. Use \"ticloud serverless migration template --type target\" for a template.") - cmd.Flags().String(flag.MigrationMode, "", fmt.Sprintf("Migration mode, one of %v.", taskModeValues())) + cmd.Flags().String(flag.MigrationDefinition, "", "Migration definition in JSON. 
Use \"ticloud serverless migration template --type definition\" for a template.") cmd.Flags().Bool(flag.MigrationPrecheckOnly, false, "Run a migration precheck with the provided inputs and exit without creating a task.") return cmd } func markCreateMigrationRequiredFlags(cmd *cobra.Command) error { - for _, fn := range []string{flag.ClusterID, flag.DisplayName, flag.MigrationSources, flag.MigrationTarget, flag.MigrationMode} { + for _, fn := range []string{flag.ClusterID, flag.DisplayName, flag.MigrationDefinition} { if err := cmd.MarkFlagRequired(fn); err != nil { return err } diff --git a/internal/cli/serverless/migration/helpers.go b/internal/cli/serverless/migration/helpers.go index 21b533c8..775f4b55 100644 --- a/internal/cli/serverless/migration/helpers.go +++ b/internal/cli/serverless/migration/helpers.go @@ -24,37 +24,36 @@ import ( pkgmigration "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" ) -func parseMigrationSources(value string) ([]pkgmigration.Source, error) { +func parseMigrationDefinition(value string) ([]pkgmigration.Source, pkgmigration.Target, pkgmigration.TaskMode, error) { trimmed := strings.TrimSpace(value) if trimmed == "" { - return nil, errors.New("sources is required; use --sources") + return nil, pkgmigration.Target{}, "", errors.New("migration definition is required; use --definition") } - var sources []pkgmigration.Source - if err := json.Unmarshal([]byte(trimmed), &sources); err != nil { - return nil, errors.Annotate(err, "invalid sources JSON") + var payload struct { + Sources []pkgmigration.Source `json:"sources"` + Target *pkgmigration.Target `json:"target"` + Mode string `json:"mode"` } - if len(sources) == 0 { - return nil, errors.New("sources must contain at least one entry") + if err := json.Unmarshal([]byte(trimmed), &payload); err != nil { + return nil, pkgmigration.Target{}, "", errors.Annotate(err, "invalid migration definition JSON") } - return sources, nil -} - -func 
parseMigrationTarget(value string) (pkgmigration.Target, error) { - trimmed := strings.TrimSpace(value) - if trimmed == "" { - return pkgmigration.Target{}, errors.New("target is required; use --target") + if len(payload.Sources) == 0 { + return nil, pkgmigration.Target{}, "", errors.New("migration definition must include at least one source") + } + if payload.Target == nil { + return nil, pkgmigration.Target{}, "", errors.New("migration definition must include the target block") } - var target pkgmigration.Target - if err := json.Unmarshal([]byte(trimmed), &target); err != nil { - return pkgmigration.Target{}, errors.Annotate(err, "invalid target JSON") + mode, err := parseMigrationMode(payload.Mode) + if err != nil { + return nil, pkgmigration.Target{}, "", err } - return target, nil + return payload.Sources, *payload.Target, mode, nil } func parseMigrationMode(value string) (pkgmigration.TaskMode, error) { trimmed := strings.TrimSpace(value) if trimmed == "" { - return "", errors.New("mode is required; use --mode") + return "", errors.New("mode is required in the migration definition") } normalized := strings.ToUpper(trimmed) if !strings.HasPrefix(normalized, "MODE_") { diff --git a/internal/cli/serverless/migration/template.go b/internal/cli/serverless/migration/template.go index 4dde6dc0..191069e5 100644 --- a/internal/cli/serverless/migration/template.go +++ b/internal/cli/serverless/migration/template.go @@ -13,149 +13,138 @@ import ( ) const ( - migrationSourcesTemplateWithExplain = `[ - { - // Required: source database type. 
Supported: SOURCE_TYPE_MYSQL, SOURCE_TYPE_ALICLOUD_RDS_MYSQL - "sourceType": "SOURCE_TYPE_MYSQL", - "connProfile": { - // Optional connection type, PUBLIC or PRIVATE_LINK - "connType": "PUBLIC", - "host": "10.0.0.2", - "port": 3306, - "user": "dm_sync_user", - "password": "Passw0rd!", - // Optional when using private link - "endpointId": "pl-xxxxxxxx", - "security": { - // Optional TLS materials encoded in Base64 - "sslCaContent": "", - "sslCertContent": "", - "sslKeyContent": "", - "certAllowedCn": ["client-cn"] - } - }, - // Optional block/allow rules to whitelist schemas/tables - "baRules": { - "doDbs": ["app_db"], - "doTables": [ - {"schema": "app_db", "table": "orders"}, - {"schema": "app_db", "table": "customers"} - ] - }, - // Optional route rules for renaming schemas/tables - "routeRules": [ - { - "sourceTable": { - "schemaPattern": "app_db", - "tablePattern": "orders" - }, - "targetTable": { - "schema": "app_db", - "table": "orders_copy" + migrationDefinitionAllTemplate = `{ + // Required migration mode. Use "ALL" for full + incremental. + "mode": "ALL", + // Target TiDB Cloud user credentials used by the migration task + "target": { + "user": "migration_user", + "password": "Passw0rd!" + }, + // List at least one migration source + "sources": [ + { + // Required: source database type + "sourceType": "SOURCE_TYPE_MYSQL", + "connProfile": { + // Optional connection type, PUBLIC or PRIVATE_LINK + "connType": "PUBLIC", + "host": "10.0.0.2", + "port": 3306, + "user": "dm_sync_user", + "password": "Passw0rd!", + // Optional fields below are needed only for private link or TLS + "endpointId": "pl-xxxxxxxx", + "security": { + // TLS materials must be Base64 encoded + "sslCaContent": "", + "sslCertContent": "", + "sslKeyContent": "", + "certAllowedCn": ["client-cn"] } - } - ], - // Optional start position for incremental sync. 
Provide binlogName+binlogPos or binlogGtid - "binlogName": "mysql-bin.000001", - "binlogPos": 4, - "binlogGtid": "3E11FA47-71CA-11E1-9E33-C80AA9429562:1-12345" - } - ]` - - migrationSourcesTemplate = `[ - { - "sourceType": "SOURCE_TYPE_MYSQL", - "connProfile": { - "connType": "PUBLIC", - "host": "10.0.0.2", - "port": 3306, - "user": "dm_sync_user", - "password": "Passw0rd!" - }, - "baRules": { - "doDbs": ["app_db"], - "doTables": [{"schema": "app_db", "table": "orders"}] - }, - "routeRules": [ - { - "sourceTable": {"schemaPattern": "app_db", "tablePattern": "orders"}, - "targetTable": {"schema": "app_db", "table": "orders_copy"} - } - ] - } - ]` - - migrationTargetTemplateWithExplain = `{ - // Target TiDB Cloud user used by the migration task - "user": "migration_user", - // Password corresponding to the target user - "password": "Passw0rd!" + }, + // Optional block/allow rules to control synced schemas/tables + "baRules": { + "doDbs": ["app_db"], + "doTables": [ + {"schema": "app_db", "table": "orders"}, + {"schema": "app_db", "table": "customers"} + ] + }, + // Optional route rules to rename objects during migration + "routeRules": [ + { + "sourceTable": { + "schemaPattern": "app_db", + "tablePattern": "orders" + }, + "targetTable": { + "schema": "app_db", + "table": "orders_copy" + } + } + ], + // Optional start position for incremental sync (binlog position or GTID) + "binlogName": "mysql-bin.000001", + "binlogPos": 4, + "binlogGtid": "3E11FA47-71CA-11E1-9E33-C80AA9429562:1-12345" + } + ] }` - migrationTargetTemplate = `{ - "user": "migration_user", - "password": "Passw0rd!" + migrationDefinitionIncrementalTemplate = `{ + // Incremental-only mode keeps the source and target in sync + "mode": "INCREMENTAL", + "target": { + "user": "migration_user", + "password": "Passw0rd!" 
+ }, + "sources": [ + { + "sourceType": "SOURCE_TYPE_MYSQL", + "connProfile": { + "connType": "PUBLIC", + "host": "10.0.0.2", + "port": 3306, + "user": "dm_sync_user", + "password": "Passw0rd!" + }, + // Binlog coordinates are usually required when starting from existing data + "binlogName": "mysql-bin.000777", + "binlogPos": 12345, + "binlogGtid": "3E11FA47-71CA-11E1-9E33-C80AA9429562:1-12345" + } + ] }` ) -func TemplateCmd(h *internal.Helper) *cobra.Command { - var cmd = &cobra.Command{ - Use: "template", - Short: "Show migration JSON templates", - Args: cobra.NoArgs, - Example: fmt.Sprintf(` Show all migration templates: - $ %[1]s serverless migration template +type templateVariant struct { + heading string + body string +} + +var allowedTemplateModes = []string{"all", "incremental"} + +var definitionTemplates = map[string]templateVariant{ + "all": { + heading: "Definition template (mode = ALL)", + body: migrationDefinitionAllTemplate, + }, + "incremental": { + heading: "Definition template (mode = INCREMENTAL)", + body: migrationDefinitionIncrementalTemplate, + }, +} - Show the sources template with explanations: - $ %[1]s serverless migration template --type sources --explain`, config.CliName), +func TemplateCmd(h *internal.Helper) *cobra.Command { + cmd := &cobra.Command{ + Use: "template", + Short: "Show migration JSON templates", + Args: cobra.NoArgs, + Example: fmt.Sprintf(" Show the ALL mode migration template:\n $ %[1]s serverless migration template --modetype all\n\n Show the INCREMENTAL migration template:\n $ %[1]s serverless migration template --modetype incremental\n", config.CliName), RunE: func(cmd *cobra.Command, args []string) error { - explain, err := cmd.Flags().GetBool(flag.Explain) - if err != nil { - return err - } - templateType, err := cmd.Flags().GetString(flag.MigrationTemplateType) + mode, err := cmd.Flags().GetString(flag.MigrationModeType) if err != nil { return err } - - return renderMigrationTemplate(h, 
strings.ToLower(templateType), explain) + return renderMigrationTemplate(h, strings.ToLower(mode)) }, } - cmd.Flags().Bool(flag.Explain, false, "Show template with inline explanations.") - cmd.Flags().String(flag.MigrationTemplateType, "", "Template type to show, one of [\"sources\", \"target\"]. Default prints both.") + cmd.Flags().String(flag.MigrationModeType, "", "Migration mode template to show, one of [\"all\", \"incremental\"].") + if err := cmd.MarkFlagRequired(flag.MigrationModeType); err != nil { + panic(err) + } return cmd } -func renderMigrationTemplate(h *internal.Helper, templateType string, explain bool) error { - switch templateType { - case "sources": - if explain { - fmt.Fprintln(h.IOStreams.Out, migrationSourcesTemplateWithExplain) - } else { - fmt.Fprintln(h.IOStreams.Out, migrationSourcesTemplate) - } - case "target": - if explain { - fmt.Fprintln(h.IOStreams.Out, migrationTargetTemplateWithExplain) - } else { - fmt.Fprintln(h.IOStreams.Out, migrationTargetTemplate) - } - case "": - fmt.Fprintln(h.IOStreams.Out, color.GreenString("Sources template:")) - if explain { - fmt.Fprintln(h.IOStreams.Out, migrationSourcesTemplateWithExplain) - } else { - fmt.Fprintln(h.IOStreams.Out, migrationSourcesTemplate) - } - fmt.Fprintln(h.IOStreams.Out, color.GreenString("Target template:")) - if explain { - fmt.Fprintln(h.IOStreams.Out, migrationTargetTemplateWithExplain) - } else { - fmt.Fprintln(h.IOStreams.Out, migrationTargetTemplate) - } - default: - return fmt.Errorf("unknown template type %q", templateType) +func renderMigrationTemplate(h *internal.Helper, mode string) error { + variant, ok := definitionTemplates[mode] + if !ok { + return fmt.Errorf("unknown mode %q, allowed values: %s", mode, strings.Join(allowedTemplateModes, ", ")) } + + fmt.Fprintln(h.IOStreams.Out, color.GreenString(variant.heading)) + fmt.Fprintln(h.IOStreams.Out, variant.body) return nil } diff --git a/internal/flag/flag.go b/internal/flag/flag.go index a37a161f..005f7f2a 100644 
--- a/internal/flag/flag.go +++ b/internal/flag/flag.go @@ -121,10 +121,8 @@ const ( MigrationTaskID string = "migration-id" MigrationTaskIDShort string = "m" MigrationPrecheckID string = "precheck-id" - MigrationSources string = "sources" - MigrationTarget string = "target" - MigrationMode string = "mode" - MigrationTemplateType string = "type" + MigrationDefinition string = "definition" + MigrationModeType string = "modetype" MigrationPrecheckOnly string = "precheck-only" ) From 0d35e7cbd507594490c2c2a4882a1b2f9cfeca54 Mon Sep 17 00:00:00 2001 From: yangxin Date: Tue, 25 Nov 2025 17:50:26 +0800 Subject: [PATCH 08/19] use config-file --- internal/cli/serverless/migration/create.go | 18 ++- internal/cli/serverless/migration/helpers.go | 2 +- internal/cli/serverless/migration/template.go | 125 +++++++++++++----- internal/flag/flag.go | 2 +- 4 files changed, 109 insertions(+), 38 deletions(-) diff --git a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go index 33b87106..ea475d5b 100644 --- a/internal/cli/serverless/migration/create.go +++ b/internal/cli/serverless/migration/create.go @@ -3,6 +3,7 @@ package migration import ( "context" "fmt" + "os" "strings" "time" @@ -25,7 +26,7 @@ func CreateCmd(h *internal.Helper) *cobra.Command { Short: "Create a migration", Args: cobra.NoArgs, Example: fmt.Sprintf(` Create a migration: - $ %[1]s serverless migration create -c --display-name --definition '' + $ %[1]s serverless migration create -c --display-name --config-file /path/to/config.json Run migration precheck only with shared inputs: $ %[1]s serverless migration create --precheck-only`, config.CliName), @@ -51,10 +52,19 @@ func CreateCmd(h *internal.Helper) *cobra.Command { if err != nil { return errors.Trace(err) } - definitionStr, err := cmd.Flags().GetString(flag.MigrationDefinition) + configPath, err := cmd.Flags().GetString(flag.MigrationConfigFile) if err != nil { return errors.Trace(err) } + configPath = 
strings.TrimSpace(configPath) + if configPath == "" { + return errors.New("config file path is required") + } + definitionBytes, err := os.ReadFile(configPath) + if err != nil { + return errors.Annotatef(err, "failed to read config file %q", configPath) + } + definitionStr := string(definitionBytes) if strings.TrimSpace(name) == "" { return errors.New("display name is required") @@ -100,14 +110,14 @@ func CreateCmd(h *internal.Helper) *cobra.Command { cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "The ID of the target cluster.") cmd.Flags().StringP(flag.DisplayName, flag.DisplayNameShort, "", "Display name for the migration.") - cmd.Flags().String(flag.MigrationDefinition, "", "Migration definition in JSON. Use \"ticloud serverless migration template --type definition\" for a template.") + cmd.Flags().String(flag.MigrationConfigFile, "", "Path to a migration config JSON file. Use \"ticloud serverless migration template --modetype \" to print templates.") cmd.Flags().Bool(flag.MigrationPrecheckOnly, false, "Run a migration precheck with the provided inputs and exit without creating a task.") return cmd } func markCreateMigrationRequiredFlags(cmd *cobra.Command) error { - for _, fn := range []string{flag.ClusterID, flag.DisplayName, flag.MigrationDefinition} { + for _, fn := range []string{flag.ClusterID, flag.DisplayName, flag.MigrationConfigFile} { if err := cmd.MarkFlagRequired(fn); err != nil { return err } diff --git a/internal/cli/serverless/migration/helpers.go b/internal/cli/serverless/migration/helpers.go index 775f4b55..ab5b1e14 100644 --- a/internal/cli/serverless/migration/helpers.go +++ b/internal/cli/serverless/migration/helpers.go @@ -27,7 +27,7 @@ import ( func parseMigrationDefinition(value string) ([]pkgmigration.Source, pkgmigration.Target, pkgmigration.TaskMode, error) { trimmed := strings.TrimSpace(value) if trimmed == "" { - return nil, pkgmigration.Target{}, "", errors.New("migration definition is required; use --definition") + 
return nil, pkgmigration.Target{}, "", errors.New("migration config is required; use --config-file") } var payload struct { Sources []pkgmigration.Source `json:"sources"` diff --git a/internal/cli/serverless/migration/template.go b/internal/cli/serverless/migration/template.go index 191069e5..a0728c9a 100644 --- a/internal/cli/serverless/migration/template.go +++ b/internal/cli/serverless/migration/template.go @@ -10,6 +10,7 @@ import ( "github.com/tidbcloud/tidbcloud-cli/internal" "github.com/tidbcloud/tidbcloud-cli/internal/config" "github.com/tidbcloud/tidbcloud-cli/internal/flag" + pkgmigration "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" ) const ( @@ -24,10 +25,10 @@ const ( // List at least one migration source "sources": [ { - // Required: source database type + // Required: source database type. Supported values: SOURCE_TYPE_MYSQL, SOURCE_TYPE_ALICLOUD_RDS_MYSQL "sourceType": "SOURCE_TYPE_MYSQL", "connProfile": { - // Optional connection type, PUBLIC or PRIVATE_LINK + // Optional connection type. 
Supported values: PUBLIC, PRIVATE_LINK "connType": "PUBLIC", "host": "10.0.0.2", "port": 3306, @@ -35,6 +36,7 @@ const ( "password": "Passw0rd!", // Optional fields below are needed only for private link or TLS "endpointId": "pl-xxxxxxxx", + // optional TLS settings "security": { // TLS materials must be Base64 encoded "sslCaContent": "", @@ -43,7 +45,7 @@ const ( "certAllowedCn": ["client-cn"] } }, - // Optional block/allow rules to control synced schemas/tables + // Optional block/allow rules to control synced schemas/tables (mutually exclusive with routeRules) "baRules": { "doDbs": ["app_db"], "doTables": [ @@ -51,7 +53,7 @@ const ( {"schema": "app_db", "table": "customers"} ] }, - // Optional route rules to rename objects during migration + // Optional route rules to rename objects during migration (mutually exclusive with baRules) "routeRules": [ { "sourceTable": { @@ -64,10 +66,6 @@ const ( } } ], - // Optional start position for incremental sync (binlog position or GTID) - "binlogName": "mysql-bin.000001", - "binlogPos": 4, - "binlogGtid": "3E11FA47-71CA-11E1-9E33-C80AA9429562:1-12345" } ] }` @@ -81,17 +79,48 @@ const ( }, "sources": [ { + // Required: source database type. Supported values: SOURCE_TYPE_MYSQL, SOURCE_TYPE_ALICLOUD_RDS_MYSQL "sourceType": "SOURCE_TYPE_MYSQL", "connProfile": { + // Optional connection type. Supported values: PUBLIC, PRIVATE_LINK "connType": "PUBLIC", "host": "10.0.0.2", "port": 3306, "user": "dm_sync_user", - "password": "Passw0rd!" 
+ "password": "Passw0rd!", + "endpointId": "pl-xxxxxxxx", + // optional TLS settings + "security": { + // TLS materials must be Base64 encoded + "sslCaContent": "", + "sslCertContent": "", + "sslKeyContent": "", + "certAllowedCn": ["client-cn"] + } }, - // Binlog coordinates are usually required when starting from existing data - "binlogName": "mysql-bin.000777", - "binlogPos": 12345, + // Optional block/allow rules when only part of the data should be replicated (mutually exclusive with routeRules) + "baRules": { + "doDbs": ["app_db"], + "doTables": [ + {"schema": "app_db", "table": "orders"} + ] + }, + // Optional route rule sample for remapping objects during incremental sync (mutually exclusive with baRules) + "routeRules": [ + { + "sourceTable": { + "schemaPattern": "app_db", + "tablePattern": "orders" + }, + "targetTable": { + "schema": "app_db", + "table": "orders_copy" + } + } + ], + // Optional start position for incremental sync (binlog position or GTID) + "binlogName": "mysql-bin.000001", + "binlogPos": 4, "binlogGtid": "3E11FA47-71CA-11E1-9E33-C80AA9429562:1-12345" } ] @@ -99,52 +128,84 @@ const ( ) type templateVariant struct { - heading string - body string + heading string + body string } -var allowedTemplateModes = []string{"all", "incremental"} +var allowedTemplateModes = []pkgmigration.TaskMode{pkgmigration.TASKMODE_ALL, pkgmigration.TASKMODE_INCREMENTAL} -var definitionTemplates = map[string]templateVariant{ - "all": { +var definitionTemplates = map[pkgmigration.TaskMode]templateVariant{ + pkgmigration.TASKMODE_ALL: { heading: "Definition template (mode = ALL)", body: migrationDefinitionAllTemplate, }, - "incremental": { + pkgmigration.TASKMODE_INCREMENTAL: { heading: "Definition template (mode = INCREMENTAL)", body: migrationDefinitionIncrementalTemplate, }, } func TemplateCmd(h *internal.Helper) *cobra.Command { - cmd := &cobra.Command{ - Use: "template", - Short: "Show migration JSON templates", - Args: cobra.NoArgs, - Example: fmt.Sprintf(" 
Show the ALL mode migration template:\n $ %[1]s serverless migration template --modetype all\n\n Show the INCREMENTAL migration template:\n $ %[1]s serverless migration template --modetype incremental\n", config.CliName), - RunE: func(cmd *cobra.Command, args []string) error { - mode, err := cmd.Flags().GetString(flag.MigrationModeType) + cmd := &cobra.Command{ + Use: "template", + Short: "Show migration JSON templates", + Args: cobra.NoArgs, + Example: fmt.Sprintf(" Show the ALL mode migration template:\n $ %[1]s serverless migration template --modetype all\n\n Show the INCREMENTAL migration template:\n $ %[1]s serverless migration template --modetype incremental\n", config.CliName), + PreRunE: func(cmd *cobra.Command, args []string) error { + return cmd.MarkFlagRequired(flag.MigrationModeType) + }, + RunE: func(cmd *cobra.Command, args []string) error { + modeValue, err := cmd.Flags().GetString(flag.MigrationModeType) if err != nil { return err } - return renderMigrationTemplate(h, strings.ToLower(mode)) + mode, err := parseTemplateMode(modeValue) + if err != nil { + return err + } + return renderMigrationTemplate(h, mode) }, } - cmd.Flags().String(flag.MigrationModeType, "", "Migration mode template to show, one of [\"all\", \"incremental\"].") - if err := cmd.MarkFlagRequired(flag.MigrationModeType); err != nil { - panic(err) - } + cmd.Flags().String( + flag.MigrationModeType, + "", + fmt.Sprintf( + "Migration mode template to show, one of [%s].", + strings.Join(allowedTemplateModeStrings(), ", "), + ), + ) return cmd } -func renderMigrationTemplate(h *internal.Helper, mode string) error { +func renderMigrationTemplate(h *internal.Helper, mode pkgmigration.TaskMode) error { variant, ok := definitionTemplates[mode] if !ok { - return fmt.Errorf("unknown mode %q, allowed values: %s", mode, strings.Join(allowedTemplateModes, ", ")) + return fmt.Errorf("unknown mode %q, allowed values: %s", mode, strings.Join(allowedTemplateModeStrings(), ", ")) } 
fmt.Fprintln(h.IOStreams.Out, color.GreenString(variant.heading)) fmt.Fprintln(h.IOStreams.Out, variant.body) return nil } + +func parseTemplateMode(raw string) (pkgmigration.TaskMode, error) { + trimmed := strings.TrimSpace(raw) + if trimmed == "" { + return "", fmt.Errorf("mode is required; use --%s", flag.MigrationModeType) + } + normalized := strings.ToUpper(trimmed) + mode := pkgmigration.TaskMode(normalized) + if _, ok := definitionTemplates[mode]; ok { + return mode, nil + } + return "", fmt.Errorf("unknown mode %q, allowed values: %s", trimmed, strings.Join(allowedTemplateModeStrings(), ", ")) +} + +func allowedTemplateModeStrings() []string { + values := make([]string, 0, len(allowedTemplateModes)) + for _, mode := range allowedTemplateModes { + values = append(values, strings.ToLower(string(mode))) + } + return values +} diff --git a/internal/flag/flag.go b/internal/flag/flag.go index 005f7f2a..846e53e6 100644 --- a/internal/flag/flag.go +++ b/internal/flag/flag.go @@ -121,7 +121,7 @@ const ( MigrationTaskID string = "migration-id" MigrationTaskIDShort string = "m" MigrationPrecheckID string = "precheck-id" - MigrationDefinition string = "definition" + MigrationConfigFile string = "config-file" MigrationModeType string = "modetype" MigrationPrecheckOnly string = "precheck-only" ) From 5dcd89bc48f8411285a1a959538ae5159c7aabee Mon Sep 17 00:00:00 2001 From: yangxin Date: Wed, 26 Nov 2025 14:49:29 +0800 Subject: [PATCH 09/19] add copys --- internal/cli/serverless/migration/create.go | 14 ++++++++++++++ internal/cli/serverless/migration/delete.go | 14 ++++++++++++++ internal/cli/serverless/migration/describe.go | 14 ++++++++++++++ internal/cli/serverless/migration/list.go | 14 ++++++++++++++ internal/cli/serverless/migration/migration.go | 14 ++++++++++++++ internal/cli/serverless/migration/pause.go | 14 ++++++++++++++ internal/cli/serverless/migration/resume.go | 14 ++++++++++++++ internal/cli/serverless/migration/template.go | 14 ++++++++++++++ 8 files 
changed, 112 insertions(+) diff --git a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go index ea475d5b..0e7d764e 100644 --- a/internal/cli/serverless/migration/create.go +++ b/internal/cli/serverless/migration/create.go @@ -1,3 +1,17 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package migration import ( diff --git a/internal/cli/serverless/migration/delete.go b/internal/cli/serverless/migration/delete.go index 23df21ea..9a20755e 100644 --- a/internal/cli/serverless/migration/delete.go +++ b/internal/cli/serverless/migration/delete.go @@ -1,3 +1,17 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package migration import ( diff --git a/internal/cli/serverless/migration/describe.go b/internal/cli/serverless/migration/describe.go index 7fe20ab6..1bcea5ff 100644 --- a/internal/cli/serverless/migration/describe.go +++ b/internal/cli/serverless/migration/describe.go @@ -1,3 +1,17 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package migration import ( diff --git a/internal/cli/serverless/migration/list.go b/internal/cli/serverless/migration/list.go index c883a333..22bdb82f 100644 --- a/internal/cli/serverless/migration/list.go +++ b/internal/cli/serverless/migration/list.go @@ -1,3 +1,17 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package migration import ( diff --git a/internal/cli/serverless/migration/migration.go b/internal/cli/serverless/migration/migration.go index 1660a0e1..1ad0195d 100644 --- a/internal/cli/serverless/migration/migration.go +++ b/internal/cli/serverless/migration/migration.go @@ -1,3 +1,17 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package migration import ( diff --git a/internal/cli/serverless/migration/pause.go b/internal/cli/serverless/migration/pause.go index 66302a53..1ab26852 100644 --- a/internal/cli/serverless/migration/pause.go +++ b/internal/cli/serverless/migration/pause.go @@ -1,3 +1,17 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package migration import ( diff --git a/internal/cli/serverless/migration/resume.go b/internal/cli/serverless/migration/resume.go index 935b398a..f752ac08 100644 --- a/internal/cli/serverless/migration/resume.go +++ b/internal/cli/serverless/migration/resume.go @@ -1,3 +1,17 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package migration import ( diff --git a/internal/cli/serverless/migration/template.go b/internal/cli/serverless/migration/template.go index a0728c9a..bf31ecb9 100644 --- a/internal/cli/serverless/migration/template.go +++ b/internal/cli/serverless/migration/template.go @@ -1,3 +1,17 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package migration import ( From c4f6e7d9541c52d16d67d15890a8562c00e2aa2f Mon Sep 17 00:00:00 2001 From: yangxin Date: Wed, 26 Nov 2025 16:03:13 +0800 Subject: [PATCH 10/19] rename cancel --- internal/cli/serverless/migration/cancel.go | 149 ++++++++++++++++++ internal/cli/serverless/migration/create.go | 146 +++++++++++------ internal/cli/serverless/migration/helpers.go | 77 --------- .../cli/serverless/migration/migration.go | 2 +- internal/flag/flag.go | 3 +- 5 files changed, 247 insertions(+), 130 deletions(-) create mode 100644 internal/cli/serverless/migration/cancel.go delete mode 100644 internal/cli/serverless/migration/helpers.go diff --git a/internal/cli/serverless/migration/cancel.go b/internal/cli/serverless/migration/cancel.go new file mode 100644 index 00000000..9301b0b8 --- /dev/null +++ b/internal/cli/serverless/migration/cancel.go @@ -0,0 +1,149 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package migration + +import ( + "fmt" + + "github.com/AlecAivazis/survey/v2" + "github.com/AlecAivazis/survey/v2/terminal" + "github.com/fatih/color" + "github.com/juju/errors" + "github.com/spf13/cobra" + + "github.com/tidbcloud/tidbcloud-cli/internal" + "github.com/tidbcloud/tidbcloud-cli/internal/config" + "github.com/tidbcloud/tidbcloud-cli/internal/flag" + "github.com/tidbcloud/tidbcloud-cli/internal/service/cloud" + "github.com/tidbcloud/tidbcloud-cli/internal/util" +) + +type CancelOpts struct { + interactive bool +} + +func (c CancelOpts) NonInteractiveFlags() []string { + return []string{ + flag.ClusterID, + flag.MigrationTaskID, + } +} + +func (c *CancelOpts) MarkInteractive(cmd *cobra.Command) error { + for _, fn := range c.NonInteractiveFlags() { + f := cmd.Flags().Lookup(fn) + if f != nil && f.Changed { + c.interactive = false + break + } + } + if !c.interactive { + for _, fn := range c.NonInteractiveFlags() { + if err := cmd.MarkFlagRequired(fn); err != nil { + return err + } + } + } + return nil +} + +func CancelCmd(h *internal.Helper) *cobra.Command { + opts := CancelOpts{interactive: true} + var force bool + + var cmd = &cobra.Command{ + Use: "cancel", + Short: "Cancel a migration task", + Aliases: []string{"rm"}, + Args: cobra.NoArgs, + Example: fmt.Sprintf(` Cancel a migration task in interactive mode: + $ %[1]s serverless migration cancel + + Cancel a migration task in non-interactive mode: + $ %[1]s serverless migration cancel -c --migration-id `, config.CliName), + PreRunE: func(cmd *cobra.Command, args []string) error { + return opts.MarkInteractive(cmd) + }, + RunE: func(cmd *cobra.Command, args []string) error { + d, err := h.Client() + if err != nil { + return err + } + ctx := cmd.Context() + + var clusterID, taskID string + if opts.interactive { + if !h.IOStreams.CanPrompt { + return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") + } + project, err := cloud.GetSelectedProject(ctx, 
h.QueryPageSize, d) + if err != nil { + return err + } + cluster, err := cloud.GetSelectedCluster(ctx, project.ID, h.QueryPageSize, d) + if err != nil { + return err + } + clusterID = cluster.ID + task, err := cloud.GetSelectedMigrationTask(ctx, clusterID, h.QueryPageSize, d) + if err != nil { + return err + } + taskID = task.ID + } else { + var err error + clusterID, err = cmd.Flags().GetString(flag.ClusterID) + if err != nil { + return errors.Trace(err) + } + taskID, err = cmd.Flags().GetString(flag.MigrationTaskID) + if err != nil { + return errors.Trace(err) + } + } + + if !force { + if !h.IOStreams.CanPrompt { + return errors.New("The terminal doesn't support prompt, please run with --force to cancel the migration task") + } + prompt := &survey.Input{ + Message: fmt.Sprintf("%s %s %s", color.BlueString("Please type"), color.HiBlueString("yes"), color.BlueString("to confirm:")), + } + var confirmation string + if err := survey.AskOne(prompt, &confirmation); err != nil { + if err == terminal.InterruptErr { + return util.InterruptError + } + return err + } + if confirmation != "yes" { + return errors.New("Incorrect confirm string entered, skipping migration task cancellation") + } + } + + if _, err := d.CancelMigrationTask(ctx, clusterID, taskID); err != nil { + return errors.Trace(err) + } + + fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration task %s canceled", taskID)) + return nil + }, + } + + cmd.Flags().BoolVar(&force, flag.Force, false, "Cancel without confirmation.") + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration task.") + cmd.Flags().StringP(flag.MigrationTaskID, flag.MigrationTaskIDShort, "", "ID of the migration task to cancel.") + return cmd +} diff --git a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go index 0e7d764e..e13e2dcd 100644 --- a/internal/cli/serverless/migration/create.go +++ b/internal/cli/serverless/migration/create.go @@ -16,6 +16,7 @@ 
package migration import ( "context" + "encoding/json" "fmt" "os" "strings" @@ -40,10 +41,9 @@ func CreateCmd(h *internal.Helper) *cobra.Command { Short: "Create a migration", Args: cobra.NoArgs, Example: fmt.Sprintf(` Create a migration: - $ %[1]s serverless migration create -c --display-name --config-file /path/to/config.json - - Run migration precheck only with shared inputs: - $ %[1]s serverless migration create --precheck-only`, config.CliName), + $ %[1]s serverless migration create -c --display-name --config-file --dryrun + $ %[1]s serverless migration create -c --display-name --config-file +`, config.CliName), PreRunE: func(cmd *cobra.Command, args []string) error { return markCreateMigrationRequiredFlags(cmd) }, @@ -54,7 +54,7 @@ func CreateCmd(h *internal.Helper) *cobra.Command { } ctx := cmd.Context() - precheckOnly, err := cmd.Flags().GetBool(flag.MigrationPrecheckOnly) + dryRun, err := cmd.Flags().GetBool(flag.MigrationDryRun) if err != nil { return errors.Trace(err) } @@ -88,44 +88,38 @@ func CreateCmd(h *internal.Helper) *cobra.Command { return err } - createBody := &pkgmigration.MigrationServiceCreateMigrationBody{ - DisplayName: name, - Sources: sources, - Target: target, - Mode: mode, + if dryRun { + precheckBody := &pkgmigration.MigrationServicePrecheckBody{ + DisplayName: name, + Sources: sources, + Target: target, + Mode: mode, + } + return runMigrationPrecheck(ctx, d, clusterID, precheckBody, h) } - precheckBody := &pkgmigration.MigrationServicePrecheckBody{ + + createBody := &pkgmigration.MigrationServiceCreateMigrationBody{ DisplayName: name, Sources: sources, Target: target, Mode: mode, } - if precheckOnly { - return runMigrationPrecheck(ctx, d, clusterID, precheckBody, h) - } - resp, err := d.CreateMigrationTask(ctx, clusterID, createBody) if err != nil { return errors.Trace(err) } taskID := aws.ToString(resp.MigrationId) - if taskID == "" { - taskID = aws.ToString(resp.DisplayName) - } - if taskID == "" { - taskID = "" - } - 
fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration %s created", taskID)) + fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration %s(%s) created", name, taskID)) return nil }, } cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "The ID of the target cluster.") - cmd.Flags().StringP(flag.DisplayName, flag.DisplayNameShort, "", "Display name for the migration.") + cmd.Flags().String(flag.DisplayName, "", "Display name for the migration.") cmd.Flags().String(flag.MigrationConfigFile, "", "Path to a migration config JSON file. Use \"ticloud serverless migration template --modetype \" to print templates.") - cmd.Flags().Bool(flag.MigrationPrecheckOnly, false, "Run a migration precheck with the provided inputs and exit without creating a task.") + cmd.Flags().Bool(flag.MigrationDryRun, false, "Run a migration precheck (dry run) with the provided inputs without creating a task.") return cmd } @@ -139,7 +133,10 @@ func markCreateMigrationRequiredFlags(cmd *cobra.Command) error { return nil } -const precheckPollInterval = 5 * time.Second +const ( + precheckPollInterval = 5 * time.Second + precheckPollTimeout = time.Minute +) func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clusterID string, body *pkgmigration.MigrationServicePrecheckBody, h *internal.Helper) error { resp, err := client.CreateMigrationPrecheck(ctx, clusterID, body) @@ -154,28 +151,29 @@ func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clu ticker := time.NewTicker(precheckPollInterval) defer ticker.Stop() - var lastStatus pkgmigration.MigrationPrecheckStatus + pollCtx, cancel := context.WithTimeout(ctx, precheckPollTimeout) + defer cancel() + for { select { - case <-ctx.Done(): - return ctx.Err() + case <-pollCtx.Done(): + if pollCtx.Err() == context.DeadlineExceeded { + return errors.Errorf("migration precheck polling timed out after %s", precheckPollTimeout) + } + return pollCtx.Err() case <-ticker.C: - result, err := 
client.GetMigrationPrecheck(ctx, clusterID, precheckID) + result, err := client.GetMigrationPrecheck(pollCtx, clusterID, precheckID) if err != nil { return errors.Trace(err) } - status := precheckStatusOrDefault(result.Status) - if status != lastStatus { - fmt.Fprintf(h.IOStreams.Out, "precheck %s status: %s\n", precheckID, status) - lastStatus = status + finished, err := printPrecheckSummary(precheckID, result.GetStatus(), result, h) + if err != nil { + return err } - if isPrecheckUnfinished(status) { + if !finished { continue } - if err := printPrecheckSummary(precheckID, status, result, h); err != nil { - return err - } - if status == pkgmigration.MIGRATIONPRECHECKSTATUS_FAILED { + if result.GetStatus() == pkgmigration.MIGRATIONPRECHECKSTATUS_FAILED { fmt.Fprintln(h.IOStreams.Out, color.RedString("migration precheck %s failed", precheckID)) return errors.New("migration precheck failed") } @@ -185,13 +183,6 @@ func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clu } } -func precheckStatusOrDefault(value *pkgmigration.MigrationPrecheckStatus) pkgmigration.MigrationPrecheckStatus { - if value == nil || *value == "" { - return pkgmigration.MIGRATIONPRECHECKSTATUS_PENDING - } - return *value -} - func isPrecheckUnfinished(status pkgmigration.MigrationPrecheckStatus) bool { switch status { case pkgmigration.MIGRATIONPRECHECKSTATUS_PENDING, @@ -202,12 +193,19 @@ func isPrecheckUnfinished(status pkgmigration.MigrationPrecheckStatus) bool { } } -func printPrecheckSummary(id string, status pkgmigration.MigrationPrecheckStatus, result *pkgmigration.MigrationPrecheck, h *internal.Helper) error { +func printPrecheckSummary(id string, status pkgmigration.MigrationPrecheckStatus, result *pkgmigration.MigrationPrecheck, h *internal.Helper) (bool, error) { + if isPrecheckUnfinished(status) { + fmt.Fprintf(h.IOStreams.Out, "precheck %s summary (status %s)\n", id, status) + fmt.Fprintf(h.IOStreams.Out, "Total: %d, Success: %d, Warn: %d, Failed: %d\n", + 
aws.ToInt32(result.Total), aws.ToInt32(result.SuccessCnt), aws.ToInt32(result.WarnCnt), aws.ToInt32(result.FailedCnt)) + return false, nil + } + fmt.Fprintf(h.IOStreams.Out, "precheck %s finished with status %s\n", id, status) fmt.Fprintf(h.IOStreams.Out, "Total: %d, Success: %d, Warn: %d, Failed: %d\n", aws.ToInt32(result.Total), aws.ToInt32(result.SuccessCnt), aws.ToInt32(result.WarnCnt), aws.ToInt32(result.FailedCnt)) if len(result.Items) == 0 { - return nil + return true, nil } columns := []output.Column{"Type", "Status", "Description", "Reason", "Solution"} rows := make([]output.Row, 0, len(result.Items)) @@ -228,10 +226,9 @@ func printPrecheckSummary(id string, status pkgmigration.MigrationPrecheckStatus }) } if len(rows) == 0 { - fmt.Fprintln(h.IOStreams.Out, "No warning or failure details returned.") - return nil + return true, nil } - return output.PrintHumanTable(h.IOStreams.Out, columns, rows) + return true, output.PrintHumanTable(h.IOStreams.Out, columns, rows) } func precheckItemType(value *pkgmigration.PrecheckItemType) string { @@ -256,3 +253,52 @@ func shouldPrintPrecheckItem(status *pkgmigration.PrecheckItemStatus) bool { return false } } + +func parseMigrationDefinition(value string) ([]pkgmigration.Source, pkgmigration.Target, pkgmigration.TaskMode, error) { + trimmed := strings.TrimSpace(value) + if trimmed == "" { + return nil, pkgmigration.Target{}, "", errors.New("migration config is required; use --config-file") + } + var payload struct { + Sources []pkgmigration.Source `json:"sources"` + Target *pkgmigration.Target `json:"target"` + Mode string `json:"mode"` + } + if err := json.Unmarshal([]byte(trimmed), &payload); err != nil { + return nil, pkgmigration.Target{}, "", errors.Annotate(err, "invalid migration definition JSON") + } + if len(payload.Sources) == 0 { + return nil, pkgmigration.Target{}, "", errors.New("migration definition must include at least one source") + } + if payload.Target == nil { + return nil, pkgmigration.Target{}, 
"", errors.New("migration definition must include the target block") + } + mode, err := parseMigrationMode(payload.Mode) + if err != nil { + return nil, pkgmigration.Target{}, "", err + } + return payload.Sources, *payload.Target, mode, nil +} + +func parseMigrationMode(value string) (pkgmigration.TaskMode, error) { + trimmed := strings.TrimSpace(value) + if trimmed == "" { + return "", errors.New("mode is required in the migration definition") + } + normalized := strings.ToUpper(trimmed) + mode := pkgmigration.TaskMode(normalized) + for _, allowed := range pkgmigration.AllowedTaskModeEnumValues { + if mode == allowed { + return mode, nil + } + } + return "", errors.Errorf("invalid mode %q, allowed values: %s", value, strings.Join(taskModeValues(), ", ")) +} + +func taskModeValues() []string { + values := make([]string, 0, len(pkgmigration.AllowedTaskModeEnumValues)) + for _, mode := range pkgmigration.AllowedTaskModeEnumValues { + values = append(values, string(mode)) + } + return values +} \ No newline at end of file diff --git a/internal/cli/serverless/migration/helpers.go b/internal/cli/serverless/migration/helpers.go deleted file mode 100644 index ab5b1e14..00000000 --- a/internal/cli/serverless/migration/helpers.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2025 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package migration - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/juju/errors" - - pkgmigration "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/serverless/migration" -) - -func parseMigrationDefinition(value string) ([]pkgmigration.Source, pkgmigration.Target, pkgmigration.TaskMode, error) { - trimmed := strings.TrimSpace(value) - if trimmed == "" { - return nil, pkgmigration.Target{}, "", errors.New("migration config is required; use --config-file") - } - var payload struct { - Sources []pkgmigration.Source `json:"sources"` - Target *pkgmigration.Target `json:"target"` - Mode string `json:"mode"` - } - if err := json.Unmarshal([]byte(trimmed), &payload); err != nil { - return nil, pkgmigration.Target{}, "", errors.Annotate(err, "invalid migration definition JSON") - } - if len(payload.Sources) == 0 { - return nil, pkgmigration.Target{}, "", errors.New("migration definition must include at least one source") - } - if payload.Target == nil { - return nil, pkgmigration.Target{}, "", errors.New("migration definition must include the target block") - } - mode, err := parseMigrationMode(payload.Mode) - if err != nil { - return nil, pkgmigration.Target{}, "", err - } - return payload.Sources, *payload.Target, mode, nil -} - -func parseMigrationMode(value string) (pkgmigration.TaskMode, error) { - trimmed := strings.TrimSpace(value) - if trimmed == "" { - return "", errors.New("mode is required in the migration definition") - } - normalized := strings.ToUpper(trimmed) - if !strings.HasPrefix(normalized, "MODE_") { - normalized = fmt.Sprintf("MODE_%s", normalized) - } - mode := pkgmigration.TaskMode(normalized) - for _, allowed := range pkgmigration.AllowedTaskModeEnumValues { - if mode == allowed { - return mode, nil - } - } - return "", errors.Errorf("invalid mode %q, allowed values: %s", value, strings.Join(taskModeValues(), ", ")) -} - -func taskModeValues() []string { - values := make([]string, 0, 
len(pkgmigration.AllowedTaskModeEnumValues)) - for _, mode := range pkgmigration.AllowedTaskModeEnumValues { - values = append(values, string(mode)) - } - return values -} diff --git a/internal/cli/serverless/migration/migration.go b/internal/cli/serverless/migration/migration.go index 1ad0195d..96f78f02 100644 --- a/internal/cli/serverless/migration/migration.go +++ b/internal/cli/serverless/migration/migration.go @@ -30,7 +30,7 @@ func MigrationCmd(h *internal.Helper) *cobra.Command { cmd.AddCommand(CreateCmd(h)) cmd.AddCommand(DescribeCmd(h)) cmd.AddCommand(ListCmd(h)) - cmd.AddCommand(DeleteCmd(h)) + cmd.AddCommand(CancelCmd(h)) cmd.AddCommand(TemplateCmd(h)) cmd.AddCommand(PauseCmd(h)) cmd.AddCommand(ResumeCmd(h)) diff --git a/internal/flag/flag.go b/internal/flag/flag.go index 846e53e6..6604130d 100644 --- a/internal/flag/flag.go +++ b/internal/flag/flag.go @@ -26,7 +26,6 @@ const ( CSVSkipHeader string = "csv.skip-header" CSVNotNull string = "csv.not-null" DisplayName string = "display-name" - DisplayNameShort string = "n" BranchID string = "branch-id" BranchIDShort string = "b" Debug string = "debug" @@ -123,7 +122,7 @@ const ( MigrationPrecheckID string = "precheck-id" MigrationConfigFile string = "config-file" MigrationModeType string = "modetype" - MigrationPrecheckOnly string = "precheck-only" + MigrationDryRun string = "dryrun" ) const OutputHelp = "Output format, one of [\"human\" \"json\"]. For the complete result, please use json format." 
From 2cbbeabe447d56f9edd21fea6330851847d1aea1 Mon Sep 17 00:00:00 2001 From: yangxin Date: Wed, 26 Nov 2025 16:36:20 +0800 Subject: [PATCH 11/19] add MigrationCmd --- internal/cli/serverless/cluster.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/cli/serverless/cluster.go b/internal/cli/serverless/cluster.go index 7d5c0f9f..97b3281c 100644 --- a/internal/cli/serverless/cluster.go +++ b/internal/cli/serverless/cluster.go @@ -22,6 +22,7 @@ import ( "github.com/tidbcloud/tidbcloud-cli/internal/cli/serverless/changefeed" "github.com/tidbcloud/tidbcloud-cli/internal/cli/serverless/dataimport" "github.com/tidbcloud/tidbcloud-cli/internal/cli/serverless/export" + "github.com/tidbcloud/tidbcloud-cli/internal/cli/serverless/migration" "github.com/tidbcloud/tidbcloud-cli/internal/cli/serverless/sqluser" "github.com/spf13/cobra" @@ -52,6 +53,7 @@ func Cmd(h *internal.Helper) *cobra.Command { serverlessCmd.AddCommand(CapacityCmd(h)) serverlessCmd.AddCommand(authorizednetwork.AuthorizedNetworkCmd(h)) serverlessCmd.AddCommand(changefeed.ChangefeedCmd(h)) + serverlessCmd.AddCommand(migration.MigrationCmd(h)) return serverlessCmd } From dcf083bbb58ddb6b8705a3d1c16f49484aeea9b4 Mon Sep 17 00:00:00 2001 From: yangxin Date: Fri, 28 Nov 2025 14:01:56 +0800 Subject: [PATCH 12/19] rename to delete --- internal/cli/serverless/migration/cancel.go | 149 ----------------- internal/cli/serverless/migration/create.go | 8 +- internal/cli/serverless/migration/delete.go | 22 +-- internal/cli/serverless/migration/describe.go | 2 +- internal/cli/serverless/migration/list.go | 2 +- .../cli/serverless/migration/migration.go | 2 +- internal/cli/serverless/migration/pause.go | 2 +- internal/cli/serverless/migration/resume.go | 2 +- internal/cli/serverless/migration/template.go | 86 +++++----- internal/flag/flag.go | 1 + internal/mock/api_client.go | 154 +++++++++--------- internal/service/cloud/api_client.go | 26 +-- internal/service/cloud/logic.go | 4 +- 
.../v1beta1/serverless/dm.swagger.json | 138 ++++++++++++---- .../v1beta1/serverless/migration/README.md | 2 +- .../serverless/migration/api/openapi.yaml | 10 +- .../serverless/migration/api_migration.go | 142 ++++++++-------- .../migration/model_migration_state.go | 6 +- 18 files changed, 345 insertions(+), 413 deletions(-) delete mode 100644 internal/cli/serverless/migration/cancel.go diff --git a/internal/cli/serverless/migration/cancel.go b/internal/cli/serverless/migration/cancel.go deleted file mode 100644 index 9301b0b8..00000000 --- a/internal/cli/serverless/migration/cancel.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2025 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package migration - -import ( - "fmt" - - "github.com/AlecAivazis/survey/v2" - "github.com/AlecAivazis/survey/v2/terminal" - "github.com/fatih/color" - "github.com/juju/errors" - "github.com/spf13/cobra" - - "github.com/tidbcloud/tidbcloud-cli/internal" - "github.com/tidbcloud/tidbcloud-cli/internal/config" - "github.com/tidbcloud/tidbcloud-cli/internal/flag" - "github.com/tidbcloud/tidbcloud-cli/internal/service/cloud" - "github.com/tidbcloud/tidbcloud-cli/internal/util" -) - -type CancelOpts struct { - interactive bool -} - -func (c CancelOpts) NonInteractiveFlags() []string { - return []string{ - flag.ClusterID, - flag.MigrationTaskID, - } -} - -func (c *CancelOpts) MarkInteractive(cmd *cobra.Command) error { - for _, fn := range c.NonInteractiveFlags() { - f := cmd.Flags().Lookup(fn) - if f != nil && f.Changed { - c.interactive = false - break - } - } - if !c.interactive { - for _, fn := range c.NonInteractiveFlags() { - if err := cmd.MarkFlagRequired(fn); err != nil { - return err - } - } - } - return nil -} - -func CancelCmd(h *internal.Helper) *cobra.Command { - opts := CancelOpts{interactive: true} - var force bool - - var cmd = &cobra.Command{ - Use: "cancel", - Short: "Cancel a migration task", - Aliases: []string{"rm"}, - Args: cobra.NoArgs, - Example: fmt.Sprintf(` Cancel a migration task in interactive mode: - $ %[1]s serverless migration cancel - - Cancel a migration task in non-interactive mode: - $ %[1]s serverless migration cancel -c --migration-id `, config.CliName), - PreRunE: func(cmd *cobra.Command, args []string) error { - return opts.MarkInteractive(cmd) - }, - RunE: func(cmd *cobra.Command, args []string) error { - d, err := h.Client() - if err != nil { - return err - } - ctx := cmd.Context() - - var clusterID, taskID string - if opts.interactive { - if !h.IOStreams.CanPrompt { - return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") - } - project, err := cloud.GetSelectedProject(ctx, 
h.QueryPageSize, d) - if err != nil { - return err - } - cluster, err := cloud.GetSelectedCluster(ctx, project.ID, h.QueryPageSize, d) - if err != nil { - return err - } - clusterID = cluster.ID - task, err := cloud.GetSelectedMigrationTask(ctx, clusterID, h.QueryPageSize, d) - if err != nil { - return err - } - taskID = task.ID - } else { - var err error - clusterID, err = cmd.Flags().GetString(flag.ClusterID) - if err != nil { - return errors.Trace(err) - } - taskID, err = cmd.Flags().GetString(flag.MigrationTaskID) - if err != nil { - return errors.Trace(err) - } - } - - if !force { - if !h.IOStreams.CanPrompt { - return errors.New("The terminal doesn't support prompt, please run with --force to cancel the migration task") - } - prompt := &survey.Input{ - Message: fmt.Sprintf("%s %s %s", color.BlueString("Please type"), color.HiBlueString("yes"), color.BlueString("to confirm:")), - } - var confirmation string - if err := survey.AskOne(prompt, &confirmation); err != nil { - if err == terminal.InterruptErr { - return util.InterruptError - } - return err - } - if confirmation != "yes" { - return errors.New("Incorrect confirm string entered, skipping migration task cancellation") - } - } - - if _, err := d.CancelMigrationTask(ctx, clusterID, taskID); err != nil { - return errors.Trace(err) - } - - fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration task %s canceled", taskID)) - return nil - }, - } - - cmd.Flags().BoolVar(&force, flag.Force, false, "Cancel without confirmation.") - cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration task.") - cmd.Flags().StringP(flag.MigrationTaskID, flag.MigrationTaskIDShort, "", "ID of the migration task to cancel.") - return cmd -} diff --git a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go index e13e2dcd..cd1a4178 100644 --- a/internal/cli/serverless/migration/create.go +++ b/internal/cli/serverless/migration/create.go @@ -105,7 +105,7 
@@ func CreateCmd(h *internal.Helper) *cobra.Command { Mode: mode, } - resp, err := d.CreateMigrationTask(ctx, clusterID, createBody) + resp, err := d.CreateMigration(ctx, clusterID, createBody) if err != nil { return errors.Trace(err) } @@ -117,7 +117,7 @@ func CreateCmd(h *internal.Helper) *cobra.Command { } cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "The ID of the target cluster.") - cmd.Flags().String(flag.DisplayName, "", "Display name for the migration.") + cmd.Flags().StringP(flag.DisplayName, flag.DisplayNameShort, "", "Display name for the migration.") cmd.Flags().String(flag.MigrationConfigFile, "", "Path to a migration config JSON file. Use \"ticloud serverless migration template --modetype \" to print templates.") cmd.Flags().Bool(flag.MigrationDryRun, false, "Run a migration precheck (dry run) with the provided inputs without creating a task.") @@ -153,7 +153,7 @@ func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clu defer ticker.Stop() pollCtx, cancel := context.WithTimeout(ctx, precheckPollTimeout) defer cancel() - + for { select { case <-pollCtx.Done(): @@ -301,4 +301,4 @@ func taskModeValues() []string { values = append(values, string(mode)) } return values -} \ No newline at end of file +} diff --git a/internal/cli/serverless/migration/delete.go b/internal/cli/serverless/migration/delete.go index 9a20755e..dffbe1c5 100644 --- a/internal/cli/serverless/migration/delete.go +++ b/internal/cli/serverless/migration/delete.go @@ -65,14 +65,14 @@ func DeleteCmd(h *internal.Helper) *cobra.Command { var cmd = &cobra.Command{ Use: "delete", - Short: "Cancel a migration task", + Short: "Delete a migration task", Aliases: []string{"rm"}, Args: cobra.NoArgs, - Example: fmt.Sprintf(` Cancel a migration task in interactive mode: - $ %[1]s serverless migration delete + Example: fmt.Sprintf(` Delete a migration task in interactive mode: + $ %[1]s serverless migration delete - Cancel a migration task in non-interactive 
mode: - $ %[1]s serverless migration delete -c --migration-id `, config.CliName), + Delete a migration task in non-interactive mode: + $ %[1]s serverless migration delete -c --migration-id `, config.CliName), PreRunE: func(cmd *cobra.Command, args []string) error { return opts.MarkInteractive(cmd) }, @@ -116,7 +116,7 @@ func DeleteCmd(h *internal.Helper) *cobra.Command { if !force { if !h.IOStreams.CanPrompt { - return errors.New("The terminal doesn't support prompt, please run with --force to cancel the migration task") + return errors.New("The terminal doesn't support prompt, please run with --force to delete the migration task") } prompt := &survey.Input{ Message: fmt.Sprintf("%s %s %s", color.BlueString("Please type"), color.HiBlueString("yes"), color.BlueString("to confirm:")), @@ -129,21 +129,21 @@ func DeleteCmd(h *internal.Helper) *cobra.Command { return err } if confirmation != "yes" { - return errors.New("Incorrect confirm string entered, skipping migration task cancellation") + return errors.New("Incorrect confirm string entered, skipping migration task deletion") } } - if _, err := d.CancelMigrationTask(ctx, clusterID, taskID); err != nil { + if _, err := d.DeleteMigration(ctx, clusterID, taskID); err != nil { return errors.Trace(err) } - fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration task %s canceled", taskID)) + fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration task %s deleted", taskID)) return nil }, } - cmd.Flags().BoolVar(&force, flag.Force, false, "Cancel without confirmation.") + cmd.Flags().BoolVar(&force, flag.Force, false, "Delete without confirmation.") cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration task.") - cmd.Flags().StringP(flag.MigrationTaskID, flag.MigrationTaskIDShort, "", "ID of the migration task to cancel.") + cmd.Flags().StringP(flag.MigrationTaskID, flag.MigrationTaskIDShort, "", "ID of the migration task to delete.") return cmd } diff --git 
a/internal/cli/serverless/migration/describe.go b/internal/cli/serverless/migration/describe.go index 1bcea5ff..6ab03082 100644 --- a/internal/cli/serverless/migration/describe.go +++ b/internal/cli/serverless/migration/describe.go @@ -111,7 +111,7 @@ func DescribeCmd(h *internal.Helper) *cobra.Command { } } - resp, err := d.GetMigrationTask(ctx, clusterID, taskID) + resp, err := d.GetMigration(ctx, clusterID, taskID) if err != nil { return errors.Trace(err) } diff --git a/internal/cli/serverless/migration/list.go b/internal/cli/serverless/migration/list.go index 22bdb82f..6022303d 100644 --- a/internal/cli/serverless/migration/list.go +++ b/internal/cli/serverless/migration/list.go @@ -100,7 +100,7 @@ func ListCmd(h *internal.Helper) *cobra.Command { } pageSize := int32(h.QueryPageSize) - resp, err := d.ListMigrationTasks(ctx, clusterID, &pageSize, nil, nil) + resp, err := d.ListMigrations(ctx, clusterID, &pageSize, nil, nil) if err != nil { return errors.Trace(err) } diff --git a/internal/cli/serverless/migration/migration.go b/internal/cli/serverless/migration/migration.go index 96f78f02..1ad0195d 100644 --- a/internal/cli/serverless/migration/migration.go +++ b/internal/cli/serverless/migration/migration.go @@ -30,7 +30,7 @@ func MigrationCmd(h *internal.Helper) *cobra.Command { cmd.AddCommand(CreateCmd(h)) cmd.AddCommand(DescribeCmd(h)) cmd.AddCommand(ListCmd(h)) - cmd.AddCommand(CancelCmd(h)) + cmd.AddCommand(DeleteCmd(h)) cmd.AddCommand(TemplateCmd(h)) cmd.AddCommand(PauseCmd(h)) cmd.AddCommand(ResumeCmd(h)) diff --git a/internal/cli/serverless/migration/pause.go b/internal/cli/serverless/migration/pause.go index 1ab26852..1931c598 100644 --- a/internal/cli/serverless/migration/pause.go +++ b/internal/cli/serverless/migration/pause.go @@ -109,7 +109,7 @@ func PauseCmd(h *internal.Helper) *cobra.Command { } emptyBody := map[string]interface{}{} - if _, err := d.PauseMigrationTask(ctx, clusterID, taskID, &emptyBody); err != nil { + if _, err := 
d.PauseMigration(ctx, clusterID, taskID, &emptyBody); err != nil { return errors.Trace(err) } diff --git a/internal/cli/serverless/migration/resume.go b/internal/cli/serverless/migration/resume.go index f752ac08..98eaa3f5 100644 --- a/internal/cli/serverless/migration/resume.go +++ b/internal/cli/serverless/migration/resume.go @@ -109,7 +109,7 @@ func ResumeCmd(h *internal.Helper) *cobra.Command { } emptyBody := map[string]interface{}{} - if _, err := d.ResumeMigrationTask(ctx, clusterID, taskID, &emptyBody); err != nil { + if _, err := d.ResumeMigration(ctx, clusterID, taskID, &emptyBody); err != nil { return errors.Trace(err) } diff --git a/internal/cli/serverless/migration/template.go b/internal/cli/serverless/migration/template.go index bf31ecb9..8754d064 100644 --- a/internal/cli/serverless/migration/template.go +++ b/internal/cli/serverless/migration/template.go @@ -142,60 +142,60 @@ const ( ) type templateVariant struct { - heading string - body string + heading string + body string } var allowedTemplateModes = []pkgmigration.TaskMode{pkgmigration.TASKMODE_ALL, pkgmigration.TASKMODE_INCREMENTAL} var definitionTemplates = map[pkgmigration.TaskMode]templateVariant{ - pkgmigration.TASKMODE_ALL: { + pkgmigration.TASKMODE_ALL: { heading: "Definition template (mode = ALL)", body: migrationDefinitionAllTemplate, }, - pkgmigration.TASKMODE_INCREMENTAL: { + pkgmigration.TASKMODE_INCREMENTAL: { heading: "Definition template (mode = INCREMENTAL)", body: migrationDefinitionIncrementalTemplate, }, } func TemplateCmd(h *internal.Helper) *cobra.Command { - cmd := &cobra.Command{ - Use: "template", - Short: "Show migration JSON templates", - Args: cobra.NoArgs, - Example: fmt.Sprintf(" Show the ALL mode migration template:\n $ %[1]s serverless migration template --modetype all\n\n Show the INCREMENTAL migration template:\n $ %[1]s serverless migration template --modetype incremental\n", config.CliName), - PreRunE: func(cmd *cobra.Command, args []string) error { - return 
cmd.MarkFlagRequired(flag.MigrationModeType) - }, - RunE: func(cmd *cobra.Command, args []string) error { - modeValue, err := cmd.Flags().GetString(flag.MigrationModeType) + cmd := &cobra.Command{ + Use: "template", + Short: "Show migration JSON templates", + Args: cobra.NoArgs, + Example: fmt.Sprintf(" Show the ALL mode migration template:\n $ %[1]s serverless migration template --modetype all\n\n Show the INCREMENTAL migration template:\n $ %[1]s serverless migration template --modetype incremental\n", config.CliName), + PreRunE: func(cmd *cobra.Command, args []string) error { + return cmd.MarkFlagRequired(flag.MigrationModeType) + }, + RunE: func(cmd *cobra.Command, args []string) error { + modeValue, err := cmd.Flags().GetString(flag.MigrationModeType) + if err != nil { + return err + } + mode, err := parseTemplateMode(modeValue) if err != nil { return err } - mode, err := parseTemplateMode(modeValue) - if err != nil { - return err - } - return renderMigrationTemplate(h, mode) + return renderMigrationTemplate(h, mode) }, } - cmd.Flags().String( - flag.MigrationModeType, - "", - fmt.Sprintf( - "Migration mode template to show, one of [%s].", - strings.Join(allowedTemplateModeStrings(), ", "), - ), - ) + cmd.Flags().String( + flag.MigrationModeType, + "", + fmt.Sprintf( + "Migration mode template to show, one of [%s].", + strings.Join(allowedTemplateModeStrings(), ", "), + ), + ) return cmd } func renderMigrationTemplate(h *internal.Helper, mode pkgmigration.TaskMode) error { variant, ok := definitionTemplates[mode] if !ok { - return fmt.Errorf("unknown mode %q, allowed values: %s", mode, strings.Join(allowedTemplateModeStrings(), ", ")) + return fmt.Errorf("unknown mode %q, allowed values: %s", mode, strings.Join(allowedTemplateModeStrings(), ", ")) } fmt.Fprintln(h.IOStreams.Out, color.GreenString(variant.heading)) @@ -204,22 +204,22 @@ func renderMigrationTemplate(h *internal.Helper, mode pkgmigration.TaskMode) err } func parseTemplateMode(raw string) 
(pkgmigration.TaskMode, error) { - trimmed := strings.TrimSpace(raw) - if trimmed == "" { - return "", fmt.Errorf("mode is required; use --%s", flag.MigrationModeType) - } - normalized := strings.ToUpper(trimmed) - mode := pkgmigration.TaskMode(normalized) - if _, ok := definitionTemplates[mode]; ok { - return mode, nil - } - return "", fmt.Errorf("unknown mode %q, allowed values: %s", trimmed, strings.Join(allowedTemplateModeStrings(), ", ")) + trimmed := strings.TrimSpace(raw) + if trimmed == "" { + return "", fmt.Errorf("mode is required; use --%s", flag.MigrationModeType) + } + normalized := strings.ToUpper(trimmed) + mode := pkgmigration.TaskMode(normalized) + if _, ok := definitionTemplates[mode]; ok { + return mode, nil + } + return "", fmt.Errorf("unknown mode %q, allowed values: %s", trimmed, strings.Join(allowedTemplateModeStrings(), ", ")) } func allowedTemplateModeStrings() []string { - values := make([]string, 0, len(allowedTemplateModes)) - for _, mode := range allowedTemplateModes { - values = append(values, strings.ToLower(string(mode))) - } - return values + values := make([]string, 0, len(allowedTemplateModes)) + for _, mode := range allowedTemplateModes { + values = append(values, strings.ToLower(string(mode))) + } + return values } diff --git a/internal/flag/flag.go b/internal/flag/flag.go index 6604130d..34797811 100644 --- a/internal/flag/flag.go +++ b/internal/flag/flag.go @@ -26,6 +26,7 @@ const ( CSVSkipHeader string = "csv.skip-header" CSVNotNull string = "csv.not-null" DisplayName string = "display-name" + DisplayNameShort string = "n" BranchID string = "branch-id" BranchIDShort string = "b" Debug string = "debug" diff --git a/internal/mock/api_client.go b/internal/mock/api_client.go index de2232f9..f3a7994a 100644 --- a/internal/mock/api_client.go +++ b/internal/mock/api_client.go @@ -108,36 +108,6 @@ func (_m *TiDBCloudClient) CancelMigrationPrecheck(ctx context.Context, clusterI return r0, r1 } -// CancelMigrationTask provides a mock 
function with given fields: ctx, clusterId, taskId -func (_m *TiDBCloudClient) CancelMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { - ret := _m.Called(ctx, clusterId, taskId) - - if len(ret) == 0 { - panic("no return value specified for CancelMigrationTask") - } - - var r0 *migration.Migration - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.Migration, error)); ok { - return rf(ctx, clusterId, taskId) - } - if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.Migration); ok { - r0 = rf(ctx, clusterId, taskId) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*migration.Migration) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { - r1 = rf(ctx, clusterId, taskId) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // CancelUpload provides a mock function with given fields: ctx, clusterId, uploadId func (_m *TiDBCloudClient) CancelUpload(ctx context.Context, clusterId string, uploadId *string) error { ret := _m.Called(ctx, clusterId, uploadId) @@ -354,28 +324,28 @@ func (_m *TiDBCloudClient) CreateImport(ctx context.Context, clusterId string, b return r0, r1 } -// CreateMigrationPrecheck provides a mock function with given fields: ctx, clusterId, body -func (_m *TiDBCloudClient) CreateMigrationPrecheck(ctx context.Context, clusterId string, body *migration.MigrationServicePrecheckBody) (*migration.CreateMigrationPrecheckResp, error) { +// CreateMigration provides a mock function with given fields: ctx, clusterId, body +func (_m *TiDBCloudClient) CreateMigration(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error) { ret := _m.Called(ctx, clusterId, body) if len(ret) == 0 { - panic("no return value specified for CreateMigrationPrecheck") + panic("no return value specified for CreateMigration") } - var r0 
*migration.CreateMigrationPrecheckResp + var r0 *migration.Migration var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServicePrecheckBody) (*migration.CreateMigrationPrecheckResp, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error)); ok { return rf(ctx, clusterId, body) } - if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServicePrecheckBody) *migration.CreateMigrationPrecheckResp); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServiceCreateMigrationBody) *migration.Migration); ok { r0 = rf(ctx, clusterId, body) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*migration.CreateMigrationPrecheckResp) + r0 = ret.Get(0).(*migration.Migration) } } - if rf, ok := ret.Get(1).(func(context.Context, string, *migration.MigrationServicePrecheckBody) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, string, *migration.MigrationServiceCreateMigrationBody) error); ok { r1 = rf(ctx, clusterId, body) } else { r1 = ret.Error(1) @@ -384,28 +354,28 @@ func (_m *TiDBCloudClient) CreateMigrationPrecheck(ctx context.Context, clusterI return r0, r1 } -// CreateMigrationTask provides a mock function with given fields: ctx, clusterId, body -func (_m *TiDBCloudClient) CreateMigrationTask(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error) { +// CreateMigrationPrecheck provides a mock function with given fields: ctx, clusterId, body +func (_m *TiDBCloudClient) CreateMigrationPrecheck(ctx context.Context, clusterId string, body *migration.MigrationServicePrecheckBody) (*migration.CreateMigrationPrecheckResp, error) { ret := _m.Called(ctx, clusterId, body) if len(ret) == 0 { - panic("no return value specified for CreateMigrationTask") + panic("no return value specified for CreateMigrationPrecheck") } - var r0 
*migration.Migration + var r0 *migration.CreateMigrationPrecheckResp var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServicePrecheckBody) (*migration.CreateMigrationPrecheckResp, error)); ok { return rf(ctx, clusterId, body) } - if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServiceCreateMigrationBody) *migration.Migration); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, *migration.MigrationServicePrecheckBody) *migration.CreateMigrationPrecheckResp); ok { r0 = rf(ctx, clusterId, body) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*migration.Migration) + r0 = ret.Get(0).(*migration.CreateMigrationPrecheckResp) } } - if rf, ok := ret.Get(1).(func(context.Context, string, *migration.MigrationServiceCreateMigrationBody) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, string, *migration.MigrationServicePrecheckBody) error); ok { r1 = rf(ctx, clusterId, body) } else { r1 = ret.Error(1) @@ -624,6 +594,36 @@ func (_m *TiDBCloudClient) DeleteExport(ctx context.Context, clusterId string, e return r0, r1 } +// DeleteMigration provides a mock function with given fields: ctx, clusterId, taskId +func (_m *TiDBCloudClient) DeleteMigration(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { + ret := _m.Called(ctx, clusterId, taskId) + + if len(ret) == 0 { + panic("no return value specified for DeleteMigration") + } + + var r0 *migration.Migration + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.Migration, error)); ok { + return rf(ctx, clusterId, taskId) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.Migration); ok { + r0 = rf(ctx, clusterId, taskId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*migration.Migration) + } 
+ } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, clusterId, taskId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // DeleteSQLUser provides a mock function with given fields: ctx, clusterID, userName func (_m *TiDBCloudClient) DeleteSQLUser(ctx context.Context, clusterID string, userName string) (*iam.ApiBasicResp, error) { ret := _m.Called(ctx, clusterID, userName) @@ -1014,29 +1014,29 @@ func (_m *TiDBCloudClient) GetImport(ctx context.Context, clusterId string, id s return r0, r1 } -// GetMigrationPrecheck provides a mock function with given fields: ctx, clusterId, precheckId -func (_m *TiDBCloudClient) GetMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (*migration.MigrationPrecheck, error) { - ret := _m.Called(ctx, clusterId, precheckId) +// GetMigration provides a mock function with given fields: ctx, clusterId, taskId +func (_m *TiDBCloudClient) GetMigration(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { + ret := _m.Called(ctx, clusterId, taskId) if len(ret) == 0 { - panic("no return value specified for GetMigrationPrecheck") + panic("no return value specified for GetMigration") } - var r0 *migration.MigrationPrecheck + var r0 *migration.Migration var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.MigrationPrecheck, error)); ok { - return rf(ctx, clusterId, precheckId) + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.Migration, error)); ok { + return rf(ctx, clusterId, taskId) } - if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.MigrationPrecheck); ok { - r0 = rf(ctx, clusterId, precheckId) + if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.Migration); ok { + r0 = rf(ctx, clusterId, taskId) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*migration.MigrationPrecheck) + r0 = ret.Get(0).(*migration.Migration) } } if 
rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { - r1 = rf(ctx, clusterId, precheckId) + r1 = rf(ctx, clusterId, taskId) } else { r1 = ret.Error(1) } @@ -1044,29 +1044,29 @@ func (_m *TiDBCloudClient) GetMigrationPrecheck(ctx context.Context, clusterId s return r0, r1 } -// GetMigrationTask provides a mock function with given fields: ctx, clusterId, taskId -func (_m *TiDBCloudClient) GetMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { - ret := _m.Called(ctx, clusterId, taskId) +// GetMigrationPrecheck provides a mock function with given fields: ctx, clusterId, precheckId +func (_m *TiDBCloudClient) GetMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (*migration.MigrationPrecheck, error) { + ret := _m.Called(ctx, clusterId, precheckId) if len(ret) == 0 { - panic("no return value specified for GetMigrationTask") + panic("no return value specified for GetMigrationPrecheck") } - var r0 *migration.Migration + var r0 *migration.MigrationPrecheck var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.Migration, error)); ok { - return rf(ctx, clusterId, taskId) + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*migration.MigrationPrecheck, error)); ok { + return rf(ctx, clusterId, precheckId) } - if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.Migration); ok { - r0 = rf(ctx, clusterId, taskId) + if rf, ok := ret.Get(0).(func(context.Context, string, string) *migration.MigrationPrecheck); ok { + r0 = rf(ctx, clusterId, precheckId) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*migration.Migration) + r0 = ret.Get(0).(*migration.MigrationPrecheck) } } if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { - r1 = rf(ctx, clusterId, taskId) + r1 = rf(ctx, clusterId, precheckId) } else { r1 = ret.Error(1) } @@ -1374,12 +1374,12 @@ func (_m *TiDBCloudClient) ListImports(ctx 
context.Context, clusterId string, pa return r0, r1 } -// ListMigrationTasks provides a mock function with given fields: ctx, clusterId, pageSize, pageToken, orderBy -func (_m *TiDBCloudClient) ListMigrationTasks(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationsResp, error) { +// ListMigrations provides a mock function with given fields: ctx, clusterId, pageSize, pageToken, orderBy +func (_m *TiDBCloudClient) ListMigrations(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationsResp, error) { ret := _m.Called(ctx, clusterId, pageSize, pageToken, orderBy) if len(ret) == 0 { - panic("no return value specified for ListMigrationTasks") + panic("no return value specified for ListMigrations") } var r0 *migration.ListMigrationsResp @@ -1524,12 +1524,12 @@ func (_m *TiDBCloudClient) PartialUpdateCluster(ctx context.Context, clusterId s return r0, r1 } -// PauseMigrationTask provides a mock function with given fields: ctx, clusterId, taskId, body -func (_m *TiDBCloudClient) PauseMigrationTask(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { +// PauseMigration provides a mock function with given fields: ctx, clusterId, taskId, body +func (_m *TiDBCloudClient) PauseMigration(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { ret := _m.Called(ctx, clusterId, taskId, body) if len(ret) == 0 { - panic("no return value specified for PauseMigrationTask") + panic("no return value specified for PauseMigration") } var r0 map[string]interface{} @@ -1614,12 +1614,12 @@ func (_m *TiDBCloudClient) Restore(ctx context.Context, body *br.V1beta1RestoreR return r0, r1 } -// ResumeMigrationTask provides a mock function with given fields: ctx, clusterId, taskId, body -func (_m *TiDBCloudClient) ResumeMigrationTask(ctx 
context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { +// ResumeMigration provides a mock function with given fields: ctx, clusterId, taskId, body +func (_m *TiDBCloudClient) ResumeMigration(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { ret := _m.Called(ctx, clusterId, taskId, body) if len(ret) == 0 { - panic("no return value specified for ResumeMigrationTask") + panic("no return value specified for ResumeMigration") } var r0 map[string]interface{} diff --git a/internal/service/cloud/api_client.go b/internal/service/cloud/api_client.go index afdccf21..6960525c 100644 --- a/internal/service/cloud/api_client.go +++ b/internal/service/cloud/api_client.go @@ -107,21 +107,21 @@ type TiDBCloudClient interface { CancelMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (map[string]interface{}, error) - CancelMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) + DeleteMigration(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) CreateMigrationPrecheck(ctx context.Context, clusterId string, body *migration.MigrationServicePrecheckBody) (*migration.CreateMigrationPrecheckResp, error) - CreateMigrationTask(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error) + CreateMigration(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error) GetMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (*migration.MigrationPrecheck, error) - GetMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) + GetMigration(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) - ListMigrationTasks(ctx context.Context, clusterId string, pageSize 
*int32, pageToken *string, orderBy *string) (*migration.ListMigrationsResp, error) + ListMigrations(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationsResp, error) - PauseMigrationTask(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) + PauseMigration(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) - ResumeMigrationTask(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) + ResumeMigration(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) ListSQLUsers(ctx context.Context, clusterID string, pageSize *int32, pageToken *string) (*iam.ApiListSqlUsersRsp, error) @@ -495,8 +495,8 @@ func (d *ClientDelegate) CancelMigrationPrecheck(ctx context.Context, clusterId return res, parseError(err, h) } -func (d *ClientDelegate) CancelMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { - res, h, err := d.mc.MigrationAPI.MigrationServiceCancelMigration(ctx, clusterId, taskId).Execute() +func (d *ClientDelegate) DeleteMigration(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { + res, h, err := d.mc.MigrationAPI.MigrationServiceDeleteMigration(ctx, clusterId, taskId).Execute() return res, parseError(err, h) } @@ -509,7 +509,7 @@ func (d *ClientDelegate) CreateMigrationPrecheck(ctx context.Context, clusterId return res, parseError(err, h) } -func (d *ClientDelegate) CreateMigrationTask(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error) { +func (d *ClientDelegate) CreateMigration(ctx context.Context, clusterId string, body *migration.MigrationServiceCreateMigrationBody) (*migration.Migration, error) { r := 
d.mc.MigrationAPI.MigrationServiceCreateMigration(ctx, clusterId) if body != nil { r = r.Body(*body) @@ -523,12 +523,12 @@ func (d *ClientDelegate) GetMigrationPrecheck(ctx context.Context, clusterId str return res, parseError(err, h) } -func (d *ClientDelegate) GetMigrationTask(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { +func (d *ClientDelegate) GetMigration(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { res, h, err := d.mc.MigrationAPI.MigrationServiceGetMigration(ctx, clusterId, taskId).Execute() return res, parseError(err, h) } -func (d *ClientDelegate) ListMigrationTasks(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationsResp, error) { +func (d *ClientDelegate) ListMigrations(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationsResp, error) { r := d.mc.MigrationAPI.MigrationServiceListMigrations(ctx, clusterId) if pageToken != nil { r = r.PageToken(*pageToken) @@ -543,7 +543,7 @@ func (d *ClientDelegate) ListMigrationTasks(ctx context.Context, clusterId strin return res, parseError(err, h) } -func (d *ClientDelegate) PauseMigrationTask(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { +func (d *ClientDelegate) PauseMigration(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { payload := map[string]interface{}{} if body != nil { payload = *body @@ -552,7 +552,7 @@ func (d *ClientDelegate) PauseMigrationTask(ctx context.Context, clusterId strin return res, parseError(err, h) } -func (d *ClientDelegate) ResumeMigrationTask(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { +func (d *ClientDelegate) ResumeMigration(ctx context.Context, clusterId string, 
taskId string, body *map[string]interface{}) (map[string]interface{}, error) { payload := map[string]interface{}{} if body != nil { payload = *body diff --git a/internal/service/cloud/logic.go b/internal/service/cloud/logic.go index a943226b..de4600ef 100644 --- a/internal/service/cloud/logic.go +++ b/internal/service/cloud/logic.go @@ -1048,7 +1048,7 @@ func GetSelectedChangefeed(ctx context.Context, clusterID string, pageSize int64 func GetSelectedMigrationTask(ctx context.Context, clusterID string, pageSize int64, client TiDBCloudClient) (*MigrationTask, error) { var items = make([]interface{}, 0) pageSizeInt32 := int32(pageSize) - resp, err := client.ListMigrationTasks(ctx, clusterID, &pageSizeInt32, nil, nil) + resp, err := client.ListMigrations(ctx, clusterID, &pageSizeInt32, nil, nil) if err != nil { return nil, errors.Trace(err) } @@ -1074,7 +1074,7 @@ func GetSelectedMigrationTask(ctx context.Context, clusterID string, pageSize in } appendMigrationTaskItems(resp.Migrations) for resp.NextPageToken != nil && *resp.NextPageToken != "" { - resp, err = client.ListMigrationTasks(ctx, clusterID, &pageSizeInt32, resp.NextPageToken, nil) + resp, err = client.ListMigrations(ctx, clusterID, &pageSizeInt32, resp.NextPageToken, nil) if err != nil { return nil, errors.Trace(err) } diff --git a/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json b/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json index 25a1e68f..39663e32 100644 --- a/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json +++ b/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json @@ -11,9 +11,15 @@ } ], "host": "serverless.tidbapi.com", - "schemes": ["https"], - "consumes": ["application/json"], - "produces": ["application/json"], + "schemes": [ + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], "paths": { "/v1beta1/clusters/{clusterId}/migrations": { "get": { @@ -65,7 +71,9 @@ "type": "string" } ], - "tags": ["Migration"], + "tags": [ + "Migration" + ], "x-codeSamples": 
[ { "label": "curl", @@ -108,7 +116,9 @@ } } ], - "tags": ["Migration"], + "tags": [ + "Migration" + ], "x-codeSamples": [ { "label": "curl", @@ -152,7 +162,9 @@ "type": "string" } ], - "tags": ["Migration"], + "tags": [ + "Migration" + ], "x-codeSamples": [ { "label": "curl", @@ -162,8 +174,8 @@ ] }, "delete": { - "summary": "Cancel a migration", - "operationId": "MigrationService_CancelMigration", + "summary": "Delete a migration", + "operationId": "MigrationService_DeleteMigration", "responses": { "200": { "description": "A successful response.", @@ -188,13 +200,15 @@ }, { "name": "migrationId", - "description": "The ID of the migration to cancel.", + "description": "The ID of the migration to delete.", "in": "path", "required": true, "type": "string" } ], - "tags": ["Migration"], + "tags": [ + "Migration" + ], "x-codeSamples": [ { "label": "curl", @@ -247,7 +261,9 @@ } } ], - "tags": ["Migration"], + "tags": [ + "Migration" + ], "x-codeSamples": [ { "label": "curl", @@ -300,7 +316,9 @@ } } ], - "tags": ["Migration"], + "tags": [ + "Migration" + ], "x-codeSamples": [ { "label": "curl", @@ -345,7 +363,9 @@ } } ], - "tags": ["Migration"], + "tags": [ + "Migration" + ], "x-codeSamples": [ { "label": "curl", @@ -389,7 +409,9 @@ "type": "string" } ], - "tags": ["Migration"], + "tags": [ + "Migration" + ], "x-codeSamples": [ { "label": "curl", @@ -432,7 +454,9 @@ "type": "string" } ], - "tags": ["Migration"], + "tags": [ + "Migration" + ], "x-codeSamples": [ { "label": "curl", @@ -514,11 +538,19 @@ ] } }, - "required": ["connType", "port", "user", "password"] + "required": [ + "connType", + "port", + "user", + "password" + ] }, "ConnType": { "type": "string", - "enum": ["PUBLIC", "PRIVATE_LINK"], + "enum": [ + "PUBLIC", + "PRIVATE_LINK" + ], "description": "The connection type used to connect to the source database.\n\n - PUBLIC: Connect over the public internet.\n - PRIVATE_LINK: Connect via Private Link/Private Endpoint." 
}, "CreateMigrationPrecheckResp": { @@ -681,8 +713,14 @@ }, "Migration.State": { "type": "string", - "enum": ["CREATING", "RUNNING", "PAUSED", "CANCELED", "FAILED"], - "description": "Overall state of a migration.\n\n - CREATING: Task is being created.\n - RUNNING: Task is actively running.\n - PAUSED: Task is paused.\n - CANCELED: Task has been canceled.\n - FAILED: Task failed with error." + "enum": [ + "CREATING", + "RUNNING", + "PAUSED", + "FAILED", + "DELETING" + ], + "description": "Overall state of a migration.\n\n - CREATING: Task is being created.\n - RUNNING: Task is actively running.\n - PAUSED: Task is paused.\n - FAILED: Task failed with error.\n - DELETING: Task is being deleted." }, "MigrationPrecheck": { "type": "object", @@ -738,7 +776,13 @@ }, "MigrationPrecheck.Status": { "type": "string", - "enum": ["RUNNING", "FINISHED", "PENDING", "FAILED", "CANCELED"], + "enum": [ + "RUNNING", + "FINISHED", + "PENDING", + "FAILED", + "CANCELED" + ], "description": " - RUNNING: Precheck is in progress.\n - FINISHED: Precheck finished successfully.\n - PENDING: Precheck is pending.\n - FAILED: Precheck failed.\n - CANCELED: Precheck is canceled." }, "MigrationService.CreateMigrationBody": { @@ -773,7 +817,12 @@ ] } }, - "required": ["displayName", "sources", "target", "mode"] + "required": [ + "displayName", + "sources", + "target", + "mode" + ] }, "MigrationService.PauseMigrationBody": { "type": "object", @@ -811,7 +860,12 @@ ] } }, - "required": ["displayName", "sources", "target", "mode"] + "required": [ + "displayName", + "sources", + "target", + "mode" + ] }, "MigrationService.ResumeMigrationBody": { "type": "object", @@ -862,7 +916,11 @@ }, "PrecheckItem.Status": { "type": "string", - "enum": ["SUCCESS", "WARNING", "FAILED"], + "enum": [ + "SUCCESS", + "WARNING", + "FAILED" + ], "description": " - SUCCESS: Check passed successfully.\n - WARNING: Check resulted in a warning.\n - FAILED: Check failed." 
}, "PrecheckItemType": { @@ -998,11 +1056,17 @@ ] } }, - "required": ["connProfile", "sourceType"] + "required": [ + "connProfile", + "sourceType" + ] }, "Source.SourceType": { "type": "string", - "enum": ["MYSQL", "ALICLOUD_RDS_MYSQL"], + "enum": [ + "MYSQL", + "ALICLOUD_RDS_MYSQL" + ], "description": "The source database type.\n\n - MYSQL: Self-managed MySQL.\n - ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL." }, "Status": { @@ -1091,12 +1155,22 @@ }, "SubTask.Stage": { "type": "string", - "enum": ["RUNNING", "PAUSED", "FAILED", "FINISHED", "UNKNOWN"], + "enum": [ + "RUNNING", + "PAUSED", + "FAILED", + "FINISHED", + "UNKNOWN" + ], "description": "The high-level lifecycle stage of a subtask.\n\n - RUNNING: Subtask is running.\n - PAUSED: Subtask is paused.\n - FAILED: Subtask failed.\n - FINISHED: Subtask finished successfully.\n - UNKNOWN: Subtask stage is unknown." }, "SubTask.Step": { "type": "string", - "enum": ["DUMP", "LOAD", "SYNC"], + "enum": [ + "DUMP", + "LOAD", + "SYNC" + ], "description": "The current step within a subtask.\n\n - DUMP: Dump/export data from source.\n - LOAD: Load/import data into target.\n - SYNC: Sync/replicate binlog changes." }, "SyncDetail": { @@ -1146,11 +1220,17 @@ "description": "Target database password." } }, - "required": ["user", "password"] + "required": [ + "user", + "password" + ] }, "TaskMode": { "type": "string", - "enum": ["ALL", "INCREMENTAL"], + "enum": [ + "ALL", + "INCREMENTAL" + ], "description": "Migration task mode.\n\n - ALL: Full + incremental migration (all phases).\n - INCREMENTAL: Incremental-only migration (replication)." 
} } diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/README.md b/pkg/tidbcloud/v1beta1/serverless/migration/README.md index 1c2c746b..03b7dac1 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/README.md +++ b/pkg/tidbcloud/v1beta1/serverless/migration/README.md @@ -78,9 +78,9 @@ All URIs are relative to *https://serverless.tidbapi.com* Class | Method | HTTP request | Description ------------ | ------------- | ------------- | ------------- -*MigrationAPI* | [**MigrationServiceCancelMigration**](docs/MigrationAPI.md#migrationservicecancelmigration) | **Delete** /v1beta1/clusters/{clusterId}/migrations/{migrationId} | Cancel a migration *MigrationAPI* | [**MigrationServiceCancelPrecheck**](docs/MigrationAPI.md#migrationservicecancelprecheck) | **Delete** /v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId} | Cancel a migration precheck *MigrationAPI* | [**MigrationServiceCreateMigration**](docs/MigrationAPI.md#migrationservicecreatemigration) | **Post** /v1beta1/clusters/{clusterId}/migrations | Create a migration +*MigrationAPI* | [**MigrationServiceDeleteMigration**](docs/MigrationAPI.md#migrationservicedeletemigration) | **Delete** /v1beta1/clusters/{clusterId}/migrations/{migrationId} | Delete a migration *MigrationAPI* | [**MigrationServiceGetMigration**](docs/MigrationAPI.md#migrationservicegetmigration) | **Get** /v1beta1/clusters/{clusterId}/migrations/{migrationId} | Get a migration *MigrationAPI* | [**MigrationServiceGetPrecheck**](docs/MigrationAPI.md#migrationservicegetprecheck) | **Get** /v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId} | Get a migration precheck *MigrationAPI* | [**MigrationServiceListMigrations**](docs/MigrationAPI.md#migrationservicelistmigrations) | **Get** /v1beta1/clusters/{clusterId}/migrations | List migrations diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml b/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml index 3e13e322..05c8ae5f 100644 --- 
a/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml +++ b/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml @@ -104,7 +104,7 @@ paths: x-codegen-request-body-name: body /v1beta1/clusters/{clusterId}/migrations/{migrationId}: delete: - operationId: MigrationService_CancelMigration + operationId: MigrationService_DeleteMigration parameters: - description: The ID of the cluster. in: path @@ -112,7 +112,7 @@ paths: required: true schema: type: string - - description: The ID of the migration to cancel. + - description: The ID of the migration to delete. in: path name: migrationId required: true @@ -131,7 +131,7 @@ paths: schema: $ref: '#/components/schemas/Status' description: An unexpected error response. - summary: Cancel a migration + summary: Delete a migration tags: - Migration x-codeSamples: @@ -654,14 +654,14 @@ components: - CREATING: Task is being created. - RUNNING: Task is actively running. - PAUSED: Task is paused. - - CANCELED: Task has been canceled. - FAILED: Task failed with error. + - DELETING: Task is being deleted. 
enum: - CREATING - RUNNING - PAUSED - - CANCELED - FAILED + - DELETING type: string MigrationPrecheck: example: diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go b/pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go index 57549068..f1844f06 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/api_migration.go @@ -22,53 +22,53 @@ import ( // MigrationAPIService MigrationAPI service type MigrationAPIService service -type ApiMigrationServiceCancelMigrationRequest struct { - ctx context.Context - ApiService *MigrationAPIService - clusterId string - migrationId string +type ApiMigrationServiceCancelPrecheckRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + precheckId string } -func (r ApiMigrationServiceCancelMigrationRequest) Execute() (*Migration, *http.Response, error) { - return r.ApiService.MigrationServiceCancelMigrationExecute(r) +func (r ApiMigrationServiceCancelPrecheckRequest) Execute() (map[string]interface{}, *http.Response, error) { + return r.ApiService.MigrationServiceCancelPrecheckExecute(r) } /* -MigrationServiceCancelMigration Cancel a migration +MigrationServiceCancelPrecheck Cancel a migration precheck @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). @param clusterId The ID of the cluster. - @param migrationId The ID of the migration to cancel. - @return ApiMigrationServiceCancelMigrationRequest + @param precheckId The ID of the precheck to cancel. 
+ @return ApiMigrationServiceCancelPrecheckRequest */ -func (a *MigrationAPIService) MigrationServiceCancelMigration(ctx context.Context, clusterId string, migrationId string) ApiMigrationServiceCancelMigrationRequest { - return ApiMigrationServiceCancelMigrationRequest{ - ApiService: a, - ctx: ctx, - clusterId: clusterId, - migrationId: migrationId, +func (a *MigrationAPIService) MigrationServiceCancelPrecheck(ctx context.Context, clusterId string, precheckId string) ApiMigrationServiceCancelPrecheckRequest { + return ApiMigrationServiceCancelPrecheckRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + precheckId: precheckId, } } // Execute executes the request // -// @return Migration -func (a *MigrationAPIService) MigrationServiceCancelMigrationExecute(r ApiMigrationServiceCancelMigrationRequest) (*Migration, *http.Response, error) { +// @return map[string]interface{} +func (a *MigrationAPIService) MigrationServiceCancelPrecheckExecute(r ApiMigrationServiceCancelPrecheckRequest) (map[string]interface{}, *http.Response, error) { var ( localVarHTTPMethod = http.MethodDelete localVarPostBody interface{} formFiles []formFile - localVarReturnValue *Migration + localVarReturnValue map[string]interface{} ) - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCancelMigration") + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCancelPrecheck") if err != nil { return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} } - localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations/{migrationId}" + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId}" localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) - localVarPath = strings.Replace(localVarPath, "{"+"migrationId"+"}", 
url.PathEscape(parameterValueToString(r.migrationId, "migrationId")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"precheckId"+"}", url.PathEscape(parameterValueToString(r.precheckId, "precheckId")), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} @@ -136,60 +136,65 @@ func (a *MigrationAPIService) MigrationServiceCancelMigrationExecute(r ApiMigrat return localVarReturnValue, localVarHTTPResponse, nil } -type ApiMigrationServiceCancelPrecheckRequest struct { +type ApiMigrationServiceCreateMigrationRequest struct { ctx context.Context ApiService *MigrationAPIService clusterId string - precheckId string + body *MigrationServiceCreateMigrationBody } -func (r ApiMigrationServiceCancelPrecheckRequest) Execute() (map[string]interface{}, *http.Response, error) { - return r.ApiService.MigrationServiceCancelPrecheckExecute(r) +func (r ApiMigrationServiceCreateMigrationRequest) Body(body MigrationServiceCreateMigrationBody) ApiMigrationServiceCreateMigrationRequest { + r.body = &body + return r +} + +func (r ApiMigrationServiceCreateMigrationRequest) Execute() (*Migration, *http.Response, error) { + return r.ApiService.MigrationServiceCreateMigrationExecute(r) } /* -MigrationServiceCancelPrecheck Cancel a migration precheck +MigrationServiceCreateMigration Create a migration @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - @param clusterId The ID of the cluster. - @param precheckId The ID of the precheck to cancel. - @return ApiMigrationServiceCancelPrecheckRequest + @param clusterId The ID of the cluster to create the migration in. 
+ @return ApiMigrationServiceCreateMigrationRequest */ -func (a *MigrationAPIService) MigrationServiceCancelPrecheck(ctx context.Context, clusterId string, precheckId string) ApiMigrationServiceCancelPrecheckRequest { - return ApiMigrationServiceCancelPrecheckRequest{ +func (a *MigrationAPIService) MigrationServiceCreateMigration(ctx context.Context, clusterId string) ApiMigrationServiceCreateMigrationRequest { + return ApiMigrationServiceCreateMigrationRequest{ ApiService: a, ctx: ctx, clusterId: clusterId, - precheckId: precheckId, } } // Execute executes the request // -// @return map[string]interface{} -func (a *MigrationAPIService) MigrationServiceCancelPrecheckExecute(r ApiMigrationServiceCancelPrecheckRequest) (map[string]interface{}, *http.Response, error) { +// @return Migration +func (a *MigrationAPIService) MigrationServiceCreateMigrationExecute(r ApiMigrationServiceCreateMigrationRequest) (*Migration, *http.Response, error) { var ( - localVarHTTPMethod = http.MethodDelete + localVarHTTPMethod = http.MethodPost localVarPostBody interface{} formFiles []formFile - localVarReturnValue map[string]interface{} + localVarReturnValue *Migration ) - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCancelPrecheck") + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCreateMigration") if err != nil { return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} } - localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrationPrechecks/{precheckId}" + localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations" localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) - localVarPath = strings.Replace(localVarPath, "{"+"precheckId"+"}", url.PathEscape(parameterValueToString(r.precheckId, "precheckId")), -1) localVarHeaderParams := make(map[string]string) 
localVarQueryParams := url.Values{} localVarFormParams := url.Values{} + if r.body == nil { + return localVarReturnValue, nil, reportError("body is required and must be specified") + } // to determine the Content-Type header - localVarHTTPContentTypes := []string{} + localVarHTTPContentTypes := []string{"application/json"} // set Content-Type header localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) @@ -205,6 +210,8 @@ func (a *MigrationAPIService) MigrationServiceCancelPrecheckExecute(r ApiMigrati if localVarHTTPHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept } + // body params + localVarPostBody = r.body req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) if err != nil { return localVarReturnValue, nil, err @@ -250,65 +257,60 @@ func (a *MigrationAPIService) MigrationServiceCancelPrecheckExecute(r ApiMigrati return localVarReturnValue, localVarHTTPResponse, nil } -type ApiMigrationServiceCreateMigrationRequest struct { - ctx context.Context - ApiService *MigrationAPIService - clusterId string - body *MigrationServiceCreateMigrationBody -} - -func (r ApiMigrationServiceCreateMigrationRequest) Body(body MigrationServiceCreateMigrationBody) ApiMigrationServiceCreateMigrationRequest { - r.body = &body - return r +type ApiMigrationServiceDeleteMigrationRequest struct { + ctx context.Context + ApiService *MigrationAPIService + clusterId string + migrationId string } -func (r ApiMigrationServiceCreateMigrationRequest) Execute() (*Migration, *http.Response, error) { - return r.ApiService.MigrationServiceCreateMigrationExecute(r) +func (r ApiMigrationServiceDeleteMigrationRequest) Execute() (*Migration, *http.Response, error) { + return r.ApiService.MigrationServiceDeleteMigrationExecute(r) } /* -MigrationServiceCreateMigration Create a migration +MigrationServiceDeleteMigration Delete a migration 
@param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background(). - @param clusterId The ID of the cluster to create the migration in. - @return ApiMigrationServiceCreateMigrationRequest + @param clusterId The ID of the cluster. + @param migrationId The ID of the migration to delete. + @return ApiMigrationServiceDeleteMigrationRequest */ -func (a *MigrationAPIService) MigrationServiceCreateMigration(ctx context.Context, clusterId string) ApiMigrationServiceCreateMigrationRequest { - return ApiMigrationServiceCreateMigrationRequest{ - ApiService: a, - ctx: ctx, - clusterId: clusterId, +func (a *MigrationAPIService) MigrationServiceDeleteMigration(ctx context.Context, clusterId string, migrationId string) ApiMigrationServiceDeleteMigrationRequest { + return ApiMigrationServiceDeleteMigrationRequest{ + ApiService: a, + ctx: ctx, + clusterId: clusterId, + migrationId: migrationId, } } // Execute executes the request // // @return Migration -func (a *MigrationAPIService) MigrationServiceCreateMigrationExecute(r ApiMigrationServiceCreateMigrationRequest) (*Migration, *http.Response, error) { +func (a *MigrationAPIService) MigrationServiceDeleteMigrationExecute(r ApiMigrationServiceDeleteMigrationRequest) (*Migration, *http.Response, error) { var ( - localVarHTTPMethod = http.MethodPost + localVarHTTPMethod = http.MethodDelete localVarPostBody interface{} formFiles []formFile localVarReturnValue *Migration ) - localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceCreateMigration") + localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "MigrationAPIService.MigrationServiceDeleteMigration") if err != nil { return localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()} } - localVarPath := localBasePath + "/v1beta1/clusters/{clusterId}/migrations" + localVarPath := localBasePath + 
"/v1beta1/clusters/{clusterId}/migrations/{migrationId}" localVarPath = strings.Replace(localVarPath, "{"+"clusterId"+"}", url.PathEscape(parameterValueToString(r.clusterId, "clusterId")), -1) + localVarPath = strings.Replace(localVarPath, "{"+"migrationId"+"}", url.PathEscape(parameterValueToString(r.migrationId, "migrationId")), -1) localVarHeaderParams := make(map[string]string) localVarQueryParams := url.Values{} localVarFormParams := url.Values{} - if r.body == nil { - return localVarReturnValue, nil, reportError("body is required and must be specified") - } // to determine the Content-Type header - localVarHTTPContentTypes := []string{"application/json"} + localVarHTTPContentTypes := []string{} // set Content-Type header localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes) @@ -324,8 +326,6 @@ func (a *MigrationAPIService) MigrationServiceCreateMigrationExecute(r ApiMigrat if localVarHTTPHeaderAccept != "" { localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept } - // body params - localVarPostBody = r.body req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles) if err != nil { return localVarReturnValue, nil, err diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_state.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_state.go index 56ed947e..703c78be 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_state.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_migration_state.go @@ -14,7 +14,7 @@ import ( "encoding/json" ) -// MigrationState Overall state of a migration. - CREATING: Task is being created. - RUNNING: Task is actively running. - PAUSED: Task is paused. - CANCELED: Task has been canceled. - FAILED: Task failed with error. +// MigrationState Overall state of a migration. - CREATING: Task is being created. - RUNNING: Task is actively running. 
- PAUSED: Task is paused. - FAILED: Task failed with error. - DELETING: Task is being deleted. type MigrationState string // List of Migration.State @@ -22,8 +22,8 @@ const ( MIGRATIONSTATE_CREATING MigrationState = "CREATING" MIGRATIONSTATE_RUNNING MigrationState = "RUNNING" MIGRATIONSTATE_PAUSED MigrationState = "PAUSED" - MIGRATIONSTATE_CANCELED MigrationState = "CANCELED" MIGRATIONSTATE_FAILED MigrationState = "FAILED" + MIGRATIONSTATE_DELETING MigrationState = "DELETING" ) // All allowed values of MigrationState enum @@ -31,8 +31,8 @@ var AllowedMigrationStateEnumValues = []MigrationState{ "CREATING", "RUNNING", "PAUSED", - "CANCELED", "FAILED", + "DELETING", } func (v *MigrationState) UnmarshalJSON(src []byte) error { From 0d765016bdb9cf9eef77704dd5a8c123a57b65c0 Mon Sep 17 00:00:00 2001 From: yangxin Date: Thu, 4 Dec 2025 14:05:44 +0800 Subject: [PATCH 13/19] fmt --- internal/service/cloud/api_client.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/internal/service/cloud/api_client.go b/internal/service/cloud/api_client.go index d453cbdc..0ddd2dea 100644 --- a/internal/service/cloud/api_client.go +++ b/internal/service/cloud/api_client.go @@ -793,12 +793,10 @@ func NewApiClient(rt http.RoundTripper, serverlessEndpoint string, iamEndpoint s migrationCfg.UserAgent = userAgent return branch.NewAPIClient(branchCfg), cluster.NewAPIClient(clusterCfg), - br.NewAPIClient(backupRestoreCfg), - imp.NewAPIClient(importCfg), export.NewAPIClient(exportCfg), - iam.NewAPIClient(iamCfg), auditlog.NewAPIClient(auditLogCfg), - cdc.NewAPIClient(cdcCfg), privatelink.NewAPIClient(privateLinkCfg), - migration.NewAPIClient(migrationCfg), nil - + br.NewAPIClient(backupRestoreCfg), imp.NewAPIClient(importCfg), + export.NewAPIClient(exportCfg), iam.NewAPIClient(iamCfg), + auditlog.NewAPIClient(auditLogCfg), cdc.NewAPIClient(cdcCfg), + privatelink.NewAPIClient(privateLinkCfg), migration.NewAPIClient(migrationCfg), nil } func NewDigestTransport(publicKey, 
privateKey string) http.RoundTripper { From a1b2a8682ada9e62224f3d7bfde14fa55da77678 Mon Sep 17 00:00:00 2001 From: yangxin Date: Fri, 5 Dec 2025 15:51:13 +0800 Subject: [PATCH 14/19] polish --- go.mod | 1 + go.sum | 2 ++ internal/cli/serverless/migration/create.go | 24 +++++++++++-- internal/cli/serverless/migration/template.go | 36 +++++++++++-------- internal/flag/flag.go | 5 ++- .../v1beta1/serverless/dm.swagger.json | 5 +-- .../serverless/migration/api/openapi.yaml | 2 ++ .../migration/model_source_source_type.go | 4 ++- 8 files changed, 55 insertions(+), 24 deletions(-) diff --git a/go.mod b/go.mod index d949bff1..731db47d 100644 --- a/go.mod +++ b/go.mod @@ -31,6 +31,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.18.2 github.com/stretchr/testify v1.9.0 + github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a github.com/tidbcloud/tidbcloud-cli/pkg v0.0.1 github.com/xo/usql v0.19.2 github.com/zalando/go-keyring v0.2.3 diff --git a/go.sum b/go.sum index 4c870cc0..a751a31c 100644 --- a/go.sum +++ b/go.sum @@ -641,6 +641,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a h1:a6TNDN9CgG+cYjaeN8l2mc4kSz2iMiCDQxPEyltUV/I= +github.com/tailscale/hujson v0.0.0-20250605163823-992244df8c5a/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo= github.com/thda/tds v0.1.7 h1:s29kbnJK0agL3ps85A/sb9XS2uxgKF5UJ6AZjbyqXX4= github.com/thda/tds v0.1.7/go.mod h1:isLIF1oZdXfkqVMJM8RyNrsjlHPlTKnPlnsBs7ngZcM= github.com/trinodb/trino-go-client v0.315.0 h1:9mU+42VGw9Hnp9R1hkhWlIrQp9o+V01Gx1KlHjTkM1c= diff --git a/internal/cli/serverless/migration/create.go 
b/internal/cli/serverless/migration/create.go index cd1a4178..89d25702 100644 --- a/internal/cli/serverless/migration/create.go +++ b/internal/cli/serverless/migration/create.go @@ -26,6 +26,7 @@ import ( "github.com/fatih/color" "github.com/juju/errors" "github.com/spf13/cobra" + "github.com/tailscale/hujson" "github.com/tidbcloud/tidbcloud-cli/internal" "github.com/tidbcloud/tidbcloud-cli/internal/config" @@ -41,7 +42,7 @@ func CreateCmd(h *internal.Helper) *cobra.Command { Short: "Create a migration", Args: cobra.NoArgs, Example: fmt.Sprintf(` Create a migration: - $ %[1]s serverless migration create -c --display-name --config-file --dryrun + $ %[1]s serverless migration create -c --display-name --config-file --dry-run $ %[1]s serverless migration create -c --display-name --config-file `, config.CliName), PreRunE: func(cmd *cobra.Command, args []string) error { @@ -118,7 +119,7 @@ func CreateCmd(h *internal.Helper) *cobra.Command { cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "The ID of the target cluster.") cmd.Flags().StringP(flag.DisplayName, flag.DisplayNameShort, "", "Display name for the migration.") - cmd.Flags().String(flag.MigrationConfigFile, "", "Path to a migration config JSON file. Use \"ticloud serverless migration template --modetype \" to print templates.") + cmd.Flags().String(flag.MigrationConfigFile, "", "Path to a migration config JSON file. Use \"ticloud serverless migration template --mode \" to print templates.") cmd.Flags().Bool(flag.MigrationDryRun, false, "Run a migration precheck (dry run) with the provided inputs without creating a task.") return cmd @@ -154,6 +155,7 @@ func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clu pollCtx, cancel := context.WithTimeout(ctx, precheckPollTimeout) defer cancel() + // Poll precheck status until it finishes or the overall timeout is hit. 
for { select { case <-pollCtx.Done(): @@ -264,7 +266,11 @@ func parseMigrationDefinition(value string) ([]pkgmigration.Source, pkgmigration Target *pkgmigration.Target `json:"target"` Mode string `json:"mode"` } - if err := json.Unmarshal([]byte(trimmed), &payload); err != nil { + stdJson, err := standardizeJSON([]byte(trimmed)) + if err != nil { + return nil, pkgmigration.Target{}, "", errors.Annotate(err, "invalid migration definition JSON") + } + if err := json.Unmarshal(stdJson, &payload); err != nil { return nil, pkgmigration.Target{}, "", errors.Annotate(err, "invalid migration definition JSON") } if len(payload.Sources) == 0 { @@ -302,3 +308,15 @@ func taskModeValues() []string { } return values } + +// standardizeJSON accepts JSON With Commas and Comments(JWCC) see +// https://nigeltao.github.io/blog/2021/json-with-commas-comments.html) and +// returns a standard JSON byte slice ready for json.Unmarshal. +func standardizeJSON(b []byte) ([]byte, error) { + ast, err := hujson.Parse(b) + if err != nil { + return b, err + } + ast.Standardize() + return ast.Pack(), nil +} diff --git a/internal/cli/serverless/migration/template.go b/internal/cli/serverless/migration/template.go index 8754d064..6768a795 100644 --- a/internal/cli/serverless/migration/template.go +++ b/internal/cli/serverless/migration/template.go @@ -39,17 +39,20 @@ const ( // List at least one migration source "sources": [ { - // Required: source database type. Supported values: SOURCE_TYPE_MYSQL, SOURCE_TYPE_ALICLOUD_RDS_MYSQL - "sourceType": "SOURCE_TYPE_MYSQL", + // Required: source database type. Supported values: MYSQL, ALICLOUD_RDS_MYSQL, AWS_RDS_MYSQL + "sourceType": "MYSQL", "connProfile": { - // Optional connection type. Supported values: PUBLIC, PRIVATE_LINK + // Required connection type. Supported values: PUBLIC, PRIVATE_LINK + // PUBLIC connections require host "connType": "PUBLIC", + "host": "10.0.0.8", + // PRIVATE_LINK connections use endpointId. 
Get endpointId by 'ticloud plc' commands. + "connType": "PRIVATE_LINK", + "endpointId": "pl-xxxxxxxx", "host": "10.0.0.2", "port": 3306, "user": "dm_sync_user", "password": "Passw0rd!", - // Optional fields below are needed only for private link or TLS - "endpointId": "pl-xxxxxxxx", // optional TLS settings "security": { // TLS materials must be Base64 encoded @@ -93,16 +96,19 @@ const ( }, "sources": [ { - // Required: source database type. Supported values: SOURCE_TYPE_MYSQL, SOURCE_TYPE_ALICLOUD_RDS_MYSQL - "sourceType": "SOURCE_TYPE_MYSQL", + // Required: source database type. Supported values: MYSQL, ALICLOUD_RDS_MYSQL, AWS_RDS_MYSQL + "sourceType": "MYSQL", "connProfile": { - // Optional connection type. Supported values: PUBLIC, PRIVATE_LINK + // Required connection type. Supported values: PUBLIC, PRIVATE_LINK + // PUBLIC connections require host "connType": "PUBLIC", - "host": "10.0.0.2", + "host": "10.0.0.8", + // PRIVATE_LINK connections use endpointId. Get endpointId by 'ticloud plc' commands. 
+ "connType": "PRIVATE_LINK", + "endpointId": "pl-xxxxxxxx", "port": 3306, "user": "dm_sync_user", "password": "Passw0rd!", - "endpointId": "pl-xxxxxxxx", // optional TLS settings "security": { // TLS materials must be Base64 encoded @@ -164,12 +170,12 @@ func TemplateCmd(h *internal.Helper) *cobra.Command { Use: "template", Short: "Show migration JSON templates", Args: cobra.NoArgs, - Example: fmt.Sprintf(" Show the ALL mode migration template:\n $ %[1]s serverless migration template --modetype all\n\n Show the INCREMENTAL migration template:\n $ %[1]s serverless migration template --modetype incremental\n", config.CliName), + Example: fmt.Sprintf(" Show the ALL mode migration template:\n $ %[1]s serverless migration template --mode all\n\n Show the INCREMENTAL migration template:\n $ %[1]s serverless migration template --mode incremental\n", config.CliName), PreRunE: func(cmd *cobra.Command, args []string) error { - return cmd.MarkFlagRequired(flag.MigrationModeType) + return cmd.MarkFlagRequired(flag.MigrationMode) }, RunE: func(cmd *cobra.Command, args []string) error { - modeValue, err := cmd.Flags().GetString(flag.MigrationModeType) + modeValue, err := cmd.Flags().GetString(flag.MigrationMode) if err != nil { return err } @@ -182,7 +188,7 @@ func TemplateCmd(h *internal.Helper) *cobra.Command { } cmd.Flags().String( - flag.MigrationModeType, + flag.MigrationMode, "", fmt.Sprintf( "Migration mode template to show, one of [%s].", @@ -206,7 +212,7 @@ func renderMigrationTemplate(h *internal.Helper, mode pkgmigration.TaskMode) err func parseTemplateMode(raw string) (pkgmigration.TaskMode, error) { trimmed := strings.TrimSpace(raw) if trimmed == "" { - return "", fmt.Errorf("mode is required; use --%s", flag.MigrationModeType) + return "", fmt.Errorf("mode is required; use --%s", flag.MigrationMode) } normalized := strings.ToUpper(trimmed) mode := pkgmigration.TaskMode(normalized) diff --git a/internal/flag/flag.go b/internal/flag/flag.go index 05df10e9..76defe98 
100644 --- a/internal/flag/flag.go +++ b/internal/flag/flag.go @@ -127,10 +127,9 @@ const ( MigrationTaskID string = "migration-id" MigrationTaskIDShort string = "m" - MigrationPrecheckID string = "precheck-id" MigrationConfigFile string = "config-file" - MigrationModeType string = "modetype" - MigrationDryRun string = "dryrun" + MigrationMode string = "mode" + MigrationDryRun string = "dry-run" ) const OutputHelp = "Output format, one of [\"human\" \"json\"]. For the complete result, please use json format." diff --git a/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json b/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json index 39663e32..b58bf9f7 100644 --- a/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json +++ b/pkg/tidbcloud/v1beta1/serverless/dm.swagger.json @@ -1065,9 +1065,10 @@ "type": "string", "enum": [ "MYSQL", - "ALICLOUD_RDS_MYSQL" + "ALICLOUD_RDS_MYSQL", + "AWS_RDS_MYSQL" ], - "description": "The source database type.\n\n - MYSQL: Self-managed MySQL.\n - ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL." + "description": "The source database type.\n\n - MYSQL: Self-managed MySQL.\n - ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL.\n - AWS_RDS_MYSQL: Amazon RDS for MySQL." }, "Status": { "type": "object", diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml b/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml index 05c8ae5f..19a6b41b 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml +++ b/pkg/tidbcloud/v1beta1/serverless/migration/api/openapi.yaml @@ -964,9 +964,11 @@ components: - MYSQL: Self-managed MySQL. - ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL. + - AWS_RDS_MYSQL: Amazon RDS for MySQL. 
enum: - MYSQL - ALICLOUD_RDS_MYSQL + - AWS_RDS_MYSQL type: string Status: example: diff --git a/pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go b/pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go index cb1feb4e..dea0370f 100644 --- a/pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go +++ b/pkg/tidbcloud/v1beta1/serverless/migration/model_source_source_type.go @@ -14,19 +14,21 @@ import ( "encoding/json" ) -// SourceSourceType The source database type. - MYSQL: Self-managed MySQL. - ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL. +// SourceSourceType The source database type. - MYSQL: Self-managed MySQL. - ALICLOUD_RDS_MYSQL: Alibaba Cloud RDS for MySQL. - AWS_RDS_MYSQL: Amazon RDS for MySQL. type SourceSourceType string // List of Source.SourceType const ( SOURCESOURCETYPE_MYSQL SourceSourceType = "MYSQL" SOURCESOURCETYPE_ALICLOUD_RDS_MYSQL SourceSourceType = "ALICLOUD_RDS_MYSQL" + SOURCESOURCETYPE_AWS_RDS_MYSQL SourceSourceType = "AWS_RDS_MYSQL" ) // All allowed values of SourceSourceType enum var AllowedSourceSourceTypeEnumValues = []SourceSourceType{ "MYSQL", "ALICLOUD_RDS_MYSQL", + "AWS_RDS_MYSQL", } func (v *SourceSourceType) UnmarshalJSON(src []byte) error { From f5adb76a5d37e91df0c8fc5507c3c6775fbf94ec Mon Sep 17 00:00:00 2001 From: yangxin Date: Fri, 5 Dec 2025 15:59:28 +0800 Subject: [PATCH 15/19] fmt --- internal/cli/serverless/migration/create.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go index 89d25702..613c2a04 100644 --- a/internal/cli/serverless/migration/create.go +++ b/internal/cli/serverless/migration/create.go @@ -309,7 +309,7 @@ func taskModeValues() []string { return values } -// standardizeJSON accepts JSON With Commas and Comments(JWCC) see +// standardizeJSON accepts JSON With Commas and Comments(JWCC) see // 
https://nigeltao.github.io/blog/2021/json-with-commas-comments.html) and // returns a standard JSON byte slice ready for json.Unmarshal. func standardizeJSON(b []byte) ([]byte, error) { From 8d5186a0568086a035aecb38a8c44db8b81b90d8 Mon Sep 17 00:00:00 2001 From: yangxin Date: Wed, 10 Dec 2025 15:35:08 +0800 Subject: [PATCH 16/19] polish --- go.mod | 1 + go.sum | 2 + internal/cli/serverless/migration/create.go | 45 ++++++++----------- internal/cli/serverless/migration/delete.go | 32 ++++++------- internal/cli/serverless/migration/describe.go | 24 +++++----- internal/cli/serverless/migration/list.go | 36 +++------------ internal/cli/serverless/migration/pause.go | 27 ++++++----- internal/cli/serverless/migration/resume.go | 27 ++++++----- internal/cli/serverless/migration/template.go | 15 ++++--- internal/flag/flag.go | 10 ++--- internal/service/cloud/api_client.go | 30 +++++-------- internal/service/cloud/logic.go | 2 +- 12 files changed, 109 insertions(+), 142 deletions(-) diff --git a/go.mod b/go.mod index 731db47d..ebc9b6df 100644 --- a/go.mod +++ b/go.mod @@ -44,6 +44,7 @@ require ( require ( filippo.io/edwards25519 v1.1.0 // indirect + github.com/AlekSi/pointer v1.2.0 // indirect github.com/alecthomas/chroma v0.10.0 // indirect github.com/alecthomas/chroma/v2 v2.14.0 // indirect github.com/alessio/shellescape v1.4.1 // indirect diff --git a/go.sum b/go.sum index a751a31c..b23afa23 100644 --- a/go.sum +++ b/go.sum @@ -27,6 +27,8 @@ github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XB github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= github.com/AlecAivazis/survey/v2 v2.3.6 h1:NvTuVHISgTHEHeBFqt6BHOe4Ny/NwGZr7w+F8S9ziyw= github.com/AlecAivazis/survey/v2 v2.3.6/go.mod h1:4AuI9b7RjAR+G7v9+C4YSlX/YL3K3cWNXgWXOhllqvI= +github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= +github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0= 
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= diff --git a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go index 613c2a04..bf7d14d1 100644 --- a/internal/cli/serverless/migration/create.go +++ b/internal/cli/serverless/migration/create.go @@ -19,6 +19,7 @@ import ( "encoding/json" "fmt" "os" + "slices" "strings" "time" @@ -42,8 +43,8 @@ func CreateCmd(h *internal.Helper) *cobra.Command { Short: "Create a migration", Args: cobra.NoArgs, Example: fmt.Sprintf(` Create a migration: - $ %[1]s serverless migration create -c --display-name --config-file --dry-run - $ %[1]s serverless migration create -c --display-name --config-file + $ %[1]s serverless migration create -c --display-name --config-file --dry-run + $ %[1]s serverless migration create -c --display-name --config-file `, config.CliName), PreRunE: func(cmd *cobra.Command, args []string) error { return markCreateMigrationRequiredFlags(cmd) @@ -67,6 +68,9 @@ func CreateCmd(h *internal.Helper) *cobra.Command { if err != nil { return errors.Trace(err) } + if strings.TrimSpace(name) == "" { + return errors.New("display name is required") + } configPath, err := cmd.Flags().GetString(flag.MigrationConfigFile) if err != nil { return errors.Trace(err) @@ -81,9 +85,6 @@ func CreateCmd(h *internal.Helper) *cobra.Command { } definitionStr := string(definitionBytes) - if strings.TrimSpace(name) == "" { - return errors.New("display name is required") - } sources, target, mode, err := parseMigrationDefinition(definitionStr) if err != nil { return err @@ -111,8 +112,8 @@ func CreateCmd(h *internal.Helper) *cobra.Command { return errors.Trace(err) } - taskID := aws.ToString(resp.MigrationId) - fmt.Fprintln(h.IOStreams.Out, 
color.GreenString("migration %s(%s) created", name, taskID)) + migrationID := aws.ToString(resp.MigrationId) + fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration %s(%s) created", name, migrationID)) return nil }, } @@ -120,7 +121,7 @@ func CreateCmd(h *internal.Helper) *cobra.Command { cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "The ID of the target cluster.") cmd.Flags().StringP(flag.DisplayName, flag.DisplayNameShort, "", "Display name for the migration.") cmd.Flags().String(flag.MigrationConfigFile, "", "Path to a migration config JSON file. Use \"ticloud serverless migration template --mode \" to print templates.") - cmd.Flags().Bool(flag.MigrationDryRun, false, "Run a migration precheck (dry run) with the provided inputs without creating a task.") + cmd.Flags().Bool(flag.MigrationDryRun, false, "Run a migration precheck (dry run) with the provided inputs without creating a migration.") return cmd } @@ -168,7 +169,7 @@ func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clu if err != nil { return errors.Trace(err) } - finished, err := printPrecheckSummary(precheckID, result.GetStatus(), result, h) + finished, err := printPrecheckSummary(result, h) if err != nil { return err } @@ -195,15 +196,15 @@ func isPrecheckUnfinished(status pkgmigration.MigrationPrecheckStatus) bool { } } -func printPrecheckSummary(id string, status pkgmigration.MigrationPrecheckStatus, result *pkgmigration.MigrationPrecheck, h *internal.Helper) (bool, error) { - if isPrecheckUnfinished(status) { - fmt.Fprintf(h.IOStreams.Out, "precheck %s summary (status %s)\n", id, status) +func printPrecheckSummary(result *pkgmigration.MigrationPrecheck, h *internal.Helper) (bool, error) { + if isPrecheckUnfinished(result.GetStatus()) { + fmt.Fprintf(h.IOStreams.Out, "precheck %s summary (status %s)\n", result.GetPrecheckId(), result.GetStatus()) fmt.Fprintf(h.IOStreams.Out, "Total: %d, Success: %d, Warn: %d, Failed: %d\n", aws.ToInt32(result.Total), 
aws.ToInt32(result.SuccessCnt), aws.ToInt32(result.WarnCnt), aws.ToInt32(result.FailedCnt)) return false, nil } - fmt.Fprintf(h.IOStreams.Out, "precheck %s finished with status %s\n", id, status) + fmt.Fprintf(h.IOStreams.Out, "precheck %s finished with status %s\n", result.GetPrecheckId(), result.GetStatus()) fmt.Fprintf(h.IOStreams.Out, "Total: %d, Success: %d, Warn: %d, Failed: %d\n", aws.ToInt32(result.Total), aws.ToInt32(result.SuccessCnt), aws.ToInt32(result.WarnCnt), aws.ToInt32(result.FailedCnt)) if len(result.Items) == 0 { @@ -289,24 +290,14 @@ func parseMigrationDefinition(value string) ([]pkgmigration.Source, pkgmigration func parseMigrationMode(value string) (pkgmigration.TaskMode, error) { trimmed := strings.TrimSpace(value) if trimmed == "" { - return "", errors.New("mode is required in the migration definition") + return "", errors.New("empty config file") } normalized := strings.ToUpper(trimmed) mode := pkgmigration.TaskMode(normalized) - for _, allowed := range pkgmigration.AllowedTaskModeEnumValues { - if mode == allowed { - return mode, nil - } - } - return "", errors.Errorf("invalid mode %q, allowed values: %s", value, strings.Join(taskModeValues(), ", ")) -} - -func taskModeValues() []string { - values := make([]string, 0, len(pkgmigration.AllowedTaskModeEnumValues)) - for _, mode := range pkgmigration.AllowedTaskModeEnumValues { - values = append(values, string(mode)) + if slices.Contains(pkgmigration.AllowedTaskModeEnumValues, mode) { + return mode, nil } - return values + return "", errors.Errorf("invalid mode %q, allowed values: %s", value, pkgmigration.AllowedTaskModeEnumValues) } // standardizeJSON accepts JSON With Commas and Comments(JWCC) see diff --git a/internal/cli/serverless/migration/delete.go b/internal/cli/serverless/migration/delete.go index dffbe1c5..a71b3546 100644 --- a/internal/cli/serverless/migration/delete.go +++ b/internal/cli/serverless/migration/delete.go @@ -37,7 +37,7 @@ type DeleteOpts struct { func (c DeleteOpts) 
NonInteractiveFlags() []string { return []string{ flag.ClusterID, - flag.MigrationTaskID, + flag.MigrationID, } } @@ -65,14 +65,14 @@ func DeleteCmd(h *internal.Helper) *cobra.Command { var cmd = &cobra.Command{ Use: "delete", - Short: "Delete a migration task", + Short: "Delete a migration", Aliases: []string{"rm"}, Args: cobra.NoArgs, - Example: fmt.Sprintf(` Delete a migration task in interactive mode: - $ %[1]s serverless migration delete + Example: fmt.Sprintf(` Delete a migration in interactive mode: + $ %[1]s serverless migration delete - Delete a migration task in non-interactive mode: - $ %[1]s serverless migration delete -c --migration-id `, config.CliName), + Delete a migration in non-interactive mode: + $ %[1]s serverless migration delete -c --migration-id `, config.CliName), PreRunE: func(cmd *cobra.Command, args []string) error { return opts.MarkInteractive(cmd) }, @@ -83,7 +83,7 @@ func DeleteCmd(h *internal.Helper) *cobra.Command { } ctx := cmd.Context() - var clusterID, taskID string + var clusterID, migrationID string if opts.interactive { if !h.IOStreams.CanPrompt { return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") @@ -97,18 +97,18 @@ func DeleteCmd(h *internal.Helper) *cobra.Command { return err } clusterID = cluster.ID - task, err := cloud.GetSelectedMigrationTask(ctx, clusterID, h.QueryPageSize, d) + migration, err := cloud.GetSelectedMigration(ctx, clusterID, h.QueryPageSize, d) if err != nil { return err } - taskID = task.ID + migrationID = migration.ID } else { var err error clusterID, err = cmd.Flags().GetString(flag.ClusterID) if err != nil { return errors.Trace(err) } - taskID, err = cmd.Flags().GetString(flag.MigrationTaskID) + migrationID, err = cmd.Flags().GetString(flag.MigrationID) if err != nil { return errors.Trace(err) } @@ -116,7 +116,7 @@ func DeleteCmd(h *internal.Helper) *cobra.Command { if !force { if !h.IOStreams.CanPrompt { - return errors.New("The terminal doesn't support 
prompt, please run with --force to delete the migration task") + return errors.New("The terminal doesn't support prompt, please run with --force to delete the migration") } prompt := &survey.Input{ Message: fmt.Sprintf("%s %s %s", color.BlueString("Please type"), color.HiBlueString("yes"), color.BlueString("to confirm:")), @@ -129,21 +129,21 @@ func DeleteCmd(h *internal.Helper) *cobra.Command { return err } if confirmation != "yes" { - return errors.New("Incorrect confirm string entered, skipping migration task deletion") + return errors.New("Incorrect confirm string entered, skipping migration deletion") } } - if _, err := d.DeleteMigration(ctx, clusterID, taskID); err != nil { + if _, err := d.DeleteMigration(ctx, clusterID, migrationID); err != nil { return errors.Trace(err) } - fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration task %s deleted", taskID)) + fmt.Fprintln(h.IOStreams.Out, color.GreenString("migration %s deleted", migrationID)) return nil }, } cmd.Flags().BoolVar(&force, flag.Force, false, "Delete without confirmation.") - cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration task.") - cmd.Flags().StringP(flag.MigrationTaskID, flag.MigrationTaskIDShort, "", "ID of the migration task to delete.") + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration.") + cmd.Flags().StringP(flag.MigrationID, flag.MigrationIDShort, "", "ID of the migration to delete.") return cmd } diff --git a/internal/cli/serverless/migration/describe.go b/internal/cli/serverless/migration/describe.go index 6ab03082..fb0a68f7 100644 --- a/internal/cli/serverless/migration/describe.go +++ b/internal/cli/serverless/migration/describe.go @@ -34,7 +34,7 @@ type DescribeOpts struct { func (c DescribeOpts) NonInteractiveFlags() []string { return []string{ flag.ClusterID, - flag.MigrationTaskID, + flag.MigrationID, } } @@ -61,14 +61,14 @@ func DescribeCmd(h *internal.Helper) *cobra.Command { 
var cmd = &cobra.Command{ Use: "describe", - Short: "Describe a migration task", + Short: "Describe a migration", Aliases: []string{"get"}, Args: cobra.NoArgs, - Example: fmt.Sprintf(` Describe a migration task in interactive mode: + Example: fmt.Sprintf(` Describe a migration in interactive mode: $ %[1]s serverless migration describe - Describe a migration task in non-interactive mode: - $ %[1]s serverless migration describe -c --migration-id `, config.CliName), + Describe a migration in non-interactive mode: + $ %[1]s serverless migration describe -c --migration-id `, config.CliName), PreRunE: func(cmd *cobra.Command, args []string) error { return opts.MarkInteractive(cmd) }, @@ -79,7 +79,7 @@ func DescribeCmd(h *internal.Helper) *cobra.Command { } ctx := cmd.Context() - var clusterID, taskID string + var clusterID, migrationID string if opts.interactive { if !h.IOStreams.CanPrompt { return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") @@ -94,24 +94,24 @@ func DescribeCmd(h *internal.Helper) *cobra.Command { } clusterID = cluster.ID - task, err := cloud.GetSelectedMigrationTask(ctx, clusterID, h.QueryPageSize, d) + migration, err := cloud.GetSelectedMigration(ctx, clusterID, h.QueryPageSize, d) if err != nil { return err } - taskID = task.ID + migrationID = migration.ID } else { var err error clusterID, err = cmd.Flags().GetString(flag.ClusterID) if err != nil { return errors.Trace(err) } - taskID, err = cmd.Flags().GetString(flag.MigrationTaskID) + migrationID, err = cmd.Flags().GetString(flag.MigrationID) if err != nil { return errors.Trace(err) } } - resp, err := d.GetMigration(ctx, clusterID, taskID) + resp, err := d.GetMigration(ctx, clusterID, migrationID) if err != nil { return errors.Trace(err) } @@ -120,7 +120,7 @@ func DescribeCmd(h *internal.Helper) *cobra.Command { }, } - cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration task.") - 
cmd.Flags().StringP(flag.MigrationTaskID, flag.MigrationTaskIDShort, "", "ID of the migration task to describe.") + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration.") + cmd.Flags().StringP(flag.MigrationID, flag.MigrationIDShort, "", "ID of the migration to describe.") return cmd } diff --git a/internal/cli/serverless/migration/list.go b/internal/cli/serverless/migration/list.go index 6022303d..acad546e 100644 --- a/internal/cli/serverless/migration/list.go +++ b/internal/cli/serverless/migration/list.go @@ -16,8 +16,8 @@ package migration import ( "fmt" - "time" + "github.com/AlekSi/pointer" "github.com/juju/errors" "github.com/spf13/cobra" @@ -121,21 +121,11 @@ func ListCmd(h *internal.Helper) *cobra.Command { columns := []output.Column{"ID", "Name", "Mode", "State", "CreatedAt"} var rows []output.Row for _, task := range resp.Migrations { - id := safeString(task.MigrationId) - name := safeString(task.DisplayName) - if name == "" { - name = id - } - mode := "" - if task.Mode != nil { - mode = string(*task.Mode) - } - state := "" - if task.State != nil { - state = string(*task.State) - } - created := formatTime(task.CreateTime) - rows = append(rows, output.Row{id, name, mode, state, created}) + id := pointer.Get(task.MigrationId) + name := pointer.Get(task.DisplayName) + mode := string(pointer.Get(task.Mode)) + state := string(pointer.Get(task.State)) + rows = append(rows, output.Row{id, name, mode, state, task.CreateTime.String()}) } return errors.Trace(output.PrintHumanTable(h.IOStreams.Out, columns, rows)) }, @@ -145,17 +135,3 @@ func ListCmd(h *internal.Helper) *cobra.Command { cmd.Flags().StringP(flag.Output, flag.OutputShort, output.HumanFormat, flag.OutputHelp) return cmd } - -func safeString(value *string) string { - if value == nil { - return "" - } - return *value -} - -func formatTime(value *time.Time) string { - if value == nil { - return "" - } - return value.Format(time.RFC3339) -} diff --git 
a/internal/cli/serverless/migration/pause.go b/internal/cli/serverless/migration/pause.go index 1931c598..01b0c207 100644 --- a/internal/cli/serverless/migration/pause.go +++ b/internal/cli/serverless/migration/pause.go @@ -33,7 +33,7 @@ type PauseOpts struct { func (c PauseOpts) NonInteractiveFlags() []string { return []string{ flag.ClusterID, - flag.MigrationTaskID, + flag.MigrationID, } } @@ -60,13 +60,13 @@ func PauseCmd(h *internal.Helper) *cobra.Command { var cmd = &cobra.Command{ Use: "pause", - Short: "Pause a migration task", + Short: "Pause a migration", Args: cobra.NoArgs, - Example: fmt.Sprintf(` Pause a migration task in interactive mode: + Example: fmt.Sprintf(` Pause a migration in interactive mode: $ %[1]s serverless migration pause - Pause a migration task in non-interactive mode: - $ %[1]s serverless migration pause -c --migration-id `, config.CliName), + Pause a migration in non-interactive mode: + $ %[1]s serverless migration pause -c --migration-id `, config.CliName), PreRunE: func(cmd *cobra.Command, args []string) error { return opts.MarkInteractive(cmd) }, @@ -77,7 +77,7 @@ func PauseCmd(h *internal.Helper) *cobra.Command { } ctx := cmd.Context() - var clusterID, taskID string + var clusterID, migrationID string if opts.interactive { if !h.IOStreams.CanPrompt { return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") @@ -91,34 +91,33 @@ func PauseCmd(h *internal.Helper) *cobra.Command { return err } clusterID = cluster.ID - task, err := cloud.GetSelectedMigrationTask(ctx, clusterID, h.QueryPageSize, d) + migration, err := cloud.GetSelectedMigration(ctx, clusterID, h.QueryPageSize, d) if err != nil { return err } - taskID = task.ID + migrationID = migration.ID } else { var err error clusterID, err = cmd.Flags().GetString(flag.ClusterID) if err != nil { return errors.Trace(err) } - taskID, err = cmd.Flags().GetString(flag.MigrationTaskID) + migrationID, err = cmd.Flags().GetString(flag.MigrationID) if 
err != nil { return errors.Trace(err) } } - emptyBody := map[string]interface{}{} - if _, err := d.PauseMigration(ctx, clusterID, taskID, &emptyBody); err != nil { + if err := d.PauseMigration(ctx, clusterID, migrationID); err != nil { return errors.Trace(err) } - fmt.Fprintf(h.IOStreams.Out, "migration task %s paused\n", taskID) + fmt.Fprintf(h.IOStreams.Out, "migration %s paused\n", migrationID) return nil }, } - cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration task.") - cmd.Flags().StringP(flag.MigrationTaskID, flag.MigrationTaskIDShort, "", "ID of the migration task to pause.") + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration.") + cmd.Flags().StringP(flag.MigrationID, flag.MigrationIDShort, "", "ID of the migration to pause.") return cmd } diff --git a/internal/cli/serverless/migration/resume.go b/internal/cli/serverless/migration/resume.go index 98eaa3f5..100fa6ae 100644 --- a/internal/cli/serverless/migration/resume.go +++ b/internal/cli/serverless/migration/resume.go @@ -33,7 +33,7 @@ type ResumeOpts struct { func (c ResumeOpts) NonInteractiveFlags() []string { return []string{ flag.ClusterID, - flag.MigrationTaskID, + flag.MigrationID, } } @@ -60,13 +60,13 @@ func ResumeCmd(h *internal.Helper) *cobra.Command { var cmd = &cobra.Command{ Use: "resume", - Short: "Resume a paused migration task", + Short: "Resume a paused migration", Args: cobra.NoArgs, - Example: fmt.Sprintf(` Resume a migration task in interactive mode: + Example: fmt.Sprintf(` Resume a migration in interactive mode: $ %[1]s serverless migration resume - Resume a migration task in non-interactive mode: - $ %[1]s serverless migration resume -c --migration-id `, config.CliName), + Resume a migration in non-interactive mode: + $ %[1]s serverless migration resume -c --migration-id `, config.CliName), PreRunE: func(cmd *cobra.Command, args []string) error { return opts.MarkInteractive(cmd) }, @@ -77,7 
+77,7 @@ func ResumeCmd(h *internal.Helper) *cobra.Command { } ctx := cmd.Context() - var clusterID, taskID string + var clusterID, migrationID string if opts.interactive { if !h.IOStreams.CanPrompt { return errors.New("The terminal doesn't support interactive mode, please use non-interactive mode") @@ -91,34 +91,33 @@ func ResumeCmd(h *internal.Helper) *cobra.Command { return err } clusterID = cluster.ID - task, err := cloud.GetSelectedMigrationTask(ctx, clusterID, h.QueryPageSize, d) + migration, err := cloud.GetSelectedMigration(ctx, clusterID, h.QueryPageSize, d) if err != nil { return err } - taskID = task.ID + migrationID = migration.ID } else { var err error clusterID, err = cmd.Flags().GetString(flag.ClusterID) if err != nil { return errors.Trace(err) } - taskID, err = cmd.Flags().GetString(flag.MigrationTaskID) + migrationID, err = cmd.Flags().GetString(flag.MigrationID) if err != nil { return errors.Trace(err) } } - emptyBody := map[string]interface{}{} - if _, err := d.ResumeMigration(ctx, clusterID, taskID, &emptyBody); err != nil { + if err := d.ResumeMigration(ctx, clusterID, migrationID); err != nil { return errors.Trace(err) } - fmt.Fprintf(h.IOStreams.Out, "migration task %s resumed\n", taskID) + fmt.Fprintf(h.IOStreams.Out, "migration %s resumed\n", migrationID) return nil }, } - cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration task.") - cmd.Flags().StringP(flag.MigrationTaskID, flag.MigrationTaskIDShort, "", "ID of the migration task to resume.") + cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "Cluster ID that owns the migration.") + cmd.Flags().StringP(flag.MigrationID, flag.MigrationIDShort, "", "ID of the migration to resume.") return cmd } diff --git a/internal/cli/serverless/migration/template.go b/internal/cli/serverless/migration/template.go index 6768a795..1f6ddd12 100644 --- a/internal/cli/serverless/migration/template.go +++ b/internal/cli/serverless/migration/template.go @@ 
-31,7 +31,7 @@ const ( migrationDefinitionAllTemplate = `{ // Required migration mode. Use "ALL" for full + incremental. "mode": "ALL", - // Target TiDB Cloud user credentials used by the migration task + // Target TiDB Cloud user credentials used by the migration "target": { "user": "migration_user", "password": "Passw0rd!" @@ -90,6 +90,7 @@ const ( migrationDefinitionIncrementalTemplate = `{ // Incremental-only mode keeps the source and target in sync "mode": "INCREMENTAL", + // Target TiDB Cloud user credentials used by the migration "target": { "user": "migration_user", "password": "Passw0rd!" @@ -167,10 +168,14 @@ var definitionTemplates = map[pkgmigration.TaskMode]templateVariant{ func TemplateCmd(h *internal.Helper) *cobra.Command { cmd := &cobra.Command{ - Use: "template", - Short: "Show migration JSON templates", - Args: cobra.NoArgs, - Example: fmt.Sprintf(" Show the ALL mode migration template:\n $ %[1]s serverless migration template --mode all\n\n Show the INCREMENTAL migration template:\n $ %[1]s serverless migration template --mode incremental\n", config.CliName), + Use: "template", + Short: "Show migration JSON templates", + Args: cobra.NoArgs, + Example: fmt.Sprintf(` Show the ALL mode migration template: + $ %[1]s serverless migration template --mode all + + Show the INCREMENTAL migration template: + $ %[1]s serverless migration template --mode incremental`, config.CliName), PreRunE: func(cmd *cobra.Command, args []string) error { return cmd.MarkFlagRequired(flag.MigrationMode) }, diff --git a/internal/flag/flag.go b/internal/flag/flag.go index 0a182277..42d1f6c6 100644 --- a/internal/flag/flag.go +++ b/internal/flag/flag.go @@ -125,11 +125,11 @@ const ( AWSEndpointServiceRegion string = "aws.endpoint-service-region" AlicloudEndpointServiceName string = "alicloud.endpoint-service-name" - MigrationTaskID string = "migration-id" - MigrationTaskIDShort string = "m" - MigrationConfigFile string = "config-file" - MigrationMode string = "mode" - 
MigrationDryRun string = "dry-run" + MigrationID string = "migration-id" + MigrationIDShort string = "m" + MigrationConfigFile string = "config-file" + MigrationMode string = "mode" + MigrationDryRun string = "dry-run" ) const OutputHelp = "Output format, one of [\"human\" \"json\"]. For the complete result, please use json format." diff --git a/internal/service/cloud/api_client.go b/internal/service/cloud/api_client.go index 0ddd2dea..d70291a6 100644 --- a/internal/service/cloud/api_client.go +++ b/internal/service/cloud/api_client.go @@ -106,7 +106,7 @@ type TiDBCloudClient interface { DownloadExportFiles(ctx context.Context, clusterId string, exportId string, body *export.ExportServiceDownloadExportFilesBody) (*export.DownloadExportFilesResponse, error) - CancelMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (map[string]interface{}, error) + CancelMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) error DeleteMigration(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) @@ -120,9 +120,9 @@ type TiDBCloudClient interface { ListMigrations(ctx context.Context, clusterId string, pageSize *int32, pageToken *string, orderBy *string) (*migration.ListMigrationsResp, error) - PauseMigration(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) + PauseMigration(ctx context.Context, clusterId string, taskId string) error - ResumeMigration(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) + ResumeMigration(ctx context.Context, clusterId string, taskId string) error ListSQLUsers(ctx context.Context, clusterID string, pageSize *int32, pageToken *string) (*iam.ApiListSqlUsersRsp, error) @@ -501,9 +501,9 @@ func (d *ClientDelegate) DownloadExportFiles(ctx context.Context, clusterId stri return res, parseError(err, h) } -func (d *ClientDelegate) 
CancelMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (map[string]interface{}, error) { - res, h, err := d.mc.MigrationAPI.MigrationServiceCancelPrecheck(ctx, clusterId, precheckId).Execute() - return res, parseError(err, h) +func (d *ClientDelegate) CancelMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) error { + _, h, err := d.mc.MigrationAPI.MigrationServiceCancelPrecheck(ctx, clusterId, precheckId).Execute() + return parseError(err, h) } func (d *ClientDelegate) DeleteMigration(ctx context.Context, clusterId string, taskId string) (*migration.Migration, error) { @@ -554,22 +554,16 @@ func (d *ClientDelegate) ListMigrations(ctx context.Context, clusterId string, p return res, parseError(err, h) } -func (d *ClientDelegate) PauseMigration(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { +func (d *ClientDelegate) PauseMigration(ctx context.Context, clusterId string, taskId string) error { payload := map[string]interface{}{} - if body != nil { - payload = *body - } - res, h, err := d.mc.MigrationAPI.MigrationServicePauseMigration(ctx, clusterId, taskId).Body(payload).Execute() - return res, parseError(err, h) + _, h, err := d.mc.MigrationAPI.MigrationServicePauseMigration(ctx, clusterId, taskId).Body(payload).Execute() + return parseError(err, h) } -func (d *ClientDelegate) ResumeMigration(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { +func (d *ClientDelegate) ResumeMigration(ctx context.Context, clusterId string, taskId string) error { payload := map[string]interface{}{} - if body != nil { - payload = *body - } - res, h, err := d.mc.MigrationAPI.MigrationServiceResumeMigration(ctx, clusterId, taskId).Body(payload).Execute() - return res, parseError(err, h) + _, h, err := d.mc.MigrationAPI.MigrationServiceResumeMigration(ctx, clusterId, taskId).Body(payload).Execute() + 
return parseError(err, h) } func (d *ClientDelegate) ListSQLUsers(ctx context.Context, clusterID string, pageSize *int32, pageToken *string) (*iam.ApiListSqlUsersRsp, error) { diff --git a/internal/service/cloud/logic.go b/internal/service/cloud/logic.go index 375aeaf0..f9f26c3a 100644 --- a/internal/service/cloud/logic.go +++ b/internal/service/cloud/logic.go @@ -1055,7 +1055,7 @@ func GetSelectedChangefeed(ctx context.Context, clusterID string, pageSize int64 return changefeed.(*Changefeed), nil } -func GetSelectedMigrationTask(ctx context.Context, clusterID string, pageSize int64, client TiDBCloudClient) (*MigrationTask, error) { +func GetSelectedMigration(ctx context.Context, clusterID string, pageSize int64, client TiDBCloudClient) (*MigrationTask, error) { var items = make([]interface{}, 0) pageSizeInt32 := int32(pageSize) resp, err := client.ListMigrations(ctx, clusterID, &pageSizeInt32, nil, nil) From d3d5eca5abef767194eec48facd0e04ab1fef84f Mon Sep 17 00:00:00 2001 From: yangxin Date: Wed, 10 Dec 2025 16:20:04 +0800 Subject: [PATCH 17/19] fix lint --- go.mod | 2 +- internal/cli/serverless/migration/create.go | 26 +++---- internal/cli/serverless/migration/delete.go | 6 +- internal/flag/flag.go | 2 +- internal/mock/api_client.go | 78 ++++++--------------- 5 files changed, 34 insertions(+), 80 deletions(-) diff --git a/go.mod b/go.mod index ebc9b6df..1496ce93 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.24.0 require ( github.com/AlecAivazis/survey/v2 v2.3.6 + github.com/AlekSi/pointer v1.2.0 github.com/aws/aws-sdk-go-v2 v1.27.1 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.23 github.com/charmbracelet/bubbles v0.17.1 @@ -44,7 +45,6 @@ require ( require ( filippo.io/edwards25519 v1.1.0 // indirect - github.com/AlekSi/pointer v1.2.0 // indirect github.com/alecthomas/chroma v0.10.0 // indirect github.com/alecthomas/chroma/v2 v2.14.0 // indirect github.com/alessio/shellescape v1.4.1 // indirect diff --git 
a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go index bf7d14d1..a556b02c 100644 --- a/internal/cli/serverless/migration/create.go +++ b/internal/cli/serverless/migration/create.go @@ -23,6 +23,7 @@ import ( "strings" "time" + "github.com/AlekSi/pointer" aws "github.com/aws/aws-sdk-go-v2/aws" "github.com/fatih/color" "github.com/juju/errors" @@ -56,7 +57,7 @@ func CreateCmd(h *internal.Helper) *cobra.Command { } ctx := cmd.Context() - dryRun, err := cmd.Flags().GetBool(flag.MigrationDryRun) + dryRun, err := cmd.Flags().GetBool(flag.DryRun) if err != nil { return errors.Trace(err) } @@ -121,7 +122,7 @@ func CreateCmd(h *internal.Helper) *cobra.Command { cmd.Flags().StringP(flag.ClusterID, flag.ClusterIDShort, "", "The ID of the target cluster.") cmd.Flags().StringP(flag.DisplayName, flag.DisplayNameShort, "", "Display name for the migration.") cmd.Flags().String(flag.MigrationConfigFile, "", "Path to a migration config JSON file. Use \"ticloud serverless migration template --mode \" to print templates.") - cmd.Flags().Bool(flag.MigrationDryRun, false, "Run a migration precheck (dry run) with the provided inputs without creating a migration.") + cmd.Flags().Bool(flag.DryRun, false, "Run a migration precheck (dry run) with the provided inputs without creating a migration.") return cmd } @@ -216,16 +217,12 @@ func printPrecheckSummary(result *pkgmigration.MigrationPrecheck, h *internal.He if !shouldPrintPrecheckItem(item.Status) { continue } - var status string - if item.Status != nil { - status = string(*item.Status) - } rows = append(rows, output.Row{ - precheckItemType(item.Type), - status, - aws.ToString(item.Description), - aws.ToString(item.Reason), - aws.ToString(item.Solution), + string(pointer.Get(item.Type)), + string(pointer.Get(item.Status)), + pointer.Get(item.Description), + pointer.Get(item.Reason), + pointer.Get(item.Solution), }) } if len(rows) == 0 { @@ -234,13 +231,6 @@ func printPrecheckSummary(result 
*pkgmigration.MigrationPrecheck, h *internal.He return true, output.PrintHumanTable(h.IOStreams.Out, columns, rows) } -func precheckItemType(value *pkgmigration.PrecheckItemType) string { - if value == nil { - return "" - } - return string(*value) -} - // shouldPrintPrecheckItem reports whether a precheck item should be shown to users. // Currently only WARNING and FAILED statuses surface because SUCCESS does not // provide actionable information. diff --git a/internal/cli/serverless/migration/delete.go b/internal/cli/serverless/migration/delete.go index a71b3546..1abe641c 100644 --- a/internal/cli/serverless/migration/delete.go +++ b/internal/cli/serverless/migration/delete.go @@ -119,16 +119,16 @@ func DeleteCmd(h *internal.Helper) *cobra.Command { return errors.New("The terminal doesn't support prompt, please run with --force to delete the migration") } prompt := &survey.Input{ - Message: fmt.Sprintf("%s %s %s", color.BlueString("Please type"), color.HiBlueString("yes"), color.BlueString("to confirm:")), + Message: fmt.Sprintf("%s %s %s", color.BlueString("Please type"), color.HiBlueString(config.Confirmed), color.BlueString("to confirm:")), } var confirmation string if err := survey.AskOne(prompt, &confirmation); err != nil { - if err == terminal.InterruptErr { + if errors.Is(err, terminal.InterruptErr) { return util.InterruptError } return err } - if confirmation != "yes" { + if confirmation != config.Confirmed { return errors.New("Incorrect confirm string entered, skipping migration deletion") } } diff --git a/internal/flag/flag.go b/internal/flag/flag.go index 42d1f6c6..97529487 100644 --- a/internal/flag/flag.go +++ b/internal/flag/flag.go @@ -129,7 +129,7 @@ const ( MigrationIDShort string = "m" MigrationConfigFile string = "config-file" MigrationMode string = "mode" - MigrationDryRun string = "dry-run" + DryRun string = "dry-run" ) const OutputHelp = "Output format, one of [\"human\" \"json\"]. For the complete result, please use json format." 
diff --git a/internal/mock/api_client.go b/internal/mock/api_client.go index 262a19f0..95bada0f 100644 --- a/internal/mock/api_client.go +++ b/internal/mock/api_client.go @@ -81,33 +81,21 @@ func (_m *TiDBCloudClient) CancelImport(ctx context.Context, clusterId string, i } // CancelMigrationPrecheck provides a mock function with given fields: ctx, clusterId, precheckId -func (_m *TiDBCloudClient) CancelMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) (map[string]interface{}, error) { +func (_m *TiDBCloudClient) CancelMigrationPrecheck(ctx context.Context, clusterId string, precheckId string) error { ret := _m.Called(ctx, clusterId, precheckId) if len(ret) == 0 { panic("no return value specified for CancelMigrationPrecheck") } - var r0 map[string]interface{} - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, string) (map[string]interface{}, error)); ok { - return rf(ctx, clusterId, precheckId) - } - if rf, ok := ret.Get(0).(func(context.Context, string, string) map[string]interface{}); ok { + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { r0 = rf(ctx, clusterId, precheckId) } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]interface{}) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { - r1 = rf(ctx, clusterId, precheckId) - } else { - r1 = ret.Error(1) + r0 = ret.Error(0) } - return r0, r1 + return r0 } // CancelUpload provides a mock function with given fields: ctx, clusterId, uploadId @@ -1676,34 +1664,22 @@ func (_m *TiDBCloudClient) PartialUpdateCluster(ctx context.Context, clusterId s return r0, r1 } -// PauseMigration provides a mock function with given fields: ctx, clusterId, taskId, body -func (_m *TiDBCloudClient) PauseMigration(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { - ret := _m.Called(ctx, clusterId, taskId, body) +// PauseMigration 
provides a mock function with given fields: ctx, clusterId, taskId +func (_m *TiDBCloudClient) PauseMigration(ctx context.Context, clusterId string, taskId string) error { + ret := _m.Called(ctx, clusterId, taskId) if len(ret) == 0 { panic("no return value specified for PauseMigration") } - var r0 map[string]interface{} - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, string, *map[string]interface{}) (map[string]interface{}, error)); ok { - return rf(ctx, clusterId, taskId, body) - } - if rf, ok := ret.Get(0).(func(context.Context, string, string, *map[string]interface{}) map[string]interface{}); ok { - r0 = rf(ctx, clusterId, taskId, body) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]interface{}) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, string, string, *map[string]interface{}) error); ok { - r1 = rf(ctx, clusterId, taskId, body) + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, clusterId, taskId) } else { - r1 = ret.Error(1) + r0 = ret.Error(0) } - return r0, r1 + return r0 } // ResetBranch provides a mock function with given fields: ctx, clusterId, branchId @@ -1766,34 +1742,22 @@ func (_m *TiDBCloudClient) Restore(ctx context.Context, body *br.V1beta1RestoreR return r0, r1 } -// ResumeMigration provides a mock function with given fields: ctx, clusterId, taskId, body -func (_m *TiDBCloudClient) ResumeMigration(ctx context.Context, clusterId string, taskId string, body *map[string]interface{}) (map[string]interface{}, error) { - ret := _m.Called(ctx, clusterId, taskId, body) +// ResumeMigration provides a mock function with given fields: ctx, clusterId, taskId +func (_m *TiDBCloudClient) ResumeMigration(ctx context.Context, clusterId string, taskId string) error { + ret := _m.Called(ctx, clusterId, taskId) if len(ret) == 0 { panic("no return value specified for ResumeMigration") } - var r0 map[string]interface{} - var r1 error - if rf, ok := 
ret.Get(0).(func(context.Context, string, string, *map[string]interface{}) (map[string]interface{}, error)); ok { - return rf(ctx, clusterId, taskId, body) - } - if rf, ok := ret.Get(0).(func(context.Context, string, string, *map[string]interface{}) map[string]interface{}); ok { - r0 = rf(ctx, clusterId, taskId, body) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]interface{}) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, string, string, *map[string]interface{}) error); ok { - r1 = rf(ctx, clusterId, taskId, body) + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, clusterId, taskId) } else { - r1 = ret.Error(1) + r0 = ret.Error(0) } - return r0, r1 + return r0 } // StartChangefeed provides a mock function with given fields: ctx, clusterId, changefeedId From 1631e2bdc6432cc23fedb87da9f806f620080a2e Mon Sep 17 00:00:00 2001 From: yangxin Date: Thu, 11 Dec 2025 14:54:51 +0800 Subject: [PATCH 18/19] increase to 2min --- internal/cli/serverless/migration/create.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/cli/serverless/migration/create.go b/internal/cli/serverless/migration/create.go index a556b02c..5e91c2bd 100644 --- a/internal/cli/serverless/migration/create.go +++ b/internal/cli/serverless/migration/create.go @@ -138,7 +138,7 @@ func markCreateMigrationRequiredFlags(cmd *cobra.Command) error { const ( precheckPollInterval = 5 * time.Second - precheckPollTimeout = time.Minute + precheckPollTimeout = 2 * time.Minute ) func runMigrationPrecheck(ctx context.Context, client cloud.TiDBCloudClient, clusterID string, body *pkgmigration.MigrationServicePrecheckBody, h *internal.Helper) error { From 629487fc117d77efd9f047583524a18857ac5cd3 Mon Sep 17 00:00:00 2001 From: yangxin Date: Thu, 11 Dec 2025 17:47:43 +0800 Subject: [PATCH 19/19] time format --- internal/cli/serverless/changefeed/list.go | 3 ++- internal/cli/serverless/migration/list.go | 3 ++- 
internal/cli/serverless/privatelink/list.go | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/internal/cli/serverless/changefeed/list.go b/internal/cli/serverless/changefeed/list.go index 87c94a41..8b5269de 100644 --- a/internal/cli/serverless/changefeed/list.go +++ b/internal/cli/serverless/changefeed/list.go @@ -16,6 +16,7 @@ package changefeed import ( "fmt" + "time" "github.com/juju/errors" "github.com/spf13/cobra" @@ -138,7 +139,7 @@ func ListCmd(h *internal.Helper) *cobra.Command { *item.DisplayName, string(item.Sink.Type), string(*item.State), - item.CreateTime.String(), + item.CreateTime.Format(time.RFC3339), }) } err := output.PrintHumanTable(h.IOStreams.Out, columns, rows) diff --git a/internal/cli/serverless/migration/list.go b/internal/cli/serverless/migration/list.go index acad546e..798d3576 100644 --- a/internal/cli/serverless/migration/list.go +++ b/internal/cli/serverless/migration/list.go @@ -16,6 +16,7 @@ package migration import ( "fmt" + "time" "github.com/AlekSi/pointer" "github.com/juju/errors" @@ -125,7 +126,7 @@ func ListCmd(h *internal.Helper) *cobra.Command { name := pointer.Get(task.DisplayName) mode := string(pointer.Get(task.Mode)) state := string(pointer.Get(task.State)) - rows = append(rows, output.Row{id, name, mode, state, task.CreateTime.String()}) + rows = append(rows, output.Row{id, name, mode, state, task.CreateTime.Format(time.RFC3339)}) } return errors.Trace(output.PrintHumanTable(h.IOStreams.Out, columns, rows)) }, diff --git a/internal/cli/serverless/privatelink/list.go b/internal/cli/serverless/privatelink/list.go index 0e6eaab4..c7deb220 100644 --- a/internal/cli/serverless/privatelink/list.go +++ b/internal/cli/serverless/privatelink/list.go @@ -16,6 +16,7 @@ package privatelink import ( "fmt" + "time" "github.com/juju/errors" "github.com/spf13/cobra" @@ -130,7 +131,7 @@ func ListCmd(h *internal.Helper) *cobra.Command { item.DisplayName, string(item.Type), string(*item.State), - 
item.CreateTime.String(), + item.CreateTime.Format(time.RFC3339), }) } err := output.PrintHumanTable(h.IOStreams.Out, columns, rows)