1 change: 1 addition & 0 deletions components/cluster/command/prune.go
@@ -43,6 +43,7 @@ func newPruneCmd() *cobra.Command {
}

cmd.Flags().BoolVar(&gOpt.Force, "force", false, "Ignore errors when deleting the instance with data from the cluster")
cmd.Flags().StringSliceVar(&gOpt.IgnoreInitConfigComps, "ignore-generate-config", nil, "Ignore generating config for the specified components (tidb,pd,tikv)")

return cmd
}
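
The flag binds a comma-separated component list into gOpt.IgnoreInitConfigComps through pflag's StringSliceVar; the same registration is repeated for scale-in and scale-out below. A minimal, self-contained sketch of the parsing behavior, assuming spf13/cobra (the demo command and printed output are illustrative, not part of this change):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var ignoreComps []string
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			// pflag's StringSliceVar splits on commas, so a single
			// --ignore-generate-config tidb,tikv yields two entries.
			fmt.Println(ignoreComps) // [tidb tikv]
		},
	}
	cmd.Flags().StringSliceVar(&ignoreComps, "ignore-generate-config", nil,
		"Ignore generating config for the specified components (tidb,pd,tikv)")
	cmd.SetArgs([]string{"--ignore-generate-config", "tidb,tikv"})
	_ = cmd.Execute()
}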
1 change: 1 addition & 0 deletions components/cluster/command/scale_in.go
@@ -63,6 +63,7 @@ func newScaleInCmd() *cobra.Command {
cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Specify the nodes (required)")
cmd.Flags().Uint64Var(&gOpt.APITimeout, "transfer-timeout", 600, "Timeout in seconds when transferring PD and TiKV store leaders, also for TiCDC drain one capture")
cmd.Flags().BoolVar(&gOpt.Force, "force", false, "Force just try stop and destroy instance before removing the instance from topo")
cmd.Flags().StringSliceVar(&gOpt.IgnoreInitConfigComps, "ignore-generate-config", nil, "Ignore generating config for the specified components (tidb,pd,tikv)")

_ = cmd.MarkFlagRequired("node")

1 change: 1 addition & 0 deletions components/cluster/command/scale_out.go
@@ -94,6 +94,7 @@ func newScaleOutCmd() *cobra.Command {
cmd.Flags().BoolVarP(&opt.NoLabels, "no-labels", "", false, "Don't check TiKV labels")
cmd.Flags().BoolVarP(&opt.Stage1, "stage1", "", false, "Don't start the new instance after scale-out, need to manually execute cluster scale-out --stage2")
cmd.Flags().BoolVarP(&opt.Stage2, "stage2", "", false, "Start the new instance and init config after scale-out --stage1")
cmd.Flags().StringSliceVar(&gOpt.IgnoreInitConfigComps, "ignore-generate-config", nil, "Ignore generating config for the specified components (tidb,pd,tikv)")

return cmd
}
7 changes: 7 additions & 0 deletions pkg/cluster/manager/builder.go
@@ -629,6 +629,13 @@ func buildInitConfigTasks(
return
}
compName := instance.ComponentName()

// skip config generation for components listed via --ignore-generate-config
for _, ignoreComp := range gOpt.IgnoreInitConfigComps {
	if ignoreComp == compName {
		return
	}
}

deployDir := spec.Abs(base.User, instance.DeployDir())
// data dir would be empty for components which don't need it
dataDirs := spec.MultiDirAbs(base.User, instance.DataDir())
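
Because the component filter added above is a plain linear scan over the ignore list, its behavior is easy to pin down in isolation. A hypothetical table-driven test sketch (the shouldSkip helper mirrors the inlined loop and is not part of this change):

package manager

import "testing"

// shouldSkip mirrors the component filter inlined in
// buildInitConfigTasks. (Hypothetical helper for illustration only.)
func shouldSkip(compName string, ignoreComps []string) bool {
	for _, c := range ignoreComps {
		if c == compName {
			return true
		}
	}
	return false
}

func TestIgnoreInitConfigComps(t *testing.T) {
	tests := []struct {
		comp   string
		ignore []string
		want   bool
	}{
		{"tikv", []string{"tikv"}, true},
		{"tikv", []string{"tidb", "pd"}, false},
		{"pd", nil, false},
	}
	for _, tt := range tests {
		if got := shouldSkip(tt.comp, tt.ignore); got != tt.want {
			t.Errorf("shouldSkip(%q, %v) = %v, want %v", tt.comp, tt.ignore, got, tt.want)
		}
	}
}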
2 changes: 2 additions & 0 deletions pkg/cluster/operation/operation.go
@@ -57,6 +57,8 @@ type Options struct {

DisplayMode string // the output format
Operation Operation

IgnoreInitConfigComps []string // components for which config generation is skipped
}

// Operation represents the type of cluster operation
12 changes: 6 additions & 6 deletions tests/tiup-cluster/script/scale_core.sh
@@ -53,12 +53,12 @@ function scale_core() {
tiup-cluster $client exec $name -N n1 --command "grep -q n1:10080 /home/tidb/deploy/prometheus-9090/conf/prometheus.yml"

# scaling in tikv may take several minutes or hours, and GitHub CI runtime is not guaranteed
# echo "start scale in tikv"
# tiup-cluster --yes scale-in $name -N n3:20160
# wait_instance_num_reach $name $total_sub_one $native_ssh
# echo "start scale out tikv"
# topo=./topo/full_scale_in_tikv.yaml
# tiup-cluster --yes scale-out $name $topo
echo "start scale in tikv"
tiup-cluster --yes scale-in $name -N n3:20160 --ignore-generate-config tikv
wait_instance_num_reach $name $total_sub_one $native_ssh
echo "start scale out tikv"
topo=./topo/full_scale_in_tikv.yaml
tiup-cluster --yes scale-out $name $topo

echo "start scale in pump"
tiup-cluster $client --yes scale-in $name -N n3:8250