diff --git a/.github/workflows/kind_e2e.yaml b/.github/workflows/kind_e2e.yaml index f3be0038b..fac1c0fa2 100644 --- a/.github/workflows/kind_e2e.yaml +++ b/.github/workflows/kind_e2e.yaml @@ -50,7 +50,7 @@ jobs: name: E2E tests for K8s (KinD) runs-on: ubuntu-latest env: - DAPR_RUNTIME_PINNED_VERSION: 1.16.0-rc.8 + DAPR_RUNTIME_PINNED_VERSION: 1.16.2 DAPR_DASHBOARD_PINNED_VERSION: 0.15.0 DAPR_RUNTIME_LATEST_STABLE_VERSION: DAPR_DASHBOARD_LATEST_STABLE_VERSION: diff --git a/.github/workflows/self_hosted_e2e.yaml b/.github/workflows/self_hosted_e2e.yaml index 20a3edccd..4388c7824 100644 --- a/.github/workflows/self_hosted_e2e.yaml +++ b/.github/workflows/self_hosted_e2e.yaml @@ -38,7 +38,7 @@ jobs: GOARCH: ${{ matrix.target_arch }} GOPROXY: https://proxy.golang.org ARCHIVE_OUTDIR: dist/archives - DAPR_RUNTIME_PINNED_VERSION: "1.16.0-rc.8" + DAPR_RUNTIME_PINNED_VERSION: "1.16.2" DAPR_DASHBOARD_PINNED_VERSION: 0.15.0 DAPR_RUNTIME_LATEST_STABLE_VERSION: "" DAPR_DASHBOARD_LATEST_STABLE_VERSION: "" @@ -131,7 +131,7 @@ jobs: shell: bash - name: Set the test timeout - MacOS if: matrix.os == 'macos-14-large' - run: echo "E2E_SH_TEST_TIMEOUT=30m" >> $GITHUB_ENV + run: echo "E2E_SH_TEST_TIMEOUT=40m" >> $GITHUB_ENV - name: Run E2E tests with GHCR # runs every 6hrs if: github.event.schedule == '0 */6 * * *' diff --git a/Makefile b/Makefile index 3bc4b4005..c6b853a23 100644 --- a/Makefile +++ b/Makefile @@ -74,7 +74,7 @@ TEST_OUTPUT_FILE ?= test_output.json # Set the default timeout for tests to 10 minutes ifndef E2E_SH_TEST_TIMEOUT - override E2E_SH_TEST_TIMEOUT := 10m + override E2E_SH_TEST_TIMEOUT := 40m endif # Use the variable H to add a header (equivalent to =>) to informational output diff --git a/cmd/dapr.go b/cmd/dapr.go index 0bfd59952..5f943a553 100644 --- a/cmd/dapr.go +++ b/cmd/dapr.go @@ -21,6 +21,8 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" + "github.com/dapr/cli/cmd/scheduler" + "github.com/dapr/cli/cmd/workflow" "github.com/dapr/cli/pkg/api" 
"github.com/dapr/cli/pkg/print" "github.com/dapr/cli/pkg/standalone" @@ -108,4 +110,7 @@ func init() { RootCmd.Flags().BoolVarP(&versionFlag, "version", "v", false, "version for dapr") RootCmd.PersistentFlags().StringVarP(&daprRuntimePath, "runtime-path", "", "", "The path to the dapr runtime installation directory") RootCmd.PersistentFlags().BoolVarP(&logAsJSON, "log-as-json", "", false, "Log output in JSON format") + + RootCmd.AddCommand(scheduler.SchedulerCmd) + RootCmd.AddCommand(workflow.WorkflowCmd) } diff --git a/cmd/scheduler/delete.go b/cmd/scheduler/delete.go new file mode 100644 index 000000000..b6146348e --- /dev/null +++ b/cmd/scheduler/delete.go @@ -0,0 +1,52 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "github.com/spf13/cobra" + + "github.com/dapr/cli/pkg/scheduler" + "github.com/dapr/kit/signals" +) + +var DeleteCmd = &cobra.Command{ + Use: "delete", + Aliases: []string{"d", "del"}, + Short: `Delete one of more jobs from scheduler. +Job names are formatted by their type, app ID, then identifier. +Actor reminders require the actor type, actor ID, then reminder name, separated by /. +Workflow reminders require the app ID, instance ID, then reminder name, separated by /. +Accepts multiple names. 
+`, + Args: cobra.MinimumNArgs(1), + Example: ` +dapr scheduler delete app/my-app-id/my-job-name +dapr scheduler delete actor/my-actor-type/my-actor-id/my-reminder-name +dapr scheduler delete workflow/my-app-id/my-instance-id/my-workflow-reminder-name +`, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + opts := scheduler.DeleteOptions{ + SchedulerNamespace: schedulerNamespace, + KubernetesMode: kubernetesMode, + DaprNamespace: daprNamespace, + } + + return scheduler.Delete(ctx, opts, args...) + }, +} + +func init() { + SchedulerCmd.AddCommand(DeleteCmd) +} diff --git a/cmd/scheduler/deleteall.go b/cmd/scheduler/deleteall.go new file mode 100644 index 000000000..820c3a080 --- /dev/null +++ b/cmd/scheduler/deleteall.go @@ -0,0 +1,54 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "github.com/spf13/cobra" + + "github.com/dapr/cli/pkg/scheduler" + "github.com/dapr/kit/signals" +) + +var DeleteAllCmd = &cobra.Command{ + Use: "delete-all", + Aliases: []string{"da", "delall"}, + Short: `Delete all scheduled jobs in the specified namespace of a particular filter. +Accepts a single key as an argument. Deletes all jobs which match the filter key. 
+`, + Args: cobra.ExactArgs(1), + Example: ` +dapr scheduler delete-all all +dapr scheduler delete-all app +dapr scheduler delete-all app/my-app-id +dapr scheduler delete-all actor/my-actor-type +dapr scheduler delete-all actor/my-actor-type/my-actor-id +dapr scheduler delete-all workflow +dapr scheduler delete-all workflow/my-app-id +dapr scheduler delete-all workflow/my-app-id/my-workflow-id +`, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + opts := scheduler.DeleteOptions{ + SchedulerNamespace: schedulerNamespace, + KubernetesMode: kubernetesMode, + DaprNamespace: daprNamespace, + } + + return scheduler.DeleteAll(ctx, opts, args[0]) + }, +} + +func init() { + SchedulerCmd.AddCommand(DeleteAllCmd) +} diff --git a/cmd/scheduler/export.go b/cmd/scheduler/export.go new file mode 100644 index 000000000..279177d52 --- /dev/null +++ b/cmd/scheduler/export.go @@ -0,0 +1,61 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "os" + + "github.com/spf13/cobra" + + "github.com/dapr/cli/pkg/print" + "github.com/dapr/cli/pkg/scheduler" + "github.com/dapr/kit/signals" +) + +var ( + schedulerExportFile string +) + +var SchedulerExportCmd = &cobra.Command{ + Use: "export", + Short: "Export all jobs and actor reminders to a binary file, including the tracked count.", + Long: `Export jobs and actor reminders which are scheduled in Scheduler. +Can later be imported using 'dapr scheduler import'. 
+dapr scheduler export -o output.bin +`, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + + err := scheduler.Export(ctx, scheduler.ExportImportOptions{ + SchedulerNamespace: schedulerNamespace, + KubernetesMode: kubernetesMode, + TargetFile: schedulerExportFile, + }) + if err != nil { + return err + } + + print.InfoStatusEvent(os.Stdout, "Export to '%s' complete.", schedulerExportFile) + + return nil + }, +} + +func init() { + SchedulerExportCmd.Flags().MarkHidden("namespace") + SchedulerExportCmd.Flags().StringVarP(&schedulerExportFile, "output-file", "o", "", "Output binary file to export jobs and actor reminders to.") + SchedulerExportCmd.MarkFlagRequired("output-file") + SchedulerExportCmd.MarkFlagFilename("output-file") + SchedulerCmd.AddCommand(SchedulerExportCmd) +} diff --git a/cmd/scheduler/get.go b/cmd/scheduler/get.go new file mode 100644 index 000000000..f5ad880a1 --- /dev/null +++ b/cmd/scheduler/get.go @@ -0,0 +1,93 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "os" + + "github.com/gocarina/gocsv" + "github.com/spf13/cobra" + + "github.com/dapr/cli/pkg/scheduler" + "github.com/dapr/cli/utils" + "github.com/dapr/kit/signals" +) + +var ( + getOutputFormat *string +) + +var GetCmd = &cobra.Command{ + Use: "get", + Aliases: []string{"g", "ge"}, + Short: `Get a scheduled app job or actor reminder in Scheduler. 
+Job names are formatted by their type, app ID, then identifier. +Actor reminders require the actor type, actor ID, then reminder name, separated by /. +Workflow reminders require the app ID, instance ID, then reminder name, separated by /. +Activity reminders require the app ID, activity ID, separated by /. +Accepts multiple names. +`, + Args: cobra.MinimumNArgs(1), + Example: ` +dapr scheduler get app/my-app-id/my-job-name +dapr scheduler get actor/my-actor-type/my-actor-id/my-reminder-name +dapr scheduler get workflow/my-app-id/my-instance-id/my-workflow-reminder-name +dapr scheduler get activity/my-app-id/xyz::0::1 +`, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + opts := scheduler.GetOptions{ + SchedulerNamespace: schedulerNamespace, + KubernetesMode: kubernetesMode, + DaprNamespace: daprNamespace, + } + + var list any + var err error + if *getOutputFormat == outputFormatShort { + list, err = scheduler.Get(ctx, opts, args...) + } else { + list, err = scheduler.GetWide(ctx, opts, args...) + } + if err != nil { + return err + } + + switch *getOutputFormat { + case outputFormatYAML: + err = utils.PrintDetail(os.Stdout, "yaml", list) + case outputFormatJSON: + err = utils.PrintDetail(os.Stdout, "json", list) + default: + var table string + table, err = gocsv.MarshalString(list) + if err != nil { + break + } + + utils.PrintTable(table) + } + + if err != nil { + return err + } + + return nil + }, +} + +func init() { + getOutputFormat = outputFunc(GetCmd) + SchedulerCmd.AddCommand(GetCmd) +} diff --git a/cmd/scheduler/import.go b/cmd/scheduler/import.go new file mode 100644 index 000000000..267b70ed2 --- /dev/null +++ b/cmd/scheduler/import.go @@ -0,0 +1,60 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "os" + + "github.com/spf13/cobra" + + "github.com/dapr/cli/pkg/print" + "github.com/dapr/cli/pkg/scheduler" + "github.com/dapr/kit/signals" +) + +var ( + schedulerImportFile string +) + +var SchedulerImportCmd = &cobra.Command{ + Use: "import", + Short: "Import all jobs and actor reminders from a binary file generated by 'dapr scheduler export'.", + Long: `Import jobs and actor reminders to Scheduler from a binary file generated by 'dapr scheduler export'. +dapr scheduler import -f export.bin`, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + + err := scheduler.Import(ctx, scheduler.ExportImportOptions{ + SchedulerNamespace: schedulerNamespace, + KubernetesMode: kubernetesMode, + TargetFile: schedulerImportFile, + }) + if err != nil { + return err + } + + print.InfoStatusEvent(os.Stdout, "Import from '%s' complete.", schedulerImportFile) + + return nil + }, +} + +func init() { + SchedulerImportCmd.Flags().MarkHidden("namespace") + SchedulerImportCmd.Flags().StringVarP(&schedulerImportFile, "input-file", "f", "", "Input file to import jobs and actor reminders from.") + SchedulerImportCmd.MarkFlagRequired("input-file") + SchedulerImportCmd.MarkFlagFilename("input-file") + SchedulerCmd.AddCommand(SchedulerImportCmd) +} diff --git a/cmd/scheduler/list.go b/cmd/scheduler/list.go new file mode 100644 index 000000000..c6fefd95e --- /dev/null +++ b/cmd/scheduler/list.go @@ -0,0 +1,102 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); 
+you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "os" + + "github.com/gocarina/gocsv" + "github.com/spf13/cobra" + + "github.com/dapr/cli/pkg/print" + "github.com/dapr/cli/pkg/scheduler" + "github.com/dapr/cli/utils" + "github.com/dapr/kit/ptr" + "github.com/dapr/kit/signals" +) + +var ( + listFilterType *string + listOutputFormat *string +) + +var ListCmd = &cobra.Command{ + Use: "list", + Short: "List scheduled jobs in Scheduler.", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + + opts := scheduler.ListOptions{ + SchedulerNamespace: schedulerNamespace, + KubernetesMode: kubernetesMode, + Filter: scheduler.Filter{ + Type: *listFilterType, + }, + } + opts.Filter.Namespace = ptr.Of(daprNamespace) + + var list any + var empty bool + switch *listOutputFormat { + case outputFormatShort: + ll, err := scheduler.List(ctx, opts) + if err != nil { + return err + } + empty = len(ll) == 0 + list = ll + default: + ll, err := scheduler.ListWide(ctx, opts) + if err != nil { + return err + } + empty = len(ll) == 0 + list = ll + } + + if empty { + print.FailureStatusEvent(os.Stderr, "No jobs found in namespace %q", daprNamespace) + return nil + } + + var err error + switch *listOutputFormat { + case outputFormatYAML: + err = utils.PrintDetail(os.Stdout, "yaml", list) + case outputFormatJSON: + err = utils.PrintDetail(os.Stdout, "json", list) + default: + var table string + table, err = gocsv.MarshalString(list) + if err != nil { + break + } + + utils.PrintTable(table) + } + + if 
err != nil { + return err + } + + return nil + }, +} + +func init() { + listOutputFormat = outputFunc(ListCmd) + listFilterType = filterFunc(ListCmd) + SchedulerCmd.AddCommand(ListCmd) +} diff --git a/cmd/scheduler/scheduler.go b/cmd/scheduler/scheduler.go new file mode 100644 index 000000000..8fae6bdfa --- /dev/null +++ b/cmd/scheduler/scheduler.go @@ -0,0 +1,108 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "errors" + "fmt" + "slices" + "strings" + + "github.com/spf13/cobra" + + "github.com/dapr/cli/pkg/scheduler" +) + +const ( + outputFormatShort = "short" + outputFormatWide = "wide" + outputFormatYAML = "yaml" + outputFormatJSON = "json" +) + +var ( + daprNamespace string + schedulerNamespace string + kubernetesMode bool +) + +var SchedulerCmd = &cobra.Command{ + Use: "scheduler", + Short: "Scheduler management commands. 
Use -k to target a Kubernetes Dapr cluster.", + Aliases: []string{"sched"}, +} + +func init() { + SchedulerCmd.PersistentFlags().BoolVarP(&kubernetesMode, "kubernetes", "k", false, "Perform scheduler command on a Kubernetes Dapr cluster") + SchedulerCmd.PersistentFlags().StringVarP(&daprNamespace, "namespace", "n", "default", "Namespace of the Dapr application") + SchedulerCmd.PersistentFlags().StringVar(&schedulerNamespace, "scheduler-namespace", "dapr-system", "Kubernetes namespace where the scheduler is deployed, only relevant if --kubernetes is set") +} + +func outputFunc(cmd *cobra.Command) *string { + + outputs := []string{ + outputFormatShort, + outputFormatWide, + outputFormatYAML, + outputFormatJSON, + } + + var outputFormat string + cmd.Flags().StringVarP(&outputFormat, "output", "o", outputFormatShort, fmt.Sprintf("Output format. One of %s", + strings.Join(outputs, ", ")), + ) + + pre := cmd.PreRunE + cmd.PreRunE = func(cmd *cobra.Command, args []string) error { + if !slices.Contains(outputs, outputFormat) { + return errors.New("invalid value for --output. Supported values are " + strings.Join(outputs, ", ")) + } + + if pre != nil { + return pre(cmd, args) + } + return nil + } + + return &outputFormat +} + +func filterFunc(cmd *cobra.Command) *string { + all := []string{ + scheduler.FilterAll, + scheduler.FilterApp, + scheduler.FilterActor, + scheduler.FilterWorkflow, + scheduler.FilterActivity, + } + + var filterType string + cmd.Flags().StringVar(&filterType, "filter", scheduler.FilterAll, + fmt.Sprintf("Filter jobs by type. Supported values are %s\n", strings.Join(all, ", ")), + ) + + pre := cmd.PreRunE + cmd.PreRunE = func(cmd *cobra.Command, args []string) error { + if !slices.Contains(all, filterType) { + return errors.New("invalid value for --filter. 
Supported values are " + strings.Join(all, ", ")) + } + + if pre != nil { + return pre(cmd, args) + } + return nil + } + + return &filterType +} diff --git a/cmd/workflow/history.go b/cmd/workflow/history.go new file mode 100644 index 000000000..afa153eca --- /dev/null +++ b/cmd/workflow/history.go @@ -0,0 +1,85 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "os" + + "github.com/gocarina/gocsv" + "github.com/spf13/cobra" + + "github.com/dapr/cli/pkg/workflow" + "github.com/dapr/cli/utils" + "github.com/dapr/kit/signals" +) + +var ( + historyOutputFormat *string +) + +var HistoryCmd = &cobra.Command{ + Use: "history", + Short: "Get the history of a workflow instance.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + + appID, err := getWorkflowAppID(cmd) + if err != nil { + return err + } + + opts := workflow.HistoryOptions{ + KubernetesMode: flagKubernetesMode, + Namespace: flagDaprNamespace, + AppID: appID, + InstanceID: args[0], + } + + var list any + if *historyOutputFormat == outputFormatShort { + list, err = workflow.HistoryShort(ctx, opts) + } else { + list, err = workflow.HistoryWide(ctx, opts) + } + if err != nil { + return err + } + + switch *historyOutputFormat { + case outputFormatYAML: + err = utils.PrintDetail(os.Stdout, "yaml", list) + case outputFormatJSON: + err = utils.PrintDetail(os.Stdout, "json", list) + default: + var table string + 
table, err = gocsv.MarshalString(list) + if err != nil { + break + } + + utils.PrintTable(table) + } + if err != nil { + return err + } + + return nil + }, +} + +func init() { + historyOutputFormat = outputFunc(HistoryCmd) + WorkflowCmd.AddCommand(HistoryCmd) +} diff --git a/cmd/workflow/list.go b/cmd/workflow/list.go new file mode 100644 index 000000000..d3928d572 --- /dev/null +++ b/cmd/workflow/list.go @@ -0,0 +1,113 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "os" + + "github.com/gocarina/gocsv" + "github.com/spf13/cobra" + + "github.com/dapr/cli/pkg/print" + "github.com/dapr/cli/pkg/workflow" + "github.com/dapr/cli/utils" + "github.com/dapr/kit/signals" +) + +var ( + listFilter *workflow.Filter + listOutputFormat *string + + listConn *connFlag +) + +var ListCmd = &cobra.Command{ + Use: "list", + Aliases: []string{"ls"}, + Short: "List workflows for the given app ID.", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + + appID, err := getWorkflowAppID(cmd) + if err != nil { + return err + } + + opts := workflow.ListOptions{ + KubernetesMode: flagKubernetesMode, + Namespace: flagDaprNamespace, + AppID: appID, + ConnectionString: listConn.connectionString, + TableName: listConn.tableName, + Filter: *listFilter, + } + + var list any + var empty bool + + switch *listOutputFormat { + case outputFormatShort: + var ll []*workflow.ListOutputShort + ll, err = 
workflow.ListShort(ctx, opts) + if err != nil { + return err + } + empty = len(ll) == 0 + list = ll + + default: + var ll []*workflow.ListOutputWide + ll, err = workflow.ListWide(ctx, opts) + if err != nil { + return err + } + empty = len(ll) == 0 + list = ll + } + + if empty { + print.FailureStatusEvent(os.Stderr, "No workflow found in namespace %q for app ID %q", flagDaprNamespace, appID) + return nil + } + + switch *listOutputFormat { + case outputFormatYAML: + err = utils.PrintDetail(os.Stdout, "yaml", list) + case outputFormatJSON: + err = utils.PrintDetail(os.Stdout, "json", list) + default: + var table string + table, err = gocsv.MarshalString(list) + if err != nil { + break + } + + utils.PrintTable(table) + } + + if err != nil { + return err + } + + return nil + }, +} + +func init() { + listFilter = filterCmd(ListCmd) + listOutputFormat = outputFunc(ListCmd) + listConn = connectionCmd(ListCmd) + WorkflowCmd.AddCommand(ListCmd) +} diff --git a/cmd/workflow/purge.go b/cmd/workflow/purge.go new file mode 100644 index 000000000..5ddac021e --- /dev/null +++ b/cmd/workflow/purge.go @@ -0,0 +1,89 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "errors" + + "github.com/dapr/cli/pkg/workflow" + "github.com/dapr/kit/signals" + "github.com/spf13/cobra" +) + +var ( + flagPurgeOlderThan string + flagPurgeAll bool + flagPurgeConn *connFlag + schedulerNamespace string +) + +var PurgeCmd = &cobra.Command{ + Use: "purge", + Short: "Purge one or more workflow instances with a terminal state. Accepts a workflow instance ID argument or flags to purge multiple/all terminal instances. Also deletes all associated scheduler jobs.", + Args: func(cmd *cobra.Command, args []string) error { + switch { + case cmd.Flags().Changed("all-older-than"), + cmd.Flags().Changed("all"): + if len(args) > 0 { + return errors.New("no arguments are accepted when using purge all flags") + } + default: + if len(args) == 0 { + return errors.New("one or more workflow instance ID arguments are required when not using purge all flags") + } + } + + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + + appID, err := getWorkflowAppID(cmd) + if err != nil { + return err + } + + opts := workflow.PurgeOptions{ + KubernetesMode: flagKubernetesMode, + Namespace: flagDaprNamespace, + SchedulerNamespace: schedulerNamespace, + AppID: appID, + InstanceIDs: args, + All: flagPurgeAll, + ConnectionString: flagPurgeConn.connectionString, + TableName: flagPurgeConn.tableName, + } + + if cmd.Flags().Changed("all-older-than") { + opts.AllOlderThan, err = parseWorkflowDurationTimestamp(flagPurgeOlderThan, true) + if err != nil { + return err + } + } + + return workflow.Purge(ctx, opts) + }, +} + +func init() { + PurgeCmd.Flags().StringVar(&flagPurgeOlderThan, "all-older-than", "", "Purge workflow instances older than the specified Go duration or timestamp, e.g., '24h' or '2023-01-02T15:04:05Z'.") + PurgeCmd.Flags().BoolVar(&flagPurgeAll, "all", false, "Purge all workflow instances in a terminal state. 
Use with caution.") + PurgeCmd.MarkFlagsMutuallyExclusive("all-older-than", "all") + + PurgeCmd.Flags().StringVar(&schedulerNamespace, "scheduler-namespace", "dapr-system", "Kubernetes namespace where the scheduler is deployed, only relevant if --kubernetes is set") + + flagPurgeConn = connectionCmd(PurgeCmd) + + WorkflowCmd.AddCommand(PurgeCmd) +} diff --git a/cmd/workflow/raiseevent.go b/cmd/workflow/raiseevent.go new file mode 100644 index 000000000..cc533a0a1 --- /dev/null +++ b/cmd/workflow/raiseevent.go @@ -0,0 +1,74 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "errors" + "os" + "strings" + + "github.com/dapr/cli/pkg/print" + "github.com/dapr/cli/pkg/workflow" + "github.com/dapr/kit/signals" + "github.com/spf13/cobra" +) + +var ( + flagRaiseEventInput *inputFlag +) + +var RaiseEventCmd = &cobra.Command{ + Use: "raise-event", + Short: "Raise an event for a workflow waiting for an external event. 
Expects a single argument '<instance-id>/<event-name>'.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + + split := strings.Split(args[0], "/") + if len(split) != 2 { + return errors.New("the argument must be in the format '<instance-id>/<event-name>'") + } + instanceID := split[0] + eventName := split[1] + + appID, err := getWorkflowAppID(cmd) + if err != nil { + return err + } + + opts := workflow.RaiseEventOptions{ + KubernetesMode: flagKubernetesMode, + Namespace: flagDaprNamespace, + AppID: appID, + InstanceID: instanceID, + Name: eventName, + Input: flagRaiseEventInput.input, + } + + if err = workflow.RaiseEvent(ctx, opts); err != nil { + print.FailureStatusEvent(os.Stdout, err.Error()) + os.Exit(1) + } + + print.InfoStatusEvent(os.Stdout, "Workflow '%s' raised event '%s' successfully", instanceID, eventName) + + return nil + }, +} + +func init() { + flagRaiseEventInput = inputCmd(RaiseEventCmd) + + WorkflowCmd.AddCommand(RaiseEventCmd) +} diff --git a/cmd/workflow/rerun.go b/cmd/workflow/rerun.go new file mode 100644 index 000000000..29b85ba7a --- /dev/null +++ b/cmd/workflow/rerun.go @@ -0,0 +1,76 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "os" + + "github.com/spf13/cobra" + + "github.com/dapr/cli/pkg/print" + "github.com/dapr/cli/pkg/workflow" + "github.com/dapr/kit/ptr" + "github.com/dapr/kit/signals" +) + +var ( + flagReRunEventID uint32 + flagReRunNewInstanceID string + flagReRunInput *inputFlag +) + +var ReRunCmd = &cobra.Command{ + Use: "rerun [instance ID]", + Short: "ReRun a workflow instance from the beginning or a specific event. Optionally, a new instance ID and input to the starting event can be provided.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + + appID, err := getWorkflowAppID(cmd) + if err != nil { + return err + } + + opts := workflow.ReRunOptions{ + KubernetesMode: flagKubernetesMode, + Namespace: flagDaprNamespace, + AppID: appID, + InstanceID: args[0], + Input: flagReRunInput.input, + EventID: flagReRunEventID, + } + + if cmd.Flags().Changed("new-instance-id") { + opts.NewInstanceID = ptr.Of(flagReRunNewInstanceID) + } + + id, err := workflow.ReRun(ctx, opts) + if err != nil { + print.FailureStatusEvent(os.Stdout, err.Error()) + os.Exit(1) + } + + print.InfoStatusEvent(os.Stdout, "Rerunning workflow instance: %s", id) + + return nil + }, +} + +func init() { + flagReRunInput = inputCmd(ReRunCmd) + ReRunCmd.Flags().StringVar(&flagReRunNewInstanceID, "new-instance-id", "", "Optional new ID for the re-run workflow instance. If not provided, a new ID will be generated.") + ReRunCmd.Flags().Uint32VarP(&flagReRunEventID, "event-id", "e", 0, "The event ID from which to re-run the workflow. 
If not provided, the workflow will re-run from the beginning.") + + WorkflowCmd.AddCommand(ReRunCmd) +} diff --git a/cmd/workflow/resume.go b/cmd/workflow/resume.go new file mode 100644 index 000000000..68085181c --- /dev/null +++ b/cmd/workflow/resume.go @@ -0,0 +1,64 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "os" + + "github.com/dapr/cli/pkg/print" + "github.com/dapr/cli/pkg/workflow" + "github.com/dapr/kit/signals" + "github.com/spf13/cobra" +) + +var ( + flagResumeReason string +) + +var ResumeCmd = &cobra.Command{ + Use: "resume", + Short: "Resume a workflow that is suspended.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + + appID, err := getWorkflowAppID(cmd) + if err != nil { + return err + } + + opts := workflow.ResumeOptions{ + KubernetesMode: flagKubernetesMode, + Namespace: flagDaprNamespace, + AppID: appID, + InstanceID: args[0], + Reason: flagResumeReason, + } + + if err = workflow.Resume(ctx, opts); err != nil { + print.FailureStatusEvent(os.Stdout, err.Error()) + os.Exit(1) + } + + print.InfoStatusEvent(os.Stdout, "Workflow '%s' resumed successfully", args[0]) + + return nil + }, +} + +func init() { + ResumeCmd.Flags().StringVarP(&flagResumeReason, "reason", "r", "", "Reason for resuming the workflow") + + WorkflowCmd.AddCommand(ResumeCmd) +} diff --git a/cmd/workflow/run.go b/cmd/workflow/run.go new file mode 100644 index 
000000000..2536def0f --- /dev/null +++ b/cmd/workflow/run.go @@ -0,0 +1,77 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "os" + + "github.com/spf13/cobra" + + "github.com/dapr/cli/pkg/print" + "github.com/dapr/cli/pkg/workflow" + "github.com/dapr/kit/signals" +) + +var ( + flagRunInstanceID *instanceIDFlag + flagRunInput *inputFlag + flagRunStartTime string +) + +var RunCmd = &cobra.Command{ + Use: "run", + Short: "Run a workflow instance based on a given workflow name. 
Accepts a single argument, the workflow name.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + + appID, err := getWorkflowAppID(cmd) + if err != nil { + return err + } + + opts := workflow.RunOptions{ + KubernetesMode: flagKubernetesMode, + Namespace: flagDaprNamespace, + AppID: appID, + Name: args[0], + InstanceID: flagRunInstanceID.instanceID, + Input: flagRunInput.input, + } + + if cmd.Flags().Changed("start-time") { + opts.StartTime, err = parseWorkflowDurationTimestamp(flagRunStartTime, false) + if err != nil { + return err + } + } + + id, err := workflow.Run(ctx, opts) + if err != nil { + print.FailureStatusEvent(os.Stdout, err.Error()) + os.Exit(1) + } + + print.InfoStatusEvent(os.Stdout, "Workflow instance started successfully: %s", id) + + return nil + }, +} + +func init() { + flagRunInstanceID = instanceIDCmd(RunCmd) + flagRunInput = inputCmd(RunCmd) + RunCmd.Flags().StringVarP(&flagRunStartTime, "start-time", "s", "", "Optional start time for the workflow in RFC3339 or Go duration string format. If not provided, the workflow starts immediately. A duration of '0s', or any start time, will cause the command to not wait for the command to start") + WorkflowCmd.AddCommand(RunCmd) +} diff --git a/cmd/workflow/suspend.go b/cmd/workflow/suspend.go new file mode 100644 index 000000000..1023a58bb --- /dev/null +++ b/cmd/workflow/suspend.go @@ -0,0 +1,64 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "os" + + "github.com/dapr/cli/pkg/print" + "github.com/dapr/cli/pkg/workflow" + "github.com/dapr/kit/signals" + "github.com/spf13/cobra" +) + +var ( + flagSuspendReason string +) + +var SuspendCmd = &cobra.Command{ + Use: "suspend", + Short: "Suspend a workflow in progress.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + + appID, err := getWorkflowAppID(cmd) + if err != nil { + return err + } + + opts := workflow.SuspendOptions{ + KubernetesMode: flagKubernetesMode, + Namespace: flagDaprNamespace, + AppID: appID, + InstanceID: args[0], + Reason: flagSuspendReason, + } + + if err = workflow.Suspend(ctx, opts); err != nil { + print.FailureStatusEvent(os.Stdout, err.Error()) + os.Exit(1) + } + + print.InfoStatusEvent(os.Stdout, "Workflow '%s' suspended successfully", args[0]) + + return nil + }, +} + +func init() { + SuspendCmd.Flags().StringVarP(&flagResumeReason, "reason", "r", "", "Reason for resuming the workflow") + + WorkflowCmd.AddCommand(SuspendCmd) +} diff --git a/cmd/workflow/terminate.go b/cmd/workflow/terminate.go new file mode 100644 index 000000000..2c4823932 --- /dev/null +++ b/cmd/workflow/terminate.go @@ -0,0 +1,69 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "os" + + "github.com/dapr/cli/pkg/print" + "github.com/dapr/cli/pkg/workflow" + "github.com/dapr/kit/signals" + "github.com/spf13/cobra" +) + +var ( + flagTerminateOutput string +) + +var TerminateCmd = &cobra.Command{ + Use: "terminate", + Short: "Terminate a workflow in progress.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := signals.Context() + + appID, err := getWorkflowAppID(cmd) + if err != nil { + return err + } + + var output *string + if cmd.Flags().Changed("output") { + output = &flagTerminateOutput + } + + opts := workflow.TerminateOptions{ + KubernetesMode: flagKubernetesMode, + Namespace: flagDaprNamespace, + AppID: appID, + InstanceID: args[0], + Output: output, + } + + if err = workflow.Terminate(ctx, opts); err != nil { + print.FailureStatusEvent(os.Stdout, err.Error()) + os.Exit(1) + } + + print.InfoStatusEvent(os.Stdout, "Workflow '%s' terminated successfully", args[0]) + + return nil + }, +} + +func init() { + TerminateCmd.Flags().StringVarP(&flagTerminateOutput, "output", "o", "", "Optional output data for the workflow in JSON string format.") + + WorkflowCmd.AddCommand(TerminateCmd) +} diff --git a/cmd/workflow/workflow.go b/cmd/workflow/workflow.go new file mode 100644 index 000000000..3174792ed --- /dev/null +++ b/cmd/workflow/workflow.go @@ -0,0 +1,282 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "errors" + "fmt" + "slices" + "strings" + "time" + + "github.com/spf13/cobra" + + "github.com/dapr/cli/pkg/kubernetes" + "github.com/dapr/cli/pkg/standalone" + "github.com/dapr/cli/pkg/workflow" + "github.com/dapr/kit/ptr" + kittime "github.com/dapr/kit/time" +) + +const ( + outputFormatShort = "short" + outputFormatWide = "wide" + outputFormatYAML = "yaml" + outputFormatJSON = "json" +) + +var ( + flagKubernetesMode bool + flagDaprNamespace string + flagAppID string +) + +var WorkflowCmd = &cobra.Command{ + Use: "workflow", + Short: "Workflow management commands. Use -k to target a Kubernetes Dapr cluster.", + Aliases: []string{"wf"}, +} + +func init() { + WorkflowCmd.PersistentFlags().BoolVarP(&flagKubernetesMode, "kubernetes", "k", false, "Target a Kubernetes dapr installation") + WorkflowCmd.PersistentFlags().StringVarP(&flagDaprNamespace, "namespace", "n", "default", "Namespace to perform workflow operation on") + WorkflowCmd.PersistentFlags().StringVarP(&flagAppID, "app-id", "a", "", "The app ID owner of the workflow instance") +} + +func outputFunc(cmd *cobra.Command) *string { + outputs := []string{ + outputFormatShort, + outputFormatWide, + outputFormatYAML, + outputFormatJSON, + } + + var outputFormat string + cmd.Flags().StringVarP(&outputFormat, "output", "o", outputFormatShort, fmt.Sprintf("Output format. One of %s", + strings.Join(outputs, ", ")), + ) + + pre := cmd.PreRunE + cmd.PreRunE = func(cmd *cobra.Command, args []string) error { + if !slices.Contains(outputs, outputFormat) { + return errors.New("invalid value for --output. Supported values are 'short', 'wide', 'yaml', 'json'.") + } + + if pre != nil { + return pre(cmd, args) + } + return nil + } + + return &outputFormat +} + +func getWorkflowAppID(cmd *cobra.Command) (string, error) { + if cmd.Flags().Changed("app-id") { + return flagAppID, nil + } + + var errRequired = fmt.Errorf("the app ID is required when there are multiple Dapr instances. 
Please specify it using the --app-id flag") + var errNotFound = fmt.Errorf("no Dapr instances found. Please ensure that Dapr is running") + + if flagKubernetesMode { + list, err := kubernetes.List(flagDaprNamespace) + if err != nil { + return "", err + } + + if len(list) == 0 { + return "", errNotFound + } + + if len(list) != 1 { + return "", errRequired + } + + return list[0].AppID, nil + } + + list, err := standalone.List() + if err != nil { + return "", err + } + + if len(list) == 0 { + return "", errNotFound + } + + if len(list) != 1 { + return "", errRequired + } + + return list[0].AppID, nil +} + +func parseWorkflowDurationTimestamp(str string, durationPast bool) (*time.Time, error) { + dur, err := time.ParseDuration(str) + if err == nil { + if durationPast { + dur = -dur + } + return ptr.Of(time.Now().Add(dur)), nil + } + + ts, err := kittime.ParseTime(str, nil) + if err != nil { + return nil, err + } + + return ptr.Of(ts), nil +} + +func filterCmd(cmd *cobra.Command) *workflow.Filter { + filter := new(workflow.Filter) + + var ( + name string + status string + maxAge string + + listStatuses = []string{ + "RUNNING", + "COMPLETED", + "CONTINUED_AS_NEW", + "FAILED", + "CANCELED", + "TERMINATED", + "PENDING", + "SUSPENDED", + } + ) + + cmd.Flags().StringVarP(&name, "filter-name", "w", "", "Filter only the workflows with the given name") + cmd.Flags().StringVarP(&status, "filter-status", "s", "", "Filter only the workflows with the given runtime status. One of "+strings.Join(listStatuses, ", ")) + cmd.Flags().StringVarP(&maxAge, "filter-max-age", "m", "", "Filter only the workflows started within the given duration or timestamp. 
Examples: 300ms, 1.5h or 2h45m, 2023-01-02T15:04:05 or 2023-01-02") + + pre := cmd.PreRunE + cmd.PreRunE = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("filter-name") { + filter.Name = &name + } + if cmd.Flags().Changed("filter-status") { + if !slices.Contains(listStatuses, status) { + return errors.New("invalid value for --filter-status. Supported values are " + strings.Join(listStatuses, ", ")) + } + filter.Status = &status + } + + if cmd.Flags().Changed("filter-max-age") { + var err error + filter.MaxAge, err = parseWorkflowDurationTimestamp(maxAge, true) + if err != nil { + return err + } + } + + if pre != nil { + return pre(cmd, args) + } + + return nil + } + + return filter +} + +type connFlag struct { + connectionString *string + tableName *string +} + +func connectionCmd(cmd *cobra.Command) *connFlag { + var ( + flagConnectionString string + flagTableName string + ) + + cmd.Flags().StringVarP(&flagConnectionString, "connection-string", "c", "", "The connection string used to connect and authenticate to the actor state store") + cmd.Flags().StringVarP(&flagTableName, "table-name", "t", "", "The name of the table or collection which is used as the actor state store") + + var cflag connFlag + pre := cmd.PreRunE + cmd.PreRunE = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("connection-string") { + cflag.connectionString = &flagConnectionString + } + + if cmd.Flags().Changed("table-name") { + cflag.tableName = &flagTableName + } + + if pre != nil { + return pre(cmd, args) + } + + return nil + } + + return &cflag +} + +type instanceIDFlag struct { + instanceID *string +} + +func instanceIDCmd(cmd *cobra.Command) *instanceIDFlag { + var instanceID string + iFlag := new(instanceIDFlag) + + cmd.Flags().StringVarP(&instanceID, "instance-id", "i", "", "The target workflow instance ID.") + + pre := cmd.PreRunE + cmd.PreRunE = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("instance-id") { 
+ iFlag.instanceID = &instanceID + } + + if pre != nil { + return pre(cmd, args) + } + + return nil + } + + return iFlag +} + +type inputFlag struct { + input *string +} + +func inputCmd(cmd *cobra.Command) *inputFlag { + var input string + iFlag := new(inputFlag) + + cmd.Flags().StringVarP(&input, "input", "x", "", "Optional input data for the new workflow instance. Accepts a JSON string.") + + pre := cmd.PreRunE + cmd.PreRunE = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("input") { + iFlag.input = &input + } + + if pre != nil { + return pre(cmd, args) + } + + return nil + } + + return iFlag +} diff --git a/go.mod b/go.mod index 10f2d6c34..94f812890 100644 --- a/go.mod +++ b/go.mod @@ -1,34 +1,44 @@ module github.com/dapr/cli -go 1.24.7 +go 1.24.9 require ( github.com/Masterminds/semver v1.5.0 github.com/Masterminds/semver/v3 v3.3.0 github.com/Pallinder/sillyname-go v0.0.0-20130730142914-97aeae9e6ba1 github.com/briandowns/spinner v1.19.0 - github.com/dapr/dapr v1.16.0-rc.7 - github.com/dapr/go-sdk v1.11.0 + github.com/dapr/dapr v1.16.2 + github.com/dapr/durabletask-go v0.10.1 + github.com/dapr/go-sdk v1.13.0 github.com/dapr/kit v0.16.1 + github.com/diagridio/go-etcd-cron v0.9.1 github.com/docker/docker v25.0.6+incompatible github.com/evanphx/json-patch/v5 v5.9.0 github.com/fatih/color v1.17.0 github.com/gocarina/gocsv v0.0.0-20220927221512-ad3251f9fa25 github.com/hashicorp/go-retryablehttp v0.7.7 github.com/hashicorp/go-version v1.6.0 + github.com/jackc/pgx/v5 v5.7.4 github.com/kolesnikovae/go-winjob v1.0.0 + github.com/mattn/go-sqlite3 v1.14.22 + github.com/microsoft/go-mssqldb v1.6.0 github.com/mitchellh/go-ps v1.0.0 github.com/nightlyone/lockfile v1.0.0 github.com/olekukonko/tablewriter v0.0.5 github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c + github.com/redis/go-redis/v9 v9.6.3 github.com/shirou/gopsutil v3.21.11+incompatible + github.com/sijms/go-ora/v2 v2.8.22 
github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.6 github.com/spf13/viper v1.13.0 github.com/stretchr/testify v1.10.0 + go.etcd.io/etcd/client/v3 v3.5.21 + go.mongodb.org/mongo-driver v1.14.0 golang.org/x/mod v0.25.0 golang.org/x/sys v0.33.0 + google.golang.org/protobuf v1.36.6 gopkg.in/yaml.v2 v2.4.0 helm.sh/helm/v3 v3.17.4 k8s.io/api v0.32.2 @@ -72,11 +82,13 @@ require ( github.com/containerd/errdefs v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect + github.com/coreos/go-semver v0.3.1 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cyphar/filepath-securejoin v0.3.6 // indirect - github.com/dapr/components-contrib v1.16.0-rc.6 // indirect - github.com/dapr/durabletask-go v0.9.0 // indirect + github.com/dapr/components-contrib v1.16.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.6.0 // indirect github.com/dlclark/regexp2 v1.10.0 // indirect github.com/docker/cli v25.0.1+incompatible // indirect @@ -109,8 +121,11 @@ require ( github.com/gobwas/glob v0.2.3 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect + github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v1.0.0 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/cel-go v0.22.0 // indirect github.com/google/gnostic-models v0.6.9 // indirect @@ -130,6 +145,9 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/huandu/xstrings v1.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // 
indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/jhump/protoreflect v1.15.3 // indirect github.com/jmoiron/sqlx v1.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -160,6 +178,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/montanaflynn/stats v0.7.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect @@ -195,26 +214,32 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/tmc/langchaingo v0.1.13 // indirect github.com/x448/float16 v0.8.4 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect + github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect github.com/zeebo/errs v1.4.0 // indirect - go.mongodb.org/mongo-driver v1.14.0 // indirect + go.etcd.io/etcd/api/v3 v3.5.21 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel v1.36.0 // indirect 
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 // indirect go.opentelemetry.io/otel/exporters/zipkin v1.34.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect go.opentelemetry.io/otel/sdk v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.36.0 // indirect go.opentelemetry.io/proto/otlp v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.39.0 // indirect golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect golang.org/x/net v0.41.0 // indirect @@ -226,7 +251,6 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250512202823-5a2f75b736a9 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/grpc v1.73.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index de60cea98..315bd4717 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,21 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= 
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 h1:DRiANoJTiW6obBQe3SqZizkuV1PEgfiiGivmVocDy64= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0/go.mod h1:qLIye2hwb/ZouqhpSD9Zn3SJipvpEnz1Ywl3VUk9Y0s= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= @@ -102,6 +115,10 @@ github.com/briandowns/spinner v1.19.0 h1:s8aq38H+Qju89yhp89b4iIiMzMm8YN3p6vGpwyh github.com/briandowns/spinner v1.19.0/go.mod h1:mQak9GHqbspjC/5iUx3qMlIho8xBS/ppAL/hX5SmPJU= 
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= @@ -142,20 +159,24 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.3.6 
h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM= github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= -github.com/dapr/components-contrib v1.16.0-rc.6 h1:AB04RdqmWdZ84PBaHcUQFMHHl7XaIgOl67BeHKZzqM4= -github.com/dapr/components-contrib v1.16.0-rc.6/go.mod h1:mPT6lNeoQxJoJ0y9HyOGWozscSmTdf4yVJTTJMjSOJE= -github.com/dapr/dapr v1.16.0-rc.7 h1:VC38W4sJq/sZeBltY0SxdIy1QhuDXrOibY++PMU8YDY= -github.com/dapr/dapr v1.16.0-rc.7/go.mod h1:jKw+BrQap6qjqHe++BMwzDN5ORABrhZiaqNvG06yizI= -github.com/dapr/durabletask-go v0.9.0 h1:b2/aNOJau7VS639JodSES/+momwnjjrroAtbn7rp1PI= -github.com/dapr/durabletask-go v0.9.0/go.mod h1:0Ts4rXp74JyG19gDWPcwNo5V6NBZzhARzHF5XynmA7Q= -github.com/dapr/go-sdk v1.11.0 h1:clANpOQd6MsfvSa6snaX8MVk6eRx26Vsj5GxGdQ6mpE= -github.com/dapr/go-sdk v1.11.0/go.mod h1:btZ/tX8eYnx0fg3HiJUku8J5QBRXHsp3kAB1BUiTxXY= +github.com/dapr/components-contrib v1.16.3 h1:e6M+TW+a+x9yq289OBZI0wwmxvBmEuftx4iSgW7GXrQ= +github.com/dapr/components-contrib v1.16.3/go.mod h1:x2EEwLCMxKiHYu0wt5Zf6QN9h4QqCKJZ1xBLyw/F01Q= +github.com/dapr/dapr v1.16.2 h1:pZ2WSW9CG8tbM+afIHO0lLiGr1/AV2O0bMF75OED0TI= +github.com/dapr/dapr v1.16.2/go.mod h1:8JB17X/DyQ5vbmjdKirgBA/lEhIjM/HF9RffSh4tl/0= +github.com/dapr/durabletask-go v0.10.1 h1:gE88Qh4+/6zKdegHjOAOx+UQaPxmwWKWoIDivee23XY= +github.com/dapr/durabletask-go v0.10.1/go.mod h1:0Ts4rXp74JyG19gDWPcwNo5V6NBZzhARzHF5XynmA7Q= +github.com/dapr/go-sdk v1.13.0 h1:Qw2BmUonClQ9yK/rrEEaFL1PyDgq616RrvYj0CT67Lk= +github.com/dapr/go-sdk v1.13.0/go.mod h1:RsffVNZitDApmQqoS68tNKGMXDZUjTviAbKZupJSzts= github.com/dapr/kit v0.16.1 h1:MqLAhHVg8trPy2WJChMZFU7ToeondvxcNHYVvMDiVf4= github.com/dapr/kit v0.16.1/go.mod h1:40ZWs5P6xfYf7O59XgwqZkIyDldTIXlhTQhGop8QoSM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -164,6 +185,10 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/diagridio/go-etcd-cron v0.9.1 h1:KUfcceDtypL8s3hL0jD2ZoiIzjjXY6xDQ4kT1DJF4Ws= +github.com/diagridio/go-etcd-cron v0.9.1/go.mod h1:CSzuxoCDFu+Gbds0RO73GE8CnmL5t85axiPLptsej3I= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= @@ -271,9 +296,16 @@ github.com/gocarina/gocsv v0.0.0-20220927221512-ad3251f9fa25 h1:wxgEEZvsnOTrDO2n github.com/gocarina/gocsv v0.0.0-20220927221512-ad3251f9fa25/go.mod h1:5YoVOkjYAQumqlV356Hj3xeYh4BdZuLE0/nRkf2NKkI= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= +github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 
h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= +github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -307,6 +339,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -393,6 +427,14 @@ github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0 
h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.4 h1:9wKznZrhWa2QiHL+NjTSPP6yjl3451BX3imWDnokYlg= +github.com/jackc/pgx/v5 v5.7.4/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= @@ -465,6 +507,8 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microsoft/go-mssqldb v1.6.0 h1:mM3gYdVwEPFrlg/Dvr2DNVEgYFG7L42l+dGc67NNNpc= +github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU= github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= @@ -496,6 +540,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= +github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -578,6 +624,8 @@ github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzM github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= +github.com/redis/go-redis/v9 v9.6.3 h1:8Dr5ygF1QFXRxIH/m3Xg9MMG1rS8YCtAgosrsewT6i0= +github.com/redis/go-redis/v9 v9.6.3/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= @@ -593,6 +641,8 @@ github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKl github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sijms/go-ora/v2 v2.8.22 
h1:3ABgRzVKxS439cEgSLjFKutIwOyhnyi4oOSBywEdOlU= +github.com/sijms/go-ora/v2 v2.8.22/go.mod h1:QgFInVi3ZWyqAiJwzBQA+nbKYKH77tdp1PYoCqhR2dU= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -647,6 +697,12 @@ github.com/tmc/langchaingo v0.1.13 h1:rcpMWBIi2y3B90XxfE4Ao8dhCQPVDMaNPnN5cGB1Ca github.com/tmc/langchaingo v0.1.13/go.mod h1:vpQ5NOIhpzxDfTZK9B6tf2GM/MoaHewPWM5KXXGh7hg= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -656,10 +712,13 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod 
h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= @@ -670,6 +729,12 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1 github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8= +go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY= +go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc= +go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs= +go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY= +go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU= go.mongodb.org/mongo-driver v1.14.0 
h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -684,8 +749,8 @@ go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJyS go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI= @@ -694,14 +759,14 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qH go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk= go.opentelemetry.io/otel/exporters/zipkin v1.34.0 h1:GSjCkoYqsnvUMCjxF18j2tCWH8fhGZYjH3iYgechPTI= go.opentelemetry.io/otel/exporters/zipkin v1.34.0/go.mod h1:h830hluwAqgSNnZbxL2rJhmAlE7/0SF9esoHVLU04Gc= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/metric v1.36.0 
h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI= go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -720,6 +785,7 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -754,6 +820,7 @@ golang.org/x/mod 
v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -786,9 +853,11 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -811,6 +880,7 @@ golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= @@ -861,6 +931,7 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -879,6 +950,7 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.26.0 
h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -929,6 +1001,7 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/kubernetes/components.go b/pkg/kubernetes/components.go index a18352146..0b9a821ac 100644 --- a/pkg/kubernetes/components.go +++ b/pkg/kubernetes/components.go @@ -47,11 +47,11 @@ func PrintComponents(name, namespace, outputFormat string) error { return nil, err } - return listComponents(client, namespace) + return ListComponents(client, namespace) }, name, outputFormat) } -func listComponents(client versioned.Interface, namespace string) (*v1alpha1.ComponentList, error) { +func ListComponents(client versioned.Interface, namespace string) (*v1alpha1.ComponentList, error) { list, err := client.ComponentsV1alpha1().Components(namespace).List(meta_v1.ListOptions{}) // This means that the Dapr Components CRD is not installed and // therefore no component items exist. 
diff --git a/pkg/kubernetes/components_test.go b/pkg/kubernetes/components_test.go index 1568a9a8d..420f3aff7 100644 --- a/pkg/kubernetes/components_test.go +++ b/pkg/kubernetes/components_test.go @@ -40,7 +40,7 @@ func TestComponents(t *testing.T) { name: "List one config", configName: "", outputFormat: "", - expectedOutput: " NAMESPACE NAME TYPE VERSION SCOPES CREATED AGE \n default appConfig state.redis v1 " + formattedNow + " 0s \n", + expectedOutput: "NAMESPACE NAME TYPE VERSION CREATED AGE \ndefault appConfig state.redis v1 " + formattedNow + " 0s \n", errString: "", errorExpected: false, k8sConfig: []v1alpha1.Component{ @@ -70,7 +70,7 @@ func TestComponents(t *testing.T) { name: "Filters out daprsystem", configName: "", outputFormat: "", - expectedOutput: " NAMESPACE NAME TYPE VERSION SCOPES CREATED AGE \n default appConfig state.redis v1 " + formattedNow + " 0s \n", + expectedOutput: "NAMESPACE NAME TYPE VERSION CREATED AGE \ndefault appConfig state.redis v1 " + formattedNow + " 0s \n", errString: "", errorExpected: false, k8sConfig: []v1alpha1.Component{ @@ -98,7 +98,7 @@ func TestComponents(t *testing.T) { name: "Name does match", configName: "appConfig", outputFormat: "list", - expectedOutput: " NAMESPACE NAME TYPE VERSION SCOPES CREATED AGE \n default appConfig state.redis v1 " + formattedNow + " 0s \n", + expectedOutput: "NAMESPACE NAME TYPE VERSION CREATED AGE \ndefault appConfig state.redis v1 " + formattedNow + " 0s \n", errString: "", errorExpected: false, k8sConfig: []v1alpha1.Component{ @@ -119,7 +119,7 @@ func TestComponents(t *testing.T) { name: "Name does not match", configName: "appConfig", outputFormat: "list", - expectedOutput: " NAMESPACE NAME TYPE VERSION SCOPES CREATED AGE \n", + expectedOutput: "NAMESPACE NAME TYPE VERSION SCOPES CREATED AGE \n", errString: "", errorExpected: false, k8sConfig: []v1alpha1.Component{ diff --git a/pkg/kubernetes/configurations_test.go b/pkg/kubernetes/configurations_test.go index a48d655bb..8d62a3f51 
100644 --- a/pkg/kubernetes/configurations_test.go +++ b/pkg/kubernetes/configurations_test.go @@ -41,7 +41,7 @@ func TestConfigurations(t *testing.T) { name: "List one config", configName: "", outputFormat: "", - expectedOutput: " NAMESPACE NAME TRACING-ENABLED METRICS-ENABLED AGE CREATED \n default appConfig false false 0s " + formattedNow + " \n", + expectedOutput: "NAMESPACE NAME TRACING-ENABLED METRICS-ENABLED AGE CREATED \ndefault appConfig false false 0s " + formattedNow + " \n", errString: "", errorExpected: false, k8sConfig: []v1alpha1.Configuration{ @@ -68,7 +68,7 @@ func TestConfigurations(t *testing.T) { name: "Filters out daprsystem", configName: "", outputFormat: "", - expectedOutput: " NAMESPACE NAME TRACING-ENABLED METRICS-ENABLED AGE CREATED \n default appConfig false false 0s " + formattedNow + " \n", + expectedOutput: "NAMESPACE NAME TRACING-ENABLED METRICS-ENABLED AGE CREATED \ndefault appConfig false false 0s " + formattedNow + " \n", errString: "", errorExpected: false, k8sConfig: []v1alpha1.Configuration{ @@ -94,7 +94,7 @@ func TestConfigurations(t *testing.T) { name: "Name does match", configName: "appConfig", outputFormat: "list", - expectedOutput: " NAMESPACE NAME TRACING-ENABLED METRICS-ENABLED AGE CREATED \n default appConfig false false 0s " + formattedNow + " \n", + expectedOutput: "NAMESPACE NAME TRACING-ENABLED METRICS-ENABLED AGE CREATED \ndefault appConfig false false 0s " + formattedNow + " \n", errString: "", errorExpected: false, k8sConfig: []v1alpha1.Configuration{ @@ -112,7 +112,7 @@ func TestConfigurations(t *testing.T) { name: "Name does not match", configName: "appConfig", outputFormat: "list", - expectedOutput: " NAMESPACE NAME TRACING-ENABLED METRICS-ENABLED AGE CREATED \n", + expectedOutput: "NAMESPACE NAME TRACING-ENABLED METRICS-ENABLED AGE CREATED \n", errString: "", errorExpected: false, k8sConfig: []v1alpha1.Configuration{ diff --git a/pkg/kubernetes/list.go b/pkg/kubernetes/list.go index a4db635dc..64a71c8b0 100644 
--- a/pkg/kubernetes/list.go +++ b/pkg/kubernetes/list.go @@ -21,11 +21,13 @@ import ( // ListOutput represents the application ID, application port and creation time. type ListOutput struct { - Namespace string `csv:"NAMESPACE" json:"namespace" yaml:"namespace"` - AppID string `csv:"APP ID" json:"appId" yaml:"appId"` - AppPort string `csv:"APP PORT" json:"appPort" yaml:"appPort"` - Age string `csv:"AGE" json:"age" yaml:"age"` - Created string `csv:"CREATED" json:"created" yaml:"created"` + Namespace string `csv:"NAMESPACE" json:"namespace" yaml:"namespace"` + AppID string `csv:"APP ID" json:"appId" yaml:"appId"` + AppPort string `csv:"APP PORT" json:"appPort" yaml:"appPort"` + Age string `csv:"AGE" json:"age" yaml:"age"` + Created string `csv:"CREATED" json:"created" yaml:"created"` + DaprGRPCPort string `csv:"-" json:"-" yaml:"-"` + PodName string `csv:"-" json:"-" yaml:"-"` } // List outputs all the applications. @@ -46,17 +48,22 @@ func List(namespace string) ([]ListOutput, error) { if c.Name == "daprd" { lo := ListOutput{} for i, a := range c.Args { - if a == "--app-port" { + switch a { + case "--app-port": port := c.Args[i+1] lo.AppPort = port - } else if a == "--app-id" { + case "--app-id": id := c.Args[i+1] lo.AppID = id + case "--dapr-grpc-port": + port := c.Args[i+1] + lo.DaprGRPCPort = port } } lo.Namespace = p.GetNamespace() lo.Created = p.CreationTimestamp.Format("2006-01-02 15:04.05") lo.Age = age.GetAge(p.CreationTimestamp.Time) + lo.PodName = p.GetName() l = append(l, lo) } } diff --git a/pkg/scheduler/delete.go b/pkg/scheduler/delete.go new file mode 100644 index 000000000..2d5b15686 --- /dev/null +++ b/pkg/scheduler/delete.go @@ -0,0 +1,70 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "context" + "fmt" + "os" + + clientv3 "go.etcd.io/etcd/client/v3" + + "github.com/dapr/cli/pkg/print" +) + +type DeleteOptions struct { + SchedulerNamespace string + DaprNamespace string + KubernetesMode bool +} + +func Delete(ctx context.Context, opts DeleteOptions, keys ...string) error { + etcdClient, cancel, err := EtcdClient(opts.KubernetesMode, opts.SchedulerNamespace) + if err != nil { + return err + } + defer cancel() + + for _, key := range keys { + if err = delSingle(ctx, etcdClient, key, opts); err != nil { + return err + } + + print.InfoStatusEvent(os.Stdout, "Deleted %s in namespace '%s'.", key, opts.DaprNamespace) + } + + return nil +} + +func delSingle(ctx context.Context, client *clientv3.Client, key string, opts DeleteOptions) error { + jobKey, err := parseJobKey(key) + if err != nil { + return err + } + + paths := pathsFromJobKey(jobKey, opts.DaprNamespace) + resp, err := client.Txn(ctx).Then( + clientv3.OpDelete(paths[0]), + clientv3.OpDelete(paths[1]), + ).Commit() + if err != nil { + return err + } + + if len(resp.Responses) == 0 || resp.Responses[0].GetResponseDeleteRange().Deleted == 0 { + return fmt.Errorf("no job with key '%s' found in namespace '%s'", key, opts.DaprNamespace) + } + + return nil +} diff --git a/pkg/scheduler/deleteall.go b/pkg/scheduler/deleteall.go new file mode 100644 index 000000000..37edfcf91 --- /dev/null +++ b/pkg/scheduler/deleteall.go @@ -0,0 +1,139 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in 
compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "context" + "fmt" + "os" + "strings" + + clientv3 "go.etcd.io/etcd/client/v3" + + "github.com/dapr/cli/pkg/print" +) + +func DeleteAll(ctx context.Context, opts DeleteOptions, key string) error { + etcdClient, cancel, err := EtcdClient(opts.KubernetesMode, opts.SchedulerNamespace) + if err != nil { + return err + } + defer cancel() + + split := strings.Split(key, "/") + + var paths []string + switch split[0] { + case "all": + if len(split) != 1 { + return fmt.Errorf("invalid key format: %s", key) + } + paths = []string{ + fmt.Sprintf("dapr/jobs/app||%s||", opts.DaprNamespace), + fmt.Sprintf("dapr/jobs/actorreminder||%s||", opts.DaprNamespace), + fmt.Sprintf("dapr/counters/app||%s||", opts.DaprNamespace), + fmt.Sprintf("dapr/counters/actorreminder||%s||", opts.DaprNamespace), + } + case "app": + switch len(split) { + case 1: + paths = []string{ + fmt.Sprintf("dapr/jobs/app||%s||", opts.DaprNamespace), + fmt.Sprintf("dapr/counters/app||%s||", opts.DaprNamespace), + } + case 2: + paths = []string{ + fmt.Sprintf("dapr/jobs/app||%s||%s||", opts.DaprNamespace, split[1]), + fmt.Sprintf("dapr/counters/app||%s||%s||", opts.DaprNamespace, split[1]), + } + default: + return fmt.Errorf("invalid key format: %s", key) + } + + case "actor": + switch len(split) { + case 2: + paths = []string{ + fmt.Sprintf("dapr/jobs/actorreminder||%s||%s||", opts.DaprNamespace, split[1]), + fmt.Sprintf("dapr/counters/actorreminder||%s||%s||", opts.DaprNamespace, split[1]), + } + case 3: + paths = []string{ + 
fmt.Sprintf("dapr/jobs/actorreminder||%s||%s||%s||", opts.DaprNamespace, split[1], split[2]), + fmt.Sprintf("dapr/counters/actorreminder||%s||%s||%s||", opts.DaprNamespace, split[1], split[2]), + } + default: + return fmt.Errorf("invalid key format: %s", key) + } + + case "workflow": + switch len(split) { + case 1: + paths = []string{ + fmt.Sprintf("dapr/jobs/actorreminder||%s||dapr.internal.%s.", opts.DaprNamespace, opts.DaprNamespace), + fmt.Sprintf("dapr/counters/actorreminder||%s||dapr.internal.%s.", opts.DaprNamespace, opts.DaprNamespace), + } + case 2: + paths = []string{ + fmt.Sprintf("dapr/jobs/actorreminder||%s||dapr.internal.%s.%s.workflow||", opts.DaprNamespace, opts.DaprNamespace, split[1]), + fmt.Sprintf("dapr/jobs/actorreminder||%s||dapr.internal.%s.%s.activity||", opts.DaprNamespace, opts.DaprNamespace, split[1]), + fmt.Sprintf("dapr/counters/actorreminder||%s||dapr.internal.%s.%s.workflow||", opts.DaprNamespace, opts.DaprNamespace, split[1]), + fmt.Sprintf("dapr/counters/actorreminder||%s||dapr.internal.%s.%s.activity||", opts.DaprNamespace, opts.DaprNamespace, split[1]), + } + case 3: + paths = []string{ + fmt.Sprintf("dapr/jobs/actorreminder||%s||dapr.internal.%s.%s.workflow||%s||", opts.DaprNamespace, opts.DaprNamespace, split[1], split[2]), + fmt.Sprintf("dapr/jobs/actorreminder||%s||dapr.internal.%s.%s.activity||%s::", opts.DaprNamespace, opts.DaprNamespace, split[1], split[2]), + fmt.Sprintf("dapr/counters/actorreminder||%s||dapr.internal.%s.%s.workflow||%s||", opts.DaprNamespace, opts.DaprNamespace, split[1], split[2]), + fmt.Sprintf("dapr/counters/actorreminder||%s||dapr.internal.%s.%s.activity||%s::", opts.DaprNamespace, opts.DaprNamespace, split[1], split[2]), + } + default: + return fmt.Errorf("invalid key format: %s", key) + } + + default: + return fmt.Errorf("unknown key prefix: %s", split[0]) + } + + oopts := make([]clientv3.Op, 0, len(paths)) + for _, path := range paths { + oopts = append(oopts, clientv3.OpDelete(path, + 
clientv3.WithPrefix(), + clientv3.WithPrevKV(), + clientv3.WithKeysOnly(), + )) + } + + resp, err := etcdClient.Txn(ctx).Then(oopts...).Commit() + if err != nil { + return err + } + + // Only count actual jobs, not counters. + var deleted int64 + toCount := resp.Responses[:1] + if len(paths) > 2 { + toCount = resp.Responses[:2] + } + for _, resp := range toCount { + for _, kv := range resp.GetResponseDeleteRange().GetPrevKvs() { + print.InfoStatusEvent(os.Stdout, "Deleted job '%s'.", kv.Key) + } + deleted += resp.GetResponseDeleteRange().Deleted + } + + print.InfoStatusEvent(os.Stdout, "Deleted %d jobs in namespace '%s'.", deleted, opts.DaprNamespace) + + return nil +} diff --git a/pkg/scheduler/exportimport.go b/pkg/scheduler/exportimport.go new file mode 100644 index 000000000..957ead1ab --- /dev/null +++ b/pkg/scheduler/exportimport.go @@ -0,0 +1,153 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scheduler + +import ( + "context" + "encoding/gob" + "errors" + "fmt" + "os" + + clientv3 "go.etcd.io/etcd/client/v3" + "google.golang.org/protobuf/proto" + + "github.com/dapr/cli/pkg/print" + "github.com/dapr/cli/pkg/scheduler/stored" +) + +type ExportImportOptions struct { + SchedulerNamespace string + KubernetesMode bool + TargetFile string +} + +type ExportFile struct { + Jobs map[string][]byte + Counters map[string][]byte +} + +func Export(ctx context.Context, opts ExportImportOptions) error { + if _, err := os.Stat(opts.TargetFile); !errors.Is(err, os.ErrNotExist) { + if err == nil { + return fmt.Errorf("file '%s' already exists", opts.TargetFile) + } + return err + } + + client, cancel, err := EtcdClient(opts.KubernetesMode, opts.SchedulerNamespace) + if err != nil { + return err + } + defer cancel() + + jobs, err := listJobs(ctx, client) + if err != nil { + return err + } + counters, err := listCounters(ctx, client) + if err != nil { + return err + } + + out := ExportFile{ + Jobs: make(map[string][]byte, len(jobs)), + Counters: make(map[string][]byte, len(counters)), + } + + var b []byte + for k, j := range jobs { + b, err = proto.Marshal(j) + if err != nil { + return fmt.Errorf("marshal job %q: %w", k, err) + } + out.Jobs[k] = b + } + for k, c := range counters { + b, err = proto.Marshal(c) + if err != nil { + return fmt.Errorf("marshal counter %q: %w", k, err) + } + out.Counters[k] = b + } + + f, err := os.OpenFile(opts.TargetFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600) + if err != nil { + return fmt.Errorf("open %s: %w", opts.TargetFile, err) + } + defer f.Close() + + if err := gob.NewEncoder(f).Encode(&out); err != nil { + _ = os.Remove(opts.TargetFile) + return fmt.Errorf("encode export file: %w", err) + } + + print.InfoStatusEvent(os.Stdout, "Exported %d jobs and %d counters.", len(out.Jobs), len(out.Counters)) + return nil +} + +func Import(ctx context.Context, opts ExportImportOptions) error { + client, cancel, err := 
EtcdClient(opts.KubernetesMode, opts.SchedulerNamespace) + if err != nil { + return err + } + defer cancel() + + f, err := os.OpenFile(opts.TargetFile, os.O_RDONLY, 0o600) + if err != nil { + return fmt.Errorf("open %s: %w", opts.TargetFile, err) + } + defer f.Close() + + var in ExportFile + if err := gob.NewDecoder(f).Decode(&in); err != nil { + return fmt.Errorf("decode import file: %w", err) + } + + ops := make([]clientv3.Op, 0, len(in.Jobs)+len(in.Counters)) + + for key, b := range in.Jobs { + var j stored.Job + if err := proto.Unmarshal(b, &j); err != nil { + return fmt.Errorf("unmarshal job %q: %w", key, err) + } + ops = append(ops, clientv3.OpPut(key, string(b))) + } + + for key, b := range in.Counters { + var c stored.Counter + if err := proto.Unmarshal(b, &c); err != nil { + return fmt.Errorf("unmarshal counter %q: %w", key, err) + } + ops = append(ops, clientv3.OpPut(key, string(b))) + } + + var end int + for i := 0; i < len(ops); i += 128 { + txn := client.Txn(ctx) + end = i + 128 + if end > len(ops) { + end = len(ops) + } + txn.Then(ops[i:end]...) + if _, err := txn.Commit(); err != nil { + print.FailureStatusEvent(os.Stderr, "Incomplete import with %d items.", end) + return fmt.Errorf("commit transaction: %w", err) + } + } + + print.InfoStatusEvent(os.Stdout, "Imported %d items.", end) + + return nil +} diff --git a/pkg/scheduler/get.go b/pkg/scheduler/get.go new file mode 100644 index 000000000..28506918f --- /dev/null +++ b/pkg/scheduler/get.go @@ -0,0 +1,141 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "context" + "fmt" + + "github.com/dapr/cli/pkg/scheduler/stored" + clientv3 "go.etcd.io/etcd/client/v3" + "google.golang.org/protobuf/proto" +) + +type GetOptions struct { + SchedulerNamespace string + DaprNamespace string + KubernetesMode bool +} + +func Get(ctx context.Context, opts GetOptions, keys ...string) ([]*ListOutput, error) { + list, err := GetWide(ctx, opts, keys...) + if err != nil { + return nil, err + } + + return listWideToShort(list) +} + +func GetWide(ctx context.Context, opts GetOptions, keys ...string) ([]*ListOutputWide, error) { + etcdClient, cancel, err := EtcdClient(opts.KubernetesMode, opts.SchedulerNamespace) + if err != nil { + return nil, err + } + defer cancel() + + results := make([]*ListOutputWide, 0, len(keys)) + for _, key := range keys { + wide, err := getSingle(ctx, etcdClient, key, opts) + if err != nil { + return nil, err + } + + results = append(results, wide) + } + + return results, nil +} + +func getSingle(ctx context.Context, cl *clientv3.Client, key string, opts GetOptions) (*ListOutputWide, error) { + jobKey, err := parseJobKey(key) + if err != nil { + return nil, err + } + + paths := pathsFromJobKey(jobKey, opts.DaprNamespace) + + resp, err := cl.Txn(ctx).Then( + clientv3.OpGet(paths[0]), + clientv3.OpGet(paths[1]), + ).Commit() + if err != nil { + return nil, err + } + + if len(resp.Responses[0].GetResponseRange().Kvs) == 0 { + return nil, fmt.Errorf("job '%s' not found", key) + } + + var storedJ stored.Job + if err = proto.Unmarshal(resp.Responses[0].GetResponseRange().Kvs[0].Value, &storedJ); err != nil { + return nil, err + } + + var storedC stored.Counter + if kvs := resp.Responses[1].GetResponseRange().Kvs; len(kvs) > 0 { + if err = proto.Unmarshal(kvs[0].Value, &storedC); err != nil { + return nil, err + } + } + + return parseJob(&JobCount{ + Key: paths[0], + Job: &storedJ, + 
Counter: &storedC, + }, Filter{ + Type: FilterAll, + }) +} + +func pathsFromJobKey(jobKey *jobKey, namespace string) [2]string { + const reminderPath = "dapr/jobs/actorreminder" + const reminderCounterPath = "dapr/counters/actorreminder" + + var paths [2]string + switch { + case jobKey.actorType != nil: + paths[0] = fmt.Sprintf("%s||%s||%s||%s||%s", + reminderPath, namespace, *jobKey.actorType, *jobKey.actorID, jobKey.name, + ) + paths[1] = fmt.Sprintf("%s||%s||%s||%s||%s", + reminderCounterPath, namespace, *jobKey.actorType, *jobKey.actorID, jobKey.name, + ) + + case jobKey.activity: + actorType := fmt.Sprintf("dapr.internal.%s.%s.activity", namespace, *jobKey.appID) + actorID := jobKey.name + paths[0] = fmt.Sprintf("%s||%s||%s||%s||run-activity", + reminderPath, namespace, actorType, actorID, + ) + paths[1] = fmt.Sprintf("%s||%s||%s||%s||run-activity", + reminderCounterPath, namespace, actorType, actorID, + ) + + case jobKey.instanceID != nil: + actorType := fmt.Sprintf("dapr.internal.%s.%s.workflow", namespace, *jobKey.appID) + actorID := *jobKey.instanceID + paths[0] = fmt.Sprintf("%s||%s||%s||%s||%s", + reminderPath, namespace, actorType, actorID, jobKey.name, + ) + paths[1] = fmt.Sprintf("%s||%s||%s||%s||%s", + reminderCounterPath, namespace, actorType, actorID, jobKey.name, + ) + + default: + paths[0] = fmt.Sprintf("dapr/jobs/app||%s||%s||%s", namespace, *jobKey.appID, jobKey.name) + paths[1] = fmt.Sprintf("dapr/counters/app||%s||%s||%s", namespace, *jobKey.appID, jobKey.name) + } + + return paths +} diff --git a/pkg/scheduler/list.go b/pkg/scheduler/list.go new file mode 100644 index 000000000..9f44d5fd1 --- /dev/null +++ b/pkg/scheduler/list.go @@ -0,0 +1,212 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + clientv3 "go.etcd.io/etcd/client/v3" + "google.golang.org/protobuf/proto" + + "github.com/dapr/cli/pkg/scheduler/stored" + "github.com/dapr/cli/utils" +) + +type ListOptions struct { + SchedulerNamespace string + KubernetesMode bool + Filter Filter +} + +type ListOutputWide struct { + Namespace string `csv:"NAMESPACE" json:"namespace" yaml:"namespace"` + Name string `csv:"NAME" json:"name" yaml:"name"` + Begin time.Time `csv:"BEGIN" json:"begin" yaml:"begin,omitempty"` + Expiration *time.Time `csv:"EXPIRATION" json:"expiration" yaml:"expiration,omitempty"` + Schedule *string `csv:"SCHEDULE" json:"schedule" yaml:"schedule,omitempty"` + DueTime *string `csv:"DUE TIME" json:"dueTime" yaml:"dueTime,omitempty"` + TTL *string `csv:"TTL" json:"ttl" yaml:"ttl,omitempty"` + Repeats *uint32 `csv:"REPEATS" json:"repeats" yaml:"repeats,omitempty"` + Count uint32 `csv:"COUNT" json:"count" yaml:"count,omitempty"` + LastTrigger *time.Time `csv:"LAST TRIGGER" json:"lastTrigger,omitempty" yaml:"lastTrigger,omitempty"` +} + +type ListOutput struct { + Name string `csv:"NAME" json:"name" yaml:"name"` + Begin string `csv:"BEGIN" json:"begin" yaml:"begin,omitempty"` + Count uint32 `csv:"COUNT" json:"count" yaml:"count,omitempty"` + LastTrigger string `csv:"LAST TRIGGER" json:"lastTrigger" yaml:"lastTrigger"` +} + +type JobCount struct { + Key string + Job *stored.Job + Counter *stored.Counter +} + +func List(ctx context.Context, opts ListOptions) ([]*ListOutput, error) { + listWide, err := ListWide(ctx, 
opts) + if err != nil { + return nil, err + } + + return listWideToShort(listWide) +} + +func ListWide(ctx context.Context, opts ListOptions) ([]*ListOutputWide, error) { + jobCounters, err := ListJobs(ctx, opts) + if err != nil { + return nil, err + } + + var list []*ListOutputWide + for _, jobCounter := range jobCounters { + listoutput, err := parseJob(jobCounter, opts.Filter) + if err != nil { + return nil, err + } + + if listoutput == nil { + continue + } + + list = append(list, listoutput) + } + + sort.SliceStable(list, func(i, j int) bool { + if list[i].Namespace == list[j].Namespace { + if list[i].Begin.Equal(list[j].Begin) { + return list[i].Name < list[j].Name + } + return list[i].Begin.Before(list[j].Begin) + } + return list[i].Namespace < list[j].Namespace + }) + + return list, nil +} + +func ListJobs(ctx context.Context, opts ListOptions) ([]*JobCount, error) { + etcdClient, cancel, err := EtcdClient(opts.KubernetesMode, opts.SchedulerNamespace) + if err != nil { + return nil, err + } + defer cancel() + + jobs, err := listJobs(ctx, etcdClient) + if err != nil { + return nil, err + } + + counters, err := listCounters(ctx, etcdClient) + if err != nil { + return nil, err + } + + jobCounts := make([]*JobCount, 0, len(jobs)) + for key, job := range jobs { + jobCount := &JobCount{ + Key: key, + Job: job, + } + + counter, ok := counters[strings.ReplaceAll(key, "dapr/jobs/", "dapr/counters/")] + if ok { + jobCount.Counter = counter + } + + jobCounts = append(jobCounts, jobCount) + } + + return jobCounts, nil +} + +func listWideToShort(listWide []*ListOutputWide) ([]*ListOutput, error) { + now := time.Now() + list := make([]*ListOutput, 0, len(listWide)) + for _, item := range listWide { + if item == nil { + continue + } + + l := ListOutput{ + Name: item.Name, + Count: item.Count, + } + + if item.LastTrigger != nil { + l.LastTrigger = "-" + utils.HumanizeDuration(now.Sub(*item.LastTrigger)) + } + + if item.Begin.After(now) { + l.Begin = "+" + 
utils.HumanizeDuration(item.Begin.Sub(now)) + } else { + l.Begin = "-" + utils.HumanizeDuration(now.Sub(item.Begin)) + } + + list = append(list, &l) + } + + return list, nil +} + +func listJobs(ctx context.Context, client *clientv3.Client) (map[string]*stored.Job, error) { + resp, err := client.Get(ctx, + "dapr/jobs/", + clientv3.WithPrefix(), + clientv3.WithLimit(0), + ) + if err != nil { + return nil, err + } + + jobs := make(map[string]*stored.Job) + for _, kv := range resp.Kvs { + var stored stored.Job + if err := proto.Unmarshal(kv.Value, &stored); err != nil { + return nil, fmt.Errorf("failed to unmarshal job %s: %w", kv.Key, err) + } + + jobs[string(kv.Key)] = &stored + } + + return jobs, nil +} + +func listCounters(ctx context.Context, client *clientv3.Client) (map[string]*stored.Counter, error) { + resp, err := client.Get(ctx, + "dapr/counters/", + clientv3.WithPrefix(), + clientv3.WithLimit(0), + ) + if err != nil { + return nil, err + } + + counters := make(map[string]*stored.Counter) + for _, kv := range resp.Kvs { + var stored stored.Counter + if err := proto.Unmarshal(kv.Value, &stored); err != nil { + return nil, fmt.Errorf("failed to unmarshal counter %s: %w", kv.Key, err) + } + + counters[string(kv.Key)] = &stored + } + + return counters, nil +} diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go new file mode 100644 index 000000000..784ea8371 --- /dev/null +++ b/pkg/scheduler/scheduler.go @@ -0,0 +1,255 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "context" + "fmt" + "strings" + "time" + + clientv3 "go.etcd.io/etcd/client/v3" + + "github.com/dapr/cli/pkg/kubernetes" + "github.com/dapr/cli/pkg/scheduler/stored" + schedulerv1 "github.com/dapr/dapr/pkg/proto/scheduler/v1" + "github.com/dapr/kit/ptr" +) + +const ( + FilterAll = "all" + FilterApp = "app" + FilterActor = "actor" + FilterWorkflow = "workflow" + FilterActivity = "activity" +) + +type Filter struct { + Type string + Namespace *string +} + +type jobKey struct { + appID *string + + actorType *string + actorID *string + + instanceID *string + activity bool + + name string +} + +func parseJob(jobCounter *JobCount, opts Filter) (*ListOutputWide, error) { + var meta schedulerv1.JobMetadata + if err := jobCounter.Job.GetJob().GetMetadata().UnmarshalTo(&meta); err != nil { + return nil, err + } + + if opts.Type != FilterAll { + switch meta.GetTarget().GetType().(type) { + case *schedulerv1.JobTargetMetadata_Job: + if opts.Type != FilterApp { + return nil, nil + } + case *schedulerv1.JobTargetMetadata_Actor: + atype := meta.GetTarget().GetActor().GetType() + switch { + case strings.HasPrefix(atype, "dapr.internal.") && strings.HasSuffix(atype, ".workflow"): + if opts.Type != FilterWorkflow { + return nil, nil + } + case strings.HasPrefix(atype, "dapr.internal.") && strings.HasSuffix(atype, ".activity"): + if opts.Type != FilterActivity { + return nil, nil + } + default: + if opts.Type != FilterActor { + return nil, nil + } + } + } + } + + if opts.Namespace != nil && meta.GetNamespace() != *opts.Namespace { + return nil, nil + } + + listoutput := ListOutputWide{ + Name: jobCounter.Key[(strings.LastIndex(jobCounter.Key, "||") + 2):], + Namespace: meta.GetNamespace(), + Schedule: jobCounter.Job.GetJob().Schedule, + DueTime: jobCounter.Job.GetJob().DueTime, + TTL: jobCounter.Job.GetJob().Ttl, + Repeats: 
jobCounter.Job.GetJob().Repeats, + } + + switch meta.GetTarget().GetType().(type) { + case *schedulerv1.JobTargetMetadata_Job: + listoutput.Name = "app/" + meta.GetAppId() + "/" + listoutput.Name + case *schedulerv1.JobTargetMetadata_Actor: + atype := meta.GetTarget().GetActor().GetType() + switch { + case strings.HasPrefix(atype, "dapr.internal.") && strings.HasSuffix(atype, ".workflow"): + listoutput.Name = "workflow/" + fmt.Sprintf("%s/%s/%s", + strings.Split(atype, ".")[3], meta.GetTarget().GetActor().GetId(), + listoutput.Name, + ) + case strings.HasPrefix(atype, "dapr.internal.") && strings.HasSuffix(atype, ".activity"): + listoutput.Name = "activity/" + fmt.Sprintf("%s/%s", + strings.Split(atype, ".")[3], meta.GetTarget().GetActor().GetId(), + ) + default: + listoutput.Name = "actor/" + fmt.Sprintf("%s/%s/%s", + meta.GetTarget().GetActor().GetType(), + meta.GetTarget().GetActor().GetId(), + listoutput.Name, + ) + } + } + + switch t := jobCounter.Job.GetBegin().(type) { + case *stored.Job_DueTime: + listoutput.Begin = t.DueTime.AsTime().Truncate(time.Second) + case *stored.Job_Start: + listoutput.Begin = t.Start.AsTime().Truncate(time.Second) + } + + if jobCounter.Job.Expiration != nil { + listoutput.Expiration = ptr.Of(jobCounter.Job.Expiration.AsTime().Truncate(time.Second)) + } + + if jobCounter.Counter != nil { + listoutput.Count = jobCounter.Counter.Count + if jobCounter.Counter.LastTrigger != nil { + listoutput.LastTrigger = ptr.Of(jobCounter.Counter.LastTrigger.AsTime().Truncate(time.Second)) + } + } + + return &listoutput, nil +} + +func parseJobKey(key string) (*jobKey, error) { + split := strings.Split(key, "/") + if len(split) < 2 { + return nil, fmt.Errorf("failed to parse job key, expecting '{target type}/{identifier}', got '%s'", key) + } + + switch split[0] { + case FilterApp: + if len(split) != 3 { + return nil, fmt.Errorf("expecting job key to be in format 'app/{app ID}/{job name}', got '%s'", key) + } + return &jobKey{ + appID: &split[1], + 
name: split[2], + }, nil + + case FilterActor: + if len(split) != 4 { + return nil, fmt.Errorf("expecting actor reminder key to be in format 'actor/{actor type}/{actor id}/{name}', got '%s'", key) + } + return &jobKey{ + actorType: &split[1], + actorID: &split[2], + name: split[3], + }, nil + + case FilterWorkflow: + if len(split) != 4 { + return nil, fmt.Errorf("expecting workflow key to be in format 'workflow/{app ID}/{instance ID}/{name}', got '%s'", key) + } + return &jobKey{ + appID: &split[1], + instanceID: &split[2], + name: split[3], + }, nil + + case FilterActivity: + if len(split) != 3 { + return nil, fmt.Errorf("expecting activity key to be in format 'activity/{app ID}/{activity ID}', got '%s'", key) + } + return &jobKey{ + appID: &split[1], + name: split[2], + activity: true, + }, nil + + default: + return nil, fmt.Errorf("unsupported job type '%s', accepts 'app', 'actor', 'workflow', or 'activity'", split[0]) + } +} + +func EtcdClient(kubernetesMode bool, schedulerNamespace string) (*clientv3.Client, context.CancelFunc, error) { + var etcdClient *clientv3.Client + var err error + if kubernetesMode { + var cancel context.CancelFunc + etcdClient, cancel, err = etcdClientKubernetes(schedulerNamespace) + if err != nil { + return nil, nil, err + } + return etcdClient, cancel, nil + } else { + etcdClient, err = getEtcdClient("localhost:2379") + if err != nil { + return nil, nil, err + } + } + + return etcdClient, func() {}, nil +} + +func getEtcdClient(host string) (*clientv3.Client, error) { + client, err := clientv3.New(clientv3.Config{ + Endpoints: []string{host}, + }) + if err != nil { + return nil, err + } + + return client, nil +} + +func etcdClientKubernetes(namespace string) (*clientv3.Client, context.CancelFunc, error) { + config, _, err := kubernetes.GetKubeConfigClient() + if err != nil { + return nil, nil, err + } + + portForward, err := kubernetes.NewPortForward( + config, + namespace, + "dapr-scheduler-server-0", + "localhost", + 2379, + 2379, 
+ false, + ) + if err != nil { + return nil, nil, err + } + + if err = portForward.Init(); err != nil { + return nil, nil, err + } + + client, err := getEtcdClient("localhost:2379") + if err != nil { + return nil, nil, err + } + + return client, portForward.Stop, nil +} diff --git a/pkg/scheduler/stored/counter.pb.go b/pkg/scheduler/stored/counter.pb.go new file mode 100644 index 000000000..10ced51ea --- /dev/null +++ b/pkg/scheduler/stored/counter.pb.go @@ -0,0 +1,197 @@ +// +//Copyright (c) 2024 Diagrid Inc. +//Licensed under the MIT License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.29.3 +// source: proto/stored/counter.proto + +package stored + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Counter holds counter information for a given job. +type Counter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // job_partition_id is the parition_id of the job this counter belongs to. + // Prevents an updated job from inheriting the counter of a previous job with + // the same name. + // Doesn't need to be globally unique. + JobPartitionId uint64 `protobuf:"varint,1,opt,name=job_partition_id,json=jobPartitionId,proto3" json:"job_partition_id,omitempty"` + // count is the number of times the job has been triggered. + Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + // last_trigger is the timestamp the job was last triggered. 
Used to + // determine the next time the job should be triggered. + LastTrigger *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_trigger,json=lastTrigger,proto3" json:"last_trigger,omitempty"` + // attempts is the number of times the job has been attempted to be triggered + // at this count. Used by failure policy to track how many times the Job + // trigger should be retried. + Attempts uint32 `protobuf:"varint,4,opt,name=attempts,proto3" json:"attempts,omitempty"` +} + +func (x *Counter) Reset() { + *x = Counter{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_stored_counter_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Counter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Counter) ProtoMessage() {} + +func (x *Counter) ProtoReflect() protoreflect.Message { + mi := &file_proto_stored_counter_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Counter.ProtoReflect.Descriptor instead. 
+func (*Counter) Descriptor() ([]byte, []int) { + return file_proto_stored_counter_proto_rawDescGZIP(), []int{0} +} + +func (x *Counter) GetJobPartitionId() uint64 { + if x != nil { + return x.JobPartitionId + } + return 0 +} + +func (x *Counter) GetCount() uint32 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *Counter) GetLastTrigger() *timestamppb.Timestamp { + if x != nil { + return x.LastTrigger + } + return nil +} + +func (x *Counter) GetAttempts() uint32 { + if x != nil { + return x.Attempts + } + return 0 +} + +var File_proto_stored_counter_proto protoreflect.FileDescriptor + +var file_proto_stored_counter_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x2f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, + 0x72, 0x12, 0x28, 0x0a, 0x10, 0x6a, 0x6f, 0x62, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6a, 0x6f, 0x62, + 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 
0x74, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x42, 0x37, 0x5a, 0x35, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x69, 0x61, 0x67, 0x72, + 0x69, 0x64, 0x69, 0x6f, 0x2f, 0x67, 0x6f, 0x2d, 0x65, 0x74, 0x63, 0x64, 0x2d, 0x63, 0x72, 0x6f, + 0x6e, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_stored_counter_proto_rawDescOnce sync.Once + file_proto_stored_counter_proto_rawDescData = file_proto_stored_counter_proto_rawDesc +) + +func file_proto_stored_counter_proto_rawDescGZIP() []byte { + file_proto_stored_counter_proto_rawDescOnce.Do(func() { + file_proto_stored_counter_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_stored_counter_proto_rawDescData) + }) + return file_proto_stored_counter_proto_rawDescData +} + +var file_proto_stored_counter_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_proto_stored_counter_proto_goTypes = []interface{}{ + (*Counter)(nil), // 0: stored.Counter + (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp +} +var file_proto_stored_counter_proto_depIdxs = []int32{ + 1, // 0: stored.Counter.last_trigger:type_name -> google.protobuf.Timestamp + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_proto_stored_counter_proto_init() } +func file_proto_stored_counter_proto_init() { + if File_proto_stored_counter_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_stored_counter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Counter); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_stored_counter_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_stored_counter_proto_goTypes, + DependencyIndexes: file_proto_stored_counter_proto_depIdxs, + MessageInfos: file_proto_stored_counter_proto_msgTypes, + }.Build() + File_proto_stored_counter_proto = out.File + file_proto_stored_counter_proto_rawDesc = nil + file_proto_stored_counter_proto_goTypes = nil + file_proto_stored_counter_proto_depIdxs = nil +} diff --git a/pkg/scheduler/stored/job.pb.go b/pkg/scheduler/stored/job.pb.go new file mode 100644 index 000000000..3d4637b8e --- /dev/null +++ b/pkg/scheduler/stored/job.pb.go @@ -0,0 +1,253 @@ +// +//Copyright (c) 2024 Diagrid Inc. +//Licensed under the MIT License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.29.3 +// source: proto/stored/job.proto + +package stored + +import ( + api "github.com/diagridio/go-etcd-cron/api" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Job is the wrapped stored version of a Job which has a partition_id +// associated. 
+type Job struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // partion_id is an identifier for the job, used for distinguishing jobs with + // the same name and assigning the job to a partition. + // Doesn't need to be globally unique. + PartitionId uint64 `protobuf:"varint,1,opt,name=partition_id,json=partitionId,proto3" json:"partition_id,omitempty"` + // begin is the beginning time of the job. + // + // Types that are assignable to Begin: + // + // *Job_Start + // *Job_DueTime + Begin isJob_Begin `protobuf_oneof:"begin"` + // expiration is the optional time at which the job should no longer be + // scheduled and will be ignored and garbage collected thereafter. + // A job may be removed earlier if repeats are exhausted or schedule doesn't + // permit. + Expiration *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expiration,proto3,oneof" json:"expiration,omitempty"` + // job is the job spec. + Job *api.Job `protobuf:"bytes,5,opt,name=job,proto3" json:"job,omitempty"` +} + +func (x *Job) Reset() { + *x = Job{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_stored_job_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Job) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Job) ProtoMessage() {} + +func (x *Job) ProtoReflect() protoreflect.Message { + mi := &file_proto_stored_job_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Job.ProtoReflect.Descriptor instead. 
+func (*Job) Descriptor() ([]byte, []int) { + return file_proto_stored_job_proto_rawDescGZIP(), []int{0} +} + +func (x *Job) GetPartitionId() uint64 { + if x != nil { + return x.PartitionId + } + return 0 +} + +func (m *Job) GetBegin() isJob_Begin { + if m != nil { + return m.Begin + } + return nil +} + +func (x *Job) GetStart() *timestamppb.Timestamp { + if x, ok := x.GetBegin().(*Job_Start); ok { + return x.Start + } + return nil +} + +func (x *Job) GetDueTime() *timestamppb.Timestamp { + if x, ok := x.GetBegin().(*Job_DueTime); ok { + return x.DueTime + } + return nil +} + +func (x *Job) GetExpiration() *timestamppb.Timestamp { + if x != nil { + return x.Expiration + } + return nil +} + +func (x *Job) GetJob() *api.Job { + if x != nil { + return x.Job + } + return nil +} + +type isJob_Begin interface { + isJob_Begin() +} + +type Job_Start struct { + // start is the epoch time of the job whereby the clock starts on the + // schedule. The job _will not_ trigger at this time. + Start *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start,proto3,oneof"` +} + +type Job_DueTime struct { + // due_time is the epoch time of the job whereby the clock starts on the + // schedule. The job _will_ trigger at this time. 
+ DueTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=due_time,json=dueTime,proto3,oneof"` +} + +func (*Job_Start) isJob_Begin() {} + +func (*Job_DueTime) isJob_Begin() {} + +var File_proto_stored_job_proto protoreflect.FileDescriptor + +var file_proto_stored_job_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x2f, 0x6a, + 0x6f, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x13, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6a, 0x6f, 0x62, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8a, 0x02, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x21, + 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x64, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x08, 0x64, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x07, 0x64, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, + 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, + 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, + 0x1a, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4a, 0x6f, 0x62, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x42, 0x07, 0x0a, 0x05, 0x62, + 0x65, 0x67, 0x69, 0x6e, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x64, 0x69, 0x61, 0x67, 0x72, 0x69, 0x64, 0x69, 0x6f, 0x2f, 0x67, 0x6f, 0x2d, 0x65, + 0x74, 0x63, 0x64, 0x2d, 0x63, 0x72, 0x6f, 0x6e, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_stored_job_proto_rawDescOnce sync.Once + file_proto_stored_job_proto_rawDescData = file_proto_stored_job_proto_rawDesc +) + +func file_proto_stored_job_proto_rawDescGZIP() []byte { + file_proto_stored_job_proto_rawDescOnce.Do(func() { + file_proto_stored_job_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_stored_job_proto_rawDescData) + }) + return file_proto_stored_job_proto_rawDescData +} + +var file_proto_stored_job_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_proto_stored_job_proto_goTypes = []interface{}{ + (*Job)(nil), // 0: stored.Job + (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp + (*api.Job)(nil), // 2: api.Job +} +var file_proto_stored_job_proto_depIdxs = []int32{ + 1, // 0: stored.Job.start:type_name -> google.protobuf.Timestamp + 1, // 1: stored.Job.due_time:type_name -> google.protobuf.Timestamp + 1, // 2: stored.Job.expiration:type_name -> google.protobuf.Timestamp + 2, // 3: stored.Job.job:type_name -> api.Job + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is 
the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_proto_stored_job_proto_init() } +func file_proto_stored_job_proto_init() { + if File_proto_stored_job_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_stored_job_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Job); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_proto_stored_job_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Job_Start)(nil), + (*Job_DueTime)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_stored_job_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_stored_job_proto_goTypes, + DependencyIndexes: file_proto_stored_job_proto_depIdxs, + MessageInfos: file_proto_stored_job_proto_msgTypes, + }.Build() + File_proto_stored_job_proto = out.File + file_proto_stored_job_proto_rawDesc = nil + file_proto_stored_job_proto_goTypes = nil + file_proto_stored_job_proto_depIdxs = nil +} diff --git a/pkg/scheduler/stored/leadership.pb.go b/pkg/scheduler/stored/leadership.pb.go new file mode 100644 index 000000000..772970a63 --- /dev/null +++ b/pkg/scheduler/stored/leadership.pb.go @@ -0,0 +1,186 @@ +// +//Copyright (c) 2024 Diagrid Inc. +//Licensed under the MIT License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.33.0 +// protoc v5.29.3 +// source: proto/stored/leadership.proto + +package stored + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Leadership is the message written to the leadership table when the replica +// gains ownership of the leader key. +type Leadership struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // total is this replicas understanding of the total number of partition + // replicas. + Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + // uid is a unique identifier for this replica. Ensures a single replica + // is the leader for a given partition. + Uid uint64 `protobuf:"varint,2,opt,name=uid,proto3" json:"uid,omitempty"` + // replica_data is custom data that is associated with this leader (replica). + // All leader data will be sent to library consumer on leadership table + // updates. 
+ ReplicaData *anypb.Any `protobuf:"bytes,3,opt,name=replica_data,json=replicaData,proto3,oneof" json:"replica_data,omitempty"` +} + +func (x *Leadership) Reset() { + *x = Leadership{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_stored_leadership_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Leadership) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Leadership) ProtoMessage() {} + +func (x *Leadership) ProtoReflect() protoreflect.Message { + mi := &file_proto_stored_leadership_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Leadership.ProtoReflect.Descriptor instead. +func (*Leadership) Descriptor() ([]byte, []int) { + return file_proto_stored_leadership_proto_rawDescGZIP(), []int{0} +} + +func (x *Leadership) GetTotal() uint64 { + if x != nil { + return x.Total + } + return 0 +} + +func (x *Leadership) GetUid() uint64 { + if x != nil { + return x.Uid + } + return 0 +} + +func (x *Leadership) GetReplicaData() *anypb.Any { + if x != nil { + return x.ReplicaData + } + return nil +} + +var File_proto_stored_leadership_proto protoreflect.FileDescriptor + +var file_proto_stored_leadership_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x2f, 0x6c, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0b, 0x63, 0x72, 0x6f, 0x6e, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x01, 0x0a, 0x0a, 0x4c, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 
0x74, 0x61, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x10, 0x0a, 0x03, + 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x3c, + 0x0a, 0x0c, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x44, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, + 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x42, 0x37, 0x5a, + 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x69, 0x61, 0x67, + 0x72, 0x69, 0x64, 0x69, 0x6f, 0x2f, 0x67, 0x6f, 0x2d, 0x65, 0x74, 0x63, 0x64, 0x2d, 0x63, 0x72, + 0x6f, 0x6e, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_stored_leadership_proto_rawDescOnce sync.Once + file_proto_stored_leadership_proto_rawDescData = file_proto_stored_leadership_proto_rawDesc +) + +func file_proto_stored_leadership_proto_rawDescGZIP() []byte { + file_proto_stored_leadership_proto_rawDescOnce.Do(func() { + file_proto_stored_leadership_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_stored_leadership_proto_rawDescData) + }) + return file_proto_stored_leadership_proto_rawDescData +} + +var file_proto_stored_leadership_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_proto_stored_leadership_proto_goTypes = []interface{}{ + (*Leadership)(nil), // 0: cron.stored.Leadership + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_proto_stored_leadership_proto_depIdxs = []int32{ + 1, // 0: cron.stored.Leadership.replica_data:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for 
method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_proto_stored_leadership_proto_init() } +func file_proto_stored_leadership_proto_init() { + if File_proto_stored_leadership_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_stored_leadership_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Leadership); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_proto_stored_leadership_proto_msgTypes[0].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_stored_leadership_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_stored_leadership_proto_goTypes, + DependencyIndexes: file_proto_stored_leadership_proto_depIdxs, + MessageInfos: file_proto_stored_leadership_proto_msgTypes, + }.Build() + File_proto_stored_leadership_proto = out.File + file_proto_stored_leadership_proto_rawDesc = nil + file_proto_stored_leadership_proto_goTypes = nil + file_proto_stored_leadership_proto_depIdxs = nil +} diff --git a/pkg/standalone/list.go b/pkg/standalone/list.go index da25807b0..2a555b86d 100644 --- a/pkg/standalone/list.go +++ b/pkg/standalone/list.go @@ -14,6 +14,7 @@ limitations under the License. package standalone import ( + "path/filepath" "strconv" "strings" "time" @@ -30,23 +31,24 @@ import ( // ListOutput represents the application ID, application port and creation time. 
type ListOutput struct { - AppID string `csv:"APP ID" json:"appId" yaml:"appId"` - HTTPPort int `csv:"HTTP PORT" json:"httpPort" yaml:"httpPort"` - GRPCPort int `csv:"GRPC PORT" json:"grpcPort" yaml:"grpcPort"` - AppPort int `csv:"APP PORT" json:"appPort" yaml:"appPort"` - MetricsEnabled bool `csv:"-" json:"metricsEnabled" yaml:"metricsEnabled"` // Not displayed in table, consumed by dashboard. - Command string `csv:"COMMAND" json:"command" yaml:"command"` - Age string `csv:"AGE" json:"age" yaml:"age"` - Created string `csv:"CREATED" json:"created" yaml:"created"` - DaprdPID int `csv:"DAPRD PID" json:"daprdPid" yaml:"daprdPid"` - CliPID int `csv:"CLI PID" json:"cliPid" yaml:"cliPid"` - AppPID int `csv:"APP PID" json:"appPid" yaml:"appPid"` - MaxRequestBodySize int `csv:"-" json:"maxRequestBodySize" yaml:"maxRequestBodySize"` // Additional field, not displayed in table. - HTTPReadBufferSize int `csv:"-" json:"httpReadBufferSize" yaml:"httpReadBufferSize"` // Additional field, not displayed in table. - RunTemplatePath string `csv:"RUN_TEMPLATE_PATH" json:"runTemplatePath" yaml:"runTemplatePath"` - AppLogPath string `csv:"APP_LOG_PATH" json:"appLogPath" yaml:"appLogPath"` - DaprDLogPath string `csv:"DAPRD_LOG_PATH" json:"daprdLogPath" yaml:"daprdLogPath"` - RunTemplateName string `json:"runTemplateName" yaml:"runTemplateName"` // specifically omitted in csv output. + AppID string `csv:"APP ID" json:"appId" yaml:"appId"` + HTTPPort int `csv:"HTTP PORT" json:"httpPort" yaml:"httpPort"` + GRPCPort int `csv:"GRPC PORT" json:"grpcPort" yaml:"grpcPort"` + AppPort int `csv:"APP PORT" json:"appPort" yaml:"appPort"` + MetricsEnabled bool `csv:"-" json:"metricsEnabled" yaml:"metricsEnabled"` // Not displayed in table, consumed by dashboard. 
+ Command string `csv:"COMMAND" json:"command" yaml:"command"` + Age string `csv:"AGE" json:"age" yaml:"age"` + Created string `csv:"CREATED" json:"created" yaml:"created"` + DaprdPID int `csv:"DAPRD PID" json:"daprdPid" yaml:"daprdPid"` + CliPID int `csv:"CLI PID" json:"cliPid" yaml:"cliPid"` + AppPID int `csv:"APP PID" json:"appPid" yaml:"appPid"` + MaxRequestBodySize int `csv:"-" json:"maxRequestBodySize" yaml:"maxRequestBodySize"` // Additional field, not displayed in table. + HTTPReadBufferSize int `csv:"-" json:"httpReadBufferSize" yaml:"httpReadBufferSize"` // Additional field, not displayed in table. + RunTemplatePath string `csv:"RUN_TEMPLATE_PATH" json:"runTemplatePath" yaml:"runTemplatePath"` + AppLogPath string `csv:"APP_LOG_PATH" json:"appLogPath" yaml:"appLogPath"` + DaprDLogPath string `csv:"DAPRD_LOG_PATH" json:"daprdLogPath" yaml:"daprdLogPath"` + RunTemplateName string `json:"runTemplateName" yaml:"runTemplateName"` // specifically omitted in csv output. + ResourcePaths []string `csv:"-" json:"-" yaml:"-"` } func (d *daprProcess) List() ([]ListOutput, error) { @@ -64,7 +66,7 @@ func List() ([]ListOutput, error) { // Populates the map if all data is available for the sidecar. for _, proc := range processes { - executable := strings.ToLower(proc.Executable()) + executable := filepath.Base(strings.ToLower(proc.Executable())) if (executable == "daprd") || (executable == "daprd.exe") { procDetails, err := process.NewProcess(int32(proc.Pid())) //nolint:gosec if err != nil { @@ -81,15 +83,32 @@ func List() ([]ListOutput, error) { continue } + var resourcePaths []string + for i, item := range cmdLineItems { + if strings.HasPrefix(item, "--resources-path") { + if strings.Contains(item, "=") { + resourcePaths = strings.SplitN(item, "=", 2)[1:] + } else { + resourcePaths = append(resourcePaths, cmdLineItems[i+1]) + } + } + } + // Parse command line arguments, example format for cmdLine `daprd --flag1 value1 --enable-flag2 --flag3 value3`. 
argumentsMap := make(map[string]string) for i := 1; i < len(cmdLineItems)-1; { - if !strings.HasPrefix(cmdLineItems[i+1], "--") { - argumentsMap[cmdLineItems[i]] = cmdLineItems[i+1] - i += 2 - } else { - argumentsMap[cmdLineItems[i]] = "" + split := strings.Split(cmdLineItems[i], "=") + if len(split) > 1 { + argumentsMap[split[0]] = split[1] i++ + } else { + if !strings.HasPrefix(cmdLineItems[i+1], "--") { + argumentsMap[cmdLineItems[i]] = cmdLineItems[i+1] + i += 2 + } else { + argumentsMap[cmdLineItems[i]] = "" + i++ + } } } @@ -167,6 +186,7 @@ func List() ([]ListOutput, error) { RunTemplateName: runTemplateName, AppLogPath: appLogPath, DaprDLogPath: daprdLogPath, + ResourcePaths: resourcePaths, } // filter only dashboard instance. diff --git a/pkg/standalone/standalone.go b/pkg/standalone/standalone.go index 86d23dd7a..7adc86aeb 100644 --- a/pkg/standalone/standalone.go +++ b/pkg/standalone/standalone.go @@ -88,7 +88,7 @@ const ( schedulerHealthPort = 58081 schedulerMetricPort = 59091 - schedulerEtcdPort = 52379 + schedulerEtcdPort = 2379 daprVersionsWithScheduler = ">= 1.14.x" ) @@ -693,6 +693,10 @@ func runSchedulerService(wg *sync.WaitGroup, errorChan chan<- error, info initIn } } + if schedulerEtcdClientListenAddress(info) { + args = append(args, "--etcd-client-listen-address=0.0.0.0") + } + _, err = utils.RunCmdAndWait(runtimeCmd, args...) if err != nil { runError := isContainerRunError(err) @@ -721,6 +725,21 @@ func schedulerOverrideHostPort(info initInfo) bool { return runV.GreaterThan(v115rc5) } +func schedulerEtcdClientListenAddress(info initInfo) bool { + if info.runtimeVersion == "edge" || info.runtimeVersion == "dev" { + return true + } + + runV, err := semver.NewVersion(info.runtimeVersion) + if err != nil { + return true + } + + v1160, _ := semver.NewVersion("1.16.0") + + return runV.GreaterThan(v1160) +} + func moveDashboardFiles(extractedFilePath string, dir string) (string, error) { // Move /release/os/web directory to /web. 
oldPath := path_filepath.Join(path_filepath.Dir(extractedFilePath), "web") diff --git a/pkg/workflow/db/db.go b/pkg/workflow/db/db.go new file mode 100644 index 000000000..5cfac217a --- /dev/null +++ b/pkg/workflow/db/db.go @@ -0,0 +1,19 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package db + +type ListOptions struct { + Namespace string + AppID string +} diff --git a/pkg/workflow/db/mongo.go b/pkg/workflow/db/mongo.go new file mode 100644 index 000000000..a13e4484d --- /dev/null +++ b/pkg/workflow/db/mongo.go @@ -0,0 +1,79 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package db + +import ( + "context" + "fmt" + "regexp" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/mongo/readpref" +) + +func Mongo(ctx context.Context, uri string) (*mongo.Client, error) { + client, err := mongo.Connect(ctx, options.Client().ApplyURI(uri)) + if err != nil { + return nil, err + } + if err := client.Ping(ctx, readpref.Primary()); err != nil { + _ = client.Disconnect(ctx) + return nil, err + } + return client, nil +} + +func ListMongo(ctx context.Context, db *mongo.Database, collection string, opts ListOptions) ([]string, error) { + coll := db.Collection(collection) + + ns := regexp.QuoteMeta(opts.Namespace) + app := regexp.QuoteMeta(opts.AppID) + + prefix := fmt.Sprintf("%s\\|\\|dapr\\.internal\\.%s\\.%s\\.workflow\\|\\|", app, ns, app) + suffix := "\\|\\|metadata" + regex := fmt.Sprintf("^%s.*%s$", prefix, suffix) + + filter := bson.M{ + "key": bson.M{ + "$regex": regex, + "$options": "", + }, + } + + findOpts := options.Find().SetProjection(bson.M{"_id": 0, "key": 1}) + + cur, err := coll.Find(ctx, filter, findOpts) + if err != nil { + return nil, err + } + defer cur.Close(ctx) + + var keys []string + for cur.Next(ctx) { + var doc struct { + Key string `bson:"key"` + } + if err := cur.Decode(&doc); err != nil { + return nil, err + } + keys = append(keys, doc.Key) + } + if err := cur.Err(); err != nil { + return nil, err + } + + return keys, nil +} diff --git a/pkg/workflow/db/redis.go b/pkg/workflow/db/redis.go new file mode 100644 index 000000000..1f8718fcd --- /dev/null +++ b/pkg/workflow/db/redis.go @@ -0,0 +1,62 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package db + +import ( + "context" + "fmt" + + "github.com/redis/go-redis/v9" +) + +func Redis(ctx context.Context, url string) (*redis.Client, error) { + opt, err := redis.ParseURL(url) + if err != nil { + return nil, err + + } + + rdb := redis.NewClient(opt) + if err := rdb.Ping(ctx).Err(); err != nil { + return nil, err + } + + return rdb, nil +} + +func ListRedis(ctx context.Context, rdb *redis.Client, opts ListOptions) ([]string, error) { + pattern := fmt.Sprintf("%s||dapr.internal.%s.%s.workflow||*||metadata", + opts.AppID, opts.Namespace, opts.AppID) + + var ( + cursor uint64 + keys []string + ) + + const scanCount int64 = 1000 + + for { + res, nextCursor, err := rdb.Scan(ctx, cursor, pattern, scanCount).Result() + if err != nil { + return nil, err + } + keys = append(keys, res...) + cursor = nextCursor + if cursor == 0 { + break + } + } + + return keys, nil +} diff --git a/pkg/workflow/db/sql.go b/pkg/workflow/db/sql.go new file mode 100644 index 000000000..fd1b30dcc --- /dev/null +++ b/pkg/workflow/db/sql.go @@ -0,0 +1,64 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package db + +import ( + "context" + "database/sql" + "fmt" + + _ "github.com/jackc/pgx/v5/stdlib" + _ "github.com/mattn/go-sqlite3" + _ "github.com/microsoft/go-mssqldb" + _ "github.com/sijms/go-ora/v2" +) + +func SQL(ctx context.Context, driver, connString string) (*sql.DB, error) { + db, err := sql.Open(driver, connString) + if err != nil { + return nil, err + } + if err := db.PingContext(ctx); err != nil { + return nil, err + } + return db, nil +} + +func ListSQL(ctx context.Context, db *sql.DB, table string, opts ListOptions) ([]string, error) { + query := fmt.Sprintf(`SELECT key FROM "%s" WHERE key LIKE ?;`, table) + like := opts.AppID + "||dapr.internal." + opts.Namespace + "." + opts.AppID + ".workflow||%||metadata" + + rows, err := db.QueryContext(ctx, query, like) + if err != nil { + return nil, err + } + + defer rows.Close() + + var keys []string + for rows.Next() { + var key string + if err := rows.Scan(&key); err != nil { + return nil, err + } + + keys = append(keys, key) + } + + if err := rows.Err(); err != nil { + return nil, err + } + + return keys, nil +} diff --git a/pkg/workflow/dclient/dclient.go b/pkg/workflow/dclient/dclient.go new file mode 100644 index 000000000..c94b8b15d --- /dev/null +++ b/pkg/workflow/dclient/dclient.go @@ -0,0 +1,220 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dclient + +import ( + "context" + "fmt" + "slices" + "strconv" + + "github.com/dapr/cli/pkg/kubernetes" + "github.com/dapr/cli/pkg/standalone" + "github.com/dapr/dapr/pkg/apis/components/v1alpha1" + "github.com/dapr/dapr/pkg/components/loader" + "github.com/dapr/go-sdk/client" + "github.com/dapr/kit/ptr" +) + +type Client struct { + Dapr client.Client + Cancel context.CancelFunc + StateStoreDriver string + ConnectionString *string + TableName *string +} + +func DaprClient(ctx context.Context, kubernetesMode bool, namespace, appID string) (*Client, error) { + client.SetLogger(nil) + + var client *Client + var err error + if kubernetesMode { + client, err = kube(namespace, appID) + } else { + client, err = stand(ctx, appID) + } + + return client, err +} + +func stand(ctx context.Context, appID string) (*Client, error) { + list, err := standalone.List() + if err != nil { + return nil, err + } + + var proc *standalone.ListOutput + for _, c := range list { + if c.AppID == appID { + proc = &c + break + } + } + + if proc == nil { + return nil, fmt.Errorf("Dapr app with id '%s' not found", appID) + } + + comps, err := loader.NewLocalLoader(appID, proc.ResourcePaths).Load(ctx) + if err != nil { + return nil, err + } + + c, err := clientFromComponents(comps, appID, strconv.Itoa(proc.GRPCPort)) + if err != nil { + return nil, err + } + c.Cancel = func() {} + + return c, nil +} + +func kube(namespace string, appID string) (*Client, error) { + list, err := kubernetes.List(namespace) + if err != nil { + return nil, err + } + + var pod *kubernetes.ListOutput + for _, p := range list { + if p.AppID == appID { + pod = &p + break + } + } + + if pod == nil { + return nil, fmt.Errorf("Dapr app with id '%s' not found in namespace %s", appID, namespace) + } + + config, _, err := kubernetes.GetKubeConfigClient() + if err != nil { + return nil, err + } + + port, err := strconv.Atoi(pod.DaprGRPCPort) + if err != nil { + return nil, err + } + + portForward, err := 
kubernetes.NewPortForward( + config, + namespace, + pod.PodName, + "localhost", + port, + port, + false, + ) + if err != nil { + return nil, err + } + + if err = portForward.Init(); err != nil { + return nil, err + } + + kclient, err := kubernetes.DaprClient() + if err != nil { + return nil, err + } + + comps, err := kubernetes.ListComponents(kclient, pod.Namespace) + if err != nil { + return nil, err + } + + c, err := clientFromComponents(comps.Items, appID, pod.DaprGRPCPort) + if err != nil { + portForward.Stop() + } + + c.Cancel = portForward.Stop + + return c, nil +} + +func clientFromComponents(comps []v1alpha1.Component, appID string, port string) (*Client, error) { + var comp *v1alpha1.Component + for _, c := range comps { + for _, meta := range c.Spec.Metadata { + if meta.Name == "actorStateStore" && meta.Value.String() == "true" { + comp = &c + break + } + } + } + + if comp == nil { + return nil, fmt.Errorf("no state store configured for app id %s", appID) + } + + driver, err := driverFromType(comp.Spec.Type) + if err != nil { + return nil, err + } + + client, err := client.NewClientWithAddress("localhost:" + port) + if err != nil { + return nil, err + } + + var tableName *string + for _, meta := range comp.Spec.Metadata { + switch meta.Name { + case "tableName": + tableName = ptr.Of(meta.Value.String()) + } + } + + return &Client{ + Dapr: client, + StateStoreDriver: driver, + TableName: tableName, + }, nil +} + +func driverFromType(v string) (string, error) { + switch v { + case "state.mysql": + return "mysql", nil + case "state.postgresql": + return "pgx", nil + case "state.sqlserver": + return "sqlserver", nil + case "state.sqlite": + return "sqlite3", nil + case "state.oracledatabase": + return "oracle", nil + case "state.cockroachdb": + return "pgx", nil + case "state.redis": + return "redis", nil + case "state.mongodb": + return "mongodb", nil + default: + return "", fmt.Errorf("unsupported state store type: %s", v) + } +} + +func IsSQLDriver(driver 
string) bool { + return slices.Contains([]string{ + "mysql", + "pgx", + "sqlserver", + "sqlite3", + "oracle", + }, driver) +} diff --git a/pkg/workflow/events.go b/pkg/workflow/events.go new file mode 100644 index 000000000..94e9eed86 --- /dev/null +++ b/pkg/workflow/events.go @@ -0,0 +1,112 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "context" + + "github.com/dapr/cli/pkg/workflow/dclient" + "github.com/dapr/durabletask-go/workflow" +) + +type RaiseEventOptions struct { + KubernetesMode bool + Namespace string + AppID string + InstanceID string + Name string + Input *string +} + +func RaiseEvent(ctx context.Context, opts RaiseEventOptions) error { + cli, err := dclient.DaprClient(ctx, opts.KubernetesMode, opts.Namespace, opts.AppID) + if err != nil { + return err + } + defer cli.Cancel() + + wf := workflow.NewClient(cli.Dapr.GrpcClientConn()) + + var wopts []workflow.RaiseEventOptions + if opts.Input != nil { + wopts = append(wopts, workflow.WithEventPayload(*opts.Input)) + } + + return wf.RaiseEvent(ctx, opts.InstanceID, opts.Name, wopts...) 
+} + +type SuspendOptions struct { + KubernetesMode bool + Namespace string + AppID string + InstanceID string + Reason string +} + +func Suspend(ctx context.Context, opts SuspendOptions) error { + cli, err := dclient.DaprClient(ctx, opts.KubernetesMode, opts.Namespace, opts.AppID) + if err != nil { + return err + } + defer cli.Cancel() + + wf := workflow.NewClient(cli.Dapr.GrpcClientConn()) + + return wf.SuspendWorkflow(ctx, opts.InstanceID, opts.Reason) +} + +type ResumeOptions struct { + KubernetesMode bool + Namespace string + AppID string + InstanceID string + Reason string +} + +func Resume(ctx context.Context, opts ResumeOptions) error { + cli, err := dclient.DaprClient(ctx, opts.KubernetesMode, opts.Namespace, opts.AppID) + if err != nil { + return err + } + defer cli.Cancel() + + wf := workflow.NewClient(cli.Dapr.GrpcClientConn()) + + return wf.ResumeWorkflow(ctx, opts.InstanceID, opts.Reason) +} + +type TerminateOptions struct { + KubernetesMode bool + Namespace string + AppID string + InstanceID string + Output *string +} + +func Terminate(ctx context.Context, opts TerminateOptions) error { + cli, err := dclient.DaprClient(ctx, opts.KubernetesMode, opts.Namespace, opts.AppID) + if err != nil { + return err + } + defer cli.Cancel() + + wf := workflow.NewClient(cli.Dapr.GrpcClientConn()) + + var wopts []workflow.TerminateOptions + if opts.Output != nil { + wopts = append(wopts, workflow.WithOutput(*opts.Output)) + } + + return wf.TerminateWorkflow(ctx, opts.InstanceID, wopts...) +} diff --git a/pkg/workflow/history.go b/pkg/workflow/history.go new file mode 100644 index 000000000..c6fb2722f --- /dev/null +++ b/pkg/workflow/history.go @@ -0,0 +1,517 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sort" + "strings" + "time" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/wrapperspb" + + "github.com/dapr/cli/pkg/workflow/dclient" + "github.com/dapr/cli/utils" + "github.com/dapr/durabletask-go/api/protos" + "github.com/dapr/durabletask-go/workflow" + "github.com/dapr/go-sdk/client" + "github.com/dapr/kit/ptr" +) + +const maxHistoryEntries = 100 + +type HistoryOptions struct { + KubernetesMode bool + Namespace string + AppID string + InstanceID string +} + +type HistoryOutputWide struct { + Namespace string `csv:"-" json:"namespace,omitempty" yaml:"namespace,omitempty"` + AppID string `csv:"-" json:"appID" yaml:"appID"` + Play int `csv:"PLAY" json:"play" yaml:"play"` + Type string `csv:"TYPE" json:"type" yaml:"type"` + Name *string `csv:"NAME" json:"name" yaml:"name"` + EventID *int32 `csv:"EVENTID,omitempty" json:"eventId,omitempty" yaml:"eventId,omitempty"` + Timestamp time.Time `csv:"TIMESTAMP" json:"timestamp" yaml:"timestamp"` + Elapsed string `csv:"ELAPSED" json:"elapsed" yaml:"elapsed"` + Status string `csv:"STATUS" json:"status" yaml:"status"` + Details *string `csv:"DETAILS" json:"details" yaml:"details"` + Router *string `csv:"ROUTER,omitempty" json:"router,omitempty" yaml:"router,omitempty"` + ExecutionID *string `csv:"EXECUTION_ID,omitempty" json:"executionId,omitempty" yaml:"executionId,omitempty"` + + Attrs *string `csv:"ATTRS,omitempty" json:"attrs,omitempty" yaml:"attrs,omitempty"` +} + 
+type HistoryOutputShort struct { + Type string `csv:"TYPE" json:"type" yaml:"type"` + Name string `csv:"NAME" json:"name" yaml:"name"` + EventID string `csv:"EVENTID,omitempty" json:"eventId,omitempty" yaml:"eventId,omitempty"` + Elapsed string `csv:"ELAPSED" json:"elapsed" yaml:"elapsed"` + Status string `csv:"STATUS" json:"status" yaml:"status"` + Details string `csv:"DETAILS" json:"details" yaml:"details"` +} + +func HistoryShort(ctx context.Context, opts HistoryOptions) ([]*HistoryOutputShort, error) { + wide, err := HistoryWide(ctx, opts) + if err != nil { + return nil, err + } + + short := make([]*HistoryOutputShort, 0, len(wide)) + for _, w := range wide { + s := &HistoryOutputShort{ + Name: "-", + EventID: "-", + Type: w.Type, + Elapsed: w.Elapsed, + Status: w.Status, + Details: "-", + } + + if w.Name != nil { + s.Name = *w.Name + } + + if w.Details != nil { + s.Details = *w.Details + } + if w.EventID != nil { + s.EventID = fmt.Sprintf("%d", *w.EventID) + } + + short = append(short, s) + } + + return short, nil +} + +func HistoryWide(ctx context.Context, opts HistoryOptions) ([]*HistoryOutputWide, error) { + cli, err := dclient.DaprClient(ctx, opts.KubernetesMode, opts.Namespace, opts.AppID) + if err != nil { + return nil, err + } + defer cli.Cancel() + + history, err := fetchHistory(ctx, + cli.Dapr, + "dapr.internal."+opts.Namespace+"."+opts.AppID+".workflow", + opts.InstanceID, + ) + if err != nil { + return nil, err + } + + // Sort: EventId if both present, else Timestamp + sort.SliceStable(history, func(i, j int) bool { + ei, ej := history[i], history[j] + if ei.EventId > 0 && ej.EventId > 0 { + return ei.EventId < ej.EventId + } + ti, tj := ei.GetTimestamp().AsTime(), ej.GetTimestamp().AsTime() + if !ti.Equal(tj) { + return ti.Before(tj) + } + return ei.EventId < ej.EventId + }) + + var rows []*HistoryOutputWide + var prevTs time.Time + replay := 0 + + for idx, ev := range history { + ts := ev.GetTimestamp().AsTime() + if idx == 0 { + prevTs = ts + } 
+ + if _, ok := ev.GetEventType().(*protos.HistoryEvent_OrchestratorStarted); ok { + replay++ + } + + row := &HistoryOutputWide{ + Namespace: opts.Namespace, + AppID: opts.AppID, + Play: replay, + Type: eventTypeName(ev), + Name: deriveName(ev), + Timestamp: ts.Truncate(time.Second), + Status: deriveStatus(ev), + Details: deriveDetails(history[0], ev), + } + + if idx == 0 { + row.Elapsed = "Age:" + utils.HumanizeDuration(time.Since(ts)) + } else { + row.Elapsed = utils.HumanizeDuration(ts.Sub(prevTs)) + } + + prevTs = ts + + if ev.EventId > 0 { + row.EventID = ptr.Of(ev.EventId) + } + row.Router = routerStr(ev.Router) + + switch t := ev.GetEventType().(type) { + case *protos.HistoryEvent_ExecutionStarted: + if t.ExecutionStarted.OrchestrationInstance != nil && + t.ExecutionStarted.OrchestrationInstance.ExecutionId != nil { + execID := t.ExecutionStarted.OrchestrationInstance.ExecutionId.Value + row.ExecutionID = &execID + } + if t.ExecutionStarted.Input != nil { + row.addAttr("input", trim(t.ExecutionStarted.Input, 120)) + } + if len(t.ExecutionStarted.Tags) > 0 { + row.addAttr("tags", flatTags(t.ExecutionStarted.Tags, 6)) + } + case *protos.HistoryEvent_TaskScheduled: + if row.EventID == nil { + row.EventID = ptr.Of(int32(0)) + } + if t.TaskScheduled.TaskExecutionId != "" { + row.ExecutionID = ptr.Of(t.TaskScheduled.TaskExecutionId) + } + if t.TaskScheduled.Input != nil { + row.addAttr("input", trim(t.TaskScheduled.Input, 120)) + } + case *protos.HistoryEvent_TaskCompleted: + row.addAttr("scheduledId", fmt.Sprintf("%d", t.TaskCompleted.TaskScheduledId)) + if t.TaskCompleted.TaskExecutionId != "" { + row.ExecutionID = ptr.Of(t.TaskCompleted.TaskExecutionId) + } + if t.TaskCompleted.Result != nil { + row.addAttr("result", trim(t.TaskCompleted.Result, 120)) + } + case *protos.HistoryEvent_TaskFailed: + row.addAttr("scheduledId", fmt.Sprintf("%d", t.TaskFailed.TaskScheduledId)) + if t.TaskFailed.TaskExecutionId != "" { + row.ExecutionID = 
ptr.Of(t.TaskFailed.TaskExecutionId) + } + if fd := t.TaskFailed.FailureDetails; fd != nil { + if fd.ErrorType != "" { + row.addAttr("errorType", fd.ErrorType) + } + if fd.ErrorMessage != "" { + row.addAttr("errorMsg", trim(wrapperspb.String(fd.ErrorMessage), 160)) + } + if fd.IsNonRetriable { + row.addAttr("nonRetriable", "true") + } + } + case *protos.HistoryEvent_TimerCreated: + if row.EventID == nil { + row.EventID = ptr.Of(int32(0)) + } + if t.TimerCreated.Name != nil { + row.addAttr("timerName", *t.TimerCreated.Name) + } + row.addAttr("fireAt", t.TimerCreated.FireAt.AsTime().Format(time.RFC3339)) + case *protos.HistoryEvent_TimerFired: + row.addAttr("timerId", fmt.Sprintf("%d", t.TimerFired.TimerId)) + row.addAttr("fireAt", t.TimerFired.FireAt.AsTime().Format(time.RFC3339)) + case *protos.HistoryEvent_EventRaised: + row.addAttr("eventName", t.EventRaised.Name) + if t.EventRaised.Input != nil { + row.addAttr("payload", trim(t.EventRaised.Input, 120)) + } + case *protos.HistoryEvent_EventSent: + row.addAttr("eventName", t.EventSent.Name) + if t.EventSent.Input != nil { + row.addAttr("payload", trim(t.EventSent.Input, 120)) + } + row.addAttr("targetInstance", t.EventSent.InstanceId) + case *protos.HistoryEvent_ExecutionCompleted: + if t.ExecutionCompleted.Result != nil { + row.addAttr("output", trim(t.ExecutionCompleted.Result, 160)) + } + if fd := t.ExecutionCompleted.FailureDetails; fd != nil { + if fd.ErrorType != "" { + row.addAttr("failureType", fd.ErrorType) + } + if fd.ErrorMessage != "" { + row.addAttr("failureMessage", trim(wrapperspb.String(fd.ErrorMessage), 160)) + } + } + } + + rows = append(rows, row) + } + + return rows, nil +} + +func fetchHistory(ctx context.Context, cl client.Client, actorType, instanceID string) ([]*protos.HistoryEvent, error) { + var events []*protos.HistoryEvent + for startIndex := 0; startIndex <= 1; startIndex++ { + if len(events) > 0 { + break + } + + for i := startIndex; i < maxHistoryEntries; i++ { + key := 
fmt.Sprintf("history-%06d", i) + + resp, err := cl.GetActorState(ctx, &client.GetActorStateRequest{ + ActorType: actorType, + ActorID: instanceID, + KeyName: key, + }) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { + return nil, err + } + break + } + + if resp == nil || len(resp.Data) == 0 { + break + } + + var event protos.HistoryEvent + if err = decodeKey(resp.Data, &event); err != nil { + return nil, fmt.Errorf("failed to decode history event %s: %w", key, err) + } + + events = append(events, &event) + } + } + + return events, nil +} + +func decodeKey(data []byte, item proto.Message) error { + if len(data) == 0 { + return fmt.Errorf("empty value") + } + + if err := protojson.Unmarshal(data, item); err == nil { + return nil + } + + if unquoted, err := unquoteJSON(data); err == nil { + if err := protojson.Unmarshal([]byte(unquoted), item); err == nil { + return nil + } + } + + if err := proto.Unmarshal(data, item); err == nil { + return nil + } + + return fmt.Errorf("unable to decode history event (len=%d)", len(data)) +} + +func unquoteJSON(data []byte) (string, error) { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return "", err + } + return s, nil +} + +func eventTypeName(h *protos.HistoryEvent) string { + switch h.GetEventType().(type) { + case *protos.HistoryEvent_ExecutionStarted: + return "ExecutionStarted" + case *protos.HistoryEvent_ExecutionCompleted: + return "ExecutionCompleted" + case *protos.HistoryEvent_ExecutionTerminated: + return "ExecutionTerminated" + case *protos.HistoryEvent_TaskScheduled: + return "TaskScheduled" + case *protos.HistoryEvent_TaskCompleted: + return "TaskCompleted" + case *protos.HistoryEvent_TaskFailed: + return "TaskFailed" + case *protos.HistoryEvent_SubOrchestrationInstanceCreated: + return "SubOrchCreated" + case *protos.HistoryEvent_SubOrchestrationInstanceCompleted: + return "SubOrchCompleted" + case 
*protos.HistoryEvent_SubOrchestrationInstanceFailed: + return "SubOrchFailed" + case *protos.HistoryEvent_TimerCreated: + return "TimerCreated" + case *protos.HistoryEvent_TimerFired: + return "TimerFired" + case *protos.HistoryEvent_OrchestratorStarted: + return "OrchestratorStarted" + case *protos.HistoryEvent_OrchestratorCompleted: + return "OrchestratorCompleted" + case *protos.HistoryEvent_EventSent: + return "EventSent" + case *protos.HistoryEvent_EventRaised: + return "EventRaised" + case *protos.HistoryEvent_GenericEvent: + return "GenericEvent" + case *protos.HistoryEvent_HistoryState: + return "HistoryState" + case *protos.HistoryEvent_ContinueAsNew: + return "ContinueAsNew" + case *protos.HistoryEvent_ExecutionSuspended: + return "ExecutionSuspended" + case *protos.HistoryEvent_ExecutionResumed: + return "ExecutionResumed" + case *protos.HistoryEvent_EntityOperationSignaled: + return "EntitySignaled" + case *protos.HistoryEvent_EntityOperationCalled: + return "EntityCalled" + case *protos.HistoryEvent_EntityOperationCompleted: + return "EntityCompleted" + case *protos.HistoryEvent_EntityOperationFailed: + return "EntityFailed" + case *protos.HistoryEvent_EntityLockRequested: + return "EntityLockRequested" + case *protos.HistoryEvent_EntityLockGranted: + return "EntityLockGranted" + case *protos.HistoryEvent_EntityUnlockSent: + return "EntityUnlockSent" + default: + return "Unknown" + } +} + +func deriveName(h *protos.HistoryEvent) *string { + switch t := h.GetEventType().(type) { + case *protos.HistoryEvent_TaskScheduled: + return ptr.Of(t.TaskScheduled.Name) + case *protos.HistoryEvent_TaskCompleted: + return nil + case *protos.HistoryEvent_TaskFailed: + return nil + case *protos.HistoryEvent_SubOrchestrationInstanceCreated: + return ptr.Of(t.SubOrchestrationInstanceCreated.Name) + case *protos.HistoryEvent_TimerCreated: + if t.TimerCreated.Name != nil { + return ptr.Of(*t.TimerCreated.Name) + } + case *protos.HistoryEvent_EventRaised: + return 
ptr.Of(t.EventRaised.Name) + case *protos.HistoryEvent_EventSent: + return ptr.Of(t.EventSent.Name) + case *protos.HistoryEvent_ExecutionStarted: + return ptr.Of(t.ExecutionStarted.Name) + } + return nil +} + +func deriveStatus(h *protos.HistoryEvent) string { + switch t := h.GetEventType().(type) { + case *protos.HistoryEvent_TaskFailed: + return "FAILED" + case *protos.HistoryEvent_ExecutionCompleted: + return (workflow.WorkflowMetadata{RuntimeStatus: t.ExecutionCompleted.OrchestrationStatus}).String() + case *protos.HistoryEvent_ExecutionTerminated: + return "TERMINATED" + case *protos.HistoryEvent_ExecutionSuspended: + return "SUSPENDED" + case *protos.HistoryEvent_ExecutionResumed: + return "RESUMED" + default: + return "RUNNING" + } +} + +func deriveDetails(first *protos.HistoryEvent, h *protos.HistoryEvent) *string { + switch t := h.GetEventType().(type) { + case *protos.HistoryEvent_TaskScheduled: + ver := "" + if t.TaskScheduled.Version != nil && t.TaskScheduled.Version.Value != "" { + ver = " v" + t.TaskScheduled.Version.Value + } + return ptr.Of(fmt.Sprintf("activity=%s%s", t.TaskScheduled.Name, ver)) + case *protos.HistoryEvent_TimerCreated: + return ptr.Of(fmt.Sprintf("fireAt=%s", t.TimerCreated.FireAt.AsTime().Format(time.RFC3339))) + case *protos.HistoryEvent_EventRaised: + return ptr.Of(fmt.Sprintf("event=%s", t.EventRaised.Name)) + case *protos.HistoryEvent_EventSent: + return ptr.Of(fmt.Sprintf("event=%s -> %s", t.EventSent.Name, t.EventSent.InstanceId)) + case *protos.HistoryEvent_ExecutionStarted: + return ptr.Of("orchestration start") + case *protos.HistoryEvent_OrchestratorStarted: + return ptr.Of("replay cycle start") + case *protos.HistoryEvent_TaskCompleted: + return ptr.Of(fmt.Sprintf("eventId=%d", t.TaskCompleted.TaskScheduledId)) + case *protos.HistoryEvent_ExecutionCompleted: + return ptr.Of(fmt.Sprintf("execDuration=%s", utils.HumanizeDuration(h.GetTimestamp().AsTime().Sub(first.GetTimestamp().AsTime())))) + default: + return nil + } 
+} + +func routerStr(rt *protos.TaskRouter) *string { + if rt == nil { + return nil + } + if rt.TargetAppID != nil { + return ptr.Of(fmt.Sprintf("%s->%s", rt.SourceAppID, *rt.TargetAppID)) + } + return ptr.Of(rt.SourceAppID) +} + +func (h *HistoryOutputWide) addAttr(key, val string) { + if val == "" { + return + } + if h.Attrs == nil { + h.Attrs = ptr.Of(key + "=" + val) + return + } + *h.Attrs += ";" + key + "=" + val +} + +func flatTags(tags map[string]string, max int) string { + i := 0 + var parts []string + for k, v := range tags { + parts = append(parts, fmt.Sprintf("%s=%s", k, v)) + i++ + if i >= max { + break + } + } + sort.Strings(parts) + s := strings.Join(parts, ",") + if len(tags) > max { + s += ",…" + } + return s +} + +func trim(ww *wrapperspb.StringValue, limit int) string { + if ww == nil { + return "" + } + + s, err := unquoteJSON([]byte(ww.Value)) + if err != nil { + s = ww.Value + } + + if limit <= 0 || len(s) <= limit { + return s + } + + r := []rune(s) + if len(r) <= limit { + return s + } + return string(r[:limit]) + "…" +} diff --git a/pkg/workflow/list.go b/pkg/workflow/list.go new file mode 100644 index 000000000..d3e7a7966 --- /dev/null +++ b/pkg/workflow/list.go @@ -0,0 +1,199 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + "github.com/dapr/cli/pkg/workflow/dclient" + "github.com/dapr/durabletask-go/api" + "github.com/dapr/durabletask-go/api/protos" + "github.com/dapr/durabletask-go/workflow" + "github.com/dapr/go-sdk/client" + "github.com/dapr/kit/ptr" + "k8s.io/apimachinery/pkg/util/duration" +) + +type ListOptions struct { + KubernetesMode bool + Namespace string + AppID string + ConnectionString *string + TableName *string + Filter Filter +} + +type Filter struct { + Name *string + Status *string + MaxAge *time.Time + Terminal bool +} + +type ListOutputShort struct { + Namespace string `csv:"-" json:"namespace" yaml:"namespace"` + AppID string `csv:"-" json:"appID" yaml:"appID"` + Name string `csv:"NAME" json:"name" yaml:"name"` + InstanceID string `csv:"ID" json:"instanceID" yaml:"instanceID"` + RuntimeStatus string `csv:"STATUS" json:"runtimeStatus" yaml:"runtimeStatus"` + CustomStatus string `csv:"CUSTOM STATUS" json:"customStatus" yaml:"customStatus"` + Age string `csv:"AGE" json:"age" yaml:"age"` +} + +type ListOutputWide struct { + Namespace string `csv:"NAMESPACE" json:"namespace" yaml:"namespace"` + AppID string `csv:"APP ID" json:"appID" yaml:"appID"` + Name string `csv:"Name" json:"name" yaml:"name"` + InstanceID string `csv:"INSTANCE ID" json:"instanceID" yaml:"instanceID"` + Created time.Time `csv:"CREATED" json:"created" yaml:"created"` + LastUpdate time.Time `csv:"LAST UPDATE" json:"lastUpdate" yaml:"lastUpdate"` + RuntimeStatus string `csv:"STATUS" json:"runtimeStatus" yaml:"runtimeStatus"` + CustomStatus string `csv:"CUSTOM STATUS" json:"customStatus" yaml:"customStatus"` + FailureMessage string `csv:"FAILURE MESSAGE" json:"failureMessage" yaml:"failureMessage"` +} + +func ListShort(ctx context.Context, opts ListOptions) ([]*ListOutputShort, error) { + wide, err := ListWide(ctx, opts) + if err != nil { + return nil, err + } + + short := make([]*ListOutputShort, len(wide)) + 
for i, w := range wide { + short[i] = &ListOutputShort{ + Namespace: w.Namespace, + AppID: w.AppID, + Name: w.Name, + InstanceID: w.InstanceID, + Age: translateTimestampSince(w.Created), + RuntimeStatus: w.RuntimeStatus, + } + if len(w.CustomStatus) > 0 { + short[i].CustomStatus = w.CustomStatus + } + } + + return short, nil +} + +func ListWide(ctx context.Context, opts ListOptions) ([]*ListOutputWide, error) { + dclient, err := dclient.DaprClient(ctx, opts.KubernetesMode, opts.Namespace, opts.AppID) + if err != nil { + return nil, fmt.Errorf("failed to create Dapr client: %w", err) + } + defer dclient.Cancel() + + connString := opts.ConnectionString + if connString == nil { + connString = dclient.ConnectionString + } + tableName := opts.TableName + if tableName == nil { + tableName = dclient.TableName + } + + metaKeys, err := metakeys(ctx, DBOptions{ + Namespace: opts.Namespace, + AppID: opts.AppID, + Driver: dclient.StateStoreDriver, + ConnectionString: connString, + TableName: tableName, + }) + if err != nil { + return nil, err + } + + return list(ctx, metaKeys, dclient.Dapr, opts) +} + +func list(ctx context.Context, metaKeys []string, cl client.Client, opts ListOptions) ([]*ListOutputWide, error) { + wf := workflow.NewClient(cl.GrpcClientConn()) + + var listOutput []*ListOutputWide + for _, key := range metaKeys { + split := strings.Split(key, "||") + if len(split) != 4 { + continue + } + + instanceID := split[2] + + resp, err := wf.FetchWorkflowMetadata(ctx, instanceID) + if err != nil { + return nil, err + } + + if opts.Filter.Name != nil && resp.Name != *opts.Filter.Name { + continue + } + if opts.Filter.Status != nil && resp.String() != *opts.Filter.Status { + continue + } + if opts.Filter.MaxAge != nil && resp.CreatedAt.AsTime().Before(*opts.Filter.MaxAge) { + continue + } + // TODO: @joshvanl: add `WorkflowIsCompleted` func to workflow package. 
+ //nolint:govet + if opts.Filter.Terminal && !api.OrchestrationMetadataIsComplete(ptr.Of(protos.OrchestrationMetadata(*resp))) { + continue + } + + wide := &ListOutputWide{ + Namespace: opts.Namespace, + AppID: opts.AppID, + Name: resp.Name, + InstanceID: instanceID, + Created: resp.CreatedAt.AsTime().Truncate(time.Second), + LastUpdate: resp.LastUpdatedAt.AsTime().Truncate(time.Second), + RuntimeStatus: resp.String(), + } + + if resp.CustomStatus != nil { + wide.CustomStatus = resp.CustomStatus.Value + } + + if resp.FailureDetails != nil { + wide.FailureMessage = strings.ReplaceAll( + strings.ReplaceAll( + resp.FailureDetails.GetErrorMessage(), + "\n", ""), + "\r", "") + } + + listOutput = append(listOutput, wide) + } + + sort.SliceStable(listOutput, func(i, j int) bool { + if listOutput[i].Created.IsZero() { + return false + } + if listOutput[j].Created.IsZero() { + return true + } + return listOutput[i].Created.Before(listOutput[j].Created) + }) + + return listOutput, nil +} + +func translateTimestampSince(timestamp time.Time) string { + if timestamp.IsZero() { + return "" + } + return duration.HumanDuration(time.Since(timestamp)) +} diff --git a/pkg/workflow/purge.go b/pkg/workflow/purge.go new file mode 100644 index 000000000..448930ebf --- /dev/null +++ b/pkg/workflow/purge.go @@ -0,0 +1,124 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "context" + "fmt" + "os" + "time" + + clientv3 "go.etcd.io/etcd/client/v3" + + "github.com/dapr/cli/pkg/print" + "github.com/dapr/cli/pkg/scheduler" + "github.com/dapr/cli/pkg/workflow/dclient" + "github.com/dapr/durabletask-go/workflow" +) + +type PurgeOptions struct { + KubernetesMode bool + Namespace string + SchedulerNamespace string + AppID string + InstanceIDs []string + AllOlderThan *time.Time + All bool + + ConnectionString *string + TableName *string +} + +func Purge(ctx context.Context, opts PurgeOptions) error { + cli, err := dclient.DaprClient(ctx, opts.KubernetesMode, opts.Namespace, opts.AppID) + if err != nil { + return err + } + defer cli.Cancel() + + var toPurge []string + + if len(opts.InstanceIDs) > 0 { + toPurge = opts.InstanceIDs + } else { + var list []*ListOutputWide + list, err = ListWide(ctx, ListOptions{ + KubernetesMode: opts.KubernetesMode, + Namespace: opts.Namespace, + AppID: opts.AppID, + ConnectionString: opts.ConnectionString, + TableName: opts.TableName, + Filter: Filter{ + Terminal: true, + }, + }) + if err != nil { + return err + } + + switch { + case opts.AllOlderThan != nil: + for _, w := range list { + if w.Created.Before(*opts.AllOlderThan) { + toPurge = append(toPurge, w.InstanceID) + } + } + + case opts.All: + for _, w := range list { + toPurge = append(toPurge, w.InstanceID) + } + } + } + + wf := workflow.NewClient(cli.Dapr.GrpcClientConn()) + + etcdClient, cancel, err := scheduler.EtcdClient(opts.KubernetesMode, opts.SchedulerNamespace) + if err != nil { + return err + } + defer cancel() + + print.InfoStatusEvent(os.Stdout, "Purging %d workflow instance(s)", len(toPurge)) + + for _, id := range toPurge { + if err = wf.PurgeWorkflowState(ctx, id); err != nil { + return fmt.Errorf("%s: %w", id, err) + } + + paths := []string{ + fmt.Sprintf("dapr/jobs/actorreminder||%s||dapr.internal.%s.%s.workflow||%s||", opts.Namespace, opts.Namespace, opts.AppID, id), + 
fmt.Sprintf("dapr/jobs/actorreminder||%s||dapr.internal.%s.%s.activity||%s::", opts.Namespace, opts.Namespace, opts.AppID, id), + fmt.Sprintf("dapr/counters/actorreminder||%s||dapr.internal.%s.%s.workflow||%s||", opts.Namespace, opts.Namespace, opts.AppID, id), + fmt.Sprintf("dapr/counters/actorreminder||%s||dapr.internal.%s.%s.activity||%s::", opts.Namespace, opts.Namespace, opts.AppID, id), + } + + oopts := make([]clientv3.Op, 0, len(paths)) + for _, path := range paths { + oopts = append(oopts, clientv3.OpDelete(path, + clientv3.WithPrefix(), + clientv3.WithPrevKV(), + clientv3.WithKeysOnly(), + )) + } + + if _, err = etcdClient.Txn(ctx).Then(oopts...).Commit(); err != nil { + return err + } + + print.SuccessStatusEvent(os.Stdout, "Purged workflow instance %q", id) + } + + return nil +} diff --git a/pkg/workflow/rerun.go b/pkg/workflow/rerun.go new file mode 100644 index 000000000..ccc57ad1f --- /dev/null +++ b/pkg/workflow/rerun.go @@ -0,0 +1,51 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "context" + + "github.com/dapr/cli/pkg/workflow/dclient" + "github.com/dapr/durabletask-go/workflow" +) + +type ReRunOptions struct { + KubernetesMode bool + Namespace string + AppID string + InstanceID string + EventID uint32 + NewInstanceID *string + Input *string +} + +func ReRun(ctx context.Context, opts ReRunOptions) (string, error) { + cli, err := dclient.DaprClient(ctx, opts.KubernetesMode, opts.Namespace, opts.AppID) + if err != nil { + return "", err + } + defer cli.Cancel() + + wf := workflow.NewClient(cli.Dapr.GrpcClientConn()) + + var wopts []workflow.RerunOptions + if opts.NewInstanceID != nil { + wopts = append(wopts, workflow.WithRerunNewInstanceID(*opts.NewInstanceID)) + } + if opts.Input != nil { + wopts = append(wopts, workflow.WithRerunInput(*opts.Input)) + } + + return wf.RerunWorkflowFromEvent(ctx, opts.InstanceID, opts.EventID, wopts...) +} diff --git a/pkg/workflow/run.go b/pkg/workflow/run.go new file mode 100644 index 000000000..4f8a66a89 --- /dev/null +++ b/pkg/workflow/run.go @@ -0,0 +1,55 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "context" + "time" + + "github.com/dapr/cli/pkg/workflow/dclient" + "github.com/dapr/durabletask-go/workflow" +) + +type RunOptions struct { + KubernetesMode bool + Namespace string + AppID string + Name string + InstanceID *string + Input *string + StartTime *time.Time +} + +func Run(ctx context.Context, opts RunOptions) (string, error) { + cli, err := dclient.DaprClient(ctx, opts.KubernetesMode, opts.Namespace, opts.AppID) + if err != nil { + return "", err + } + defer cli.Cancel() + + wf := workflow.NewClient(cli.Dapr.GrpcClientConn()) + + var wopts []workflow.NewWorkflowOptions + if opts.InstanceID != nil { + wopts = append(wopts, workflow.WithInstanceID(*opts.InstanceID)) + } + if opts.Input != nil { + wopts = append(wopts, workflow.WithInput(*opts.Input)) + } + if opts.StartTime != nil { + wopts = append(wopts, workflow.WithStartTime(*opts.StartTime)) + } + + return wf.ScheduleWorkflow(ctx, opts.Name, wopts...) +} diff --git a/pkg/workflow/workflow.go b/pkg/workflow/workflow.go new file mode 100644 index 000000000..033b89d58 --- /dev/null +++ b/pkg/workflow/workflow.go @@ -0,0 +1,85 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "context" + "fmt" + + "github.com/dapr/cli/pkg/workflow/db" + "github.com/dapr/cli/pkg/workflow/dclient" +) + +type DBOptions struct { + Namespace string + AppID string + Driver string + ConnectionString *string + TableName *string +} + +func metakeys(ctx context.Context, opts DBOptions) ([]string, error) { + if opts.ConnectionString == nil { + return nil, fmt.Errorf("connection string is required for all drivers") + } + + switch { + case dclient.IsSQLDriver(opts.Driver): + tableName := "state" + if opts.TableName != nil { + tableName = *opts.TableName + } + + sqldb, err := db.SQL(ctx, opts.Driver, *opts.ConnectionString) + if err != nil { + return nil, err + } + defer sqldb.Close() + + return db.ListSQL(ctx, sqldb, tableName, db.ListOptions{ + Namespace: opts.Namespace, + AppID: opts.AppID, + }) + + case opts.Driver == "redis": + client, err := db.Redis(ctx, *opts.ConnectionString) + if err != nil { + return nil, err + } + + return db.ListRedis(ctx, client, db.ListOptions{ + Namespace: opts.Namespace, + AppID: opts.AppID, + }) + + case opts.Driver == "mongodb": + client, err := db.Mongo(ctx, *opts.ConnectionString) + if err != nil { + return nil, err + } + + collectionName := "daprCollection" + if opts.TableName != nil { + collectionName = *opts.TableName + } + + return db.ListMongo(ctx, client.Database("daprStore"), collectionName, db.ListOptions{ + Namespace: opts.Namespace, + AppID: opts.AppID, + }) + + default: + return nil, fmt.Errorf("unsupported driver: %s", opts.Driver) + } +} diff --git a/tests/apps/scheduler/app.go b/tests/apps/scheduler/app.go new file mode 100644 index 000000000..ce7dd6909 --- /dev/null +++ b/tests/apps/scheduler/app.go @@ -0,0 +1,223 @@ +/* +Copyright 2023 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "fmt" + "log" + "net" + "net/http" + "time" + + "github.com/dapr/durabletask-go/workflow" + "github.com/dapr/go-sdk/client" + "github.com/dapr/kit/ptr" + "github.com/dapr/kit/signals" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +func main() { + const port = 9095 + + ctx := signals.Context() + + fmt.Printf("Starting server in port %v...\n", port) + + regCh := make(chan struct{}) + mux := http.NewServeMux() + mux.HandleFunc("/dapr/config", func(w http.ResponseWriter, r *http.Request) { + close(regCh) + w.Write([]byte(`{"entities": ["myactortype"]}`)) + }) + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {}) + + go func() { + log.Printf("Waiting for registration call...") + select { + case <-regCh: + log.Printf("Registration call received") + case <-ctx.Done(): + log.Printf("Context done while waiting for registration call") + return + } + register(ctx) + }() + + StartServer(ctx, port, mux) +} + +func register(ctx context.Context) { + log.Printf("Registering jobs, reminders and workflows") + + addr := "127.0.0.1:3510" + log.Printf("Creating client to %s", addr) + cl, err := client.NewClientWithAddress(addr) + if err != nil { + log.Fatal(err) + } + log.Println("Client created") + + ds := time.Now().Format(time.RFC3339) + + data, err := anypb.New(wrapperspb.String("hello")) + if err != nil { + log.Fatal(err) + } + + if err = cl.ScheduleJobAlpha1(ctx, &client.Job{ + Name: "test1", + Schedule: ptr.Of("@every 100m"), + Repeats: ptr.Of(uint32(1234)), + 
DueTime: ptr.Of(ds), + Data: data, + }); err != nil { + log.Fatal(err) + } + + log.Printf("Scheduled job test1") + + if err = cl.ScheduleJobAlpha1(ctx, &client.Job{ + Name: "test2", + Schedule: ptr.Of("@every 100m"), + Repeats: ptr.Of(uint32(56788)), + DueTime: ptr.Of(ds), + TTL: ptr.Of("10000s"), + Data: data, + }); err != nil { + log.Fatal(err) + } + + log.Printf("Scheduled job test2") + + if err = cl.RegisterActorReminder(ctx, &client.RegisterActorReminderRequest{ + ActorType: "myactortype", + ActorID: "actorid1", + Name: "test1", + DueTime: ds, + Period: "R100/PT10000S", + }); err != nil { + log.Fatal(err) + } + + log.Printf("Scheduled actor reminder test1") + + if err = cl.RegisterActorReminder(ctx, &client.RegisterActorReminderRequest{ + ActorType: "myactortype", + ActorID: "actorid2", + Name: "test2", + DueTime: ds, + Period: "R100/PT10000S", + }); err != nil { + log.Fatal(err) + } + + log.Printf("Scheduled actor reminder test2") + + r := workflow.NewRegistry() + + if err := r.AddWorkflow(W1); err != nil { + log.Fatal(err) + } + if err := r.AddWorkflow(W2); err != nil { + log.Fatal(err) + } + if err := r.AddActivity(A1); err != nil { + log.Fatal(err) + } + + wf, err := client.NewWorkflowClient() + if err != nil { + log.Fatal(err) + } + + if err = wf.StartWorker(ctx, r); err != nil { + log.Fatal(err) + } + + if _, err = wf.ScheduleWorkflow(ctx, "W1", workflow.WithInstanceID("abc1")); err != nil { + log.Fatal(err) + } + + log.Printf("Scheduled workflow W1 with id abc1") + + if _, err = wf.ScheduleWorkflow(ctx, "W1", workflow.WithInstanceID("abc2")); err != nil { + log.Fatal(err) + } + + log.Printf("Scheduled workflow W1 with id abc2") + + if _, err = wf.ScheduleWorkflow(ctx, "W2", workflow.WithInstanceID("xyz1")); err != nil { + log.Fatal(err) + } + + log.Printf("Scheduled workflow W2 with id xyz1") + + if _, err = wf.ScheduleWorkflow(ctx, "W2", workflow.WithInstanceID("xyz2")); err != nil { + log.Fatal(err) + } + + log.Printf("Scheduled workflow W2 with id 
xyz2")
+}
+
+// StartServer starts an HTTP or HTTP2 server
+func StartServer(ctx context.Context, port int, handler http.Handler) {
+	// Create a listener
+	addr := fmt.Sprintf(":%d", port)
+
+	log.Println("Starting server on ", addr)
+
+	ln, err := net.Listen("tcp", addr)
+	if err != nil {
+		log.Fatalf("Failed to create listener: %v", err)
+	}
+
+	//nolint:gosec
+	server := &http.Server{
+		Addr:    addr,
+		Handler: handler,
+	}
+
+	go func() {
+		// Wait for cancellation signal
+		<-ctx.Done()
+		log.Println("Shutdown signal received")
+		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+		defer cancel()
+		server.Shutdown(ctx)
+	}()
+
+	log.Printf("Server listening on %s", addr)
+	err = server.Serve(ln)
+
+	if err != http.ErrServerClosed {
+		log.Fatalf("Failed to run server: %v", err)
+	}
+
+	log.Println("Server shut down")
+}
+
+func W1(ctx *workflow.WorkflowContext) (any, error) {
+	return nil, ctx.CreateTimer(time.Hour * 50).Await(nil)
+}
+
+func W2(ctx *workflow.WorkflowContext) (any, error) {
+	return nil, ctx.CallActivity("A1").Await(nil)
+}
+
+func A1(ctx workflow.ActivityContext) (any, error) {
+	<-ctx.Context().Done()
+	return nil, nil
+}
diff --git a/tests/apps/scheduler/go.mod b/tests/apps/scheduler/go.mod
new file mode 100644
index 000000000..acd0a4009
--- /dev/null
+++ b/tests/apps/scheduler/go.mod
@@ -0,0 +1,29 @@
+module scheduler
+
+go 1.24.7
+
+require (
+	github.com/dapr/durabletask-go v0.10.0
+	github.com/dapr/go-sdk v1.13.0
+	github.com/dapr/kit v0.16.1
+	google.golang.org/protobuf v1.36.6
+)
+
+require (
+	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+	github.com/dapr/dapr v1.16.0 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/sirupsen/logrus v1.9.3 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/otel v1.36.0 // indirect
+	go.opentelemetry.io/otel/metric v1.36.0 // indirect
+	
go.opentelemetry.io/otel/trace v1.36.0 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.26.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/grpc v1.73.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/tests/apps/scheduler/go.sum b/tests/apps/scheduler/go.sum new file mode 100644 index 000000000..deeb81574 --- /dev/null +++ b/tests/apps/scheduler/go.sum @@ -0,0 +1,71 @@ +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/dapr/dapr v1.16.0 h1:la2WLZM8Myr2Pq3cyrFjHKWDSPYLzGZCs3p502TwBjI= +github.com/dapr/dapr v1.16.0/go.mod h1:ln/mxvNOeqklaDmic4ppsxmnjl2D/oZGKaJy24IwaEY= +github.com/dapr/durabletask-go v0.10.0 h1:vfIivPl4JYd55xZTslDwhA6p6F8ipcNxBtMaupxArr8= +github.com/dapr/durabletask-go v0.10.0/go.mod h1:0Ts4rXp74JyG19gDWPcwNo5V6NBZzhARzHF5XynmA7Q= +github.com/dapr/go-sdk v1.13.0 h1:Qw2BmUonClQ9yK/rrEEaFL1PyDgq616RrvYj0CT67Lk= +github.com/dapr/go-sdk v1.13.0/go.mod h1:RsffVNZitDApmQqoS68tNKGMXDZUjTviAbKZupJSzts= +github.com/dapr/kit v0.16.1 h1:MqLAhHVg8trPy2WJChMZFU7ToeondvxcNHYVvMDiVf4= +github.com/dapr/kit v0.16.1/go.mod h1:40ZWs5P6xfYf7O59XgwqZkIyDldTIXlhTQhGop8QoSM= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr 
v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.10.0 
h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tests/apps/workflow/app.go b/tests/apps/workflow/app.go new file mode 100644 index 000000000..c5d7e335f --- /dev/null +++ b/tests/apps/workflow/app.go @@ -0,0 +1,370 @@ +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/dapr/durabletask-go/workflow" + "github.com/dapr/go-sdk/client" + "github.com/dapr/kit/signals" +) + +func main() { + ctx := signals.Context() + register(ctx) + + log.Println("Workflow worker started and ready to accept workflow requests") + + <-ctx.Done() +} + +func register(ctx context.Context) { + r := workflow.NewRegistry() + + workflows := []workflow.Workflow{ + WNoOp, + WTimer, + WActivity1, + SimpleWorkflow, + EventWorkflow, + LongWorkflow, + ChildWorkflow, + ParentWorkflow, + NestedParentWorkflow, + RecursiveChildWorkflow, + FanOutWorkflow, + DataWorkflow, + } + activities := []workflow.Activity{ + ANoOP, + SimpleActivity, + LongRunningActivity, + DataProcessingActivity, + } + + for _, w := range workflows { + if err := r.AddWorkflow(w); err != nil { + log.Fatalf("error adding workflow %T: %v", w, err) + } + } + + for _, a := range activities { + if err := r.AddActivity(a); err != nil { + log.Fatalf("error adding activity %T: %v", a, err) + } + } + + wf, err := client.NewWorkflowClient() + if err != nil { + log.Fatal(err) + } + + if err = wf.StartWorker(ctx, r); err != nil { + log.Fatal(err) + } +} + +func WNoOp(ctx *workflow.WorkflowContext) (any, error) { + return nil, nil +} + +func WTimer(ctx *workflow.WorkflowContext) (any, error) { + return nil, ctx.CreateTimer(time.Hour * 10).Await(nil) +} + +func WActivity1(ctx *workflow.WorkflowContext) (any, error) { + return nil, ctx.CallActivity(ANoOP).Await(nil) +} + +func ANoOP(ctx workflow.ActivityContext) (any, error) { + return nil, nil +} + +func SimpleWorkflow(ctx *workflow.WorkflowContext) (any, error) { + var input any + ctx.GetInput(&input) + + var result string + err := ctx.CallActivity(SimpleActivity, workflow.WithActivityInput(input)).Await(&result) + if err != nil { + return nil, fmt.Errorf("activity failed: %w", err) + } + + ctx.CreateTimer(time.Second * 2).Await(nil) + + return map[string]interface{}{ + "status": 
"completed", + "result": result, + }, nil +} + +func LongWorkflow(ctx *workflow.WorkflowContext) (any, error) { + stages := []string{"initialization", "processing", "validation", "finalization"} + results := make([]string, 0, len(stages)) + + for _, stage := range stages { + var stageResult string + err := ctx.CallActivity(LongRunningActivity, workflow.WithActivityInput(stage)).Await(&stageResult) + if err != nil { + return nil, fmt.Errorf("stage %s failed: %w", stage, err) + } + results = append(results, stageResult) + + ctx.CreateTimer(time.Second * 1).Await(nil) + } + + return map[string]interface{}{ + "status": "completed", + "stages": stages, + "results": results, + }, nil +} + +func EventWorkflow(ctx *workflow.WorkflowContext) (any, error) { + return nil, ctx.WaitForExternalEvent("test-event", time.Hour).Await(nil) +} + +func DataWorkflow(ctx *workflow.WorkflowContext) (any, error) { + var input struct { + Name string `json:"name"` + Value int `json:"value"` + Data map[string]interface{} `json:"data"` + } + + if err := ctx.GetInput(&input); err != nil { + return nil, fmt.Errorf("failed to get input: %w", err) + } + + var processedData any + err := ctx.CallActivity(DataProcessingActivity, workflow.WithActivityInput(input)).Await(&processedData) + if err != nil { + return nil, fmt.Errorf("data processing failed: %w", err) + } + + output := map[string]interface{}{ + "originalName": input.Name, + "processedName": fmt.Sprintf("processed_%s", input.Name), + "originalValue": input.Value, + "doubledValue": input.Value * 2, + "processedData": processedData, + } + + return output, nil +} + +func SimpleActivity(ctx workflow.ActivityContext) (any, error) { + var input map[string]interface{} + if err := ctx.GetInput(&input); err != nil { + input = make(map[string]interface{}) + } + + time.Sleep(time.Millisecond * 500) + + return fmt.Sprintf("Processed simple activity with input: %v", input), nil +} + +func LongRunningActivity(ctx workflow.ActivityContext) (any, error) { + 
var stage string + if err := ctx.GetInput(&stage); err != nil { + stage = "unknown" + } + + time.Sleep(time.Second * 2) + + return fmt.Sprintf("Completed %s at %s", stage, time.Now().Format(time.RFC3339)), nil +} + +func DataProcessingActivity(ctx workflow.ActivityContext) (any, error) { + var input map[string]interface{} + if err := ctx.GetInput(&input); err != nil { + return nil, fmt.Errorf("failed to get input: %w", err) + } + + processed := make(map[string]interface{}) + for k, v := range input { + processed[fmt.Sprintf("processed_%s", k)] = v + } + + processed["processedAt"] = time.Now().Format(time.RFC3339) + + return processed, nil +} + +func ParentWorkflow(ctx *workflow.WorkflowContext) (any, error) { + var input map[string]interface{} + if err := ctx.GetInput(&input); err != nil { + input = make(map[string]interface{}) + } + + childInput1 := map[string]interface{}{ + "parentID": ctx.ID(), + "step": 1, + "data": input, + } + var childResult1 map[string]interface{} + if err := ctx.CallChildWorkflow(ChildWorkflow, workflow.WithChildWorkflowInput(childInput1)).Await(&childResult1); err != nil { + return nil, fmt.Errorf("first child workflow failed: %w", err) + } + + childInput2 := map[string]interface{}{ + "parentID": ctx.ID(), + "step": 2, + "previousData": childResult1, + } + var childResult2 map[string]interface{} + if err := ctx.CallChildWorkflow(ChildWorkflow, workflow.WithChildWorkflowInput(childInput2)).Await(&childResult2); err != nil { + return nil, fmt.Errorf("second child workflow failed: %w", err) + } + + return map[string]interface{}{ + "status": "completed", + "parentID": ctx.ID(), + "childResult1": childResult1, + "childResult2": childResult2, + }, nil +} + +func ChildWorkflow(ctx *workflow.WorkflowContext) (any, error) { + var input map[string]interface{} + if err := ctx.GetInput(&input); err != nil { + return nil, fmt.Errorf("failed to get input: %w", err) + } + + ctx.CreateTimer(time.Second).Await(nil) + + var activityResult string + if err 
:= ctx.CallActivity(SimpleActivity, workflow.WithActivityInput(input)).Await(&activityResult); err != nil { + return nil, fmt.Errorf("child activity failed: %w", err) + } + + return map[string]interface{}{ + "childID": ctx.ID(), + "parentID": input["parentID"], + "step": input["step"], + "processed": true, + "activityResult": activityResult, + }, nil +} + +func NestedParentWorkflow(ctx *workflow.WorkflowContext) (any, error) { + + nestedInput := map[string]interface{}{ + "level": 1, + "maxLevel": 3, + "rootID": ctx.ID(), + } + + var nestedResult map[string]interface{} + if err := ctx.CallChildWorkflow(RecursiveChildWorkflow, workflow.WithChildWorkflowInput(nestedInput)).Await(&nestedResult); err != nil { + return nil, fmt.Errorf("nested child workflow failed: %w", err) + } + + return map[string]interface{}{ + "status": "completed", + "rootID": ctx.ID(), + "nestedResult": nestedResult, + }, nil +} + +func RecursiveChildWorkflow(ctx *workflow.WorkflowContext) (any, error) { + var input struct { + Level int `json:"level"` + MaxLevel int `json:"maxLevel"` + RootID string `json:"rootID"` + Data map[string]interface{} `json:"data"` + } + + if err := ctx.GetInput(&input); err != nil { + return nil, fmt.Errorf("failed to get input: %w", err) + } + + result := map[string]interface{}{ + "instanceID": ctx.ID(), + "level": input.Level, + "rootID": input.RootID, + } + + if input.Level < input.MaxLevel { + childInput := map[string]interface{}{ + "level": input.Level + 1, + "maxLevel": input.MaxLevel, + "rootID": input.RootID, + "data": input.Data, + } + + var childResult map[string]interface{} + if err := ctx.CallChildWorkflow(RecursiveChildWorkflow, workflow.WithChildWorkflowInput(childInput)).Await(&childResult); err != nil { + return nil, fmt.Errorf("recursive child at level %d failed: %w", input.Level+1, err) + } + result["childResult"] = childResult + } else { + var activityResult string + if err := ctx.CallActivity(SimpleActivity, 
workflow.WithActivityInput(input.Data)).Await(&activityResult); err != nil {
+			return nil, fmt.Errorf("activity at max level failed: %w", err)
+		}
+		result["finalActivity"] = activityResult
+	}
+
+	return result, nil
+}
+
+func FanOutWorkflow(ctx *workflow.WorkflowContext) (any, error) {
+	var input struct {
+		ParallelCount int                    `json:"parallelCount"`
+		Data          map[string]interface{} `json:"data"`
+	}
+
+	input.ParallelCount = 3
+	ctx.GetInput(&input)
+
+	if input.ParallelCount <= 0 {
+		input.ParallelCount = 3
+	}
+	// Clamp the fan-out so a caller cannot request an unbounded number of
+	// parallel child workflows.
+	if input.ParallelCount > 10 {
+		input.ParallelCount = 10
+	}
+
+	var childTasks []workflow.Task
+	for i := 0; i < input.ParallelCount; i++ {
+		childInput := map[string]interface{}{
+			"parentID": ctx.ID(),
+			"index":    i,
+			"data":     input.Data,
+		}
+		task := ctx.CallChildWorkflow(ChildWorkflow, workflow.WithChildWorkflowInput(childInput))
+		childTasks = append(childTasks, task)
+	}
+
+	results := make([]map[string]interface{}, 0, len(childTasks))
+	for i, task := range childTasks {
+		var result map[string]interface{}
+		if err := task.Await(&result); err != nil {
+			result = map[string]interface{}{
+				"index": i,
+				"error": err.Error(),
+			}
+		}
+		results = append(results, result)
+	}
+
+	return map[string]interface{}{
+		"status":        "completed",
+		"parentID":      ctx.ID(),
+		"parallelCount": input.ParallelCount,
+		"results":       results,
+	}, nil
+}
diff --git a/tests/apps/workflow/go.mod b/tests/apps/workflow/go.mod
new file mode 100644
index 000000000..9beb717cc
--- /dev/null
+++ b/tests/apps/workflow/go.mod
@@ -0,0 +1,29 @@
+module workflow
+
+go 1.24.7
+
+require (
+	github.com/dapr/durabletask-go v0.10.0
+	github.com/dapr/go-sdk v1.13.0
+	github.com/dapr/kit v0.16.1
+)
+
+require (
+	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+	github.com/dapr/dapr v1.16.0 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/sirupsen/logrus v1.9.3 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // 
indirect + go.opentelemetry.io/otel v1.36.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect + go.opentelemetry.io/otel/trace v1.36.0 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.26.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/grpc v1.73.0 // indirect + google.golang.org/protobuf v1.36.6 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/tests/apps/workflow/go.sum b/tests/apps/workflow/go.sum new file mode 100644 index 000000000..deeb81574 --- /dev/null +++ b/tests/apps/workflow/go.sum @@ -0,0 +1,71 @@ +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/dapr/dapr v1.16.0 h1:la2WLZM8Myr2Pq3cyrFjHKWDSPYLzGZCs3p502TwBjI= +github.com/dapr/dapr v1.16.0/go.mod h1:ln/mxvNOeqklaDmic4ppsxmnjl2D/oZGKaJy24IwaEY= +github.com/dapr/durabletask-go v0.10.0 h1:vfIivPl4JYd55xZTslDwhA6p6F8ipcNxBtMaupxArr8= +github.com/dapr/durabletask-go v0.10.0/go.mod h1:0Ts4rXp74JyG19gDWPcwNo5V6NBZzhARzHF5XynmA7Q= +github.com/dapr/go-sdk v1.13.0 h1:Qw2BmUonClQ9yK/rrEEaFL1PyDgq616RrvYj0CT67Lk= +github.com/dapr/go-sdk v1.13.0/go.mod h1:RsffVNZitDApmQqoS68tNKGMXDZUjTviAbKZupJSzts= +github.com/dapr/kit v0.16.1 h1:MqLAhHVg8trPy2WJChMZFU7ToeondvxcNHYVvMDiVf4= +github.com/dapr/kit v0.16.1/go.mod h1:40ZWs5P6xfYf7O59XgwqZkIyDldTIXlhTQhGop8QoSM= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tests/e2e/standalone/commands.go b/tests/e2e/standalone/commands.go index c75bc7171..c3e578ea4 100644 --- a/tests/e2e/standalone/commands.go +++ b/tests/e2e/standalone/commands.go @@ -219,3 +219,105 @@ func cmdVersion(output string, args ...string) (string, error) { return spawn.Command(common.GetDaprPath(), verArgs...) } + +func cmdSchedulerList(args ...string) (string, error) { + listArgs := []string{"scheduler", "list"} + + listArgs = append(listArgs, args...) + + return spawn.Command(common.GetDaprPath(), listArgs...) +} + +func cmdSchedulerGet(args ...string) (string, error) { + listArgs := []string{"scheduler", "get"} + + listArgs = append(listArgs, args...) + + return spawn.Command(common.GetDaprPath(), listArgs...) 
+} + +func cmdSchedulerDelete(args ...string) (string, error) { + deleteArgs := []string{"scheduler", "delete"} + + deleteArgs = append(deleteArgs, args...) + + return spawn.Command(common.GetDaprPath(), deleteArgs...) +} + +func cmdSchedulerDeleteAll(args ...string) (string, error) { + deleteArgs := []string{"scheduler", "delete-all"} + + deleteArgs = append(deleteArgs, args...) + + return spawn.Command(common.GetDaprPath(), deleteArgs...) +} + +func cmdSchedulerExport(args ...string) (string, error) { + exportArgs := []string{"scheduler", "export"} + + exportArgs = append(exportArgs, args...) + + return spawn.Command(common.GetDaprPath(), exportArgs...) +} + +func cmdSchedulerImport(args ...string) (string, error) { + importArgs := []string{"scheduler", "import"} + + importArgs = append(importArgs, args...) + + return spawn.Command(common.GetDaprPath(), importArgs...) +} + +func cmdWorkflowList(appID string, args ...string) (string, error) { + allArgs := []string{"workflow", "list", "-a", appID} + allArgs = append(allArgs, args...) + return spawn.Command(common.GetDaprPath(), allArgs...) +} + +func cmdWorkflowRun(appID, workflowName string, args ...string) (string, error) { + allArgs := []string{"workflow", "run", "-a", appID, workflowName} + allArgs = append(allArgs, args...) + return spawn.Command(common.GetDaprPath(), allArgs...) +} + +func cmdWorkflowHistory(appID, instanceID string, args ...string) (string, error) { + allArgs := []string{"workflow", "history", "-a", appID, instanceID} + allArgs = append(allArgs, args...) + return spawn.Command(common.GetDaprPath(), allArgs...) +} + +func cmdWorkflowSuspend(appID, instanceID string, args ...string) (string, error) { + allArgs := []string{"workflow", "suspend", "-a", appID, instanceID} + allArgs = append(allArgs, args...) + return spawn.Command(common.GetDaprPath(), allArgs...) 
+} + +func cmdWorkflowResume(appID, instanceID string, args ...string) (string, error) { + allArgs := []string{"workflow", "resume", "-a", appID, instanceID} + allArgs = append(allArgs, args...) + return spawn.Command(common.GetDaprPath(), allArgs...) +} + +func cmdWorkflowTerminate(appID, instanceID string, args ...string) (string, error) { + allArgs := []string{"workflow", "terminate", "-a", appID, instanceID} + allArgs = append(allArgs, args...) + return spawn.Command(common.GetDaprPath(), allArgs...) +} + +func cmdWorkflowRaiseEvent(appID, eventArg string, args ...string) (string, error) { + allArgs := []string{"workflow", "raise-event", "-a", appID, eventArg} + allArgs = append(allArgs, args...) + return spawn.Command(common.GetDaprPath(), allArgs...) +} + +func cmdWorkflowReRun(appID, instanceID string, args ...string) (string, error) { + allArgs := []string{"workflow", "rerun", "-a", appID, instanceID} + allArgs = append(allArgs, args...) + return spawn.Command(common.GetDaprPath(), allArgs...) +} + +func cmdWorkflowPurge(appID string, args ...string) (string, error) { + allArgs := []string{"workflow", "purge", "-a", appID} + allArgs = append(allArgs, args...) + return spawn.Command(common.GetDaprPath(), allArgs...) 
+} diff --git a/tests/e2e/standalone/publish_test.go b/tests/e2e/standalone/publish_test.go index 845eb5576..07d80c408 100644 --- a/tests/e2e/standalone/publish_test.go +++ b/tests/e2e/standalone/publish_test.go @@ -91,6 +91,8 @@ func TestStandalonePublish(t *testing.T) { assert.Contains(t, output, "Event published successfully") event := <-events + event.TraceID = "" + event.TraceParent = "" assert.Equal(t, &common.TopicEvent{ ID: "3cc97064-edd1-49f4-b911-c959a7370e68", Source: "e2e_test", diff --git a/tests/e2e/standalone/scheduler_test.go b/tests/e2e/standalone/scheduler_test.go new file mode 100644 index 000000000..b95d6c086 --- /dev/null +++ b/tests/e2e/standalone/scheduler_test.go @@ -0,0 +1,543 @@ +//go:build !windows && (e2e || template) + +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package standalone_test
+
+import (
+	"encoding/json"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/dapr/cli/pkg/scheduler"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"gopkg.in/yaml.v2"
+)
+
+func TestSchedulerList(t *testing.T) {
+	if isSlimMode() {
+		t.Skip("skipping scheduler tests in slim mode")
+	}
+
+	cmdUninstall()
+	ensureDaprInstallation(t)
+	t.Cleanup(func() {
+		must(t, cmdUninstall, "failed to uninstall Dapr")
+	})
+
+	runFilePath := "../testdata/run-template-files/test-scheduler.yaml"
+	t.Cleanup(func() {
+		cmdStopWithAppID("test-scheduler")
+		waitAppsToBeStopped()
+	})
+	args := []string{"-f", runFilePath}
+
+	go func() {
+		o, err := cmdRun("", args...)
+		t.Log(o)
+		t.Log(err)
+	}()
+
+	require.EventuallyWithT(t, func(c *assert.CollectT) {
+		output, err := cmdSchedulerList()
+		// Assert against the CollectT so a transient error retries instead of
+		// aborting the whole test via t.FailNow.
+		assert.NoError(c, err)
+		assert.Len(c, strings.Split(output, "\n"), 10)
+	}, time.Second*30, time.Millisecond*10)
+
+	time.Sleep(time.Second * 3)
+
+	t.Run("short", func(t *testing.T) {
+		output, err := cmdSchedulerList()
+		require.NoError(t, err)
+		lines := strings.Split(output, "\n")
+		require.Len(t, lines, 10)
+
+		require.Equal(t, []string{
+			"NAME",
+			"BEGIN",
+			"COUNT",
+			"LAST",
+			"TRIGGER",
+		}, strings.Fields(lines[0]))
+
+		expNames := []string{
+			"actor/myactortype/actorid1/test1",
+			"actor/myactortype/actorid2/test2",
+			"app/test-scheduler/test1",
+			"app/test-scheduler/test2",
+		}
+		for i, line := range lines[1:5] {
+			assert.Equal(t, expNames[i], strings.Fields(line)[0])
+
+			assert.NotEmpty(t, strings.Fields(line)[1])
+
+			count, err := strconv.Atoi(strings.Fields(line)[2])
+			require.NoError(t, err)
+			assert.Equal(t, 1, count)
+		}
+
+		expNames = []string{
+			"activity/test-scheduler/xyz1::0::1",
+			"activity/test-scheduler/xyz2::0::1",
+		}
+		for i, line := range lines[5:7] {
+			assert.Equal(t, expNames[i], strings.Fields(line)[0])
+
+			assert.NotEmpty(t, strings.Fields(line)[1])
+
+			count, err := 
strconv.Atoi(strings.Fields(line)[2])
+			require.NoError(t, err)
+			assert.Equal(t, 0, count)
+		}
+
+		expNames = []string{
+			"workflow/test-scheduler/abc1",
+			"workflow/test-scheduler/abc2",
+		}
+		for i, line := range lines[7:9] {
+			assert.True(t, strings.HasPrefix(strings.Fields(line)[0], expNames[i]), strings.Fields(line)[0])
+		}
+	})
+
+	t.Run("wide", func(t *testing.T) {
+		output, err := cmdSchedulerList("-o", "wide")
+		require.NoError(t, err)
+		lines := strings.Split(output, "\n")
+		require.Len(t, lines, 10)
+
+		require.Equal(t, []string{
+			"NAMESPACE",
+			"NAME",
+			"BEGIN",
+			"EXPIRATION",
+			"SCHEDULE",
+			"DUE",
+			"TIME",
+			"TTL",
+			"REPEATS",
+			"COUNT",
+			"LAST",
+			"TRIGGER",
+		}, strings.Fields(lines[0]))
+	})
+
+	t.Run("yaml", func(t *testing.T) {
+		output, err := cmdSchedulerList("-o", "yaml")
+		require.NoError(t, err)
+
+		var list []scheduler.ListOutputWide
+		require.NoError(t, yaml.Unmarshal([]byte(output), &list))
+		assert.Len(t, list, 8)
+	})
+
+	t.Run("json", func(t *testing.T) {
+		output, err := cmdSchedulerList("-o", "json")
+		require.NoError(t, err)
+
+		var list []scheduler.ListOutputWide
+		require.NoError(t, json.Unmarshal([]byte(output), &list))
+		assert.Len(t, list, 8)
+	})
+
+	t.Run("filter", func(t *testing.T) {
+		output, err := cmdSchedulerList("-n", "foo")
+		require.NoError(t, err)
+		assert.Len(t, strings.Split(output, "\n"), 2)
+
+		output, err = cmdSchedulerList("--filter", "all")
+		require.NoError(t, err)
+		assert.Len(t, strings.Split(output, "\n"), 10)
+
+		output, err = cmdSchedulerList("--filter", "app")
+		require.NoError(t, err)
+		assert.Len(t, strings.Split(output, "\n"), 4)
+
+		output, err = cmdSchedulerList("--filter", "actor")
+		require.NoError(t, err)
+		assert.Len(t, strings.Split(output, "\n"), 4)
+
+		output, err = cmdSchedulerList("--filter", "workflow")
+		require.NoError(t, err)
+		assert.Len(t, strings.Split(output, "\n"), 4)
+
+		output, err = cmdSchedulerList("--filter", "activity")
+		
require.NoError(t, err)
+		assert.Len(t, strings.Split(output, "\n"), 4)
+	})
+}
+
+func TestSchedulerGet(t *testing.T) {
+	if isSlimMode() {
+		t.Skip("skipping scheduler tests in slim mode")
+	}
+
+	cmdUninstall()
+	ensureDaprInstallation(t)
+	t.Cleanup(func() {
+		must(t, cmdUninstall, "failed to uninstall Dapr")
+	})
+
+	runFilePath := "../testdata/run-template-files/test-scheduler.yaml"
+	t.Cleanup(func() {
+		cmdStopWithAppID("test-scheduler")
+		waitAppsToBeStopped()
+	})
+	args := []string{"-f", runFilePath}
+
+	go func() {
+		o, err := cmdRun("", args...)
+		t.Log(o)
+		t.Log(err)
+	}()
+
+	require.EventuallyWithT(t, func(c *assert.CollectT) {
+		output, err := cmdSchedulerList()
+		// Assert against the CollectT so a transient error retries instead of
+		// aborting the whole test via t.FailNow.
+		assert.NoError(c, err)
+		assert.Len(c, strings.Split(output, "\n"), 10)
+	}, time.Second*30, time.Millisecond*10)
+
+	expNames := []string{
+		"actor/myactortype/actorid1/test1",
+		"actor/myactortype/actorid2/test2",
+		"app/test-scheduler/test1",
+		"app/test-scheduler/test2",
+		"activity/test-scheduler/xyz1::0::1",
+		"activity/test-scheduler/xyz2::0::1",
+	}
+
+	t.Run("short", func(t *testing.T) {
+		for _, name := range expNames {
+			output, err := cmdSchedulerGet(name)
+			require.NoError(t, err)
+			lines := strings.Split(output, "\n")
+			require.Len(t, lines, 3)
+
+			if strings.HasPrefix(name, "activity/") {
+				require.Equal(t, []string{
+					"NAME",
+					"BEGIN",
+					"COUNT",
+				}, strings.Fields(lines[0]), name)
+			} else {
+				require.Equal(t, []string{
+					"NAME",
+					"BEGIN",
+					"COUNT",
+					"LAST",
+					"TRIGGER",
+				}, strings.Fields(lines[0]), name)
+			}
+		}
+	})
+
+	t.Run("wide", func(t *testing.T) {
+		for _, name := range expNames {
+			output, err := cmdSchedulerGet(name, "-o", "wide")
+			require.NoError(t, err)
+			lines := strings.Split(output, "\n")
+			require.Len(t, lines, 3)
+
+			switch {
+			case name == "app/test-scheduler/test2":
+				require.Equal(t, []string{
+					"NAMESPACE",
+					"NAME",
+					"BEGIN",
+					"EXPIRATION",
+					"SCHEDULE",
+					"DUE",
+					"TIME",
+					"TTL",
+					"REPEATS",
+					"COUNT",
+					"LAST",
+					"TRIGGER",
+				}, 
strings.Fields(lines[0]), name) + + case strings.HasPrefix(name, "activity/"): + require.Equal(t, []string{ + "NAMESPACE", + "NAME", + "BEGIN", + "DUE", + "TIME", + "COUNT", + }, strings.Fields(lines[0]), name) + + default: + require.Equal(t, []string{ + "NAMESPACE", + "NAME", + "BEGIN", + "SCHEDULE", + "DUE", + "TIME", + "REPEATS", + "COUNT", + "LAST", + "TRIGGER", + }, strings.Fields(lines[0]), name) + } + } + }) + + t.Run("yaml", func(t *testing.T) { + for _, name := range expNames { + output, err := cmdSchedulerGet(name, "-o", "yaml") + require.NoError(t, err) + + var list []scheduler.ListOutputWide + require.NoError(t, yaml.Unmarshal([]byte(output), &list)) + assert.Len(t, list, 1) + } + }) + + t.Run("json", func(t *testing.T) { + for _, name := range expNames { + output, err := cmdSchedulerGet(name, "-o", "json") + require.NoError(t, err) + + var list []scheduler.ListOutputWide + require.NoError(t, json.Unmarshal([]byte(output), &list)) + assert.Len(t, list, 1) + } + }) +} + +func TestSchedulerDelete(t *testing.T) { + if isSlimMode() { + t.Skip("skipping scheduler tests in slim mode") + } + + cmdUninstall() + ensureDaprInstallation(t) + t.Cleanup(func() { + must(t, cmdUninstall, "failed to uninstall Dapr") + }) + + runFilePath := "../testdata/run-template-files/test-scheduler.yaml" + t.Cleanup(func() { + cmdStopWithAppID("test-scheduler") + waitAppsToBeStopped() + }) + args := []string{"-f", runFilePath} + + go func() { + o, err := cmdRun("", args...) 
+ t.Log(o) + t.Log(err) + }() + + require.EventuallyWithT(t, func(c *assert.CollectT) { + output, err := cmdSchedulerList() + require.NoError(t, err) + assert.Len(c, strings.Split(output, "\n"), 10) + }, time.Second*30, time.Millisecond*10) + + output, err := cmdSchedulerList() + require.NoError(t, err) + + _, err = cmdSchedulerDelete("actor/myactortype/actorid1/test1") + require.NoError(t, err) + + output, err = cmdSchedulerList() + require.NoError(t, err) + assert.Len(t, strings.Split(output, "\n"), 9) + + _, err = cmdSchedulerDelete( + "actor/myactortype/actorid2/test2", + "app/test-scheduler/test1", + "app/test-scheduler/test2", + ) + require.NoError(t, err) + + output, err = cmdSchedulerList() + require.NoError(t, err) + assert.Len(t, strings.Split(output, "\n"), 6) + + _, err = cmdSchedulerDelete( + "activity/test-scheduler/xyz1::0::1", + "activity/test-scheduler/xyz2::0::1", + ) + require.NoError(t, err) + + output, err = cmdSchedulerList() + require.NoError(t, err) + assert.Len(t, strings.Split(output, "\n"), 4) + + _, err = cmdSchedulerDelete( + strings.Fields(strings.Split(output, "\n")[1])[0], + strings.Fields(strings.Split(output, "\n")[2])[0], + ) + require.NoError(t, err) + + output, err = cmdSchedulerList() + require.NoError(t, err) + assert.Len(t, strings.Split(output, "\n"), 2) +} + +func TestSchedulerDeleteAllAll(t *testing.T) { + if isSlimMode() { + t.Skip("skipping scheduler tests in slim mode") + } + + cmdUninstall() + ensureDaprInstallation(t) + t.Cleanup(func() { + must(t, cmdUninstall, "failed to uninstall Dapr") + }) + + runFilePath := "../testdata/run-template-files/test-scheduler.yaml" + t.Cleanup(func() { + cmdStopWithAppID("test-scheduler") + waitAppsToBeStopped() + }) + args := []string{"-f", runFilePath} + + go func() { + o, err := cmdRun("", args...) 
+ t.Log(o) + t.Log(err) + }() + + require.EventuallyWithT(t, func(c *assert.CollectT) { + output, err := cmdSchedulerList() + require.NoError(t, err) + assert.Len(c, strings.Split(output, "\n"), 10) + }, time.Second*30, time.Millisecond*10) + + _, err := cmdSchedulerDeleteAll("all") + require.NoError(t, err) + + output, err := cmdSchedulerList() + require.NoError(t, err) + assert.Len(t, strings.Split(output, "\n"), 2) +} + +func TestSchedulerDeleteAll(t *testing.T) { + if isSlimMode() { + t.Skip("skipping scheduler tests in slim mode") + } + + cmdUninstall() + ensureDaprInstallation(t) + t.Cleanup(func() { + must(t, cmdUninstall, "failed to uninstall Dapr") + }) + + runFilePath := "../testdata/run-template-files/test-scheduler.yaml" + t.Cleanup(func() { + cmdStopWithAppID("test-scheduler") + waitAppsToBeStopped() + }) + args := []string{"-f", runFilePath} + + go func() { + o, err := cmdRun("", args...) + t.Log(o) + t.Log(err) + }() + + require.EventuallyWithT(t, func(c *assert.CollectT) { + output, err := cmdSchedulerList() + require.NoError(t, err) + assert.Len(c, strings.Split(output, "\n"), 10) + }, time.Second*30, time.Millisecond*10) + + _, err := cmdSchedulerDeleteAll("app/test-scheduler") + require.NoError(t, err) + output, err := cmdSchedulerList() + require.NoError(t, err) + assert.Len(t, strings.Split(output, "\n"), 8) + + _, err = cmdSchedulerDeleteAll("workflow/test-scheduler/abc1") + require.NoError(t, err) + output, err = cmdSchedulerList() + require.NoError(t, err) + assert.Len(t, strings.Split(output, "\n"), 7) + + _, err = cmdSchedulerDeleteAll("workflow/test-scheduler") + require.NoError(t, err) + output, err = cmdSchedulerList() + require.NoError(t, err) + assert.Len(t, strings.Split(output, "\n"), 4) + + _, err = cmdSchedulerDeleteAll("actor/myactortype/actorid1") + require.NoError(t, err) + output, err = cmdSchedulerList() + require.NoError(t, err) + assert.Len(t, strings.Split(output, "\n"), 3) + + _, err = 
cmdSchedulerDeleteAll("actor/myactortype") + require.NoError(t, err) + output, err = cmdSchedulerList() + require.NoError(t, err) + assert.Len(t, strings.Split(output, "\n"), 2) +} + +func TestSchedulerExportImport(t *testing.T) { + if isSlimMode() { + t.Skip("skipping scheduler tests in slim mode") + } + + cmdUninstall() + ensureDaprInstallation(t) + t.Cleanup(func() { + must(t, cmdUninstall, "failed to uninstall Dapr") + }) + + runFilePath := "../testdata/run-template-files/test-scheduler.yaml" + t.Cleanup(func() { + cmdStopWithAppID("test-scheduler") + waitAppsToBeStopped() + }) + args := []string{"-f", runFilePath} + + go func() { + o, err := cmdRun("", args...) + t.Log(o) + t.Log(err) + }() + + require.EventuallyWithT(t, func(c *assert.CollectT) { + output, err := cmdSchedulerList() + require.NoError(t, err) + assert.Len(c, strings.Split(output, "\n"), 10) + }, time.Second*30, time.Millisecond*10) + + f := filepath.Join(t.TempDir(), "foo") + _, err := cmdSchedulerExport("-o", f) + require.NoError(t, err) + + _, err = cmdSchedulerDeleteAll("all") + require.NoError(t, err) + output, err := cmdSchedulerList() + require.NoError(t, err) + assert.Len(t, strings.Split(output, "\n"), 2) + + _, err = cmdSchedulerImport("-f", f) + require.NoError(t, err) + output, err = cmdSchedulerList() + require.NoError(t, err) + assert.Len(t, strings.Split(output, "\n"), 10) +} diff --git a/tests/e2e/standalone/workflow_test.go b/tests/e2e/standalone/workflow_test.go new file mode 100644 index 000000000..471a18a60 --- /dev/null +++ b/tests/e2e/standalone/workflow_test.go @@ -0,0 +1,731 @@ +//go:build !windows && (e2e || template) + +/* +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package standalone_test + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + redisConnString = "--connection-string=redis://127.0.0.1:6379" +) + +func TestWorkflowList(t *testing.T) { + if isSlimMode() { + t.Skip("skipping workflow tests in slim mode") + } + + cmdUninstall() + ensureDaprInstallation(t) + t.Cleanup(func() { + must(t, cmdUninstall, "failed to uninstall Dapr") + }) + + runFilePath := "../testdata/run-template-files/test-workflow.yaml" + appID := "test-workflow" + t.Cleanup(func() { + cmdStopWithAppID(appID) + waitAppsToBeStopped() + }) + args := []string{"-f", runFilePath} + + go func() { + o, _ := cmdRun("", args...) 
+ t.Log(o) + }() + + time.Sleep(time.Second * 5) + output, err := cmdWorkflowList(appID, redisConnString) + require.NoError(t, err) + assert.Equal(t, `❌ No workflow found in namespace "default" for app ID "test-workflow" +`, output) + + _, err = cmdWorkflowRun(appID, "LongWorkflow", "--instance-id=foo") + require.NoError(t, err, output) + + t.Run("terminate workflow", func(t *testing.T) { + output, err := cmdWorkflowTerminate(appID, "foo") + require.NoError(t, err) + assert.Contains(t, output, "terminated successfully") + }) + + t.Run("verify terminated state", func(t *testing.T) { + output, err := cmdWorkflowList(appID, redisConnString, "-o", "json") + require.NoError(t, err, output) + + var list []map[string]interface{} + require.NoError(t, json.Unmarshal([]byte(output), &list)) + + found := false + for _, item := range list { + if item["instanceID"] == "foo" { + assert.Equal(t, "TERMINATED", item["runtimeStatus"]) + found = true + break + } + } + assert.True(t, found, "Workflow instance not found") + }) +} + +func TestWorkflowRaiseEvent(t *testing.T) { + if isSlimMode() { + t.Skip("skipping workflow tests in slim mode") + } + + cmdUninstall() + ensureDaprInstallation(t) + t.Cleanup(func() { + must(t, cmdUninstall, "failed to uninstall Dapr") + }) + + runFilePath := "../testdata/run-template-files/test-workflow.yaml" + appID := "test-workflow" + t.Cleanup(func() { + cmdStopWithAppID(appID) + waitAppsToBeStopped() + }) + args := []string{"-f", runFilePath} + + go func() { + o, _ := cmdRun("", args...) 
+ t.Log(o) + }() + + time.Sleep(time.Second * 5) + output, err := cmdWorkflowRun(appID, "EventWorkflow", "--instance-id=foo") + require.NoError(t, err, output) + + t.Run("raise event", func(t *testing.T) { + output, err := cmdWorkflowRaiseEvent(appID, "foo/test-event") + require.NoError(t, err) + assert.Contains(t, output, "raised event") + assert.Contains(t, output, "successfully") + + time.Sleep(time.Second) + + output, err = cmdWorkflowList(appID, redisConnString, "-o", "json") + require.NoError(t, err, output) + + var list []map[string]interface{} + require.NoError(t, json.Unmarshal([]byte(output), &list)) + + found := false + for _, item := range list { + if item["instanceID"] == "foo" { + assert.Equal(t, "COMPLETED", item["runtimeStatus"]) + found = true + break + } + } + assert.True(t, found, "Workflow instance not found") + }) + + t.Run("raise event with input", func(t *testing.T) { + output, err := cmdWorkflowRun(appID, "EventWorkflow", "--instance-id=bar") + require.NoError(t, err) + + input := `{"eventData": "test data", "value": 456}` + output, err = cmdWorkflowRaiseEvent(appID, "bar/test-event", "--input", input) + require.NoError(t, err) + assert.Contains(t, output, "raised event") + + time.Sleep(time.Second) + + output, err = cmdWorkflowList(appID, redisConnString, "-o", "json") + require.NoError(t, err, output) + + var list []map[string]interface{} + require.NoError(t, json.Unmarshal([]byte(output), &list)) + + found := false + for _, item := range list { + if item["instanceID"] == "bar" { + assert.Equal(t, "COMPLETED", item["runtimeStatus"]) + found = true + break + } + } + assert.True(t, found, "Workflow instance not found") + }) +} + +func TestWorkflowReRun(t *testing.T) { + if isSlimMode() { + t.Skip("skipping workflow tests in slim mode") + } + + cmdUninstall() + ensureDaprInstallation(t) + t.Cleanup(func() { + must(t, cmdUninstall, "failed to uninstall Dapr") + }) + + runFilePath := "../testdata/run-template-files/test-workflow.yaml" + appID 
:= "test-workflow" + t.Cleanup(func() { + cmdStopWithAppID(appID) + waitAppsToBeStopped() + }) + args := []string{"-f", runFilePath} + + go func() { + o, _ := cmdRun("", args...) + t.Log(o) + }() + + time.Sleep(time.Second * 5) + + output, err := cmdWorkflowRun(appID, "SimpleWorkflow", "--instance-id=foo") + require.NoError(t, err, output) + + time.Sleep(3 * time.Second) + + t.Run("rerun from beginning", func(t *testing.T) { + output, err := cmdWorkflowReRun(appID, "foo") + require.NoError(t, err) + assert.Contains(t, output, "Rerunning workflow instance") + }) + + t.Run("rerun with new instance ID", func(t *testing.T) { + output, err := cmdWorkflowReRun(appID, "foo", "--new-instance-id", "bar") + require.NoError(t, err) + assert.Contains(t, output, "bar") + }) + + t.Run("rerun from specific event", func(t *testing.T) { + output, err := cmdWorkflowReRun(appID, "foo", "-e", "1") + require.NoError(t, err) + assert.Contains(t, output, "Rerunning workflow instance") + }) + + t.Run("rerun with new input", func(t *testing.T) { + input := `{"rerun": true, "data": "new input"}` + output, err := cmdWorkflowReRun(appID, "foo", "--input", input) + require.NoError(t, err) + assert.Contains(t, output, "Rerunning workflow instance") + }) +} + +func TestWorkflowPurge(t *testing.T) { + if isSlimMode() { + t.Skip("skipping workflow tests in slim mode") + } + + cmdUninstall() + ensureDaprInstallation(t) + t.Cleanup(func() { + must(t, cmdUninstall, "failed to uninstall Dapr") + }) + + runFilePath := "../testdata/run-template-files/test-workflow.yaml" + appID := "test-workflow" + t.Cleanup(func() { + cmdStopWithAppID(appID) + waitAppsToBeStopped() + }) + args := []string{"-f", runFilePath} + + go func() { + o, _ := cmdRun("", args...) 
+ t.Log(o) + }() + + time.Sleep(5 * time.Second) + + for i := 0; i < 3; i++ { + output, err := cmdWorkflowRun(appID, "SimpleWorkflow", + "--instance-id=purge-test-"+strconv.Itoa(i)) + require.NoError(t, err, output) + } + + time.Sleep(5 * time.Second) + + _, err := cmdWorkflowTerminate(appID, "purge-test-0") + require.NoError(t, err) + + t.Run("purge single instance", func(t *testing.T) { + output, err := cmdWorkflowPurge(appID, "purge-test-0") + require.NoError(t, err) + assert.Contains(t, output, "Purged") + + output, err = cmdWorkflowList(appID, "-o", "json", redisConnString) + require.NoError(t, err) + assert.NotContains(t, output, "purge-test-0") + }) + + t.Run("purge multiple instances", func(t *testing.T) { + _, _ = cmdWorkflowTerminate(appID, "purge-test-1") + _, _ = cmdWorkflowTerminate(appID, "purge-test-2") + time.Sleep(1 * time.Second) + + output, err := cmdWorkflowPurge(appID, "purge-test-1", "purge-test-2") + require.NoError(t, err) + assert.Contains(t, output, "Purged") + }) + + t.Run("purge all terminal", func(t *testing.T) { + for i := 0; i < 2; i++ { + output, err := cmdWorkflowRun(appID, "SimpleWorkflow", + "--instance-id=purge-all-"+strconv.Itoa(i)) + require.NoError(t, err, output) + _, _ = cmdWorkflowTerminate(appID, "purge-all-"+strconv.Itoa(i)) + } + + output, err := cmdWorkflowPurge(appID, redisConnString, "--all") + require.NoError(t, err, output) + assert.Contains(t, output, `Purged workflow instance "purge-all-1"`) + assert.Contains(t, output, `Purged workflow instance "purge-all-0"`) + + output, err = cmdWorkflowList(appID, "-o", "json", redisConnString) + require.NoError(t, err) + assert.NotContains(t, output, "purge-all-0") + assert.NotContains(t, output, "purge-all-1") + }) + + t.Run("purge older than duration", func(t *testing.T) { + output, err := cmdWorkflowRun(appID, "SimpleWorkflow", + "--instance-id=purge-older") + require.NoError(t, err) + + time.Sleep(5 * time.Second) + + output, err = cmdWorkflowPurge(appID, redisConnString, 
"--all-older-than", "1s") + require.NoError(t, err, output) + assert.Contains(t, output, "Purging 1 workflow instance(s)") + assert.Contains(t, output, `Purged workflow instance "purge-older"`) + + output, err = cmdWorkflowList(appID, "-o", "json", redisConnString) + require.NoError(t, err, output) + assert.NotContains(t, output, "purge-older") + }) + + t.Run("also purge scheduler", func(t *testing.T) { + output, err := cmdWorkflowRun(appID, "EventWorkflow", + "--instance-id=also-sched") + require.NoError(t, err) + + output, err = cmdWorkflowTerminate(appID, "also-sched") + require.NoError(t, err, output) + + output, err = cmdSchedulerList() + require.NoError(t, err) + assert.Greater(t, len(strings.Split(output, "\n")), 2) + + output, err = cmdWorkflowPurge(appID, "also-sched") + require.NoError(t, err, output) + + output, err = cmdSchedulerList() + require.NoError(t, err) + assert.Len(t, strings.Split(output, "\n"), 2) + }) +} + +func TestWorkflowFilters(t *testing.T) { + if isSlimMode() { + t.Skip("skipping workflow tests in slim mode") + } + + cmdUninstall() + ensureDaprInstallation(t) + t.Cleanup(func() { + must(t, cmdUninstall, "failed to uninstall Dapr") + }) + + runFilePath := "../testdata/run-template-files/test-workflow.yaml" + appID := "test-workflow" + t.Cleanup(func() { + cmdStopWithAppID(appID) + waitAppsToBeStopped() + }) + args := []string{"-f", runFilePath} + + go func() { + o, _ := cmdRun("", args...) 
+ t.Log(o) + }() + + time.Sleep(5 * time.Second) + + _, _ = cmdWorkflowRun(appID, "SimpleWorkflow", "--instance-id=simple-1") + _, _ = cmdWorkflowRun(appID, "LongWorkflow", "--instance-id=long-1") + output, err := cmdWorkflowRun(appID, "EventWorkflow", "--instance-id=suspend-test") + require.NoError(t, err, output) + + time.Sleep(2 * time.Second) + _, _ = cmdWorkflowSuspend(appID, "suspend-test") + + t.Run("filter by status", func(t *testing.T) { + output, err := cmdWorkflowList(appID, redisConnString, "--filter-status", "SUSPENDED") + require.NoError(t, err) + assert.Contains(t, output, "suspend-test") + assert.NotContains(t, output, "simple-1") + assert.NotContains(t, output, "long-1") + }) + + t.Run("filter by name", func(t *testing.T) { + output, err := cmdWorkflowList(appID, redisConnString, "--filter-name", "SimpleWorkflow") + require.NoError(t, err) + lines := strings.Split(output, "\n") + + for i, line := range lines { + if i == 0 || strings.TrimSpace(line) == "" { + continue + } + assert.Contains(t, line, "SimpleWorkflow") + } + }) + + t.Run("filter by max age", func(t *testing.T) { + output, err := cmdWorkflowList(appID, redisConnString, "--filter-max-age", "10s") + require.NoError(t, err) + assert.NotEmpty(t, output) + + output, err = cmdWorkflowList(appID, redisConnString, "--filter-max-age", "0s") + require.NoError(t, err) + lines := strings.Split(output, "\n") + assert.LessOrEqual(t, len(lines), 2) + }) +} + +func TestWorkflowChildCalls(t *testing.T) { + if isSlimMode() { + t.Skip("skipping workflow tests in slim mode") + } + + cmdUninstall() + ensureDaprInstallation(t) + t.Cleanup(func() { + must(t, cmdUninstall, "failed to uninstall Dapr") + }) + + runFilePath := "../testdata/run-template-files/test-workflow.yaml" + appID := "test-workflow" + t.Cleanup(func() { + cmdStopWithAppID(appID) + waitAppsToBeStopped() + }) + args := []string{"-f", runFilePath} + + go func() { + o, _ := cmdRun("", args...) 
+ t.Log(o) + }() + + time.Sleep(5 * time.Second) + + t.Run("parent child workflow", func(t *testing.T) { + input := `{"test": "parent-child", "value": 42}` + output, err := cmdWorkflowRun(appID, "ParentWorkflow", "--input", input, "--instance-id=parent-1") + require.NoError(t, err, output) + + time.Sleep(5 * time.Second) + + output, err = cmdWorkflowList(appID, redisConnString, "-o", "json") + require.NoError(t, err) + + var list []map[string]interface{} + require.NoError(t, json.Unmarshal([]byte(output), &list)) + + var parentFound bool + var childCount int + for _, item := range list { + if item["instanceID"] == "parent-1" { + parentFound = true + assert.Equal(t, "ParentWorkflow", item["name"]) + } + if name, ok := item["name"].(string); ok && name == "ChildWorkflow" { + childCount++ + } + } + assert.True(t, parentFound, "Parent workflow not found") + assert.GreaterOrEqual(t, childCount, 2, "Expected at least 2 child workflows") + }) + + t.Run("nested child workflows", func(t *testing.T) { + output, err := cmdWorkflowRun(appID, "NestedParentWorkflow", "--instance-id=nested-parent") + require.NoError(t, err) + + time.Sleep(6 * time.Second) + + output, err = cmdWorkflowList(appID, redisConnString, "-o", "json") + require.NoError(t, err) + + var list []map[string]interface{} + require.NoError(t, json.Unmarshal([]byte(output), &list)) + + var recursiveCount int + for _, item := range list { + if name, ok := item["name"].(string); ok && name == "RecursiveChildWorkflow" { + recursiveCount++ + } + } + assert.GreaterOrEqual(t, recursiveCount, 2, "Expected multiple recursive child workflows") + }) + + t.Run("fan out workflow", func(t *testing.T) { + parallelCount := 5 + input := fmt.Sprintf(`{"parallelCount": %d, "data": {"test": "fanout"}}`, parallelCount) + output, err := cmdWorkflowRun(appID, "FanOutWorkflow", "--input", input, "--instance-id=fanout-1") + require.NoError(t, err) + + time.Sleep(5 * time.Second) + + output, err = cmdWorkflowList(appID, redisConnString, 
"-o", "json") + require.NoError(t, err) + + var list []map[string]interface{} + require.NoError(t, json.Unmarshal([]byte(output), &list)) + + var fanOutChildren int + for _, item := range list { + if name, ok := item["name"].(string); ok && name == "ChildWorkflow" { + fanOutChildren++ + } + } + assert.GreaterOrEqual(t, fanOutChildren, parallelCount, "Expected at least %d child workflows from fan-out", parallelCount) + }) + + t.Run("child workflow failure handling", func(t *testing.T) { + output, err := cmdWorkflowRun(appID, "ParentWorkflow", "--input", `{"fail": true}`, "--instance-id=parent-1") + require.NoError(t, err, output) + + time.Sleep(5 * time.Second) + + output, err = cmdWorkflowList(appID, redisConnString, "-o", "json") + require.NoError(t, err) + + var list []map[string]interface{} + require.NoError(t, json.Unmarshal([]byte(output), &list)) + + for _, item := range list { + if item["instanceID"] == "parent-1" { + status := item["runtimeStatus"].(string) + assert.Contains(t, []string{"COMPLETED", "FAILED"}, status) + break + } + } + }) +} + +func TestWorkflowHistory(t *testing.T) { + if isSlimMode() { + t.Skip("skipping workflow tests in slim mode") + } + + cmdUninstall() + ensureDaprInstallation(t) + t.Cleanup(func() { + must(t, cmdUninstall, "failed to uninstall Dapr") + }) + + runFilePath := "../testdata/run-template-files/test-workflow.yaml" + appID := "test-workflow" + t.Cleanup(func() { + cmdStopWithAppID(appID) + waitAppsToBeStopped() + }) + args := []string{"-f", runFilePath} + + go func() { + o, _ := cmdRun("", args...) 
+ t.Log(o) + }() + + // Wait and create a workflow + time.Sleep(5 * time.Second) + output, err := cmdWorkflowRun(appID, "SimpleWorkflow", "--instance-id=history-test") + require.NoError(t, err, output) + + // Wait for workflow to have some history + time.Sleep(2 * time.Second) + + t.Run("get history", func(t *testing.T) { + output, err := cmdWorkflowHistory(appID, "history-test") + require.NoError(t, err) + lines := strings.Split(output, "\n") + + // Should have headers and at least one history entry + assert.GreaterOrEqual(t, len(lines), 2) + + headers := strings.Fields(lines[0]) + assert.Contains(t, headers, "TYPE") + assert.Contains(t, headers, "ELAPSED") + }) + + t.Run("get history json", func(t *testing.T) { + output, err := cmdWorkflowHistory(appID, "history-test", "-o", "json") + require.NoError(t, err) + + var history []map[string]interface{} + require.NoError(t, json.Unmarshal([]byte(output), &history)) + assert.GreaterOrEqual(t, len(history), 1) + }) +} + +func TestWorkflowSuspendResume(t *testing.T) { + if isSlimMode() { + t.Skip("skipping workflow tests in slim mode") + } + + cmdUninstall() + ensureDaprInstallation(t) + t.Cleanup(func() { + must(t, cmdUninstall, "failed to uninstall Dapr") + }) + + runFilePath := "../testdata/run-template-files/test-workflow.yaml" + appID := "test-workflow" + t.Cleanup(func() { + cmdStopWithAppID(appID) + waitAppsToBeStopped() + }) + args := []string{"-f", runFilePath} + + go func() { + o, _ := cmdRun("", args...) 
+ t.Log(o) + }() + + // Wait and create a long-running workflow + time.Sleep(5 * time.Second) + output, err := cmdWorkflowRun(appID, "LongWorkflow", "--instance-id=suspend-resume-test") + require.NoError(t, err, output) + + t.Run("suspend workflow", func(t *testing.T) { + output, err := cmdWorkflowSuspend(appID, "suspend-resume-test") + require.NoError(t, err, output) + assert.Contains(t, output, "Workflow 'suspend-resume-test' suspended successfully") + }) + + t.Run("verify suspended state", func(t *testing.T) { + output, err := cmdWorkflowList(appID, redisConnString, "-o", "json") + require.NoError(t, err, output) + + var list []map[string]interface{} + require.NoError(t, json.Unmarshal([]byte(output), &list)) + + found := false + for _, item := range list { + if item["instanceID"] == "suspend-resume-test" { + assert.Equal(t, "SUSPENDED", item["runtimeStatus"]) + found = true + break + } + } + assert.True(t, found, "Workflow instance not found") + }) + + t.Run("resume workflow", func(t *testing.T) { + output, err := cmdWorkflowResume(appID, "suspend-resume-test") + require.NoError(t, err) + assert.Contains(t, output, "Workflow 'suspend-resume-test' resumed successfully") + }) + + t.Run("verify resumed state", func(t *testing.T) { + time.Sleep(1 * time.Second) + output, err := cmdWorkflowList(appID, redisConnString, "-o", "json") + require.NoError(t, err) + + var list []map[string]interface{} + require.NoError(t, json.Unmarshal([]byte(output), &list)) + + found := false + for _, item := range list { + if item["instanceID"] == "suspend-resume-test" { + assert.NotEqual(t, "SUSPENDED", item["runtimeStatus"]) + found = true + break + } + } + assert.True(t, found, "Workflow instance not found") + }) +} + +func TestWorkflowTerminate(t *testing.T) { + if isSlimMode() { + t.Skip("skipping workflow tests in slim mode") + } + + cmdUninstall() + ensureDaprInstallation(t) + t.Cleanup(func() { + must(t, cmdUninstall, "failed to uninstall Dapr") + }) + + runFilePath := 
"../testdata/run-template-files/test-workflow.yaml" + appID := "test-workflow" + t.Cleanup(func() { + cmdStopWithAppID(appID) + waitAppsToBeStopped() + }) + args := []string{"-f", runFilePath} + + go func() { + o, _ := cmdRun("", args...) + t.Log(o) + }() + + // Wait and create a workflow for testing + time.Sleep(5 * time.Second) + output, err := cmdWorkflowRun(appID, "LongWorkflow", "--instance-id=terminate-test") + require.NoError(t, err, output) + + t.Run("terminate workflow", func(t *testing.T) { + output, err := cmdWorkflowTerminate(appID, "terminate-test") + require.NoError(t, err) + assert.Contains(t, output, "terminated successfully") + }) + + t.Run("verify terminated state", func(t *testing.T) { + output, err := cmdWorkflowList(appID, redisConnString, "-o", "json") + require.NoError(t, err) + + var list []map[string]interface{} + require.NoError(t, json.Unmarshal([]byte(output), &list)) + + found := false + for _, item := range list { + if item["instanceID"] == "terminate-test" { + assert.Equal(t, "TERMINATED", item["runtimeStatus"]) + found = true + break + } + } + assert.True(t, found, "Workflow instance not found") + }) + + t.Run("terminate with output", func(t *testing.T) { + // Create another workflow + output, err := cmdWorkflowRun(appID, "LongWorkflow", "--instance-id=terminate-output-test") + require.NoError(t, err, output) + + outputData := `{"reason": "test termination", "code": 123}` + output, err = cmdWorkflowTerminate(appID, "terminate-output-test", "-o", outputData) + require.NoError(t, err) + assert.Contains(t, output, "terminated successfully") + }) +} diff --git a/tests/e2e/testdata/run-template-files/test-scheduler.yaml b/tests/e2e/testdata/run-template-files/test-scheduler.yaml new file mode 100644 index 000000000..d68bf746f --- /dev/null +++ b/tests/e2e/testdata/run-template-files/test-scheduler.yaml @@ -0,0 +1,11 @@ +version: 1 +apps: +- appID: test-scheduler + appDirPath: ../../../apps/scheduler/ + appPort: 9095 + daprGRPCPort: 3510 + 
command: ["go", "run", "app.go"] + appLogDestination: console + daprdLogDestination: console + schedulerHostAddress: 127.0.0.1:50006 + placementHostAddress: 127.0.0.1:50005 diff --git a/tests/e2e/testdata/run-template-files/test-workflow.yaml b/tests/e2e/testdata/run-template-files/test-workflow.yaml new file mode 100644 index 000000000..ff4ccd3bb --- /dev/null +++ b/tests/e2e/testdata/run-template-files/test-workflow.yaml @@ -0,0 +1,10 @@ +version: 1 +apps: +- appID: test-workflow + appDirPath: ../../../apps/workflow/ + command: ["go", "run", "app.go"] + appLogDestination: console + daprdLogDestination: console + daprGRPCPort: 3510 + schedulerHostAddress: 127.0.0.1:50006 + placementHostAddress: 127.0.0.1:50005 diff --git a/tests/e2e/upgrade/upgrade_test.go b/tests/e2e/upgrade/upgrade_test.go index 585eb1480..cf6a4ebab 100644 --- a/tests/e2e/upgrade/upgrade_test.go +++ b/tests/e2e/upgrade/upgrade_test.go @@ -30,7 +30,7 @@ type upgradePath struct { } const ( - latestRuntimeVersion = "1.16.0-rc.8" + latestRuntimeVersion = "1.16.2" latestRuntimeVersionMinusOne = "1.15.11" latestRuntimeVersionMinusTwo = "1.14.5" dashboardVersion = "0.15.0" diff --git a/utils/utils.go b/utils/utils.go index 483bf85f7..343f6f8a0 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -15,7 +15,9 @@ package utils import ( "bufio" + "bytes" "context" + "encoding/csv" "encoding/json" "errors" "fmt" @@ -92,30 +94,92 @@ func PrintTable(csvContent string) { // WriteTable writes the csv table to writer. 
func WriteTable(writer io.Writer, csvContent string) { - table := tablewriter.NewWriter(writer) + var output bytes.Buffer + + table := tablewriter.NewWriter(&output) table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) - table.SetBorder(false) table.SetHeaderLine(false) - table.SetRowLine(false) - table.SetCenterSeparator("") + table.SetBorders(tablewriter.Border{Top: false, Bottom: false}) + table.SetTablePadding("") table.SetRowSeparator("") table.SetColumnSeparator("") table.SetAlignment(tablewriter.ALIGN_LEFT) - scanner := bufio.NewScanner(strings.NewReader(csvContent)) - header := true + table.SetAutoWrapText(false) + + r := csv.NewReader(strings.NewReader(csvContent)) + r.FieldsPerRecord = -1 + + var header []string + var rows [][]string + first := true + + for { + rec, err := r.Read() + if err == io.EOF { + break + } + if err != nil { + continue + } + for i := range rec { + rec[i] = sanitizeCell(rec[i]) + } + + if first { + header = rec + first = false + continue + } + rows = append(rows, rec) + } + + if len(header) == 0 { + return + } + + // Pad rows to header len (so indexing is safe) + for i := range rows { + if len(rows[i]) < len(header) { + pad := make([]string, len(header)-len(rows[i])) + rows[i] = append(rows[i], pad...) 
+ } + } + + var keepIdx []int + for c := range header { + if !allBlank(c, rows) { + keepIdx = append(keepIdx, c) + } + } - for scanner.Scan() { - text := strings.Split(scanner.Text(), ",") + if len(keepIdx) == 0 { + for i := range header { + keepIdx = append(keepIdx, i) + } + } - if header { - table.SetHeader(text) - header = false - } else { - table.Append(text) + filter := func(src []string) []string { + dst := make([]string, len(keepIdx)) + for i, c := range keepIdx { + if c < len(src) { + dst[i] = src[c] + } } + return dst + } + + table.SetHeader(filter(header)) + for _, rrow := range rows { + table.Append(filter(rrow)) } table.Render() + + sc := bufio.NewScanner(&output) + for sc.Scan() { + writer.Write(bytes.TrimLeft(sc.Bytes(), " ")) + writer.Write([]byte("\n")) + } } func TruncateString(str string, maxLength int) string { @@ -430,3 +494,44 @@ func AttachJobObjectToProcess(pid string, proc *os.Process) { func GetJobObjectNameFromPID(pid string) string { return pid + "-" + windowsDaprAppProcJobName } + +func HumanizeDuration(d time.Duration) string { + if d == 0 { + return "" + } + + if d < 0 { + d = -d + } + switch { + case d < time.Microsecond: + return fmt.Sprintf("%dns", d.Nanoseconds()) + case d < time.Millisecond: + return fmt.Sprintf("%.1fµs", float64(d)/1e3) + case d < time.Second: + return fmt.Sprintf("%.1fms", float64(d)/1e6) + case d < time.Minute: + return fmt.Sprintf("%.2fs", d.Seconds()) + case d < time.Hour: + return fmt.Sprintf("%.1fm", d.Minutes()) + default: + return fmt.Sprintf("%.1fh", d.Hours()) + } +} + +func sanitizeCell(s string) string { + s = strings.ReplaceAll(s, "\r\n", " ") + s = strings.ReplaceAll(s, "\n", " ") + s = strings.ReplaceAll(s, "\r", " ") + s = strings.TrimSpace(strings.Join(strings.Fields(s), " ")) + return s +} + +func allBlank(col int, rows [][]string) bool { + for _, r := range rows { + if col < len(r) && strings.TrimSpace(r[col]) != "" { + return false + } + } + return true +}