From b0df8d899f1922745e8524228e4a00bac90169b7 Mon Sep 17 00:00:00 2001 From: Tim Li Date: Thu, 5 Jun 2025 12:21:35 -0700 Subject: [PATCH 1/8] Introduce CronOverlapPolicy in StartWorkflowOptions from cadence-idl --- .gen/go/cadence/cadence.go | 587 +++- .../cadence/workflowserviceclient/client.go | 29 + .../cadence/workflowserviceserver/server.go | 48 +- .gen/go/cadence/workflowservicetest/client.go | 31 + .gen/go/shared/shared.go | 2870 ++++++++++++++++- go.mod | 2 +- go.sum | 4 +- idls | 2 +- internal/client.go | 7 + internal/common/convert.go | 5 + internal/compatibility/enum_test.go | 11 + internal/compatibility/proto/enum.go | 13 + internal/compatibility/thrift/enum.go | 12 + internal/internal_workflow_client.go | 2 + 14 files changed, 3528 insertions(+), 95 deletions(-) diff --git a/.gen/go/cadence/cadence.go b/.gen/go/cadence/cadence.go index 8fcdce667..8f68d3ed7 100644 --- a/.gen/go/cadence/cadence.go +++ b/.gen/go/cadence/cadence.go @@ -21,14 +21,14 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "cadence", Package: "go.uber.org/cadence/.gen/go/cadence", FilePath: "cadence.thrift", - SHA1: "46920c6a1869dbda8563382e770b39a9313e79cd", + SHA1: "a0c3c0c106c1d7399612863e5da37eed3c561f4b", Includes: []*thriftreflect.ThriftModule{ shared.ThriftModule, }, Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\ninclude \"shared.thrift\"\n\nnamespace java com.uber.cadence\n\n/**\n* WorkflowService API is exposed to provide support for long running applications. Application is expected to call\n* StartWorkflowExecution to create an instance for each instance of long running workflow. Such applications are expected\n* to have a worker which regularly polls for DecisionTask and ActivityTask from the WorkflowService. For each\n* DecisionTask, application is expected to process the history of events for that session and respond back with next\n* decisions. For each ActivityTask, application is expected to execute the actual logic for that task and respond back\n* with completion or failure. Worker is expected to regularly heartbeat while activity task is running.\n**/\nservice WorkflowService {\n /**\n * RegisterDomain creates a new domain which can be used as a container for all resources. Domain is a top level\n * entity within Cadence, used as a container for all resources like workflow executions, tasklists, etc. 
Domain\n * acts as a sandbox and provides isolation for all resources within the domain. All resources belongs to exactly one\n * domain.\n **/\n void RegisterDomain(1: shared.RegisterDomainRequest registerRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.DomainAlreadyExistsError domainExistsError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DescribeDomain returns the information and configuration for a registered domain.\n **/\n shared.DescribeDomainResponse DescribeDomain(1: shared.DescribeDomainRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListDomains returns the information and configuration for all domains.\n **/\n shared.ListDomainsResponse ListDomains(1: shared.ListDomainsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * UpdateDomain is used to update the information and configuration for a registered domain.\n **/\n shared.UpdateDomainResponse UpdateDomain(1: shared.UpdateDomainRequest updateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DeprecateDomain us used to update status of a registered domain to DEPRECATED. Once the domain is deprecated\n * it cannot be used to start new workflow executions. 
Existing workflow executions will continue to run on\n * deprecated domains.\n **/\n void DeprecateDomain(1: shared.DeprecateDomainRequest deprecateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RestartWorkflowExecution restarts a previous workflow\n * If the workflow is currently running it will terminate and restart\n **/\n shared.RestartWorkflowExecutionResponse RestartWorkflowExecution(1: shared.RestartWorkflowExecutionRequest restartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.DomainNotActiveError domainNotActiveError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DiagnoseWorkflowExecution diagnoses a previous workflow execution\n **/\n shared.DiagnoseWorkflowExecutionResponse DiagnoseWorkflowExecution(1: shared.DiagnoseWorkflowExecutionRequest diagnoseRequest)\n throws (\n 1: shared.DomainNotActiveError domainNotActiveError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 5: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * StartWorkflowExecution starts a new long running workflow instance. It will create the instance with\n * 'WorkflowExecutionStarted' event in history and also schedule the first DecisionTask for the worker to make the\n * first decision for this instance. It will return 'WorkflowExecutionAlreadyStartedError', if an instance already\n * exists with same workflowId.\n **/\n shared.StartWorkflowExecutionResponse StartWorkflowExecution(1: shared.StartWorkflowExecutionRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n /**\n * StartWorkflowExecutionAsync starts a new long running workflow instance asynchronously. It will push a StartWorkflowExecutionRequest to a queue\n * and immediately return a response. 
The request will be processed by a separate consumer eventually.\n **/\n shared.StartWorkflowExecutionAsyncResponse StartWorkflowExecutionAsync(1: shared.StartWorkflowExecutionAsyncRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n /**\n * Returns the history of specified workflow execution. It fails with 'EntityNotExistError' if speficied workflow\n * execution in unknown to the service.\n **/\n shared.GetWorkflowExecutionHistoryResponse GetWorkflowExecutionHistory(1: shared.GetWorkflowExecutionHistoryRequest getRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * PollForDecisionTask is called by application worker to process DecisionTask from a specific taskList. A\n * DecisionTask is dispatched to callers for active workflow executions, with pending decisions.\n * Application is then expected to call 'RespondDecisionTaskCompleted' API when it is done processing the DecisionTask.\n * It will also create a 'DecisionTaskStarted' event in the history for that session before handing off DecisionTask to\n * application worker.\n **/\n shared.PollForDecisionTaskResponse PollForDecisionTask(1: shared.PollForDecisionTaskRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondDecisionTaskCompleted is called by application worker to complete a DecisionTask handed as a result of\n * 'PollForDecisionTask' API call. Completing a DecisionTask will result in new events for the workflow execution and\n * potentially new ActivityTask being created for corresponding decisions. It will also create a DecisionTaskCompleted\n * event in the history for that session. 
Use the 'taskToken' provided as response of PollForDecisionTask API call\n * for completing the DecisionTask.\n * The response could contain a new decision task if there is one or if the request asking for one.\n **/\n shared.RespondDecisionTaskCompletedResponse RespondDecisionTaskCompleted(1: shared.RespondDecisionTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondDecisionTaskFailed is called by application worker to indicate failure. This results in\n * DecisionTaskFailedEvent written to the history and a new DecisionTask created. This API can be used by client to\n * either clear sticky tasklist or report any panics during DecisionTask processing. Cadence will only append first\n * DecisionTaskFailed event to the history of workflow execution for consecutive failures.\n **/\n void RespondDecisionTaskFailed(1: shared.RespondDecisionTaskFailedRequest failedRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * PollForActivityTask is called by application worker to process ActivityTask from a specific taskList. ActivityTask\n * is dispatched to callers whenever a ScheduleTask decision is made for a workflow execution.\n * Application is expected to call 'RespondActivityTaskCompleted' or 'RespondActivityTaskFailed' once it is done\n * processing the task.\n * Application also needs to call 'RecordActivityTaskHeartbeat' API within 'heartbeatTimeoutSeconds' interval to\n * prevent the task from getting timed out. An event 'ActivityTaskStarted' event is also written to workflow execution\n * history before the ActivityTask is dispatched to application worker.\n **/\n shared.PollForActivityTaskResponse PollForActivityTask(1: shared.PollForActivityTaskRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will\n * fail with 'EntityNotExistsError' in such situations. 
Use the 'taskToken' provided as response of\n * PollForActivityTask API call for heartbeating.\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeat(1: shared.RecordActivityTaskHeartbeatRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RecordActivityTaskHeartbeatByID is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeatByID' will\n * fail with 'EntityNotExistsError' in such situations. Instead of using 'taskToken' like in RecordActivityTaskHeartbeat,\n * use Domain, WorkflowID and ActivityID\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeatByID(1: shared.RecordActivityTaskHeartbeatByIDRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCompleted(1: shared.RespondActivityTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCompletedByID is called by application worker when it is done processing an ActivityTask.\n * It will result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Similar to RespondActivityTaskCompleted but use Domain,\n * WorkflowID and ActivityID instead of 'taskToken' for completion. 
It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskCompletedByID(1: shared.RespondActivityTaskCompletedByIDRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskFailed(1: shared.RespondActivityTaskFailedRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskFailedByID is called by application worker when it is done processing an ActivityTask.\n * It will result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Similar to RespondActivityTaskFailed but use\n * Domain, WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskFailedByID(1: shared.RespondActivityTaskFailedByIDRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCanceled is called by application worker when it is successfully canceled an ActivityTask. It will\n * result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. 
It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceled(1: shared.RespondActivityTaskCanceledRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCanceledByID is called by application worker when it is successfully canceled an ActivityTask.\n * It will result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Similar to RespondActivityTaskCanceled but use\n * Domain, WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceledByID(1: shared.RespondActivityTaskCanceledByIDRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.\n * It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. It fails with 'EntityNotExistsError' if the workflow is not valid\n * anymore due to completion or doesn't exist.\n **/\n void RequestCancelWorkflowExecution(1: shared.RequestCancelWorkflowExecutionRequest cancelRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.CancellationAlreadyRequestedError cancellationAlreadyRequestedError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 10: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * SignalWorkflowExecution is used to send a signal event to running workflow execution. 
This results in\n * WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.\n **/\n void SignalWorkflowExecution(1: shared.SignalWorkflowExecutionRequest signalRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * SignalWithStartWorkflowExecution is used to ensure sending signal to a workflow.\n * If the workflow is running, this results in WorkflowExecutionSignaled event being recorded in the history\n * and a decision task being created for the execution.\n * If the workflow is not running or not found, this results in WorkflowExecutionStarted and WorkflowExecutionSignaled\n * events being recorded in history, and a decision task being created for the execution\n **/\n shared.StartWorkflowExecutionResponse SignalWithStartWorkflowExecution(1: shared.SignalWithStartWorkflowExecutionRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.WorkflowExecutionAlreadyStartedError workflowAlreadyStartedError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * SignalWithStartWorkflowExecutionAsync is used to ensure sending signal to a workflow asynchronously. It will push a SignalWithStartWorkflowExecutionRequest to a queue\n * and immediately return a response. 
The request will be processed by a separate consumer eventually.\n **/\n shared.SignalWithStartWorkflowExecutionAsyncResponse SignalWithStartWorkflowExecutionAsync(1: shared.SignalWithStartWorkflowExecutionAsyncRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n /**\n * ResetWorkflowExecution reset an existing workflow execution to DecisionTaskCompleted event(exclusive).\n * And it will immediately terminating the current execution instance.\n **/\n shared.ResetWorkflowExecutionResponse ResetWorkflowExecution(1: shared.ResetWorkflowExecutionRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event\n * in the history and immediately terminating the execution instance.\n **/\n void TerminateWorkflowExecution(1: shared.TerminateWorkflowExecutionRequest terminateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListOpenWorkflowExecutions is a visibility API to list the open executions in a specific domain.\n **/\n shared.ListOpenWorkflowExecutionsResponse ListOpenWorkflowExecutions(1: shared.ListOpenWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListClosedWorkflowExecutions is a visibility API to list the closed executions in a specific domain.\n **/\n shared.ListClosedWorkflowExecutionsResponse ListClosedWorkflowExecutions(1: shared.ListClosedWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListWorkflowExecutions is a visibility API to list workflow executions in a specific domain.\n **/\n shared.ListWorkflowExecutionsResponse ListWorkflowExecutions(1: shared.ListWorkflowExecutionsRequest listRequest)\n throws (\n 1: 
shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListArchivedWorkflowExecutions is a visibility API to list archived workflow executions in a specific domain.\n **/\n shared.ListArchivedWorkflowExecutionsResponse ListArchivedWorkflowExecutions(1: shared.ListArchivedWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ScanWorkflowExecutions is a visibility API to list large amount of workflow executions in a specific domain without order.\n **/\n shared.ListWorkflowExecutionsResponse ScanWorkflowExecutions(1: shared.ListWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * CountWorkflowExecutions is a visibility API to count of workflow executions in a specific domain.\n **/\n shared.CountWorkflowExecutionsResponse CountWorkflowExecutions(1: shared.CountWorkflowExecutionsRequest countRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * GetSearchAttributes is a visibility API to get all legal keys that could be used in list APIs\n **/\n shared.GetSearchAttributesResponse GetSearchAttributes()\n throws (\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 4: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondQueryTaskCompleted is called by application worker to complete a QueryTask (which is a DecisionTask for query)\n * as a result of 'PollForDecisionTask' API call. Completing a QueryTask will unblock the client call to 'QueryWorkflow'\n * API and return the query result to client as a response to 'QueryWorkflow' API call.\n **/\n void RespondQueryTaskCompleted(1: shared.RespondQueryTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * Reset the sticky tasklist related information in mutable state of a given workflow.\n * Things cleared are:\n * 1. StickyTaskList\n * 2. StickyScheduleToStartTimeout\n * 3. ClientLibraryVersion\n * 4. ClientFeatureVersion\n * 5. 
ClientImpl\n **/\n shared.ResetStickyTaskListResponse ResetStickyTaskList(1: shared.ResetStickyTaskListRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * QueryWorkflow returns query result for a specified workflow execution\n **/\n shared.QueryWorkflowResponse QueryWorkflow(1: shared.QueryWorkflowRequest queryRequest)\n\tthrows (\n\t 1: shared.BadRequestError badRequestError,\n\t 3: shared.EntityNotExistsError entityNotExistError,\n\t 4: shared.QueryFailedError queryFailedError,\n\t 5: shared.LimitExceededError limitExceededError,\n\t 6: shared.ServiceBusyError serviceBusyError,\n\t 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n\t)\n\n /**\n * DescribeWorkflowExecution returns information about the specified workflow execution.\n **/\n shared.DescribeWorkflowExecutionResponse DescribeWorkflowExecution(1: shared.DescribeWorkflowExecutionRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DescribeTaskList returns information about the target tasklist, right now this API returns the\n * pollers which polled this tasklist in last few minutes.\n **/\n shared.DescribeTaskListResponse DescribeTaskList(1: shared.DescribeTaskListRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * GetClusterInfo returns information about cadence cluster\n **/\n shared.ClusterInfo GetClusterInfo()\n throws (\n 1: shared.InternalServiceError internalServiceError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * GetTaskListsByDomain returns the list of all the task lists for a domainName.\n **/\n shared.GetTaskListsByDomainResponse GetTaskListsByDomain(1: shared.GetTaskListsByDomainRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.EntityNotExistsError entityNotExistError,\n 3: shared.LimitExceededError limitExceededError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ReapplyEvents applies stale events to the current workflow and current run\n **/\n shared.ListTaskListPartitionsResponse ListTaskListPartitions(1: shared.ListTaskListPartitionsRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: 
shared.ServiceBusyError serviceBusyError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RefreshWorkflowTasks refreshes all tasks of a workflow\n **/\n void RefreshWorkflowTasks(1: shared.RefreshWorkflowTasksRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.DomainNotActiveError domainNotActiveError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: shared.AccessDeniedError accessDeniedError,\n )\n}\n" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\ninclude \"shared.thrift\"\n\nnamespace java com.uber.cadence\n\n/**\n* WorkflowService API is exposed to provide support for long running applications. Application is expected to call\n* StartWorkflowExecution to create an instance for each instance of long running workflow. Such applications are expected\n* to have a worker which regularly polls for DecisionTask and ActivityTask from the WorkflowService. For each\n* DecisionTask, application is expected to process the history of events for that session and respond back with next\n* decisions. For each ActivityTask, application is expected to execute the actual logic for that task and respond back\n* with completion or failure. Worker is expected to regularly heartbeat while activity task is running.\n**/\nservice WorkflowService {\n /**\n * RegisterDomain creates a new domain which can be used as a container for all resources. Domain is a top level\n * entity within Cadence, used as a container for all resources like workflow executions, tasklists, etc. Domain\n * acts as a sandbox and provides isolation for all resources within the domain. 
All resources belongs to exactly one\n * domain.\n **/\n void RegisterDomain(1: shared.RegisterDomainRequest registerRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.DomainAlreadyExistsError domainExistsError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DescribeDomain returns the information and configuration for a registered domain.\n **/\n shared.DescribeDomainResponse DescribeDomain(1: shared.DescribeDomainRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListDomains returns the information and configuration for all domains.\n **/\n shared.ListDomainsResponse ListDomains(1: shared.ListDomainsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * UpdateDomain is used to update the information and configuration for a registered domain.\n **/\n shared.UpdateDomainResponse UpdateDomain(1: shared.UpdateDomainRequest updateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DeprecateDomain us used to update status of a registered domain to DEPRECATED. Once the domain is deprecated\n * it cannot be used to start new workflow executions. Existing workflow executions will continue to run on\n * deprecated domains.\n **/\n void DeprecateDomain(1: shared.DeprecateDomainRequest deprecateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DeleteDomain permanently removes a domain record. 
This operation:\n * - Requires domain to be in DEPRECATED status\n * - Cannot be performed on domains with running workflows\n * - Is irreversible and removes all domain data\n * - Requires proper permissions and security token\n **/\n void DeleteDomain(1: shared.DeleteDomainRequest deleteRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 4: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RestartWorkflowExecution restarts a previous workflow\n * If the workflow is currently running it will terminate and restart\n **/\n shared.RestartWorkflowExecutionResponse RestartWorkflowExecution(1: shared.RestartWorkflowExecutionRequest restartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.DomainNotActiveError domainNotActiveError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DiagnoseWorkflowExecution diagnoses a previous workflow execution\n **/\n shared.DiagnoseWorkflowExecutionResponse DiagnoseWorkflowExecution(1: shared.DiagnoseWorkflowExecutionRequest diagnoseRequest)\n throws (\n 1: shared.DomainNotActiveError domainNotActiveError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 5: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * StartWorkflowExecution starts a new long running workflow instance. It will create the instance with\n * 'WorkflowExecutionStarted' event in history and also schedule the first DecisionTask for the worker to make the\n * first decision for this instance. It will return 'WorkflowExecutionAlreadyStartedError', if an instance already\n * exists with same workflowId.\n **/\n shared.StartWorkflowExecutionResponse StartWorkflowExecution(1: shared.StartWorkflowExecutionRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n /**\n * StartWorkflowExecutionAsync starts a new long running workflow instance asynchronously. It will push a StartWorkflowExecutionRequest to a queue\n * and immediately return a response. 
The request will be processed by a separate consumer eventually.\n **/\n shared.StartWorkflowExecutionAsyncResponse StartWorkflowExecutionAsync(1: shared.StartWorkflowExecutionAsyncRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n /**\n * Returns the history of specified workflow execution. It fails with 'EntityNotExistError' if speficied workflow\n * execution in unknown to the service.\n **/\n shared.GetWorkflowExecutionHistoryResponse GetWorkflowExecutionHistory(1: shared.GetWorkflowExecutionHistoryRequest getRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * PollForDecisionTask is called by application worker to process DecisionTask from a specific taskList. A\n * DecisionTask is dispatched to callers for active workflow executions, with pending decisions.\n * Application is then expected to call 'RespondDecisionTaskCompleted' API when it is done processing the DecisionTask.\n * It will also create a 'DecisionTaskStarted' event in the history for that session before handing off DecisionTask to\n * application worker.\n **/\n shared.PollForDecisionTaskResponse PollForDecisionTask(1: shared.PollForDecisionTaskRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondDecisionTaskCompleted is called by application worker to complete a DecisionTask handed as a result of\n * 'PollForDecisionTask' API call. Completing a DecisionTask will result in new events for the workflow execution and\n * potentially new ActivityTask being created for corresponding decisions. It will also create a DecisionTaskCompleted\n * event in the history for that session. 
Use the 'taskToken' provided as response of PollForDecisionTask API call\n * for completing the DecisionTask.\n * The response could contain a new decision task if there is one or if the request asking for one.\n **/\n shared.RespondDecisionTaskCompletedResponse RespondDecisionTaskCompleted(1: shared.RespondDecisionTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondDecisionTaskFailed is called by application worker to indicate failure. This results in\n * DecisionTaskFailedEvent written to the history and a new DecisionTask created. This API can be used by client to\n * either clear sticky tasklist or report any panics during DecisionTask processing. Cadence will only append first\n * DecisionTaskFailed event to the history of workflow execution for consecutive failures.\n **/\n void RespondDecisionTaskFailed(1: shared.RespondDecisionTaskFailedRequest failedRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * PollForActivityTask is called by application worker to process ActivityTask from a specific taskList. ActivityTask\n * is dispatched to callers whenever a ScheduleTask decision is made for a workflow execution.\n * Application is expected to call 'RespondActivityTaskCompleted' or 'RespondActivityTaskFailed' once it is done\n * processing the task.\n * Application also needs to call 'RecordActivityTaskHeartbeat' API within 'heartbeatTimeoutSeconds' interval to\n * prevent the task from getting timed out. An event 'ActivityTaskStarted' event is also written to workflow execution\n * history before the ActivityTask is dispatched to application worker.\n **/\n shared.PollForActivityTaskResponse PollForActivityTask(1: shared.PollForActivityTaskRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will\n * fail with 'EntityNotExistsError' in such situations. 
Use the 'taskToken' provided as response of\n * PollForActivityTask API call for heartbeating.\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeat(1: shared.RecordActivityTaskHeartbeatRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RecordActivityTaskHeartbeatByID is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeatByID' will\n * fail with 'EntityNotExistsError' in such situations. Instead of using 'taskToken' like in RecordActivityTaskHeartbeat,\n * use Domain, WorkflowID and ActivityID\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeatByID(1: shared.RecordActivityTaskHeartbeatByIDRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCompleted(1: shared.RespondActivityTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCompletedByID is called by application worker when it is done processing an ActivityTask.\n * It will result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Similar to RespondActivityTaskCompleted but use Domain,\n * WorkflowID and ActivityID instead of 'taskToken' for completion. 
It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskCompletedByID(1: shared.RespondActivityTaskCompletedByIDRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskFailed(1: shared.RespondActivityTaskFailedRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskFailedByID is called by application worker when it is done processing an ActivityTask.\n * It will result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Similar to RespondActivityTaskFailed but use\n * Domain, WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskFailedByID(1: shared.RespondActivityTaskFailedByIDRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCanceled is called by application worker when it is successfully canceled an ActivityTask. It will\n * result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. 
It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceled(1: shared.RespondActivityTaskCanceledRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCanceledByID is called by application worker when it is successfully canceled an ActivityTask.\n * It will result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Similar to RespondActivityTaskCanceled but use\n * Domain, WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceledByID(1: shared.RespondActivityTaskCanceledByIDRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.\n * It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. It fails with 'EntityNotExistsError' if the workflow is not valid\n * anymore due to completion or doesn't exist.\n **/\n void RequestCancelWorkflowExecution(1: shared.RequestCancelWorkflowExecutionRequest cancelRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.CancellationAlreadyRequestedError cancellationAlreadyRequestedError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 10: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * SignalWorkflowExecution is used to send a signal event to running workflow execution. 
This results in\n * WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.\n **/\n void SignalWorkflowExecution(1: shared.SignalWorkflowExecutionRequest signalRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * SignalWithStartWorkflowExecution is used to ensure sending signal to a workflow.\n * If the workflow is running, this results in WorkflowExecutionSignaled event being recorded in the history\n * and a decision task being created for the execution.\n * If the workflow is not running or not found, this results in WorkflowExecutionStarted and WorkflowExecutionSignaled\n * events being recorded in history, and a decision task being created for the execution\n **/\n shared.StartWorkflowExecutionResponse SignalWithStartWorkflowExecution(1: shared.SignalWithStartWorkflowExecutionRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.WorkflowExecutionAlreadyStartedError workflowAlreadyStartedError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * SignalWithStartWorkflowExecutionAsync is used to ensure sending signal to a workflow asynchronously. It will push a SignalWithStartWorkflowExecutionRequest to a queue\n * and immediately return a response. 
The request will be processed by a separate consumer eventually.\n **/\n shared.SignalWithStartWorkflowExecutionAsyncResponse SignalWithStartWorkflowExecutionAsync(1: shared.SignalWithStartWorkflowExecutionAsyncRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n /**\n * ResetWorkflowExecution reset an existing workflow execution to DecisionTaskCompleted event(exclusive).\n * And it will immediately terminating the current execution instance.\n **/\n shared.ResetWorkflowExecutionResponse ResetWorkflowExecution(1: shared.ResetWorkflowExecutionRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event\n * in the history and immediately terminating the execution instance.\n **/\n void TerminateWorkflowExecution(1: shared.TerminateWorkflowExecutionRequest terminateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListOpenWorkflowExecutions is a visibility API to list the open executions in a specific domain.\n **/\n shared.ListOpenWorkflowExecutionsResponse ListOpenWorkflowExecutions(1: shared.ListOpenWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListClosedWorkflowExecutions is a visibility API to list the closed executions in a specific domain.\n **/\n shared.ListClosedWorkflowExecutionsResponse ListClosedWorkflowExecutions(1: shared.ListClosedWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListWorkflowExecutions is a visibility API to list workflow executions in a specific domain.\n **/\n shared.ListWorkflowExecutionsResponse ListWorkflowExecutions(1: shared.ListWorkflowExecutionsRequest listRequest)\n throws (\n 1: 
shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListArchivedWorkflowExecutions is a visibility API to list archived workflow executions in a specific domain.\n **/\n shared.ListArchivedWorkflowExecutionsResponse ListArchivedWorkflowExecutions(1: shared.ListArchivedWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ScanWorkflowExecutions is a visibility API to list large amount of workflow executions in a specific domain without order.\n **/\n shared.ListWorkflowExecutionsResponse ScanWorkflowExecutions(1: shared.ListWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * CountWorkflowExecutions is a visibility API to count of workflow executions in a specific domain.\n **/\n shared.CountWorkflowExecutionsResponse CountWorkflowExecutions(1: shared.CountWorkflowExecutionsRequest countRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * GetSearchAttributes is a visibility API to get all legal keys that could be used in list APIs\n **/\n shared.GetSearchAttributesResponse GetSearchAttributes()\n throws (\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 4: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondQueryTaskCompleted is called by application worker to complete a QueryTask (which is a DecisionTask for query)\n * as a result of 'PollForDecisionTask' API call. Completing a QueryTask will unblock the client call to 'QueryWorkflow'\n * API and return the query result to client as a response to 'QueryWorkflow' API call.\n **/\n void RespondQueryTaskCompleted(1: shared.RespondQueryTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * Reset the sticky tasklist related information in mutable state of a given workflow.\n * Things cleared are:\n * 1. StickyTaskList\n * 2. StickyScheduleToStartTimeout\n * 3. ClientLibraryVersion\n * 4. ClientFeatureVersion\n * 5. 
ClientImpl\n **/\n shared.ResetStickyTaskListResponse ResetStickyTaskList(1: shared.ResetStickyTaskListRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * QueryWorkflow returns query result for a specified workflow execution\n **/\n shared.QueryWorkflowResponse QueryWorkflow(1: shared.QueryWorkflowRequest queryRequest)\n\tthrows (\n\t 1: shared.BadRequestError badRequestError,\n\t 3: shared.EntityNotExistsError entityNotExistError,\n\t 4: shared.QueryFailedError queryFailedError,\n\t 5: shared.LimitExceededError limitExceededError,\n\t 6: shared.ServiceBusyError serviceBusyError,\n\t 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n\t)\n\n /**\n * DescribeWorkflowExecution returns information about the specified workflow execution.\n **/\n shared.DescribeWorkflowExecutionResponse DescribeWorkflowExecution(1: shared.DescribeWorkflowExecutionRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DescribeTaskList returns information about the target tasklist, right now this API returns the\n * pollers which polled this tasklist in last few minutes.\n **/\n shared.DescribeTaskListResponse DescribeTaskList(1: shared.DescribeTaskListRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * GetClusterInfo returns information about cadence cluster\n **/\n shared.ClusterInfo GetClusterInfo()\n throws (\n 1: shared.InternalServiceError internalServiceError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * GetTaskListsByDomain returns the list of all the task lists for a domainName.\n **/\n shared.GetTaskListsByDomainResponse GetTaskListsByDomain(1: shared.GetTaskListsByDomainRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.EntityNotExistsError entityNotExistError,\n 3: shared.LimitExceededError limitExceededError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ReapplyEvents applies stale events to the current workflow and current run\n **/\n shared.ListTaskListPartitionsResponse ListTaskListPartitions(1: shared.ListTaskListPartitionsRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: 
shared.ServiceBusyError serviceBusyError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RefreshWorkflowTasks refreshes all tasks of a workflow\n **/\n void RefreshWorkflowTasks(1: shared.RefreshWorkflowTasksRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.DomainNotActiveError domainNotActiveError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: shared.AccessDeniedError accessDeniedError,\n )\n}\n" // WorkflowService_CountWorkflowExecutions_Args represents the arguments for the WorkflowService.CountWorkflowExecutions function. // @@ -759,6 +759,589 @@ func (v *WorkflowService_CountWorkflowExecutions_Result) EnvelopeType() wire.Env return wire.Reply } +// WorkflowService_DeleteDomain_Args represents the arguments for the WorkflowService.DeleteDomain function. +// +// The arguments for DeleteDomain are sent and received over the wire as this struct. +type WorkflowService_DeleteDomain_Args struct { + DeleteRequest *shared.DeleteDomainRequest `json:"deleteRequest,omitempty"` +} + +// ToWire translates a WorkflowService_DeleteDomain_Args struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *WorkflowService_DeleteDomain_Args) ToWire() (wire.Value, error) { + var ( + fields [1]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.DeleteRequest != nil { + w, err = v.DeleteRequest.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 1, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _DeleteDomainRequest_Read(w wire.Value) (*shared.DeleteDomainRequest, error) { + var v shared.DeleteDomainRequest + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a WorkflowService_DeleteDomain_Args struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a WorkflowService_DeleteDomain_Args struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v WorkflowService_DeleteDomain_Args +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *WorkflowService_DeleteDomain_Args) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 1: + if field.Value.Type() == wire.TStruct { + v.DeleteRequest, err = _DeleteDomainRequest_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a WorkflowService_DeleteDomain_Args +// struct. 
+func (v *WorkflowService_DeleteDomain_Args) String() string { + if v == nil { + return "" + } + + var fields [1]string + i := 0 + if v.DeleteRequest != nil { + fields[i] = fmt.Sprintf("DeleteRequest: %v", v.DeleteRequest) + i++ + } + + return fmt.Sprintf("WorkflowService_DeleteDomain_Args{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this WorkflowService_DeleteDomain_Args match the +// provided WorkflowService_DeleteDomain_Args. +// +// This function performs a deep comparison. +func (v *WorkflowService_DeleteDomain_Args) Equals(rhs *WorkflowService_DeleteDomain_Args) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.DeleteRequest == nil && rhs.DeleteRequest == nil) || (v.DeleteRequest != nil && rhs.DeleteRequest != nil && v.DeleteRequest.Equals(rhs.DeleteRequest))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of WorkflowService_DeleteDomain_Args. +func (v *WorkflowService_DeleteDomain_Args) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.DeleteRequest != nil { + err = multierr.Append(err, enc.AddObject("deleteRequest", v.DeleteRequest)) + } + return err +} + +// GetDeleteRequest returns the value of DeleteRequest if it is set or its +// zero value if it is unset. +func (v *WorkflowService_DeleteDomain_Args) GetDeleteRequest() (o *shared.DeleteDomainRequest) { + if v != nil && v.DeleteRequest != nil { + return v.DeleteRequest + } + + return +} + +// IsSetDeleteRequest returns true if DeleteRequest is not nil. +func (v *WorkflowService_DeleteDomain_Args) IsSetDeleteRequest() bool { + return v != nil && v.DeleteRequest != nil +} + +// MethodName returns the name of the Thrift function as specified in +// the IDL, for which this struct represent the arguments. +// +// This will always be "DeleteDomain" for this struct. +func (v *WorkflowService_DeleteDomain_Args) MethodName() string { + return "DeleteDomain" +} + +// EnvelopeType returns the kind of value inside this struct. +// +// This will always be Call for this struct. +func (v *WorkflowService_DeleteDomain_Args) EnvelopeType() wire.EnvelopeType { + return wire.Call +} + +// WorkflowService_DeleteDomain_Helper provides functions that aid in handling the +// parameters and return values of the WorkflowService.DeleteDomain +// function. +var WorkflowService_DeleteDomain_Helper = struct { + // Args accepts the parameters of DeleteDomain in-order and returns + // the arguments struct for the function. + Args func( + deleteRequest *shared.DeleteDomainRequest, + ) *WorkflowService_DeleteDomain_Args + + // IsException returns true if the given error can be thrown + // by DeleteDomain. + // + // An error can be thrown by DeleteDomain only if the + // corresponding exception type was mentioned in the 'throws' + // section for it in the Thrift file. + IsException func(error) bool + + // WrapResponse returns the result struct for DeleteDomain + // given the error returned by it. The provided error may + // be nil if DeleteDomain did not fail. + // + // This allows mapping errors returned by DeleteDomain into a + // serializable result struct. 
WrapResponse returns a + // non-nil error if the provided error cannot be thrown by + // DeleteDomain + // + // err := DeleteDomain(args) + // result, err := WorkflowService_DeleteDomain_Helper.WrapResponse(err) + // if err != nil { + // return fmt.Errorf("unexpected error from DeleteDomain: %v", err) + // } + // serialize(result) + WrapResponse func(error) (*WorkflowService_DeleteDomain_Result, error) + + // UnwrapResponse takes the result struct for DeleteDomain + // and returns the erorr returned by it (if any). + // + // The error is non-nil only if DeleteDomain threw an + // exception. + // + // result := deserialize(bytes) + // err := WorkflowService_DeleteDomain_Helper.UnwrapResponse(result) + UnwrapResponse func(*WorkflowService_DeleteDomain_Result) error +}{} + +func init() { + WorkflowService_DeleteDomain_Helper.Args = func( + deleteRequest *shared.DeleteDomainRequest, + ) *WorkflowService_DeleteDomain_Args { + return &WorkflowService_DeleteDomain_Args{ + DeleteRequest: deleteRequest, + } + } + + WorkflowService_DeleteDomain_Helper.IsException = func(err error) bool { + switch err.(type) { + case *shared.BadRequestError: + return true + case *shared.ServiceBusyError: + return true + case *shared.ClientVersionNotSupportedError: + return true + case *shared.AccessDeniedError: + return true + default: + return false + } + } + + WorkflowService_DeleteDomain_Helper.WrapResponse = func(err error) (*WorkflowService_DeleteDomain_Result, error) { + if err == nil { + return &WorkflowService_DeleteDomain_Result{}, nil + } + + switch e := err.(type) { + case *shared.BadRequestError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for WorkflowService_DeleteDomain_Result.BadRequestError") + } + return &WorkflowService_DeleteDomain_Result{BadRequestError: e}, nil + case *shared.ServiceBusyError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for WorkflowService_DeleteDomain_Result.ServiceBusyError") + } + return &WorkflowService_DeleteDomain_Result{ServiceBusyError: e}, nil + case *shared.ClientVersionNotSupportedError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for WorkflowService_DeleteDomain_Result.ClientVersionNotSupportedError") + } + return &WorkflowService_DeleteDomain_Result{ClientVersionNotSupportedError: e}, nil + case *shared.AccessDeniedError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for WorkflowService_DeleteDomain_Result.AccessDeniedError") + } + return &WorkflowService_DeleteDomain_Result{AccessDeniedError: e}, nil + } + + return nil, err + } + WorkflowService_DeleteDomain_Helper.UnwrapResponse = func(result *WorkflowService_DeleteDomain_Result) (err error) { + if result.BadRequestError != nil { + err = result.BadRequestError + return + } + if result.ServiceBusyError != nil { + err = result.ServiceBusyError + return + } + if result.ClientVersionNotSupportedError != nil { + err = result.ClientVersionNotSupportedError + return + } + if result.AccessDeniedError != nil { + err = result.AccessDeniedError + return + } + return + } + +} + +// WorkflowService_DeleteDomain_Result represents the result of a WorkflowService.DeleteDomain function call. +// +// The result of a DeleteDomain execution is sent and received over the wire as this struct. 
+type WorkflowService_DeleteDomain_Result struct { + BadRequestError *shared.BadRequestError `json:"badRequestError,omitempty"` + ServiceBusyError *shared.ServiceBusyError `json:"serviceBusyError,omitempty"` + ClientVersionNotSupportedError *shared.ClientVersionNotSupportedError `json:"clientVersionNotSupportedError,omitempty"` + AccessDeniedError *shared.AccessDeniedError `json:"accessDeniedError,omitempty"` +} + +// ToWire translates a WorkflowService_DeleteDomain_Result struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *WorkflowService_DeleteDomain_Result) ToWire() (wire.Value, error) { + var ( + fields [4]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.BadRequestError != nil { + w, err = v.BadRequestError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 1, Value: w} + i++ + } + if v.ServiceBusyError != nil { + w, err = v.ServiceBusyError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 2, Value: w} + i++ + } + if v.ClientVersionNotSupportedError != nil { + w, err = v.ClientVersionNotSupportedError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 3, Value: w} + i++ + } + if v.AccessDeniedError != nil { + w, err = v.AccessDeniedError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 4, Value: w} + i++ + } + + if i > 1 { + return wire.Value{}, fmt.Errorf("WorkflowService_DeleteDomain_Result should have at most one field: got %v fields", i) + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +// FromWire deserializes a WorkflowService_DeleteDomain_Result struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a WorkflowService_DeleteDomain_Result struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v WorkflowService_DeleteDomain_Result +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *WorkflowService_DeleteDomain_Result) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 1: + if field.Value.Type() == wire.TStruct { + v.BadRequestError, err = _BadRequestError_Read(field.Value) + if err != nil { + return err + } + + } + case 2: + if field.Value.Type() == wire.TStruct { + v.ServiceBusyError, err = _ServiceBusyError_Read(field.Value) + if err != nil { + return err + } + + } + case 3: + if field.Value.Type() == wire.TStruct { + v.ClientVersionNotSupportedError, err = _ClientVersionNotSupportedError_Read(field.Value) + if err != nil { + return err + } + + } + case 4: + if field.Value.Type() == wire.TStruct { + v.AccessDeniedError, err = _AccessDeniedError_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + count := 0 + if v.BadRequestError != nil { + count++ + } + if v.ServiceBusyError != nil { + count++ + } + if v.ClientVersionNotSupportedError != nil { + count++ + } + if v.AccessDeniedError != nil { + count++ + } + if count > 1 { + return fmt.Errorf("WorkflowService_DeleteDomain_Result should have at most one field: got %v fields", count) + } + + return nil +} + +// String returns a readable string representation of a WorkflowService_DeleteDomain_Result +// struct. +func (v *WorkflowService_DeleteDomain_Result) String() string { + if v == nil { + return "" + } + + var fields [4]string + i := 0 + if v.BadRequestError != nil { + fields[i] = fmt.Sprintf("BadRequestError: %v", v.BadRequestError) + i++ + } + if v.ServiceBusyError != nil { + fields[i] = fmt.Sprintf("ServiceBusyError: %v", v.ServiceBusyError) + i++ + } + if v.ClientVersionNotSupportedError != nil { + fields[i] = fmt.Sprintf("ClientVersionNotSupportedError: %v", v.ClientVersionNotSupportedError) + i++ + } + if v.AccessDeniedError != nil { + fields[i] = fmt.Sprintf("AccessDeniedError: %v", v.AccessDeniedError) + i++ + } + + return fmt.Sprintf("WorkflowService_DeleteDomain_Result{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this WorkflowService_DeleteDomain_Result match the +// provided WorkflowService_DeleteDomain_Result. +// +// This function performs a deep comparison. 
+func (v *WorkflowService_DeleteDomain_Result) Equals(rhs *WorkflowService_DeleteDomain_Result) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.BadRequestError == nil && rhs.BadRequestError == nil) || (v.BadRequestError != nil && rhs.BadRequestError != nil && v.BadRequestError.Equals(rhs.BadRequestError))) { + return false + } + if !((v.ServiceBusyError == nil && rhs.ServiceBusyError == nil) || (v.ServiceBusyError != nil && rhs.ServiceBusyError != nil && v.ServiceBusyError.Equals(rhs.ServiceBusyError))) { + return false + } + if !((v.ClientVersionNotSupportedError == nil && rhs.ClientVersionNotSupportedError == nil) || (v.ClientVersionNotSupportedError != nil && rhs.ClientVersionNotSupportedError != nil && v.ClientVersionNotSupportedError.Equals(rhs.ClientVersionNotSupportedError))) { + return false + } + if !((v.AccessDeniedError == nil && rhs.AccessDeniedError == nil) || (v.AccessDeniedError != nil && rhs.AccessDeniedError != nil && v.AccessDeniedError.Equals(rhs.AccessDeniedError))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of WorkflowService_DeleteDomain_Result. +func (v *WorkflowService_DeleteDomain_Result) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.BadRequestError != nil { + err = multierr.Append(err, enc.AddObject("badRequestError", v.BadRequestError)) + } + if v.ServiceBusyError != nil { + err = multierr.Append(err, enc.AddObject("serviceBusyError", v.ServiceBusyError)) + } + if v.ClientVersionNotSupportedError != nil { + err = multierr.Append(err, enc.AddObject("clientVersionNotSupportedError", v.ClientVersionNotSupportedError)) + } + if v.AccessDeniedError != nil { + err = multierr.Append(err, enc.AddObject("accessDeniedError", v.AccessDeniedError)) + } + return err +} + +// GetBadRequestError returns the value of BadRequestError if it is set or its +// zero value if it is unset. +func (v *WorkflowService_DeleteDomain_Result) GetBadRequestError() (o *shared.BadRequestError) { + if v != nil && v.BadRequestError != nil { + return v.BadRequestError + } + + return +} + +// IsSetBadRequestError returns true if BadRequestError is not nil. +func (v *WorkflowService_DeleteDomain_Result) IsSetBadRequestError() bool { + return v != nil && v.BadRequestError != nil +} + +// GetServiceBusyError returns the value of ServiceBusyError if it is set or its +// zero value if it is unset. +func (v *WorkflowService_DeleteDomain_Result) GetServiceBusyError() (o *shared.ServiceBusyError) { + if v != nil && v.ServiceBusyError != nil { + return v.ServiceBusyError + } + + return +} + +// IsSetServiceBusyError returns true if ServiceBusyError is not nil. +func (v *WorkflowService_DeleteDomain_Result) IsSetServiceBusyError() bool { + return v != nil && v.ServiceBusyError != nil +} + +// GetClientVersionNotSupportedError returns the value of ClientVersionNotSupportedError if it is set or its +// zero value if it is unset. +func (v *WorkflowService_DeleteDomain_Result) GetClientVersionNotSupportedError() (o *shared.ClientVersionNotSupportedError) { + if v != nil && v.ClientVersionNotSupportedError != nil { + return v.ClientVersionNotSupportedError + } + + return +} + +// IsSetClientVersionNotSupportedError returns true if ClientVersionNotSupportedError is not nil. 
+func (v *WorkflowService_DeleteDomain_Result) IsSetClientVersionNotSupportedError() bool { + return v != nil && v.ClientVersionNotSupportedError != nil +} + +// GetAccessDeniedError returns the value of AccessDeniedError if it is set or its +// zero value if it is unset. +func (v *WorkflowService_DeleteDomain_Result) GetAccessDeniedError() (o *shared.AccessDeniedError) { + if v != nil && v.AccessDeniedError != nil { + return v.AccessDeniedError + } + + return +} + +// IsSetAccessDeniedError returns true if AccessDeniedError is not nil. +func (v *WorkflowService_DeleteDomain_Result) IsSetAccessDeniedError() bool { + return v != nil && v.AccessDeniedError != nil +} + +// MethodName returns the name of the Thrift function as specified in +// the IDL, for which this struct represent the result. +// +// This will always be "DeleteDomain" for this struct. +func (v *WorkflowService_DeleteDomain_Result) MethodName() string { + return "DeleteDomain" +} + +// EnvelopeType returns the kind of value inside this struct. +// +// This will always be Reply for this struct. +func (v *WorkflowService_DeleteDomain_Result) EnvelopeType() wire.EnvelopeType { + return wire.Reply +} + // WorkflowService_DeprecateDomain_Args represents the arguments for the WorkflowService.DeprecateDomain function. // // The arguments for DeprecateDomain are sent and received over the wire as this struct. diff --git a/.gen/go/cadence/workflowserviceclient/client.go b/.gen/go/cadence/workflowserviceclient/client.go index e4e040273..b678e24a8 100644 --- a/.gen/go/cadence/workflowserviceclient/client.go +++ b/.gen/go/cadence/workflowserviceclient/client.go @@ -24,6 +24,12 @@ type Interface interface { opts ...yarpc.CallOption, ) (*shared.CountWorkflowExecutionsResponse, error) + DeleteDomain( + ctx context.Context, + DeleteRequest *shared.DeleteDomainRequest, + opts ...yarpc.CallOption, + ) error + DeprecateDomain( ctx context.Context, DeprecateRequest *shared.DeprecateDomainRequest, @@ -328,6 +334,29 @@ func (c client) CountWorkflowExecutions( return } +func (c client) DeleteDomain( + ctx context.Context, + _DeleteRequest *shared.DeleteDomainRequest, + opts ...yarpc.CallOption, +) (err error) { + + args := cadence.WorkflowService_DeleteDomain_Helper.Args(_DeleteRequest) + + var body wire.Value + body, err = c.c.Call(ctx, args, opts...) 
+ if err != nil { + return + } + + var result cadence.WorkflowService_DeleteDomain_Result + if err = result.FromWire(body); err != nil { + return + } + + err = cadence.WorkflowService_DeleteDomain_Helper.UnwrapResponse(&result) + return +} + func (c client) DeprecateDomain( ctx context.Context, _DeprecateRequest *shared.DeprecateDomainRequest, diff --git a/.gen/go/cadence/workflowserviceserver/server.go b/.gen/go/cadence/workflowserviceserver/server.go index b495ae0c7..0456d73d1 100644 --- a/.gen/go/cadence/workflowserviceserver/server.go +++ b/.gen/go/cadence/workflowserviceserver/server.go @@ -22,6 +22,11 @@ type Interface interface { CountRequest *shared.CountWorkflowExecutionsRequest, ) (*shared.CountWorkflowExecutionsResponse, error) + DeleteDomain( + ctx context.Context, + DeleteRequest *shared.DeleteDomainRequest, + ) error + DeprecateDomain( ctx context.Context, DeprecateRequest *shared.DeprecateDomainRequest, @@ -258,6 +263,17 @@ func New(impl Interface, opts ...thrift.RegisterOption) []transport.Procedure { ThriftModule: cadence.ThriftModule, }, + thrift.Method{ + Name: "DeleteDomain", + HandlerSpec: thrift.HandlerSpec{ + + Type: transport.Unary, + Unary: thrift.UnaryHandler(h.DeleteDomain), + }, + Signature: "DeleteDomain(DeleteRequest *shared.DeleteDomainRequest)", + ThriftModule: cadence.ThriftModule, + }, + thrift.Method{ Name: "DeprecateDomain", HandlerSpec: thrift.HandlerSpec{ @@ -733,7 +749,7 @@ func New(impl Interface, opts ...thrift.RegisterOption) []transport.Procedure { }, } - procedures := make([]transport.Procedure, 0, 44) + procedures := make([]transport.Procedure, 0, 45) procedures = append(procedures, thrift.BuildProcedures(service, opts...)...) return procedures } @@ -774,6 +790,36 @@ func (h handler) CountWorkflowExecutions(ctx context.Context, body wire.Value) ( return response, err } +func (h handler) DeleteDomain(ctx context.Context, body wire.Value) (thrift.Response, error) { + var args cadence.WorkflowService_DeleteDomain_Args + if err := args.FromWire(body); err != nil { + return thrift.Response{}, yarpcerrors.InvalidArgumentErrorf( + "could not decode Thrift request for service 'WorkflowService' procedure 'DeleteDomain': %w", err) + } + + appErr := h.impl.DeleteDomain(ctx, args.DeleteRequest) + + hadError := appErr != nil + result, err := cadence.WorkflowService_DeleteDomain_Helper.WrapResponse(appErr) + + var response thrift.Response + if err == nil { + response.IsApplicationError = hadError + response.Body = result + if namer, ok := appErr.(yarpcErrorNamer); ok { + response.ApplicationErrorName = namer.YARPCErrorName() + } + if extractor, ok := appErr.(yarpcErrorCoder); ok { + response.ApplicationErrorCode = extractor.YARPCErrorCode() + } + if appErr != nil { + response.ApplicationErrorDetails = appErr.Error() + } + } + + return response, err +} + func (h handler) DeprecateDomain(ctx context.Context, body wire.Value) (thrift.Response, error) { var args cadence.WorkflowService_DeprecateDomain_Args if err := args.FromWire(body); err != nil { diff --git a/.gen/go/cadence/workflowservicetest/client.go b/.gen/go/cadence/workflowservicetest/client.go index 280e8b156..8c3e341b5 100644 --- a/.gen/go/cadence/workflowservicetest/client.go +++ b/.gen/go/cadence/workflowservicetest/client.go @@ -77,6 +77,37 @@ func (mr *_MockClientRecorder) CountWorkflowExecutions( return mr.mock.ctrl.RecordCall(mr.mock, "CountWorkflowExecutions", args...) } +// DeleteDomain responds to a DeleteDomain call based on the mock expectations. 
This +// call will fail if the mock does not expect this call. Use EXPECT to expect +// a call to this function. +// +// client.EXPECT().DeleteDomain(gomock.Any(), ...).Return(...) +// ... := client.DeleteDomain(...) +func (m *MockClient) DeleteDomain( + ctx context.Context, + _DeleteRequest *shared.DeleteDomainRequest, + opts ...yarpc.CallOption, +) (err error) { + + args := []interface{}{ctx, _DeleteRequest} + for _, o := range opts { + args = append(args, o) + } + i := 0 + ret := m.ctrl.Call(m, "DeleteDomain", args...) + err, _ = ret[i].(error) + return +} + +func (mr *_MockClientRecorder) DeleteDomain( + ctx interface{}, + _DeleteRequest interface{}, + opts ...interface{}, +) *gomock.Call { + args := append([]interface{}{ctx, _DeleteRequest}, opts...) + return mr.mock.ctrl.RecordCall(mr.mock, "DeleteDomain", args...) +} + // DeprecateDomain responds to a DeprecateDomain call based on the mock expectations. This // call will fail if the mock does not expect this call. Use EXPECT to expect // a call to this function. diff --git a/.gen/go/shared/shared.go b/.gen/go/shared/shared.go index 147a112c8..aaaeddbe1 100644 --- a/.gen/go/shared/shared.go +++ b/.gen/go/shared/shared.go @@ -160,6 +160,444 @@ func (v *AccessDeniedError) Error() string { return v.String() } +type ActiveClusterInfo struct { + ActiveClusterName *string `json:"activeClusterName,omitempty"` + FailoverVersion *int64 `json:"failoverVersion,omitempty"` +} + +// ToWire translates a ActiveClusterInfo struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *ActiveClusterInfo) ToWire() (wire.Value, error) { + var ( + fields [2]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.ActiveClusterName != nil { + w, err = wire.NewValueString(*(v.ActiveClusterName)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.FailoverVersion != nil { + w, err = wire.NewValueI64(*(v.FailoverVersion)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +// FromWire deserializes a ActiveClusterInfo struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a ActiveClusterInfo struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v ActiveClusterInfo +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *ActiveClusterInfo) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TBinary { + var x string + x, err = field.Value.GetString(), error(nil) + v.ActiveClusterName = &x + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TI64 { + var x int64 + x, err = field.Value.GetI64(), error(nil) + v.FailoverVersion = &x + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a ActiveClusterInfo +// struct. +func (v *ActiveClusterInfo) String() string { + if v == nil { + return "" + } + + var fields [2]string + i := 0 + if v.ActiveClusterName != nil { + fields[i] = fmt.Sprintf("ActiveClusterName: %v", *(v.ActiveClusterName)) + i++ + } + if v.FailoverVersion != nil { + fields[i] = fmt.Sprintf("FailoverVersion: %v", *(v.FailoverVersion)) + i++ + } + + return fmt.Sprintf("ActiveClusterInfo{%v}", strings.Join(fields[:i], ", ")) +} + +func _String_EqualsPtr(lhs, rhs *string) bool { + if lhs != nil && rhs != nil { + + x := *lhs + y := *rhs + return (x == y) + } + return lhs == nil && rhs == nil +} + +func _I64_EqualsPtr(lhs, rhs *int64) bool { + if lhs != nil && rhs != nil { + + x := *lhs + y := *rhs + return (x == y) + } + return lhs == nil && rhs == nil +} + +// Equals returns true if all the fields of this ActiveClusterInfo match the +// provided ActiveClusterInfo. +// +// This function performs a deep comparison. +func (v *ActiveClusterInfo) Equals(rhs *ActiveClusterInfo) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !_String_EqualsPtr(v.ActiveClusterName, rhs.ActiveClusterName) { + return false + } + if !_I64_EqualsPtr(v.FailoverVersion, rhs.FailoverVersion) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of ActiveClusterInfo. +func (v *ActiveClusterInfo) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.ActiveClusterName != nil { + enc.AddString("activeClusterName", *v.ActiveClusterName) + } + if v.FailoverVersion != nil { + enc.AddInt64("failoverVersion", *v.FailoverVersion) + } + return err +} + +// GetActiveClusterName returns the value of ActiveClusterName if it is set or its +// zero value if it is unset. +func (v *ActiveClusterInfo) GetActiveClusterName() (o string) { + if v != nil && v.ActiveClusterName != nil { + return *v.ActiveClusterName + } + + return +} + +// IsSetActiveClusterName returns true if ActiveClusterName is not nil. +func (v *ActiveClusterInfo) IsSetActiveClusterName() bool { + return v != nil && v.ActiveClusterName != nil +} + +// GetFailoverVersion returns the value of FailoverVersion if it is set or its +// zero value if it is unset. +func (v *ActiveClusterInfo) GetFailoverVersion() (o int64) { + if v != nil && v.FailoverVersion != nil { + return *v.FailoverVersion + } + + return +} + +// IsSetFailoverVersion returns true if FailoverVersion is not nil. 
+func (v *ActiveClusterInfo) IsSetFailoverVersion() bool { + return v != nil && v.FailoverVersion != nil +} + +type ActiveClusters struct { + ActiveClustersByRegion map[string]*ActiveClusterInfo `json:"activeClustersByRegion,omitempty"` +} + +type _Map_String_ActiveClusterInfo_MapItemList map[string]*ActiveClusterInfo + +func (m _Map_String_ActiveClusterInfo_MapItemList) ForEach(f func(wire.MapItem) error) error { + for k, v := range m { + if v == nil { + return fmt.Errorf("invalid [%v]: value is nil", k) + } + kw, err := wire.NewValueString(k), error(nil) + if err != nil { + return err + } + + vw, err := v.ToWire() + if err != nil { + return err + } + err = f(wire.MapItem{Key: kw, Value: vw}) + if err != nil { + return err + } + } + return nil +} + +func (m _Map_String_ActiveClusterInfo_MapItemList) Size() int { + return len(m) +} + +func (_Map_String_ActiveClusterInfo_MapItemList) KeyType() wire.Type { + return wire.TBinary +} + +func (_Map_String_ActiveClusterInfo_MapItemList) ValueType() wire.Type { + return wire.TStruct +} + +func (_Map_String_ActiveClusterInfo_MapItemList) Close() {} + +// ToWire translates a ActiveClusters struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *ActiveClusters) ToWire() (wire.Value, error) { + var ( + fields [1]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.ActiveClustersByRegion != nil { + w, err = wire.NewValueMap(_Map_String_ActiveClusterInfo_MapItemList(v.ActiveClustersByRegion)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _ActiveClusterInfo_Read(w wire.Value) (*ActiveClusterInfo, error) { + var v ActiveClusterInfo + err := v.FromWire(w) + return &v, err +} + +func _Map_String_ActiveClusterInfo_Read(m wire.MapItemList) (map[string]*ActiveClusterInfo, error) { + if m.KeyType() != wire.TBinary { + return nil, nil + } + + if m.ValueType() != wire.TStruct { + return nil, nil + } + + o := make(map[string]*ActiveClusterInfo, m.Size()) + err := m.ForEach(func(x wire.MapItem) error { + k, err := x.Key.GetString(), error(nil) + if err != nil { + return err + } + + v, err := _ActiveClusterInfo_Read(x.Value) + if err != nil { + return err + } + + o[k] = v + return nil + }) + m.Close() + return o, err +} + +// FromWire deserializes a ActiveClusters struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a ActiveClusters struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v ActiveClusters +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *ActiveClusters) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TMap { + v.ActiveClustersByRegion, err = _Map_String_ActiveClusterInfo_Read(field.Value.GetMap()) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a ActiveClusters +// struct. +func (v *ActiveClusters) String() string { + if v == nil { + return "" + } + + var fields [1]string + i := 0 + if v.ActiveClustersByRegion != nil { + fields[i] = fmt.Sprintf("ActiveClustersByRegion: %v", v.ActiveClustersByRegion) + i++ + } + + return fmt.Sprintf("ActiveClusters{%v}", strings.Join(fields[:i], ", ")) +} + +func _Map_String_ActiveClusterInfo_Equals(lhs, rhs map[string]*ActiveClusterInfo) bool { + if len(lhs) != len(rhs) { + return false + } + + for lk, lv := range lhs { + rv, ok := rhs[lk] + if !ok { + return false + } + if !lv.Equals(rv) { + return false + } + } + return true +} + +// Equals returns true if all the fields of this ActiveClusters match the +// provided ActiveClusters. +// +// This function performs a deep comparison. +func (v *ActiveClusters) Equals(rhs *ActiveClusters) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.ActiveClustersByRegion == nil && rhs.ActiveClustersByRegion == nil) || (v.ActiveClustersByRegion != nil && rhs.ActiveClustersByRegion != nil && _Map_String_ActiveClusterInfo_Equals(v.ActiveClustersByRegion, rhs.ActiveClustersByRegion))) { + return false + } + + return true +} + +type _Map_String_ActiveClusterInfo_Zapper map[string]*ActiveClusterInfo + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of _Map_String_ActiveClusterInfo_Zapper. +func (m _Map_String_ActiveClusterInfo_Zapper) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + for k, v := range m { + err = multierr.Append(err, enc.AddObject((string)(k), v)) + } + return err +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of ActiveClusters. +func (v *ActiveClusters) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.ActiveClustersByRegion != nil { + err = multierr.Append(err, enc.AddObject("activeClustersByRegion", (_Map_String_ActiveClusterInfo_Zapper)(v.ActiveClustersByRegion))) + } + return err +} + +// GetActiveClustersByRegion returns the value of ActiveClustersByRegion if it is set or its +// zero value if it is unset. +func (v *ActiveClusters) GetActiveClustersByRegion() (o map[string]*ActiveClusterInfo) { + if v != nil && v.ActiveClustersByRegion != nil { + return v.ActiveClustersByRegion + } + + return +} + +// IsSetActiveClustersByRegion returns true if ActiveClustersByRegion is not nil. 
+func (v *ActiveClusters) IsSetActiveClustersByRegion() bool { + return v != nil && v.ActiveClustersByRegion != nil +} + type ActivityLocalDispatchInfo struct { ActivityId *string `json:"activityId,omitempty"` ScheduledTimestamp *int64 `json:"scheduledTimestamp,omitempty"` @@ -344,26 +782,6 @@ func (v *ActivityLocalDispatchInfo) String() string { return fmt.Sprintf("ActivityLocalDispatchInfo{%v}", strings.Join(fields[:i], ", ")) } -func _String_EqualsPtr(lhs, rhs *string) bool { - if lhs != nil && rhs != nil { - - x := *lhs - y := *rhs - return (x == y) - } - return lhs == nil && rhs == nil -} - -func _I64_EqualsPtr(lhs, rhs *int64) bool { - if lhs != nil && rhs != nil { - - x := *lhs - y := *rhs - return (x == y) - } - return lhs == nil && rhs == nil -} - // Equals returns true if all the fields of this ActivityLocalDispatchInfo match the // provided ActivityLocalDispatchInfo. // @@ -9426,6 +9844,7 @@ type ContinueAsNewWorkflowExecutionDecisionAttributes struct { Memo *Memo `json:"memo,omitempty"` SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` JitterStartSeconds *int32 `json:"jitterStartSeconds,omitempty"` + CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` } // ToWire translates a ContinueAsNewWorkflowExecutionDecisionAttributes struct into a Thrift-level intermediate @@ -9445,7 +9864,7 @@ type ContinueAsNewWorkflowExecutionDecisionAttributes struct { // } func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) ToWire() (wire.Value, error) { var ( - fields [16]wire.Field + fields [17]wire.Field i int = 0 w wire.Value err error @@ -9579,6 +9998,14 @@ func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) ToWire() (wire.Value, fields[i] = wire.Field{ID: 160, Value: w} i++ } + if v.CronOverlapPolicy != nil { + w, err = v.CronOverlapPolicy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 170, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -9601,6 +10028,12 @@ func _SearchAttributes_Read(w wire.Value) (*SearchAttributes, error) { return &v, err } +func _CronOverlapPolicy_Read(w wire.Value) (CronOverlapPolicy, error) { + var v CronOverlapPolicy + err := v.FromWire(w) + return v, err +} + // FromWire deserializes a ContinueAsNewWorkflowExecutionDecisionAttributes struct from its Thrift-level // representation. The Thrift-level representation may be obtained // from a ThriftRW protocol implementation. 
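// Editor's note: the snippet below is an illustrative sketch added for review
// purposes only; it is not part of the generated diff and would live in a
// caller's package, not in .gen/go/shared. It assumes this package is imported
// under its usual path go.uber.org/cadence/.gen/go/shared and shows how the
// new CronOverlapPolicy enum (wire field 170 on
// ContinueAsNewWorkflowExecutionDecisionAttributes, added in the hunks above)
// is set, read back, and round-tripped through its generated text marshaler.
package main

import (
	"fmt"

	"go.uber.org/cadence/.gen/go/shared"
)

func main() {
	// Ptr() returns the *CronOverlapPolicy that the generated optional field expects.
	attrs := &shared.ContinueAsNewWorkflowExecutionDecisionAttributes{
		CronOverlapPolicy: shared.CronOverlapPolicyBufferone.Ptr(),
	}

	// GetCronOverlapPolicy returns the zero value (SKIPPED) when the field is
	// unset, so callers can read it without a nil check.
	fmt.Println(attrs.IsSetCronOverlapPolicy())  // true
	fmt.Println(attrs.GetCronOverlapPolicy())    // BUFFERONE (via the generated String())

	// The enum also round-trips through the generated UnmarshalText, which
	// accepts either a known name or a raw integer value.
	var p shared.CronOverlapPolicy
	if err := p.UnmarshalText([]byte("SKIPPED")); err != nil {
		panic(err)
	}
	fmt.Println(p == shared.CronOverlapPolicySkipped) // true
}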
@@ -9764,6 +10197,16 @@ func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) FromWire(w wire.Value return err } + } + case 170: + if field.Value.Type() == wire.TI32 { + var x CronOverlapPolicy + x, err = _CronOverlapPolicy_Read(field.Value) + v.CronOverlapPolicy = &x + if err != nil { + return err + } + } } } @@ -9778,7 +10221,7 @@ func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) String() string { return "" } - var fields [16]string + var fields [17]string i := 0 if v.WorkflowType != nil { fields[i] = fmt.Sprintf("WorkflowType: %v", v.WorkflowType) @@ -9844,6 +10287,10 @@ func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) String() string { fields[i] = fmt.Sprintf("JitterStartSeconds: %v", *(v.JitterStartSeconds)) i++ } + if v.CronOverlapPolicy != nil { + fields[i] = fmt.Sprintf("CronOverlapPolicy: %v", *(v.CronOverlapPolicy)) + i++ + } return fmt.Sprintf("ContinueAsNewWorkflowExecutionDecisionAttributes{%v}", strings.Join(fields[:i], ", ")) } @@ -9858,6 +10305,16 @@ func _ContinueAsNewInitiator_EqualsPtr(lhs, rhs *ContinueAsNewInitiator) bool { return lhs == nil && rhs == nil } +func _CronOverlapPolicy_EqualsPtr(lhs, rhs *CronOverlapPolicy) bool { + if lhs != nil && rhs != nil { + + x := *lhs + y := *rhs + return x.Equals(y) + } + return lhs == nil && rhs == nil +} + // Equals returns true if all the fields of this ContinueAsNewWorkflowExecutionDecisionAttributes match the // provided ContinueAsNewWorkflowExecutionDecisionAttributes. // @@ -9916,6 +10373,9 @@ func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) Equals(rhs *ContinueA if !_I32_EqualsPtr(v.JitterStartSeconds, rhs.JitterStartSeconds) { return false } + if !_CronOverlapPolicy_EqualsPtr(v.CronOverlapPolicy, rhs.CronOverlapPolicy) { + return false + } return true } @@ -9974,6 +10434,9 @@ func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) MarshalLogObject(enc if v.JitterStartSeconds != nil { enc.AddInt32("jitterStartSeconds", *v.JitterStartSeconds) } + if v.CronOverlapPolicy != nil { + err = multierr.Append(err, enc.AddObject("cronOverlapPolicy", *v.CronOverlapPolicy)) + } return err } @@ -10217,6 +10680,21 @@ func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) IsSetJitterStartSecon return v != nil && v.JitterStartSeconds != nil } +// GetCronOverlapPolicy returns the value of CronOverlapPolicy if it is set or its +// zero value if it is unset. +func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) GetCronOverlapPolicy() (o CronOverlapPolicy) { + if v != nil && v.CronOverlapPolicy != nil { + return *v.CronOverlapPolicy + } + + return +} + +// IsSetCronOverlapPolicy returns true if CronOverlapPolicy is not nil. +func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) IsSetCronOverlapPolicy() bool { + return v != nil && v.CronOverlapPolicy != nil +} + type CountWorkflowExecutionsRequest struct { Domain *string `json:"domain,omitempty"` Query *string `json:"query,omitempty"` @@ -10537,6 +11015,177 @@ func (v *CountWorkflowExecutionsResponse) IsSetCount() bool { return v != nil && v.Count != nil } +type CronOverlapPolicy int32 + +const ( + CronOverlapPolicySkipped CronOverlapPolicy = 0 + CronOverlapPolicyBufferone CronOverlapPolicy = 1 +) + +// CronOverlapPolicy_Values returns all recognized values of CronOverlapPolicy. +func CronOverlapPolicy_Values() []CronOverlapPolicy { + return []CronOverlapPolicy{ + CronOverlapPolicySkipped, + CronOverlapPolicyBufferone, + } +} + +// UnmarshalText tries to decode CronOverlapPolicy from a byte slice +// containing its name. 
+// +// var v CronOverlapPolicy +// err := v.UnmarshalText([]byte("SKIPPED")) +func (v *CronOverlapPolicy) UnmarshalText(value []byte) error { + switch s := string(value); s { + case "SKIPPED": + *v = CronOverlapPolicySkipped + return nil + case "BUFFERONE": + *v = CronOverlapPolicyBufferone + return nil + default: + val, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return fmt.Errorf("unknown enum value %q for %q: %v", s, "CronOverlapPolicy", err) + } + *v = CronOverlapPolicy(val) + return nil + } +} + +// MarshalText encodes CronOverlapPolicy to text. +// +// If the enum value is recognized, its name is returned. Otherwise, +// its integer value is returned. +// +// This implements the TextMarshaler interface. +func (v CronOverlapPolicy) MarshalText() ([]byte, error) { + switch int32(v) { + case 0: + return []byte("SKIPPED"), nil + case 1: + return []byte("BUFFERONE"), nil + } + return []byte(strconv.FormatInt(int64(v), 10)), nil +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of CronOverlapPolicy. +// Enums are logged as objects, where the value is logged with key "value", and +// if this value's name is known, the name is logged with key "name". +func (v CronOverlapPolicy) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt32("value", int32(v)) + switch int32(v) { + case 0: + enc.AddString("name", "SKIPPED") + case 1: + enc.AddString("name", "BUFFERONE") + } + return nil +} + +// Ptr returns a pointer to this enum value. +func (v CronOverlapPolicy) Ptr() *CronOverlapPolicy { + return &v +} + +// ToWire translates CronOverlapPolicy into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// Enums are represented as 32-bit integers over the wire. +func (v CronOverlapPolicy) ToWire() (wire.Value, error) { + return wire.NewValueI32(int32(v)), nil +} + +// FromWire deserializes CronOverlapPolicy from its Thrift-level +// representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TI32) +// if err != nil { +// return CronOverlapPolicy(0), err +// } +// +// var v CronOverlapPolicy +// if err := v.FromWire(x); err != nil { +// return CronOverlapPolicy(0), err +// } +// return v, nil +func (v *CronOverlapPolicy) FromWire(w wire.Value) error { + *v = (CronOverlapPolicy)(w.GetI32()) + return nil +} + +// String returns a readable string representation of CronOverlapPolicy. +func (v CronOverlapPolicy) String() string { + w := int32(v) + switch w { + case 0: + return "SKIPPED" + case 1: + return "BUFFERONE" + } + return fmt.Sprintf("CronOverlapPolicy(%d)", w) +} + +// Equals returns true if this CronOverlapPolicy value matches the provided +// value. +func (v CronOverlapPolicy) Equals(rhs CronOverlapPolicy) bool { + return v == rhs +} + +// MarshalJSON serializes CronOverlapPolicy into JSON. +// +// If the enum value is recognized, its name is returned. Otherwise, +// its integer value is returned. +// +// This implements json.Marshaler. +func (v CronOverlapPolicy) MarshalJSON() ([]byte, error) { + switch int32(v) { + case 0: + return ([]byte)("\"SKIPPED\""), nil + case 1: + return ([]byte)("\"BUFFERONE\""), nil + } + return ([]byte)(strconv.FormatInt(int64(v), 10)), nil +} + +// UnmarshalJSON attempts to decode CronOverlapPolicy from its JSON +// representation. +// +// This implementation supports both, numeric and string inputs. If a +// string is provided, it must be a known enum name. 
+// +// This implements json.Unmarshaler. +func (v *CronOverlapPolicy) UnmarshalJSON(text []byte) error { + d := json.NewDecoder(bytes.NewReader(text)) + d.UseNumber() + t, err := d.Token() + if err != nil { + return err + } + + switch w := t.(type) { + case json.Number: + x, err := w.Int64() + if err != nil { + return err + } + if x > math.MaxInt32 { + return fmt.Errorf("enum overflow from JSON %q for %q", text, "CronOverlapPolicy") + } + if x < math.MinInt32 { + return fmt.Errorf("enum underflow from JSON %q for %q", text, "CronOverlapPolicy") + } + *v = (CronOverlapPolicy)(x) + return nil + case string: + return v.UnmarshalText([]byte(w)) + default: + return fmt.Errorf("invalid JSON value %q (%T) to unmarshal into %q", t, t, "CronOverlapPolicy") + } +} + type CrossClusterApplyParentClosePolicyRequestAttributes struct { Children []*ApplyParentClosePolicyRequest `json:"children,omitempty"` } @@ -18687,6 +19336,188 @@ func (v *DecisionType) UnmarshalJSON(text []byte) error { } } +type DeleteDomainRequest struct { + Name *string `json:"name,omitempty"` + SecurityToken *string `json:"securityToken,omitempty"` +} + +// ToWire translates a DeleteDomainRequest struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *DeleteDomainRequest) ToWire() (wire.Value, error) { + var ( + fields [2]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.Name != nil { + w, err = wire.NewValueString(*(v.Name)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.SecurityToken != nil { + w, err = wire.NewValueString(*(v.SecurityToken)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +// FromWire deserializes a DeleteDomainRequest struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a DeleteDomainRequest struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v DeleteDomainRequest +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *DeleteDomainRequest) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TBinary { + var x string + x, err = field.Value.GetString(), error(nil) + v.Name = &x + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TBinary { + var x string + x, err = field.Value.GetString(), error(nil) + v.SecurityToken = &x + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a DeleteDomainRequest +// struct. 
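Illustrative usage sketch (not part of the generated diff): how the new CronOverlapPolicy enum round-trips through the text and JSON codecs generated above. The import path is the generated shared package from this repository; everything else is a standalone example.

package main

import (
	"encoding/json"
	"fmt"

	"go.uber.org/cadence/.gen/go/shared"
)

func main() {
	// UnmarshalText accepts a known name ("SKIPPED", "BUFFERONE") or a numeric string.
	var p shared.CronOverlapPolicy
	if err := p.UnmarshalText([]byte("BUFFERONE")); err != nil {
		panic(err)
	}
	fmt.Println(p) // BUFFERONE

	// MarshalJSON emits the quoted name for recognized values, the integer otherwise.
	b, _ := json.Marshal(shared.CronOverlapPolicySkipped)
	fmt.Println(string(b)) // "SKIPPED"

	// UnmarshalJSON accepts either form as well.
	var q shared.CronOverlapPolicy
	if err := json.Unmarshal([]byte(`1`), &q); err != nil {
		panic(err)
	}
	fmt.Println(q == shared.CronOverlapPolicyBufferone) // true
}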
+func (v *DeleteDomainRequest) String() string { + if v == nil { + return "" + } + + var fields [2]string + i := 0 + if v.Name != nil { + fields[i] = fmt.Sprintf("Name: %v", *(v.Name)) + i++ + } + if v.SecurityToken != nil { + fields[i] = fmt.Sprintf("SecurityToken: %v", *(v.SecurityToken)) + i++ + } + + return fmt.Sprintf("DeleteDomainRequest{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this DeleteDomainRequest match the +// provided DeleteDomainRequest. +// +// This function performs a deep comparison. +func (v *DeleteDomainRequest) Equals(rhs *DeleteDomainRequest) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !_String_EqualsPtr(v.Name, rhs.Name) { + return false + } + if !_String_EqualsPtr(v.SecurityToken, rhs.SecurityToken) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of DeleteDomainRequest. +func (v *DeleteDomainRequest) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.Name != nil { + enc.AddString("name", *v.Name) + } + if v.SecurityToken != nil { + enc.AddString("securityToken", *v.SecurityToken) + } + return err +} + +// GetName returns the value of Name if it is set or its +// zero value if it is unset. +func (v *DeleteDomainRequest) GetName() (o string) { + if v != nil && v.Name != nil { + return *v.Name + } + + return +} + +// IsSetName returns true if Name is not nil. +func (v *DeleteDomainRequest) IsSetName() bool { + return v != nil && v.Name != nil +} + +// GetSecurityToken returns the value of SecurityToken if it is set or its +// zero value if it is unset. +func (v *DeleteDomainRequest) GetSecurityToken() (o string) { + if v != nil && v.SecurityToken != nil { + return *v.SecurityToken + } + + return +} + +// IsSetSecurityToken returns true if SecurityToken is not nil. 
+func (v *DeleteDomainRequest) IsSetSecurityToken() bool { + return v != nil && v.SecurityToken != nil +} + type DeprecateDomainRequest struct { Name *string `json:"name,omitempty"` SecurityToken *string `json:"securityToken,omitempty"` @@ -21481,8 +22312,9 @@ func (v *DescribeTaskListResponse) IsSetTaskListStatus() bool { } type DescribeWorkflowExecutionRequest struct { - Domain *string `json:"domain,omitempty"` - Execution *WorkflowExecution `json:"execution,omitempty"` + Domain *string `json:"domain,omitempty"` + Execution *WorkflowExecution `json:"execution,omitempty"` + QueryConsistencyLevel *QueryConsistencyLevel `json:"queryConsistencyLevel,omitempty"` } // ToWire translates a DescribeWorkflowExecutionRequest struct into a Thrift-level intermediate @@ -21502,7 +22334,7 @@ type DescribeWorkflowExecutionRequest struct { // } func (v *DescribeWorkflowExecutionRequest) ToWire() (wire.Value, error) { var ( - fields [2]wire.Field + fields [3]wire.Field i int = 0 w wire.Value err error @@ -21524,10 +22356,24 @@ func (v *DescribeWorkflowExecutionRequest) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 20, Value: w} i++ } + if v.QueryConsistencyLevel != nil { + w, err = v.QueryConsistencyLevel.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 30, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } +func _QueryConsistencyLevel_Read(w wire.Value) (QueryConsistencyLevel, error) { + var v QueryConsistencyLevel + err := v.FromWire(w) + return v, err +} + // FromWire deserializes a DescribeWorkflowExecutionRequest struct from its Thrift-level // representation. The Thrift-level representation may be obtained // from a ThriftRW protocol implementation. @@ -21567,6 +22413,16 @@ func (v *DescribeWorkflowExecutionRequest) FromWire(w wire.Value) error { return err } + } + case 30: + if field.Value.Type() == wire.TI32 { + var x QueryConsistencyLevel + x, err = _QueryConsistencyLevel_Read(field.Value) + v.QueryConsistencyLevel = &x + if err != nil { + return err + } + } } } @@ -21581,7 +22437,7 @@ func (v *DescribeWorkflowExecutionRequest) String() string { return "" } - var fields [2]string + var fields [3]string i := 0 if v.Domain != nil { fields[i] = fmt.Sprintf("Domain: %v", *(v.Domain)) @@ -21591,10 +22447,24 @@ func (v *DescribeWorkflowExecutionRequest) String() string { fields[i] = fmt.Sprintf("Execution: %v", v.Execution) i++ } + if v.QueryConsistencyLevel != nil { + fields[i] = fmt.Sprintf("QueryConsistencyLevel: %v", *(v.QueryConsistencyLevel)) + i++ + } return fmt.Sprintf("DescribeWorkflowExecutionRequest{%v}", strings.Join(fields[:i], ", ")) } +func _QueryConsistencyLevel_EqualsPtr(lhs, rhs *QueryConsistencyLevel) bool { + if lhs != nil && rhs != nil { + + x := *lhs + y := *rhs + return x.Equals(y) + } + return lhs == nil && rhs == nil +} + // Equals returns true if all the fields of this DescribeWorkflowExecutionRequest match the // provided DescribeWorkflowExecutionRequest. 
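A minimal sketch (illustrative only, not part of the patch) of the nil-safe accessor pattern the generated code follows, shown with the new DeleteDomainRequest type; the strPtr helper is local to the example and not part of the generated API.

package main

import (
	"fmt"

	"go.uber.org/cadence/.gen/go/shared"
)

// strPtr is a local helper for this sketch; it is not part of the generated API.
func strPtr(s string) *string { return &s }

func main() {
	var req *shared.DeleteDomainRequest // deliberately nil
	// The generated Get*/IsSet* accessors are nil-safe and return zero values.
	fmt.Printf("%q %v\n", req.GetName(), req.IsSetName())

	req = &shared.DeleteDomainRequest{
		Name:          strPtr("samples-domain"),
		SecurityToken: strPtr("secret-token"),
	}
	fmt.Printf("%q %v\n", req.GetName(), req.IsSetSecurityToken())
}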
// @@ -21611,6 +22481,9 @@ func (v *DescribeWorkflowExecutionRequest) Equals(rhs *DescribeWorkflowExecution if !((v.Execution == nil && rhs.Execution == nil) || (v.Execution != nil && rhs.Execution != nil && v.Execution.Equals(rhs.Execution))) { return false } + if !_QueryConsistencyLevel_EqualsPtr(v.QueryConsistencyLevel, rhs.QueryConsistencyLevel) { + return false + } return true } @@ -21627,6 +22500,9 @@ func (v *DescribeWorkflowExecutionRequest) MarshalLogObject(enc zapcore.ObjectEn if v.Execution != nil { err = multierr.Append(err, enc.AddObject("execution", v.Execution)) } + if v.QueryConsistencyLevel != nil { + err = multierr.Append(err, enc.AddObject("queryConsistencyLevel", *v.QueryConsistencyLevel)) + } return err } @@ -21660,6 +22536,21 @@ func (v *DescribeWorkflowExecutionRequest) IsSetExecution() bool { return v != nil && v.Execution != nil } +// GetQueryConsistencyLevel returns the value of QueryConsistencyLevel if it is set or its +// zero value if it is unset. +func (v *DescribeWorkflowExecutionRequest) GetQueryConsistencyLevel() (o QueryConsistencyLevel) { + if v != nil && v.QueryConsistencyLevel != nil { + return *v.QueryConsistencyLevel + } + + return +} + +// IsSetQueryConsistencyLevel returns true if QueryConsistencyLevel is not nil. +func (v *DescribeWorkflowExecutionRequest) IsSetQueryConsistencyLevel() bool { + return v != nil && v.QueryConsistencyLevel != nil +} + type DescribeWorkflowExecutionResponse struct { ExecutionConfiguration *WorkflowExecutionConfiguration `json:"executionConfiguration,omitempty"` WorkflowExecutionInfo *WorkflowExecutionInfo `json:"workflowExecutionInfo,omitempty"` @@ -23757,10 +24648,11 @@ func (v *DomainInfo) IsSetUUID() bool { } type DomainNotActiveError struct { - Message string `json:"message,required"` - DomainName string `json:"domainName,required"` - CurrentCluster string `json:"currentCluster,required"` - ActiveCluster string `json:"activeCluster,required"` + Message string `json:"message,required"` + DomainName string `json:"domainName,required"` + CurrentCluster string `json:"currentCluster,required"` + ActiveCluster string `json:"activeCluster,required"` + ActiveClusters []string `json:"activeClusters,required"` } // ToWire translates a DomainNotActiveError struct into a Thrift-level intermediate @@ -23780,7 +24672,7 @@ type DomainNotActiveError struct { // } func (v *DomainNotActiveError) ToWire() (wire.Value, error) { var ( - fields [4]wire.Field + fields [5]wire.Field i int = 0 w wire.Value err error @@ -23814,6 +24706,13 @@ func (v *DomainNotActiveError) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 4, Value: w} i++ + w, err = wire.NewValueList(_List_String_ValueList(v.ActiveClusters)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 5, Value: w} + i++ + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -23841,6 +24740,7 @@ func (v *DomainNotActiveError) FromWire(w wire.Value) error { domainNameIsSet := false currentClusterIsSet := false activeClusterIsSet := false + activeClustersIsSet := false for _, field := range w.GetStruct().Fields { switch field.ID { @@ -23876,6 +24776,14 @@ func (v *DomainNotActiveError) FromWire(w wire.Value) error { } activeClusterIsSet = true } + case 5: + if field.Value.Type() == wire.TList { + v.ActiveClusters, err = _List_String_Read(field.Value.GetList()) + if err != nil { + return err + } + activeClustersIsSet = true + } } } @@ -23895,6 +24803,10 @@ func (v *DomainNotActiveError) FromWire(w wire.Value) error { return 
errors.New("field ActiveCluster of DomainNotActiveError is required") } + if !activeClustersIsSet { + return errors.New("field ActiveClusters of DomainNotActiveError is required") + } + return nil } @@ -23905,7 +24817,7 @@ func (v *DomainNotActiveError) String() string { return "" } - var fields [4]string + var fields [5]string i := 0 fields[i] = fmt.Sprintf("Message: %v", v.Message) i++ @@ -23915,6 +24827,8 @@ func (v *DomainNotActiveError) String() string { i++ fields[i] = fmt.Sprintf("ActiveCluster: %v", v.ActiveCluster) i++ + fields[i] = fmt.Sprintf("ActiveClusters: %v", v.ActiveClusters) + i++ return fmt.Sprintf("DomainNotActiveError{%v}", strings.Join(fields[:i], ", ")) } @@ -23947,6 +24861,9 @@ func (v *DomainNotActiveError) Equals(rhs *DomainNotActiveError) bool { if !(v.ActiveCluster == rhs.ActiveCluster) { return false } + if !_List_String_Equals(v.ActiveClusters, rhs.ActiveClusters) { + return false + } return true } @@ -23961,6 +24878,7 @@ func (v *DomainNotActiveError) MarshalLogObject(enc zapcore.ObjectEncoder) (err enc.AddString("domainName", v.DomainName) enc.AddString("currentCluster", v.CurrentCluster) enc.AddString("activeCluster", v.ActiveCluster) + err = multierr.Append(err, enc.AddArray("activeClusters", (_List_String_Zapper)(v.ActiveClusters))) return err } @@ -24000,6 +24918,20 @@ func (v *DomainNotActiveError) GetActiveCluster() (o string) { return } +// GetActiveClusters returns the value of ActiveClusters if it is set or its +// zero value if it is unset. +func (v *DomainNotActiveError) GetActiveClusters() (o []string) { + if v != nil { + o = v.ActiveClusters + } + return +} + +// IsSetActiveClusters returns true if ActiveClusters is not nil. +func (v *DomainNotActiveError) IsSetActiveClusters() bool { + return v != nil && v.ActiveClusters != nil +} + func (v *DomainNotActiveError) Error() string { return v.String() } @@ -24007,6 +24939,7 @@ func (v *DomainNotActiveError) Error() string { type DomainReplicationConfiguration struct { ActiveClusterName *string `json:"activeClusterName,omitempty"` Clusters []*ClusterReplicationConfiguration `json:"clusters,omitempty"` + ActiveClusters *ActiveClusters `json:"activeClusters,omitempty"` } type _List_ClusterReplicationConfiguration_ValueList []*ClusterReplicationConfiguration @@ -24055,7 +24988,7 @@ func (_List_ClusterReplicationConfiguration_ValueList) Close() {} // } func (v *DomainReplicationConfiguration) ToWire() (wire.Value, error) { var ( - fields [2]wire.Field + fields [3]wire.Field i int = 0 w wire.Value err error @@ -24077,6 +25010,14 @@ func (v *DomainReplicationConfiguration) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 20, Value: w} i++ } + if v.ActiveClusters != nil { + w, err = v.ActiveClusters.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 30, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -24105,6 +25046,12 @@ func _List_ClusterReplicationConfiguration_Read(l wire.ValueList) ([]*ClusterRep return o, err } +func _ActiveClusters_Read(w wire.Value) (*ActiveClusters, error) { + var v ActiveClusters + err := v.FromWire(w) + return &v, err +} + // FromWire deserializes a DomainReplicationConfiguration struct from its Thrift-level // representation. The Thrift-level representation may be obtained // from a ThriftRW protocol implementation. 
@@ -24144,6 +25091,14 @@ func (v *DomainReplicationConfiguration) FromWire(w wire.Value) error { return err } + } + case 30: + if field.Value.Type() == wire.TStruct { + v.ActiveClusters, err = _ActiveClusters_Read(field.Value) + if err != nil { + return err + } + } } } @@ -24158,7 +25113,7 @@ func (v *DomainReplicationConfiguration) String() string { return "" } - var fields [2]string + var fields [3]string i := 0 if v.ActiveClusterName != nil { fields[i] = fmt.Sprintf("ActiveClusterName: %v", *(v.ActiveClusterName)) @@ -24168,6 +25123,10 @@ func (v *DomainReplicationConfiguration) String() string { fields[i] = fmt.Sprintf("Clusters: %v", v.Clusters) i++ } + if v.ActiveClusters != nil { + fields[i] = fmt.Sprintf("ActiveClusters: %v", v.ActiveClusters) + i++ + } return fmt.Sprintf("DomainReplicationConfiguration{%v}", strings.Join(fields[:i], ", ")) } @@ -24203,6 +25162,9 @@ func (v *DomainReplicationConfiguration) Equals(rhs *DomainReplicationConfigurat if !((v.Clusters == nil && rhs.Clusters == nil) || (v.Clusters != nil && rhs.Clusters != nil && _List_ClusterReplicationConfiguration_Equals(v.Clusters, rhs.Clusters))) { return false } + if !((v.ActiveClusters == nil && rhs.ActiveClusters == nil) || (v.ActiveClusters != nil && rhs.ActiveClusters != nil && v.ActiveClusters.Equals(rhs.ActiveClusters))) { + return false + } return true } @@ -24230,6 +25192,9 @@ func (v *DomainReplicationConfiguration) MarshalLogObject(enc zapcore.ObjectEnco if v.Clusters != nil { err = multierr.Append(err, enc.AddArray("clusters", (_List_ClusterReplicationConfiguration_Zapper)(v.Clusters))) } + if v.ActiveClusters != nil { + err = multierr.Append(err, enc.AddObject("activeClusters", v.ActiveClusters)) + } return err } @@ -24263,6 +25228,21 @@ func (v *DomainReplicationConfiguration) IsSetClusters() bool { return v != nil && v.Clusters != nil } +// GetActiveClusters returns the value of ActiveClusters if it is set or its +// zero value if it is unset. +func (v *DomainReplicationConfiguration) GetActiveClusters() (o *ActiveClusters) { + if v != nil && v.ActiveClusters != nil { + return v.ActiveClusters + } + + return +} + +// IsSetActiveClusters returns true if ActiveClusters is not nil. 
+func (v *DomainReplicationConfiguration) IsSetActiveClusters() bool { + return v != nil && v.ActiveClusters != nil +} + type DomainStatus int32 const ( @@ -24619,9 +25599,10 @@ func (v *EncodingType) UnmarshalJSON(text []byte) error { } type EntityNotExistsError struct { - Message string `json:"message,required"` - CurrentCluster *string `json:"currentCluster,omitempty"` - ActiveCluster *string `json:"activeCluster,omitempty"` + Message string `json:"message,required"` + CurrentCluster *string `json:"currentCluster,omitempty"` + ActiveCluster *string `json:"activeCluster,omitempty"` + ActiveClusters []string `json:"activeClusters,required"` } // ToWire translates a EntityNotExistsError struct into a Thrift-level intermediate @@ -24641,7 +25622,7 @@ type EntityNotExistsError struct { // } func (v *EntityNotExistsError) ToWire() (wire.Value, error) { var ( - fields [3]wire.Field + fields [4]wire.Field i int = 0 w wire.Value err error @@ -24670,6 +25651,13 @@ func (v *EntityNotExistsError) ToWire() (wire.Value, error) { i++ } + w, err = wire.NewValueList(_List_String_ValueList(v.ActiveClusters)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 4, Value: w} + i++ + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -24695,6 +25683,8 @@ func (v *EntityNotExistsError) FromWire(w wire.Value) error { messageIsSet := false + activeClustersIsSet := false + for _, field := range w.GetStruct().Fields { switch field.ID { case 1: @@ -24725,6 +25715,14 @@ func (v *EntityNotExistsError) FromWire(w wire.Value) error { } } + case 4: + if field.Value.Type() == wire.TList { + v.ActiveClusters, err = _List_String_Read(field.Value.GetList()) + if err != nil { + return err + } + activeClustersIsSet = true + } } } @@ -24732,6 +25730,10 @@ func (v *EntityNotExistsError) FromWire(w wire.Value) error { return errors.New("field Message of EntityNotExistsError is required") } + if !activeClustersIsSet { + return errors.New("field ActiveClusters of EntityNotExistsError is required") + } + return nil } @@ -24742,7 +25744,7 @@ func (v *EntityNotExistsError) String() string { return "" } - var fields [3]string + var fields [4]string i := 0 fields[i] = fmt.Sprintf("Message: %v", v.Message) i++ @@ -24754,6 +25756,8 @@ func (v *EntityNotExistsError) String() string { fields[i] = fmt.Sprintf("ActiveCluster: %v", *(v.ActiveCluster)) i++ } + fields[i] = fmt.Sprintf("ActiveClusters: %v", v.ActiveClusters) + i++ return fmt.Sprintf("EntityNotExistsError{%v}", strings.Join(fields[:i], ", ")) } @@ -24783,6 +25787,9 @@ func (v *EntityNotExistsError) Equals(rhs *EntityNotExistsError) bool { if !_String_EqualsPtr(v.ActiveCluster, rhs.ActiveCluster) { return false } + if !_List_String_Equals(v.ActiveClusters, rhs.ActiveClusters) { + return false + } return true } @@ -24800,6 +25807,7 @@ func (v *EntityNotExistsError) MarshalLogObject(enc zapcore.ObjectEncoder) (err if v.ActiveCluster != nil { enc.AddString("activeCluster", *v.ActiveCluster) } + err = multierr.Append(err, enc.AddArray("activeClusters", (_List_String_Zapper)(v.ActiveClusters))) return err } @@ -24842,6 +25850,20 @@ func (v *EntityNotExistsError) IsSetActiveCluster() bool { return v != nil && v.ActiveCluster != nil } +// GetActiveClusters returns the value of ActiveClusters if it is set or its +// zero value if it is unset. +func (v *EntityNotExistsError) GetActiveClusters() (o []string) { + if v != nil { + o = v.ActiveClusters + } + return +} + +// IsSetActiveClusters returns true if ActiveClusters is not nil. 
+func (v *EntityNotExistsError) IsSetActiveClusters() bool { + return v != nil && v.ActiveClusters != nil +} + func (v *EntityNotExistsError) Error() string { return v.String() } @@ -28305,6 +29327,7 @@ type GetWorkflowExecutionHistoryRequest struct { WaitForNewEvent *bool `json:"waitForNewEvent,omitempty"` HistoryEventFilterType *HistoryEventFilterType `json:"HistoryEventFilterType,omitempty"` SkipArchival *bool `json:"skipArchival,omitempty"` + QueryConsistencyLevel *QueryConsistencyLevel `json:"queryConsistencyLevel,omitempty"` } // ToWire translates a GetWorkflowExecutionHistoryRequest struct into a Thrift-level intermediate @@ -28324,7 +29347,7 @@ type GetWorkflowExecutionHistoryRequest struct { // } func (v *GetWorkflowExecutionHistoryRequest) ToWire() (wire.Value, error) { var ( - fields [7]wire.Field + fields [8]wire.Field i int = 0 w wire.Value err error @@ -28386,6 +29409,14 @@ func (v *GetWorkflowExecutionHistoryRequest) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 70, Value: w} i++ } + if v.QueryConsistencyLevel != nil { + w, err = v.QueryConsistencyLevel.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 80, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -28483,6 +29514,16 @@ func (v *GetWorkflowExecutionHistoryRequest) FromWire(w wire.Value) error { return err } + } + case 80: + if field.Value.Type() == wire.TI32 { + var x QueryConsistencyLevel + x, err = _QueryConsistencyLevel_Read(field.Value) + v.QueryConsistencyLevel = &x + if err != nil { + return err + } + } } } @@ -28497,7 +29538,7 @@ func (v *GetWorkflowExecutionHistoryRequest) String() string { return "" } - var fields [7]string + var fields [8]string i := 0 if v.Domain != nil { fields[i] = fmt.Sprintf("Domain: %v", *(v.Domain)) @@ -28527,6 +29568,10 @@ func (v *GetWorkflowExecutionHistoryRequest) String() string { fields[i] = fmt.Sprintf("SkipArchival: %v", *(v.SkipArchival)) i++ } + if v.QueryConsistencyLevel != nil { + fields[i] = fmt.Sprintf("QueryConsistencyLevel: %v", *(v.QueryConsistencyLevel)) + i++ + } return fmt.Sprintf("GetWorkflowExecutionHistoryRequest{%v}", strings.Join(fields[:i], ", ")) } @@ -28572,6 +29617,9 @@ func (v *GetWorkflowExecutionHistoryRequest) Equals(rhs *GetWorkflowExecutionHis if !_Bool_EqualsPtr(v.SkipArchival, rhs.SkipArchival) { return false } + if !_QueryConsistencyLevel_EqualsPtr(v.QueryConsistencyLevel, rhs.QueryConsistencyLevel) { + return false + } return true } @@ -28603,6 +29651,9 @@ func (v *GetWorkflowExecutionHistoryRequest) MarshalLogObject(enc zapcore.Object if v.SkipArchival != nil { enc.AddBool("skipArchival", *v.SkipArchival) } + if v.QueryConsistencyLevel != nil { + err = multierr.Append(err, enc.AddObject("queryConsistencyLevel", *v.QueryConsistencyLevel)) + } return err } @@ -28711,6 +29762,21 @@ func (v *GetWorkflowExecutionHistoryRequest) IsSetSkipArchival() bool { return v != nil && v.SkipArchival != nil } +// GetQueryConsistencyLevel returns the value of QueryConsistencyLevel if it is set or its +// zero value if it is unset. +func (v *GetWorkflowExecutionHistoryRequest) GetQueryConsistencyLevel() (o QueryConsistencyLevel) { + if v != nil && v.QueryConsistencyLevel != nil { + return *v.QueryConsistencyLevel + } + + return +} + +// IsSetQueryConsistencyLevel returns true if QueryConsistencyLevel is not nil. 
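Illustrative only (not part of the generated diff): populating the new QueryConsistencyLevel field added to GetWorkflowExecutionHistoryRequest. The QueryConsistencyLevelStrong constant and its Ptr() helper are assumed from the existing QueryConsistencyLevel enum already generated in this package; the strPtr helper and all names are example-only.

package main

import (
	"fmt"

	"go.uber.org/cadence/.gen/go/shared"
)

func strPtr(s string) *string { return &s } // local helper, not part of the generated API

func main() {
	req := &shared.GetWorkflowExecutionHistoryRequest{
		Domain: strPtr("samples-domain"),
		Execution: &shared.WorkflowExecution{
			WorkflowId: strPtr("order-processing"),
		},
		// New field in this patch (wire ID 80): request a strongly consistent history read.
		QueryConsistencyLevel: shared.QueryConsistencyLevelStrong.Ptr(),
	}
	fmt.Println(req.GetQueryConsistencyLevel(), req.IsSetQueryConsistencyLevel())
}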
+func (v *GetWorkflowExecutionHistoryRequest) IsSetQueryConsistencyLevel() bool { + return v != nil && v.QueryConsistencyLevel != nil +} + type GetWorkflowExecutionHistoryResponse struct { History *History `json:"history,omitempty"` RawHistory []*DataBlob `json:"rawHistory,omitempty"` @@ -33251,6 +34317,198 @@ func (v *IsolationGroupConfiguration) IsSetIsolationGroups() bool { return v != nil && v.IsolationGroups != nil } +type IsolationGroupMetrics struct { + NewTasksPerSecond *float64 `json:"newTasksPerSecond,omitempty"` + PollerCount *int64 `json:"pollerCount,omitempty"` +} + +// ToWire translates a IsolationGroupMetrics struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *IsolationGroupMetrics) ToWire() (wire.Value, error) { + var ( + fields [2]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.NewTasksPerSecond != nil { + w, err = wire.NewValueDouble(*(v.NewTasksPerSecond)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.PollerCount != nil { + w, err = wire.NewValueI64(*(v.PollerCount)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +// FromWire deserializes a IsolationGroupMetrics struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a IsolationGroupMetrics struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v IsolationGroupMetrics +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *IsolationGroupMetrics) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TDouble { + var x float64 + x, err = field.Value.GetDouble(), error(nil) + v.NewTasksPerSecond = &x + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TI64 { + var x int64 + x, err = field.Value.GetI64(), error(nil) + v.PollerCount = &x + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a IsolationGroupMetrics +// struct. +func (v *IsolationGroupMetrics) String() string { + if v == nil { + return "" + } + + var fields [2]string + i := 0 + if v.NewTasksPerSecond != nil { + fields[i] = fmt.Sprintf("NewTasksPerSecond: %v", *(v.NewTasksPerSecond)) + i++ + } + if v.PollerCount != nil { + fields[i] = fmt.Sprintf("PollerCount: %v", *(v.PollerCount)) + i++ + } + + return fmt.Sprintf("IsolationGroupMetrics{%v}", strings.Join(fields[:i], ", ")) +} + +func _Double_EqualsPtr(lhs, rhs *float64) bool { + if lhs != nil && rhs != nil { + + x := *lhs + y := *rhs + return (x == y) + } + return lhs == nil && rhs == nil +} + +// Equals returns true if all the fields of this IsolationGroupMetrics match the +// provided IsolationGroupMetrics. 
+// +// This function performs a deep comparison. +func (v *IsolationGroupMetrics) Equals(rhs *IsolationGroupMetrics) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !_Double_EqualsPtr(v.NewTasksPerSecond, rhs.NewTasksPerSecond) { + return false + } + if !_I64_EqualsPtr(v.PollerCount, rhs.PollerCount) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of IsolationGroupMetrics. +func (v *IsolationGroupMetrics) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.NewTasksPerSecond != nil { + enc.AddFloat64("newTasksPerSecond", *v.NewTasksPerSecond) + } + if v.PollerCount != nil { + enc.AddInt64("pollerCount", *v.PollerCount) + } + return err +} + +// GetNewTasksPerSecond returns the value of NewTasksPerSecond if it is set or its +// zero value if it is unset. +func (v *IsolationGroupMetrics) GetNewTasksPerSecond() (o float64) { + if v != nil && v.NewTasksPerSecond != nil { + return *v.NewTasksPerSecond + } + + return +} + +// IsSetNewTasksPerSecond returns true if NewTasksPerSecond is not nil. +func (v *IsolationGroupMetrics) IsSetNewTasksPerSecond() bool { + return v != nil && v.NewTasksPerSecond != nil +} + +// GetPollerCount returns the value of PollerCount if it is set or its +// zero value if it is unset. +func (v *IsolationGroupMetrics) GetPollerCount() (o int64) { + if v != nil && v.PollerCount != nil { + return *v.PollerCount + } + + return +} + +// IsSetPollerCount returns true if PollerCount is not nil. +func (v *IsolationGroupMetrics) IsSetPollerCount() bool { + return v != nil && v.PollerCount != nil +} + type IsolationGroupPartition struct { Name *string `json:"name,omitempty"` State *IsolationGroupState `json:"state,omitempty"` @@ -41619,16 +42877,6 @@ func (v *PollerInfo) String() string { return fmt.Sprintf("PollerInfo{%v}", strings.Join(fields[:i], ", ")) } -func _Double_EqualsPtr(lhs, rhs *float64) bool { - if lhs != nil && rhs != nil { - - x := *lhs - y := *rhs - return (x == y) - } - return lhs == nil && rhs == nil -} - // Equals returns true if all the fields of this PollerInfo match the // provided PollerInfo. // @@ -42758,12 +44006,6 @@ func _QueryRejectCondition_Read(w wire.Value) (QueryRejectCondition, error) { return v, err } -func _QueryConsistencyLevel_Read(w wire.Value) (QueryConsistencyLevel, error) { - var v QueryConsistencyLevel - err := v.FromWire(w) - return v, err -} - // FromWire deserializes a QueryWorkflowRequest struct from its Thrift-level // representation. The Thrift-level representation may be obtained // from a ThriftRW protocol implementation. @@ -42881,16 +44123,6 @@ func _QueryRejectCondition_EqualsPtr(lhs, rhs *QueryRejectCondition) bool { return lhs == nil && rhs == nil } -func _QueryConsistencyLevel_EqualsPtr(lhs, rhs *QueryConsistencyLevel) bool { - if lhs != nil && rhs != nil { - - x := *lhs - y := *rhs - return x.Equals(y) - } - return lhs == nil && rhs == nil -} - // Equals returns true if all the fields of this QueryWorkflowRequest match the // provided QueryWorkflowRequest. 
// @@ -43203,6 +44435,303 @@ func (v *QueryWorkflowResponse) IsSetQueryRejected() bool { return v != nil && v.QueryRejected != nil } +type QueueState struct { + VirtualQueueStates map[int64]*VirtualQueueState `json:"virtualQueueStates,omitempty"` + ExclusiveMaxReadLevel *TaskKey `json:"exclusiveMaxReadLevel,omitempty"` +} + +type _Map_I64_VirtualQueueState_MapItemList map[int64]*VirtualQueueState + +func (m _Map_I64_VirtualQueueState_MapItemList) ForEach(f func(wire.MapItem) error) error { + for k, v := range m { + if v == nil { + return fmt.Errorf("invalid [%v]: value is nil", k) + } + kw, err := wire.NewValueI64(k), error(nil) + if err != nil { + return err + } + + vw, err := v.ToWire() + if err != nil { + return err + } + err = f(wire.MapItem{Key: kw, Value: vw}) + if err != nil { + return err + } + } + return nil +} + +func (m _Map_I64_VirtualQueueState_MapItemList) Size() int { + return len(m) +} + +func (_Map_I64_VirtualQueueState_MapItemList) KeyType() wire.Type { + return wire.TI64 +} + +func (_Map_I64_VirtualQueueState_MapItemList) ValueType() wire.Type { + return wire.TStruct +} + +func (_Map_I64_VirtualQueueState_MapItemList) Close() {} + +// ToWire translates a QueueState struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *QueueState) ToWire() (wire.Value, error) { + var ( + fields [2]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.VirtualQueueStates != nil { + w, err = wire.NewValueMap(_Map_I64_VirtualQueueState_MapItemList(v.VirtualQueueStates)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.ExclusiveMaxReadLevel != nil { + w, err = v.ExclusiveMaxReadLevel.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _VirtualQueueState_Read(w wire.Value) (*VirtualQueueState, error) { + var v VirtualQueueState + err := v.FromWire(w) + return &v, err +} + +func _Map_I64_VirtualQueueState_Read(m wire.MapItemList) (map[int64]*VirtualQueueState, error) { + if m.KeyType() != wire.TI64 { + return nil, nil + } + + if m.ValueType() != wire.TStruct { + return nil, nil + } + + o := make(map[int64]*VirtualQueueState, m.Size()) + err := m.ForEach(func(x wire.MapItem) error { + k, err := x.Key.GetI64(), error(nil) + if err != nil { + return err + } + + v, err := _VirtualQueueState_Read(x.Value) + if err != nil { + return err + } + + o[k] = v + return nil + }) + m.Close() + return o, err +} + +func _TaskKey_Read(w wire.Value) (*TaskKey, error) { + var v TaskKey + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a QueueState struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a QueueState struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v QueueState +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *QueueState) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TMap { + v.VirtualQueueStates, err = _Map_I64_VirtualQueueState_Read(field.Value.GetMap()) + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TStruct { + v.ExclusiveMaxReadLevel, err = _TaskKey_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a QueueState +// struct. +func (v *QueueState) String() string { + if v == nil { + return "" + } + + var fields [2]string + i := 0 + if v.VirtualQueueStates != nil { + fields[i] = fmt.Sprintf("VirtualQueueStates: %v", v.VirtualQueueStates) + i++ + } + if v.ExclusiveMaxReadLevel != nil { + fields[i] = fmt.Sprintf("ExclusiveMaxReadLevel: %v", v.ExclusiveMaxReadLevel) + i++ + } + + return fmt.Sprintf("QueueState{%v}", strings.Join(fields[:i], ", ")) +} + +func _Map_I64_VirtualQueueState_Equals(lhs, rhs map[int64]*VirtualQueueState) bool { + if len(lhs) != len(rhs) { + return false + } + + for lk, lv := range lhs { + rv, ok := rhs[lk] + if !ok { + return false + } + if !lv.Equals(rv) { + return false + } + } + return true +} + +// Equals returns true if all the fields of this QueueState match the +// provided QueueState. +// +// This function performs a deep comparison. +func (v *QueueState) Equals(rhs *QueueState) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.VirtualQueueStates == nil && rhs.VirtualQueueStates == nil) || (v.VirtualQueueStates != nil && rhs.VirtualQueueStates != nil && _Map_I64_VirtualQueueState_Equals(v.VirtualQueueStates, rhs.VirtualQueueStates))) { + return false + } + if !((v.ExclusiveMaxReadLevel == nil && rhs.ExclusiveMaxReadLevel == nil) || (v.ExclusiveMaxReadLevel != nil && rhs.ExclusiveMaxReadLevel != nil && v.ExclusiveMaxReadLevel.Equals(rhs.ExclusiveMaxReadLevel))) { + return false + } + + return true +} + +type _Map_I64_VirtualQueueState_Item_Zapper struct { + Key int64 + Value *VirtualQueueState +} + +// MarshalLogArray implements zapcore.ArrayMarshaler, enabling +// fast logging of _Map_I64_VirtualQueueState_Item_Zapper. +func (v _Map_I64_VirtualQueueState_Item_Zapper) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + enc.AddInt64("key", v.Key) + err = multierr.Append(err, enc.AddObject("value", v.Value)) + return err +} + +type _Map_I64_VirtualQueueState_Zapper map[int64]*VirtualQueueState + +// MarshalLogArray implements zapcore.ArrayMarshaler, enabling +// fast logging of _Map_I64_VirtualQueueState_Zapper. +func (m _Map_I64_VirtualQueueState_Zapper) MarshalLogArray(enc zapcore.ArrayEncoder) (err error) { + for k, v := range m { + err = multierr.Append(err, enc.AppendObject(_Map_I64_VirtualQueueState_Item_Zapper{Key: k, Value: v})) + } + return err +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of QueueState. 
+func (v *QueueState) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.VirtualQueueStates != nil { + err = multierr.Append(err, enc.AddArray("virtualQueueStates", (_Map_I64_VirtualQueueState_Zapper)(v.VirtualQueueStates))) + } + if v.ExclusiveMaxReadLevel != nil { + err = multierr.Append(err, enc.AddObject("exclusiveMaxReadLevel", v.ExclusiveMaxReadLevel)) + } + return err +} + +// GetVirtualQueueStates returns the value of VirtualQueueStates if it is set or its +// zero value if it is unset. +func (v *QueueState) GetVirtualQueueStates() (o map[int64]*VirtualQueueState) { + if v != nil && v.VirtualQueueStates != nil { + return v.VirtualQueueStates + } + + return +} + +// IsSetVirtualQueueStates returns true if VirtualQueueStates is not nil. +func (v *QueueState) IsSetVirtualQueueStates() bool { + return v != nil && v.VirtualQueueStates != nil +} + +// GetExclusiveMaxReadLevel returns the value of ExclusiveMaxReadLevel if it is set or its +// zero value if it is unset. +func (v *QueueState) GetExclusiveMaxReadLevel() (o *TaskKey) { + if v != nil && v.ExclusiveMaxReadLevel != nil { + return v.ExclusiveMaxReadLevel + } + + return +} + +// IsSetExclusiveMaxReadLevel returns true if ExclusiveMaxReadLevel is not nil. +func (v *QueueState) IsSetExclusiveMaxReadLevel() bool { + return v != nil && v.ExclusiveMaxReadLevel != nil +} + type ReapplyEventsRequest struct { DomainName *string `json:"domainName,omitempty"` WorkflowExecution *WorkflowExecution `json:"workflowExecution,omitempty"` @@ -44551,6 +46080,7 @@ type RegisterDomainRequest struct { EmitMetric *bool `json:"emitMetric,omitempty"` Clusters []*ClusterReplicationConfiguration `json:"clusters,omitempty"` ActiveClusterName *string `json:"activeClusterName,omitempty"` + ActiveClustersByRegion map[string]string `json:"activeClustersByRegion,omitempty"` Data map[string]string `json:"data,omitempty"` SecurityToken *string `json:"securityToken,omitempty"` IsGlobalDomain *bool `json:"isGlobalDomain,omitempty"` @@ -44585,7 +46115,7 @@ func Default_RegisterDomainRequest() *RegisterDomainRequest { // } func (v *RegisterDomainRequest) ToWire() (wire.Value, error) { var ( - fields [14]wire.Field + fields [15]wire.Field i int = 0 w wire.Value err error @@ -44651,6 +46181,14 @@ func (v *RegisterDomainRequest) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 70, Value: w} i++ } + if v.ActiveClustersByRegion != nil { + w, err = wire.NewValueMap(_Map_String_String_MapItemList(v.ActiveClustersByRegion)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 75, Value: w} + i++ + } if v.Data != nil { w, err = wire.NewValueMap(_Map_String_String_MapItemList(v.Data)), error(nil) if err != nil { @@ -44800,6 +46338,14 @@ func (v *RegisterDomainRequest) FromWire(w wire.Value) error { return err } + } + case 75: + if field.Value.Type() == wire.TMap { + v.ActiveClustersByRegion, err = _Map_String_String_Read(field.Value.GetMap()) + if err != nil { + return err + } + } case 80: if field.Value.Type() == wire.TMap { @@ -44886,7 +46432,7 @@ func (v *RegisterDomainRequest) String() string { return "" } - var fields [14]string + var fields [15]string i := 0 if v.Name != nil { fields[i] = fmt.Sprintf("Name: %v", *(v.Name)) @@ -44916,6 +46462,10 @@ func (v *RegisterDomainRequest) String() string { fields[i] = fmt.Sprintf("ActiveClusterName: %v", *(v.ActiveClusterName)) i++ } + if v.ActiveClustersByRegion != nil { + fields[i] = fmt.Sprintf("ActiveClustersByRegion: %v", 
v.ActiveClustersByRegion) + i++ + } if v.Data != nil { fields[i] = fmt.Sprintf("Data: %v", v.Data) i++ @@ -44979,6 +46529,9 @@ func (v *RegisterDomainRequest) Equals(rhs *RegisterDomainRequest) bool { if !_String_EqualsPtr(v.ActiveClusterName, rhs.ActiveClusterName) { return false } + if !((v.ActiveClustersByRegion == nil && rhs.ActiveClustersByRegion == nil) || (v.ActiveClustersByRegion != nil && rhs.ActiveClustersByRegion != nil && _Map_String_String_Equals(v.ActiveClustersByRegion, rhs.ActiveClustersByRegion))) { + return false + } if !((v.Data == nil && rhs.Data == nil) || (v.Data != nil && rhs.Data != nil && _Map_String_String_Equals(v.Data, rhs.Data))) { return false } @@ -45031,6 +46584,9 @@ func (v *RegisterDomainRequest) MarshalLogObject(enc zapcore.ObjectEncoder) (err if v.ActiveClusterName != nil { enc.AddString("activeClusterName", *v.ActiveClusterName) } + if v.ActiveClustersByRegion != nil { + err = multierr.Append(err, enc.AddObject("activeClustersByRegion", (_Map_String_String_Zapper)(v.ActiveClustersByRegion))) + } if v.Data != nil { err = multierr.Append(err, enc.AddObject("data", (_Map_String_String_Zapper)(v.Data))) } @@ -45160,6 +46716,21 @@ func (v *RegisterDomainRequest) IsSetActiveClusterName() bool { return v != nil && v.ActiveClusterName != nil } +// GetActiveClustersByRegion returns the value of ActiveClustersByRegion if it is set or its +// zero value if it is unset. +func (v *RegisterDomainRequest) GetActiveClustersByRegion() (o map[string]string) { + if v != nil && v.ActiveClustersByRegion != nil { + return v.ActiveClustersByRegion + } + + return +} + +// IsSetActiveClustersByRegion returns true if ActiveClustersByRegion is not nil. +func (v *RegisterDomainRequest) IsSetActiveClustersByRegion() bool { + return v != nil && v.ActiveClustersByRegion != nil +} + // GetData returns the value of Data if it is set or its // zero value if it is unset. 
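A hedged sketch (not part of the patch) of building a RegisterDomainRequest with the new ActiveClustersByRegion field; the domain, region, and cluster names are made up, and the pointer helpers are local to the example.

package main

import (
	"fmt"

	"go.uber.org/cadence/.gen/go/shared"
)

func strPtr(s string) *string { return &s } // local helper, not part of the generated API
func boolPtr(b bool) *bool    { return &b } // local helper, not part of the generated API

func main() {
	req := &shared.RegisterDomainRequest{
		Name:           strPtr("payments"),
		IsGlobalDomain: boolPtr(true),
		// New field in this patch (wire ID 75). The region/cluster names are examples only.
		ActiveClustersByRegion: map[string]string{
			"region-east": "cluster-a",
			"region-west": "cluster-b",
		},
	}
	fmt.Println(req.GetActiveClustersByRegion(), req.IsSetActiveClustersByRegion())
}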
func (v *RegisterDomainRequest) GetData() (o map[string]string) { @@ -56573,6 +58144,7 @@ type SignalWithStartWorkflowExecutionRequest struct { DelayStartSeconds *int32 `json:"delayStartSeconds,omitempty"` JitterStartSeconds *int32 `json:"jitterStartSeconds,omitempty"` FirstRunAtTimestamp *int64 `json:"firstRunAtTimestamp,omitempty"` + CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` } // ToWire translates a SignalWithStartWorkflowExecutionRequest struct into a Thrift-level intermediate @@ -56592,7 +58164,7 @@ type SignalWithStartWorkflowExecutionRequest struct { // } func (v *SignalWithStartWorkflowExecutionRequest) ToWire() (wire.Value, error) { var ( - fields [21]wire.Field + fields [22]wire.Field i int = 0 w wire.Value err error @@ -56766,6 +58338,14 @@ func (v *SignalWithStartWorkflowExecutionRequest) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 200, Value: w} i++ } + if v.CronOverlapPolicy != nil { + w, err = v.CronOverlapPolicy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 210, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -56989,6 +58569,16 @@ func (v *SignalWithStartWorkflowExecutionRequest) FromWire(w wire.Value) error { return err } + } + case 210: + if field.Value.Type() == wire.TI32 { + var x CronOverlapPolicy + x, err = _CronOverlapPolicy_Read(field.Value) + v.CronOverlapPolicy = &x + if err != nil { + return err + } + } } } @@ -57003,7 +58593,7 @@ func (v *SignalWithStartWorkflowExecutionRequest) String() string { return "" } - var fields [21]string + var fields [22]string i := 0 if v.Domain != nil { fields[i] = fmt.Sprintf("Domain: %v", *(v.Domain)) @@ -57089,6 +58679,10 @@ func (v *SignalWithStartWorkflowExecutionRequest) String() string { fields[i] = fmt.Sprintf("FirstRunAtTimestamp: %v", *(v.FirstRunAtTimestamp)) i++ } + if v.CronOverlapPolicy != nil { + fields[i] = fmt.Sprintf("CronOverlapPolicy: %v", *(v.CronOverlapPolicy)) + i++ + } return fmt.Sprintf("SignalWithStartWorkflowExecutionRequest{%v}", strings.Join(fields[:i], ", ")) } @@ -57176,6 +58770,9 @@ func (v *SignalWithStartWorkflowExecutionRequest) Equals(rhs *SignalWithStartWor if !_I64_EqualsPtr(v.FirstRunAtTimestamp, rhs.FirstRunAtTimestamp) { return false } + if !_CronOverlapPolicy_EqualsPtr(v.CronOverlapPolicy, rhs.CronOverlapPolicy) { + return false + } return true } @@ -57249,6 +58846,9 @@ func (v *SignalWithStartWorkflowExecutionRequest) MarshalLogObject(enc zapcore.O if v.FirstRunAtTimestamp != nil { enc.AddInt64("firstRunAtTimestamp", *v.FirstRunAtTimestamp) } + if v.CronOverlapPolicy != nil { + err = multierr.Append(err, enc.AddObject("cronOverlapPolicy", *v.CronOverlapPolicy)) + } return err } @@ -57567,6 +59167,21 @@ func (v *SignalWithStartWorkflowExecutionRequest) IsSetFirstRunAtTimestamp() boo return v != nil && v.FirstRunAtTimestamp != nil } +// GetCronOverlapPolicy returns the value of CronOverlapPolicy if it is set or its +// zero value if it is unset. +func (v *SignalWithStartWorkflowExecutionRequest) GetCronOverlapPolicy() (o CronOverlapPolicy) { + if v != nil && v.CronOverlapPolicy != nil { + return *v.CronOverlapPolicy + } + + return +} + +// IsSetCronOverlapPolicy returns true if CronOverlapPolicy is not nil. 
+func (v *SignalWithStartWorkflowExecutionRequest) IsSetCronOverlapPolicy() bool { + return v != nil && v.CronOverlapPolicy != nil +} + type SignalWorkflowExecutionRequest struct { Domain *string `json:"domain,omitempty"` WorkflowExecution *WorkflowExecution `json:"workflowExecution,omitempty"` @@ -57979,6 +59594,7 @@ type StartChildWorkflowExecutionDecisionAttributes struct { Header *Header `json:"header,omitempty"` Memo *Memo `json:"memo,omitempty"` SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` + CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` } // ToWire translates a StartChildWorkflowExecutionDecisionAttributes struct into a Thrift-level intermediate @@ -57998,7 +59614,7 @@ type StartChildWorkflowExecutionDecisionAttributes struct { // } func (v *StartChildWorkflowExecutionDecisionAttributes) ToWire() (wire.Value, error) { var ( - fields [15]wire.Field + fields [16]wire.Field i int = 0 w wire.Value err error @@ -58124,6 +59740,14 @@ func (v *StartChildWorkflowExecutionDecisionAttributes) ToWire() (wire.Value, er fields[i] = wire.Field{ID: 150, Value: w} i++ } + if v.CronOverlapPolicy != nil { + w, err = v.CronOverlapPolicy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 160, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -58283,6 +59907,16 @@ func (v *StartChildWorkflowExecutionDecisionAttributes) FromWire(w wire.Value) e return err } + } + case 160: + if field.Value.Type() == wire.TI32 { + var x CronOverlapPolicy + x, err = _CronOverlapPolicy_Read(field.Value) + v.CronOverlapPolicy = &x + if err != nil { + return err + } + } } } @@ -58297,7 +59931,7 @@ func (v *StartChildWorkflowExecutionDecisionAttributes) String() string { return "" } - var fields [15]string + var fields [16]string i := 0 if v.Domain != nil { fields[i] = fmt.Sprintf("Domain: %v", *(v.Domain)) @@ -58359,6 +59993,10 @@ func (v *StartChildWorkflowExecutionDecisionAttributes) String() string { fields[i] = fmt.Sprintf("SearchAttributes: %v", v.SearchAttributes) i++ } + if v.CronOverlapPolicy != nil { + fields[i] = fmt.Sprintf("CronOverlapPolicy: %v", *(v.CronOverlapPolicy)) + i++ + } return fmt.Sprintf("StartChildWorkflowExecutionDecisionAttributes{%v}", strings.Join(fields[:i], ", ")) } @@ -58418,6 +60056,9 @@ func (v *StartChildWorkflowExecutionDecisionAttributes) Equals(rhs *StartChildWo if !((v.SearchAttributes == nil && rhs.SearchAttributes == nil) || (v.SearchAttributes != nil && rhs.SearchAttributes != nil && v.SearchAttributes.Equals(rhs.SearchAttributes))) { return false } + if !_CronOverlapPolicy_EqualsPtr(v.CronOverlapPolicy, rhs.CronOverlapPolicy) { + return false + } return true } @@ -58473,6 +60114,9 @@ func (v *StartChildWorkflowExecutionDecisionAttributes) MarshalLogObject(enc zap if v.SearchAttributes != nil { err = multierr.Append(err, enc.AddObject("searchAttributes", v.SearchAttributes)) } + if v.CronOverlapPolicy != nil { + err = multierr.Append(err, enc.AddObject("cronOverlapPolicy", *v.CronOverlapPolicy)) + } return err } @@ -58701,6 +60345,21 @@ func (v *StartChildWorkflowExecutionDecisionAttributes) IsSetSearchAttributes() return v != nil && v.SearchAttributes != nil } +// GetCronOverlapPolicy returns the value of CronOverlapPolicy if it is set or its +// zero value if it is unset. 
+func (v *StartChildWorkflowExecutionDecisionAttributes) GetCronOverlapPolicy() (o CronOverlapPolicy) { + if v != nil && v.CronOverlapPolicy != nil { + return *v.CronOverlapPolicy + } + + return +} + +// IsSetCronOverlapPolicy returns true if CronOverlapPolicy is not nil. +func (v *StartChildWorkflowExecutionDecisionAttributes) IsSetCronOverlapPolicy() bool { + return v != nil && v.CronOverlapPolicy != nil +} + type StartChildWorkflowExecutionFailedEventAttributes struct { Domain *string `json:"domain,omitempty"` WorkflowId *string `json:"workflowId,omitempty"` @@ -59135,6 +60794,7 @@ type StartChildWorkflowExecutionInitiatedEventAttributes struct { DelayStartSeconds *int32 `json:"delayStartSeconds,omitempty"` JitterStartSeconds *int32 `json:"jitterStartSeconds,omitempty"` FirstRunAtTimestamp *int64 `json:"firstRunAtTimestamp,omitempty"` + CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` } // ToWire translates a StartChildWorkflowExecutionInitiatedEventAttributes struct into a Thrift-level intermediate @@ -59154,7 +60814,7 @@ type StartChildWorkflowExecutionInitiatedEventAttributes struct { // } func (v *StartChildWorkflowExecutionInitiatedEventAttributes) ToWire() (wire.Value, error) { var ( - fields [19]wire.Field + fields [20]wire.Field i int = 0 w wire.Value err error @@ -59312,6 +60972,14 @@ func (v *StartChildWorkflowExecutionInitiatedEventAttributes) ToWire() (wire.Val fields[i] = wire.Field{ID: 190, Value: w} i++ } + if v.CronOverlapPolicy != nil { + w, err = v.CronOverlapPolicy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 200, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -59511,6 +61179,16 @@ func (v *StartChildWorkflowExecutionInitiatedEventAttributes) FromWire(w wire.Va return err } + } + case 200: + if field.Value.Type() == wire.TI32 { + var x CronOverlapPolicy + x, err = _CronOverlapPolicy_Read(field.Value) + v.CronOverlapPolicy = &x + if err != nil { + return err + } + } } } @@ -59525,7 +61203,7 @@ func (v *StartChildWorkflowExecutionInitiatedEventAttributes) String() string { return "" } - var fields [19]string + var fields [20]string i := 0 if v.Domain != nil { fields[i] = fmt.Sprintf("Domain: %v", *(v.Domain)) @@ -59603,6 +61281,10 @@ func (v *StartChildWorkflowExecutionInitiatedEventAttributes) String() string { fields[i] = fmt.Sprintf("FirstRunAtTimestamp: %v", *(v.FirstRunAtTimestamp)) i++ } + if v.CronOverlapPolicy != nil { + fields[i] = fmt.Sprintf("CronOverlapPolicy: %v", *(v.CronOverlapPolicy)) + i++ + } return fmt.Sprintf("StartChildWorkflowExecutionInitiatedEventAttributes{%v}", strings.Join(fields[:i], ", ")) } @@ -59674,6 +61356,9 @@ func (v *StartChildWorkflowExecutionInitiatedEventAttributes) Equals(rhs *StartC if !_I64_EqualsPtr(v.FirstRunAtTimestamp, rhs.FirstRunAtTimestamp) { return false } + if !_CronOverlapPolicy_EqualsPtr(v.CronOverlapPolicy, rhs.CronOverlapPolicy) { + return false + } return true } @@ -59741,6 +61426,9 @@ func (v *StartChildWorkflowExecutionInitiatedEventAttributes) MarshalLogObject(e if v.FirstRunAtTimestamp != nil { enc.AddInt64("firstRunAtTimestamp", *v.FirstRunAtTimestamp) } + if v.CronOverlapPolicy != nil { + err = multierr.Append(err, enc.AddObject("cronOverlapPolicy", *v.CronOverlapPolicy)) + } return err } @@ -60029,6 +61717,21 @@ func (v *StartChildWorkflowExecutionInitiatedEventAttributes) IsSetFirstRunAtTim return v != nil && v.FirstRunAtTimestamp != nil } +// GetCronOverlapPolicy returns the value of CronOverlapPolicy if it 
is set or its +// zero value if it is unset. +func (v *StartChildWorkflowExecutionInitiatedEventAttributes) GetCronOverlapPolicy() (o CronOverlapPolicy) { + if v != nil && v.CronOverlapPolicy != nil { + return *v.CronOverlapPolicy + } + + return +} + +// IsSetCronOverlapPolicy returns true if CronOverlapPolicy is not nil. +func (v *StartChildWorkflowExecutionInitiatedEventAttributes) IsSetCronOverlapPolicy() bool { + return v != nil && v.CronOverlapPolicy != nil +} + type StartTimeFilter struct { EarliestTime *int64 `json:"earliestTime,omitempty"` LatestTime *int64 `json:"latestTime,omitempty"` @@ -60644,6 +62347,7 @@ type StartWorkflowExecutionRequest struct { DelayStartSeconds *int32 `json:"delayStartSeconds,omitempty"` JitterStartSeconds *int32 `json:"jitterStartSeconds,omitempty"` FirstRunAtTimestamp *int64 `json:"firstRunAtTimestamp,omitempty"` + CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` } // ToWire translates a StartWorkflowExecutionRequest struct into a Thrift-level intermediate @@ -60663,7 +62367,7 @@ type StartWorkflowExecutionRequest struct { // } func (v *StartWorkflowExecutionRequest) ToWire() (wire.Value, error) { var ( - fields [18]wire.Field + fields [19]wire.Field i int = 0 w wire.Value err error @@ -60813,6 +62517,14 @@ func (v *StartWorkflowExecutionRequest) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 180, Value: w} i++ } + if v.CronOverlapPolicy != nil { + w, err = v.CronOverlapPolicy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 190, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -61004,6 +62716,16 @@ func (v *StartWorkflowExecutionRequest) FromWire(w wire.Value) error { return err } + } + case 190: + if field.Value.Type() == wire.TI32 { + var x CronOverlapPolicy + x, err = _CronOverlapPolicy_Read(field.Value) + v.CronOverlapPolicy = &x + if err != nil { + return err + } + } } } @@ -61018,7 +62740,7 @@ func (v *StartWorkflowExecutionRequest) String() string { return "" } - var fields [18]string + var fields [19]string i := 0 if v.Domain != nil { fields[i] = fmt.Sprintf("Domain: %v", *(v.Domain)) @@ -61092,6 +62814,10 @@ func (v *StartWorkflowExecutionRequest) String() string { fields[i] = fmt.Sprintf("FirstRunAtTimestamp: %v", *(v.FirstRunAtTimestamp)) i++ } + if v.CronOverlapPolicy != nil { + fields[i] = fmt.Sprintf("CronOverlapPolicy: %v", *(v.CronOverlapPolicy)) + i++ + } return fmt.Sprintf("StartWorkflowExecutionRequest{%v}", strings.Join(fields[:i], ", ")) } @@ -61160,6 +62886,9 @@ func (v *StartWorkflowExecutionRequest) Equals(rhs *StartWorkflowExecutionReques if !_I64_EqualsPtr(v.FirstRunAtTimestamp, rhs.FirstRunAtTimestamp) { return false } + if !_CronOverlapPolicy_EqualsPtr(v.CronOverlapPolicy, rhs.CronOverlapPolicy) { + return false + } return true } @@ -61224,6 +62953,9 @@ func (v *StartWorkflowExecutionRequest) MarshalLogObject(enc zapcore.ObjectEncod if v.FirstRunAtTimestamp != nil { enc.AddInt64("firstRunAtTimestamp", *v.FirstRunAtTimestamp) } + if v.CronOverlapPolicy != nil { + err = multierr.Append(err, enc.AddObject("cronOverlapPolicy", *v.CronOverlapPolicy)) + } return err } @@ -61497,6 +63229,21 @@ func (v *StartWorkflowExecutionRequest) IsSetFirstRunAtTimestamp() bool { return v != nil && v.FirstRunAtTimestamp != nil } +// GetCronOverlapPolicy returns the value of CronOverlapPolicy if it is set or its +// zero value if it is unset. 
+func (v *StartWorkflowExecutionRequest) GetCronOverlapPolicy() (o CronOverlapPolicy) { + if v != nil && v.CronOverlapPolicy != nil { + return *v.CronOverlapPolicy + } + + return +} + +// IsSetCronOverlapPolicy returns true if CronOverlapPolicy is not nil. +func (v *StartWorkflowExecutionRequest) IsSetCronOverlapPolicy() bool { + return v != nil && v.CronOverlapPolicy != nil +} + type StartWorkflowExecutionResponse struct { RunId *string `json:"runId,omitempty"` } @@ -62319,6 +64066,188 @@ func (v *TaskIDBlock) IsSetEndID() bool { return v != nil && v.EndID != nil } +type TaskKey struct { + ScheduledTimeNano *int64 `json:"scheduledTimeNano,omitempty"` + TaskID *int64 `json:"taskID,omitempty"` +} + +// ToWire translates a TaskKey struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *TaskKey) ToWire() (wire.Value, error) { + var ( + fields [2]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.ScheduledTimeNano != nil { + w, err = wire.NewValueI64(*(v.ScheduledTimeNano)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.TaskID != nil { + w, err = wire.NewValueI64(*(v.TaskID)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +// FromWire deserializes a TaskKey struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a TaskKey struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v TaskKey +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *TaskKey) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TI64 { + var x int64 + x, err = field.Value.GetI64(), error(nil) + v.ScheduledTimeNano = &x + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TI64 { + var x int64 + x, err = field.Value.GetI64(), error(nil) + v.TaskID = &x + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a TaskKey +// struct. +func (v *TaskKey) String() string { + if v == nil { + return "" + } + + var fields [2]string + i := 0 + if v.ScheduledTimeNano != nil { + fields[i] = fmt.Sprintf("ScheduledTimeNano: %v", *(v.ScheduledTimeNano)) + i++ + } + if v.TaskID != nil { + fields[i] = fmt.Sprintf("TaskID: %v", *(v.TaskID)) + i++ + } + + return fmt.Sprintf("TaskKey{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this TaskKey match the +// provided TaskKey. +// +// This function performs a deep comparison. 
+func (v *TaskKey) Equals(rhs *TaskKey) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !_I64_EqualsPtr(v.ScheduledTimeNano, rhs.ScheduledTimeNano) { + return false + } + if !_I64_EqualsPtr(v.TaskID, rhs.TaskID) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of TaskKey. +func (v *TaskKey) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.ScheduledTimeNano != nil { + enc.AddInt64("scheduledTimeNano", *v.ScheduledTimeNano) + } + if v.TaskID != nil { + enc.AddInt64("taskID", *v.TaskID) + } + return err +} + +// GetScheduledTimeNano returns the value of ScheduledTimeNano if it is set or its +// zero value if it is unset. +func (v *TaskKey) GetScheduledTimeNano() (o int64) { + if v != nil && v.ScheduledTimeNano != nil { + return *v.ScheduledTimeNano + } + + return +} + +// IsSetScheduledTimeNano returns true if ScheduledTimeNano is not nil. +func (v *TaskKey) IsSetScheduledTimeNano() bool { + return v != nil && v.ScheduledTimeNano != nil +} + +// GetTaskID returns the value of TaskID if it is set or its +// zero value if it is unset. +func (v *TaskKey) GetTaskID() (o int64) { + if v != nil && v.TaskID != nil { + return *v.TaskID + } + + return +} + +// IsSetTaskID returns true if TaskID is not nil. +func (v *TaskKey) IsSetTaskID() bool { + return v != nil && v.TaskID != nil +} + type TaskList struct { Name *string `json:"name,omitempty"` Kind *TaskListKind `json:"kind,omitempty"` @@ -63221,13 +65150,53 @@ func (v *TaskListPartitionMetadata) IsSetOwnerHostName() bool { } type TaskListStatus struct { - BacklogCountHint *int64 `json:"backlogCountHint,omitempty"` - ReadLevel *int64 `json:"readLevel,omitempty"` - AckLevel *int64 `json:"ackLevel,omitempty"` - RatePerSecond *float64 `json:"ratePerSecond,omitempty"` - TaskIDBlock *TaskIDBlock `json:"taskIDBlock,omitempty"` + BacklogCountHint *int64 `json:"backlogCountHint,omitempty"` + ReadLevel *int64 `json:"readLevel,omitempty"` + AckLevel *int64 `json:"ackLevel,omitempty"` + RatePerSecond *float64 `json:"ratePerSecond,omitempty"` + TaskIDBlock *TaskIDBlock `json:"taskIDBlock,omitempty"` + IsolationGroupMetrics map[string]*IsolationGroupMetrics `json:"isolationGroupMetrics,omitempty"` + NewTasksPerSecond *float64 `json:"newTasksPerSecond,omitempty"` } +type _Map_String_IsolationGroupMetrics_MapItemList map[string]*IsolationGroupMetrics + +func (m _Map_String_IsolationGroupMetrics_MapItemList) ForEach(f func(wire.MapItem) error) error { + for k, v := range m { + if v == nil { + return fmt.Errorf("invalid [%v]: value is nil", k) + } + kw, err := wire.NewValueString(k), error(nil) + if err != nil { + return err + } + + vw, err := v.ToWire() + if err != nil { + return err + } + err = f(wire.MapItem{Key: kw, Value: vw}) + if err != nil { + return err + } + } + return nil +} + +func (m _Map_String_IsolationGroupMetrics_MapItemList) Size() int { + return len(m) +} + +func (_Map_String_IsolationGroupMetrics_MapItemList) KeyType() wire.Type { + return wire.TBinary +} + +func (_Map_String_IsolationGroupMetrics_MapItemList) ValueType() wire.Type { + return wire.TStruct +} + +func (_Map_String_IsolationGroupMetrics_MapItemList) Close() {} + // ToWire translates a TaskListStatus struct into a Thrift-level intermediate // representation. This intermediate representation may be serialized // into bytes using a ThriftRW protocol implementation. 
@@ -63245,7 +65214,7 @@ type TaskListStatus struct { // } func (v *TaskListStatus) ToWire() (wire.Value, error) { var ( - fields [5]wire.Field + fields [7]wire.Field i int = 0 w wire.Value err error @@ -63291,6 +65260,22 @@ func (v *TaskListStatus) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 40, Value: w} i++ } + if v.IsolationGroupMetrics != nil { + w, err = wire.NewValueMap(_Map_String_IsolationGroupMetrics_MapItemList(v.IsolationGroupMetrics)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 50, Value: w} + i++ + } + if v.NewTasksPerSecond != nil { + w, err = wire.NewValueDouble(*(v.NewTasksPerSecond)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 60, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -63301,6 +65286,40 @@ func _TaskIDBlock_Read(w wire.Value) (*TaskIDBlock, error) { return &v, err } +func _IsolationGroupMetrics_Read(w wire.Value) (*IsolationGroupMetrics, error) { + var v IsolationGroupMetrics + err := v.FromWire(w) + return &v, err +} + +func _Map_String_IsolationGroupMetrics_Read(m wire.MapItemList) (map[string]*IsolationGroupMetrics, error) { + if m.KeyType() != wire.TBinary { + return nil, nil + } + + if m.ValueType() != wire.TStruct { + return nil, nil + } + + o := make(map[string]*IsolationGroupMetrics, m.Size()) + err := m.ForEach(func(x wire.MapItem) error { + k, err := x.Key.GetString(), error(nil) + if err != nil { + return err + } + + v, err := _IsolationGroupMetrics_Read(x.Value) + if err != nil { + return err + } + + o[k] = v + return nil + }) + m.Close() + return o, err +} + // FromWire deserializes a TaskListStatus struct from its Thrift-level // representation. The Thrift-level representation may be obtained // from a ThriftRW protocol implementation. @@ -63370,6 +65389,24 @@ func (v *TaskListStatus) FromWire(w wire.Value) error { return err } + } + case 50: + if field.Value.Type() == wire.TMap { + v.IsolationGroupMetrics, err = _Map_String_IsolationGroupMetrics_Read(field.Value.GetMap()) + if err != nil { + return err + } + + } + case 60: + if field.Value.Type() == wire.TDouble { + var x float64 + x, err = field.Value.GetDouble(), error(nil) + v.NewTasksPerSecond = &x + if err != nil { + return err + } + } } } @@ -63384,7 +65421,7 @@ func (v *TaskListStatus) String() string { return "" } - var fields [5]string + var fields [7]string i := 0 if v.BacklogCountHint != nil { fields[i] = fmt.Sprintf("BacklogCountHint: %v", *(v.BacklogCountHint)) @@ -63406,10 +65443,35 @@ func (v *TaskListStatus) String() string { fields[i] = fmt.Sprintf("TaskIDBlock: %v", v.TaskIDBlock) i++ } + if v.IsolationGroupMetrics != nil { + fields[i] = fmt.Sprintf("IsolationGroupMetrics: %v", v.IsolationGroupMetrics) + i++ + } + if v.NewTasksPerSecond != nil { + fields[i] = fmt.Sprintf("NewTasksPerSecond: %v", *(v.NewTasksPerSecond)) + i++ + } return fmt.Sprintf("TaskListStatus{%v}", strings.Join(fields[:i], ", ")) } +func _Map_String_IsolationGroupMetrics_Equals(lhs, rhs map[string]*IsolationGroupMetrics) bool { + if len(lhs) != len(rhs) { + return false + } + + for lk, lv := range lhs { + rv, ok := rhs[lk] + if !ok { + return false + } + if !lv.Equals(rv) { + return false + } + } + return true +} + // Equals returns true if all the fields of this TaskListStatus match the // provided TaskListStatus. 
// @@ -63435,10 +65497,27 @@ func (v *TaskListStatus) Equals(rhs *TaskListStatus) bool { if !((v.TaskIDBlock == nil && rhs.TaskIDBlock == nil) || (v.TaskIDBlock != nil && rhs.TaskIDBlock != nil && v.TaskIDBlock.Equals(rhs.TaskIDBlock))) { return false } + if !((v.IsolationGroupMetrics == nil && rhs.IsolationGroupMetrics == nil) || (v.IsolationGroupMetrics != nil && rhs.IsolationGroupMetrics != nil && _Map_String_IsolationGroupMetrics_Equals(v.IsolationGroupMetrics, rhs.IsolationGroupMetrics))) { + return false + } + if !_Double_EqualsPtr(v.NewTasksPerSecond, rhs.NewTasksPerSecond) { + return false + } return true } +type _Map_String_IsolationGroupMetrics_Zapper map[string]*IsolationGroupMetrics + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of _Map_String_IsolationGroupMetrics_Zapper. +func (m _Map_String_IsolationGroupMetrics_Zapper) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + for k, v := range m { + err = multierr.Append(err, enc.AddObject((string)(k), v)) + } + return err +} + // MarshalLogObject implements zapcore.ObjectMarshaler, enabling // fast logging of TaskListStatus. func (v *TaskListStatus) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { @@ -63460,6 +65539,12 @@ func (v *TaskListStatus) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) if v.TaskIDBlock != nil { err = multierr.Append(err, enc.AddObject("taskIDBlock", v.TaskIDBlock)) } + if v.IsolationGroupMetrics != nil { + err = multierr.Append(err, enc.AddObject("isolationGroupMetrics", (_Map_String_IsolationGroupMetrics_Zapper)(v.IsolationGroupMetrics))) + } + if v.NewTasksPerSecond != nil { + enc.AddFloat64("newTasksPerSecond", *v.NewTasksPerSecond) + } return err } @@ -63538,6 +65623,36 @@ func (v *TaskListStatus) IsSetTaskIDBlock() bool { return v != nil && v.TaskIDBlock != nil } +// GetIsolationGroupMetrics returns the value of IsolationGroupMetrics if it is set or its +// zero value if it is unset. +func (v *TaskListStatus) GetIsolationGroupMetrics() (o map[string]*IsolationGroupMetrics) { + if v != nil && v.IsolationGroupMetrics != nil { + return v.IsolationGroupMetrics + } + + return +} + +// IsSetIsolationGroupMetrics returns true if IsolationGroupMetrics is not nil. +func (v *TaskListStatus) IsSetIsolationGroupMetrics() bool { + return v != nil && v.IsolationGroupMetrics != nil +} + +// GetNewTasksPerSecond returns the value of NewTasksPerSecond if it is set or its +// zero value if it is unset. +func (v *TaskListStatus) GetNewTasksPerSecond() (o float64) { + if v != nil && v.NewTasksPerSecond != nil { + return *v.NewTasksPerSecond + } + + return +} + +// IsSetNewTasksPerSecond returns true if NewTasksPerSecond is not nil. +func (v *TaskListStatus) IsSetNewTasksPerSecond() bool { + return v != nil && v.NewTasksPerSecond != nil +} + type TaskListType int32 const ( @@ -63709,6 +65824,184 @@ func (v *TaskListType) UnmarshalJSON(text []byte) error { } } +type TaskRange struct { + InclusiveMin *TaskKey `json:"inclusiveMin,omitempty"` + ExclusiveMax *TaskKey `json:"exclusiveMax,omitempty"` +} + +// ToWire translates a TaskRange struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *TaskRange) ToWire() (wire.Value, error) { + var ( + fields [2]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.InclusiveMin != nil { + w, err = v.InclusiveMin.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.ExclusiveMax != nil { + w, err = v.ExclusiveMax.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +// FromWire deserializes a TaskRange struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a TaskRange struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v TaskRange +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *TaskRange) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TStruct { + v.InclusiveMin, err = _TaskKey_Read(field.Value) + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TStruct { + v.ExclusiveMax, err = _TaskKey_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a TaskRange +// struct. +func (v *TaskRange) String() string { + if v == nil { + return "" + } + + var fields [2]string + i := 0 + if v.InclusiveMin != nil { + fields[i] = fmt.Sprintf("InclusiveMin: %v", v.InclusiveMin) + i++ + } + if v.ExclusiveMax != nil { + fields[i] = fmt.Sprintf("ExclusiveMax: %v", v.ExclusiveMax) + i++ + } + + return fmt.Sprintf("TaskRange{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this TaskRange match the +// provided TaskRange. +// +// This function performs a deep comparison. +func (v *TaskRange) Equals(rhs *TaskRange) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.InclusiveMin == nil && rhs.InclusiveMin == nil) || (v.InclusiveMin != nil && rhs.InclusiveMin != nil && v.InclusiveMin.Equals(rhs.InclusiveMin))) { + return false + } + if !((v.ExclusiveMax == nil && rhs.ExclusiveMax == nil) || (v.ExclusiveMax != nil && rhs.ExclusiveMax != nil && v.ExclusiveMax.Equals(rhs.ExclusiveMax))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of TaskRange. +func (v *TaskRange) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.InclusiveMin != nil { + err = multierr.Append(err, enc.AddObject("inclusiveMin", v.InclusiveMin)) + } + if v.ExclusiveMax != nil { + err = multierr.Append(err, enc.AddObject("exclusiveMax", v.ExclusiveMax)) + } + return err +} + +// GetInclusiveMin returns the value of InclusiveMin if it is set or its +// zero value if it is unset. +func (v *TaskRange) GetInclusiveMin() (o *TaskKey) { + if v != nil && v.InclusiveMin != nil { + return v.InclusiveMin + } + + return +} + +// IsSetInclusiveMin returns true if InclusiveMin is not nil. 
+func (v *TaskRange) IsSetInclusiveMin() bool { + return v != nil && v.InclusiveMin != nil +} + +// GetExclusiveMax returns the value of ExclusiveMax if it is set or its +// zero value if it is unset. +func (v *TaskRange) GetExclusiveMax() (o *TaskKey) { + if v != nil && v.ExclusiveMax != nil { + return v.ExclusiveMax + } + + return +} + +// IsSetExclusiveMax returns true if ExclusiveMax is not nil. +func (v *TaskRange) IsSetExclusiveMax() bool { + return v != nil && v.ExclusiveMax != nil +} + type TerminateWorkflowExecutionRequest struct { Domain *string `json:"domain,omitempty"` WorkflowExecution *WorkflowExecution `json:"workflowExecution,omitempty"` @@ -67064,6 +69357,363 @@ func (v *VersionHistoryItem) IsSetVersion() bool { return v != nil && v.Version != nil } +type VirtualQueueState struct { + VirtualSliceStates []*VirtualSliceState `json:"virtualSliceStates,omitempty"` +} + +type _List_VirtualSliceState_ValueList []*VirtualSliceState + +func (v _List_VirtualSliceState_ValueList) ForEach(f func(wire.Value) error) error { + for i, x := range v { + if x == nil { + return fmt.Errorf("invalid [%v]: value is nil", i) + } + w, err := x.ToWire() + if err != nil { + return err + } + err = f(w) + if err != nil { + return err + } + } + return nil +} + +func (v _List_VirtualSliceState_ValueList) Size() int { + return len(v) +} + +func (_List_VirtualSliceState_ValueList) ValueType() wire.Type { + return wire.TStruct +} + +func (_List_VirtualSliceState_ValueList) Close() {} + +// ToWire translates a VirtualQueueState struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *VirtualQueueState) ToWire() (wire.Value, error) { + var ( + fields [1]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.VirtualSliceStates != nil { + w, err = wire.NewValueList(_List_VirtualSliceState_ValueList(v.VirtualSliceStates)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _VirtualSliceState_Read(w wire.Value) (*VirtualSliceState, error) { + var v VirtualSliceState + err := v.FromWire(w) + return &v, err +} + +func _List_VirtualSliceState_Read(l wire.ValueList) ([]*VirtualSliceState, error) { + if l.ValueType() != wire.TStruct { + return nil, nil + } + + o := make([]*VirtualSliceState, 0, l.Size()) + err := l.ForEach(func(x wire.Value) error { + i, err := _VirtualSliceState_Read(x) + if err != nil { + return err + } + o = append(o, i) + return nil + }) + l.Close() + return o, err +} + +// FromWire deserializes a VirtualQueueState struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a VirtualQueueState struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v VirtualQueueState +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *VirtualQueueState) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TList { + v.VirtualSliceStates, err = _List_VirtualSliceState_Read(field.Value.GetList()) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a VirtualQueueState +// struct. +func (v *VirtualQueueState) String() string { + if v == nil { + return "" + } + + var fields [1]string + i := 0 + if v.VirtualSliceStates != nil { + fields[i] = fmt.Sprintf("VirtualSliceStates: %v", v.VirtualSliceStates) + i++ + } + + return fmt.Sprintf("VirtualQueueState{%v}", strings.Join(fields[:i], ", ")) +} + +func _List_VirtualSliceState_Equals(lhs, rhs []*VirtualSliceState) bool { + if len(lhs) != len(rhs) { + return false + } + + for i, lv := range lhs { + rv := rhs[i] + if !lv.Equals(rv) { + return false + } + } + + return true +} + +// Equals returns true if all the fields of this VirtualQueueState match the +// provided VirtualQueueState. +// +// This function performs a deep comparison. +func (v *VirtualQueueState) Equals(rhs *VirtualQueueState) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.VirtualSliceStates == nil && rhs.VirtualSliceStates == nil) || (v.VirtualSliceStates != nil && rhs.VirtualSliceStates != nil && _List_VirtualSliceState_Equals(v.VirtualSliceStates, rhs.VirtualSliceStates))) { + return false + } + + return true +} + +type _List_VirtualSliceState_Zapper []*VirtualSliceState + +// MarshalLogArray implements zapcore.ArrayMarshaler, enabling +// fast logging of _List_VirtualSliceState_Zapper. +func (l _List_VirtualSliceState_Zapper) MarshalLogArray(enc zapcore.ArrayEncoder) (err error) { + for _, v := range l { + err = multierr.Append(err, enc.AppendObject(v)) + } + return err +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of VirtualQueueState. +func (v *VirtualQueueState) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.VirtualSliceStates != nil { + err = multierr.Append(err, enc.AddArray("virtualSliceStates", (_List_VirtualSliceState_Zapper)(v.VirtualSliceStates))) + } + return err +} + +// GetVirtualSliceStates returns the value of VirtualSliceStates if it is set or its +// zero value if it is unset. +func (v *VirtualQueueState) GetVirtualSliceStates() (o []*VirtualSliceState) { + if v != nil && v.VirtualSliceStates != nil { + return v.VirtualSliceStates + } + + return +} + +// IsSetVirtualSliceStates returns true if VirtualSliceStates is not nil. +func (v *VirtualQueueState) IsSetVirtualSliceStates() bool { + return v != nil && v.VirtualSliceStates != nil +} + +type VirtualSliceState struct { + TaskRange *TaskRange `json:"taskRange,omitempty"` +} + +// ToWire translates a VirtualSliceState struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *VirtualSliceState) ToWire() (wire.Value, error) { + var ( + fields [1]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.TaskRange != nil { + w, err = v.TaskRange.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _TaskRange_Read(w wire.Value) (*TaskRange, error) { + var v TaskRange + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a VirtualSliceState struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a VirtualSliceState struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v VirtualSliceState +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *VirtualSliceState) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TStruct { + v.TaskRange, err = _TaskRange_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a VirtualSliceState +// struct. +func (v *VirtualSliceState) String() string { + if v == nil { + return "" + } + + var fields [1]string + i := 0 + if v.TaskRange != nil { + fields[i] = fmt.Sprintf("TaskRange: %v", v.TaskRange) + i++ + } + + return fmt.Sprintf("VirtualSliceState{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this VirtualSliceState match the +// provided VirtualSliceState. +// +// This function performs a deep comparison. +func (v *VirtualSliceState) Equals(rhs *VirtualSliceState) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.TaskRange == nil && rhs.TaskRange == nil) || (v.TaskRange != nil && rhs.TaskRange != nil && v.TaskRange.Equals(rhs.TaskRange))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of VirtualSliceState. +func (v *VirtualSliceState) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.TaskRange != nil { + err = multierr.Append(err, enc.AddObject("taskRange", v.TaskRange)) + } + return err +} + +// GetTaskRange returns the value of TaskRange if it is set or its +// zero value if it is unset. +func (v *VirtualSliceState) GetTaskRange() (o *TaskRange) { + if v != nil && v.TaskRange != nil { + return v.TaskRange + } + + return +} + +// IsSetTaskRange returns true if TaskRange is not nil. 
+func (v *VirtualSliceState) IsSetTaskRange() bool { + return v != nil && v.TaskRange != nil +} + type WorkerVersionInfo struct { Impl *string `json:"impl,omitempty"` FeatureVersion *string `json:"featureVersion,omitempty"` @@ -71242,6 +73892,7 @@ type WorkflowExecutionStartedEventAttributes struct { Header *Header `json:"header,omitempty"` PartitionConfig map[string]string `json:"partitionConfig,omitempty"` RequestId *string `json:"requestId,omitempty"` + CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` } // ToWire translates a WorkflowExecutionStartedEventAttributes struct into a Thrift-level intermediate @@ -71261,7 +73912,7 @@ type WorkflowExecutionStartedEventAttributes struct { // } func (v *WorkflowExecutionStartedEventAttributes) ToWire() (wire.Value, error) { var ( - fields [28]wire.Field + fields [29]wire.Field i int = 0 w wire.Value err error @@ -71491,6 +74142,14 @@ func (v *WorkflowExecutionStartedEventAttributes) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 160, Value: w} i++ } + if v.CronOverlapPolicy != nil { + w, err = v.CronOverlapPolicy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 170, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -71772,6 +74431,16 @@ func (v *WorkflowExecutionStartedEventAttributes) FromWire(w wire.Value) error { return err } + } + case 170: + if field.Value.Type() == wire.TI32 { + var x CronOverlapPolicy + x, err = _CronOverlapPolicy_Read(field.Value) + v.CronOverlapPolicy = &x + if err != nil { + return err + } + } } } @@ -71786,7 +74455,7 @@ func (v *WorkflowExecutionStartedEventAttributes) String() string { return "" } - var fields [28]string + var fields [29]string i := 0 if v.WorkflowType != nil { fields[i] = fmt.Sprintf("WorkflowType: %v", v.WorkflowType) @@ -71900,6 +74569,10 @@ func (v *WorkflowExecutionStartedEventAttributes) String() string { fields[i] = fmt.Sprintf("RequestId: %v", *(v.RequestId)) i++ } + if v.CronOverlapPolicy != nil { + fields[i] = fmt.Sprintf("CronOverlapPolicy: %v", *(v.CronOverlapPolicy)) + i++ + } return fmt.Sprintf("WorkflowExecutionStartedEventAttributes{%v}", strings.Join(fields[:i], ", ")) } @@ -71998,6 +74671,9 @@ func (v *WorkflowExecutionStartedEventAttributes) Equals(rhs *WorkflowExecutionS if !_String_EqualsPtr(v.RequestId, rhs.RequestId) { return false } + if !_CronOverlapPolicy_EqualsPtr(v.CronOverlapPolicy, rhs.CronOverlapPolicy) { + return false + } return true } @@ -72092,6 +74768,9 @@ func (v *WorkflowExecutionStartedEventAttributes) MarshalLogObject(enc zapcore.O if v.RequestId != nil { enc.AddString("requestId", *v.RequestId) } + if v.CronOverlapPolicy != nil { + err = multierr.Append(err, enc.AddObject("cronOverlapPolicy", *v.CronOverlapPolicy)) + } return err } @@ -72515,6 +75194,21 @@ func (v *WorkflowExecutionStartedEventAttributes) IsSetRequestId() bool { return v != nil && v.RequestId != nil } +// GetCronOverlapPolicy returns the value of CronOverlapPolicy if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionStartedEventAttributes) GetCronOverlapPolicy() (o CronOverlapPolicy) { + if v != nil && v.CronOverlapPolicy != nil { + return *v.CronOverlapPolicy + } + + return +} + +// IsSetCronOverlapPolicy returns true if CronOverlapPolicy is not nil. 
+func (v *WorkflowExecutionStartedEventAttributes) IsSetCronOverlapPolicy() bool { + return v != nil && v.CronOverlapPolicy != nil +} + type WorkflowExecutionTerminatedEventAttributes struct { Reason *string `json:"reason,omitempty"` Details []byte `json:"details,omitempty"` @@ -73775,8 +76469,8 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "shared", Package: "go.uber.org/cadence/.gen/go/shared", FilePath: "shared.thrift", - SHA1: "c24af4a97d8b3051d71619467e1f84024a8f8757", + SHA1: "6967d00b3b3f9ddf007dc5abe69903206ce97485", Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence\n\nexception BadRequestError {\n 1: required string message\n}\n\nexception InternalServiceError {\n 1: required string message\n}\n\nexception InternalDataInconsistencyError {\n 1: required string message\n}\n\nexception DomainAlreadyExistsError {\n 1: required string message\n}\n\nexception WorkflowExecutionAlreadyStartedError {\n 10: optional string message\n 20: optional string startRequestId\n 30: optional string runId\n}\n\nexception WorkflowExecutionAlreadyCompletedError {\n 1: required string message\n}\n\nexception EntityNotExistsError {\n 1: required string message\n 2: optional string currentCluster\n 3: optional string activeCluster\n}\n\nexception ServiceBusyError {\n 1: required string message\n 2: optional string reason\n}\n\nexception CancellationAlreadyRequestedError {\n 1: required string message\n}\n\nexception QueryFailedError {\n 1: required string message\n}\n\nexception DomainNotActiveError {\n 1: required string message\n 2: required string domainName\n 3: required string currentCluster\n 4: required string activeCluster\n}\n\nexception LimitExceededError {\n 1: required string message\n}\n\nexception AccessDeniedError {\n 1: required string message\n}\n\nexception RetryTaskV2Error {\n 1: required string message\n 2: optional string domainId\n 3: optional string workflowId\n 4: optional string runId\n 5: optional i64 (js.type = \"Long\") startEventId\n 6: optional i64 (js.type = \"Long\") startEventVersion\n 7: optional i64 (js.type = \"Long\") endEventId\n 8: optional i64 (js.type = \"Long\") endEventVersion\n}\n\nexception ClientVersionNotSupportedError {\n 1: required string featureVersion\n 2: required string clientImpl\n 3: required string supportedVersions\n}\n\nexception FeatureNotEnabledError {\n 1: required string 
featureFlag\n}\n\nexception CurrentBranchChangedError {\n 10: required string message\n 20: required binary currentBranchToken\n}\n\nexception RemoteSyncMatchedError {\n 10: required string message\n}\n\nexception StickyWorkerUnavailableError {\n 1: required string message\n}\n\nexception TaskListNotOwnedByHostError {\n 1: required string ownedByIdentity\n 2: required string myIdentity\n 3: required string tasklistName\n}\n\nenum WorkflowIdReusePolicy {\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running, and the last execution close state is in\n * [terminated, cancelled, timeouted, failed].\n */\n AllowDuplicateFailedOnly,\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running.\n */\n AllowDuplicate,\n /*\n * do not allow start a workflow execution using the same workflow ID at all\n */\n RejectDuplicate,\n /*\n * if a workflow is running using the same workflow ID, terminate it and start a new one\n */\n TerminateIfRunning,\n}\n\nenum DomainStatus {\n REGISTERED,\n DEPRECATED,\n DELETED,\n}\n\nenum TimeoutType {\n START_TO_CLOSE,\n SCHEDULE_TO_START,\n SCHEDULE_TO_CLOSE,\n HEARTBEAT,\n}\n\nenum ParentClosePolicy {\n\tABANDON,\n\tREQUEST_CANCEL,\n\tTERMINATE,\n}\n\n\n// whenever this list of decision is changed\n// do change the mutableStateBuilder.go\n// function shouldBufferEvent\n// to make sure wo do the correct event ordering\nenum DecisionType {\n ScheduleActivityTask,\n RequestCancelActivityTask,\n StartTimer,\n CompleteWorkflowExecution,\n FailWorkflowExecution,\n CancelTimer,\n CancelWorkflowExecution,\n RequestCancelExternalWorkflowExecution,\n RecordMarker,\n ContinueAsNewWorkflowExecution,\n StartChildWorkflowExecution,\n SignalExternalWorkflowExecution,\n UpsertWorkflowSearchAttributes,\n}\n\nenum EventType {\n WorkflowExecutionStarted,\n WorkflowExecutionCompleted,\n WorkflowExecutionFailed,\n WorkflowExecutionTimedOut,\n DecisionTaskScheduled,\n DecisionTaskStarted,\n DecisionTaskCompleted,\n DecisionTaskTimedOut\n DecisionTaskFailed,\n ActivityTaskScheduled,\n ActivityTaskStarted,\n ActivityTaskCompleted,\n ActivityTaskFailed,\n ActivityTaskTimedOut,\n ActivityTaskCancelRequested,\n RequestCancelActivityTaskFailed,\n ActivityTaskCanceled,\n TimerStarted,\n TimerFired,\n CancelTimerFailed,\n TimerCanceled,\n WorkflowExecutionCancelRequested,\n WorkflowExecutionCanceled,\n RequestCancelExternalWorkflowExecutionInitiated,\n RequestCancelExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionCancelRequested,\n MarkerRecorded,\n WorkflowExecutionSignaled,\n WorkflowExecutionTerminated,\n WorkflowExecutionContinuedAsNew,\n StartChildWorkflowExecutionInitiated,\n StartChildWorkflowExecutionFailed,\n ChildWorkflowExecutionStarted,\n ChildWorkflowExecutionCompleted,\n ChildWorkflowExecutionFailed,\n ChildWorkflowExecutionCanceled,\n ChildWorkflowExecutionTimedOut,\n ChildWorkflowExecutionTerminated,\n SignalExternalWorkflowExecutionInitiated,\n SignalExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionSignaled,\n UpsertWorkflowSearchAttributes,\n}\n\nenum DecisionTaskFailedCause {\n UNHANDLED_DECISION,\n BAD_SCHEDULE_ACTIVITY_ATTRIBUTES,\n BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES,\n BAD_START_TIMER_ATTRIBUTES,\n BAD_CANCEL_TIMER_ATTRIBUTES,\n BAD_RECORD_MARKER_ATTRIBUTES,\n BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n 
BAD_CONTINUE_AS_NEW_ATTRIBUTES,\n START_TIMER_DUPLICATE_ID,\n RESET_STICKY_TASKLIST,\n WORKFLOW_WORKER_UNHANDLED_FAILURE,\n BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_START_CHILD_EXECUTION_ATTRIBUTES,\n FORCE_CLOSE_DECISION,\n FAILOVER_CLOSE_DECISION,\n BAD_SIGNAL_INPUT_SIZE,\n RESET_WORKFLOW,\n BAD_BINARY,\n SCHEDULE_ACTIVITY_DUPLICATE_ID,\n BAD_SEARCH_ATTRIBUTES,\n}\n\nenum DecisionTaskTimedOutCause {\n TIMEOUT,\n RESET,\n}\n\nenum CancelExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n WORKFLOW_ALREADY_COMPLETED,\n}\n\nenum SignalExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n WORKFLOW_ALREADY_COMPLETED,\n}\n\nenum ChildWorkflowExecutionFailedCause {\n WORKFLOW_ALREADY_RUNNING,\n}\n\n// TODO: when migrating to gRPC, add a running / none status,\n// currently, customer is using null / nil as an indication\n// that workflow is still running\nenum WorkflowExecutionCloseStatus {\n COMPLETED,\n FAILED,\n CANCELED,\n TERMINATED,\n CONTINUED_AS_NEW,\n TIMED_OUT,\n}\n\nenum QueryTaskCompletedType {\n COMPLETED,\n FAILED,\n}\n\nenum QueryResultType {\n ANSWERED,\n FAILED,\n}\n\nenum PendingActivityState {\n SCHEDULED,\n STARTED,\n CANCEL_REQUESTED,\n}\n\nenum PendingDecisionState {\n SCHEDULED,\n STARTED,\n}\n\nenum HistoryEventFilterType {\n ALL_EVENT,\n CLOSE_EVENT,\n}\n\nenum TaskListKind {\n NORMAL,\n STICKY,\n}\n\nenum ArchivalStatus {\n DISABLED,\n ENABLED,\n}\n\nenum IndexedValueType {\n STRING,\n KEYWORD,\n INT,\n DOUBLE,\n BOOL,\n DATETIME,\n}\n\nstruct Header {\n 10: optional map fields\n}\n\nstruct WorkflowType {\n 10: optional string name\n}\n\nstruct ActivityType {\n 10: optional string name\n}\n\nstruct TaskList {\n 10: optional string name\n 20: optional TaskListKind kind\n}\n\nenum EncodingType {\n ThriftRW,\n JSON,\n}\n\nenum QueryRejectCondition {\n // NOT_OPEN indicates that query should be rejected if workflow is not open\n NOT_OPEN\n // NOT_COMPLETED_CLEANLY indicates that query should be rejected if workflow did not complete cleanly\n NOT_COMPLETED_CLEANLY\n}\n\nenum QueryConsistencyLevel {\n // EVENTUAL indicates that query should be eventually consistent\n EVENTUAL\n // STRONG indicates that any events that came before query should be reflected in workflow state before running query\n STRONG\n}\n\nstruct DataBlob {\n 10: optional EncodingType EncodingType\n 20: optional binary Data\n}\n\nstruct TaskListMetadata {\n 10: optional double maxTasksPerSecond\n}\n\nstruct WorkflowExecution {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct Memo {\n 10: optional map fields\n}\n\nstruct SearchAttributes {\n 10: optional map indexedFields\n}\n\nstruct WorkerVersionInfo {\n 10: optional string impl\n 20: optional string featureVersion\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional WorkflowExecution execution\n 20: optional WorkflowType type\n 30: optional i64 (js.type = \"Long\") startTime\n 40: optional i64 (js.type = \"Long\") closeTime\n 50: optional WorkflowExecutionCloseStatus closeStatus\n 60: optional i64 (js.type = \"Long\") historyLength\n 70: optional string parentDomainId\n 71: optional string parentDomainName\n 72: optional i64 parentInitatedId\n 80: optional WorkflowExecution parentExecution\n 90: optional i64 (js.type = \"Long\") executionTime\n 100: optional Memo memo\n 101: optional SearchAttributes searchAttributes\n 110: optional ResetPoints autoResetPoints\n 120: optional string taskList\n 130: optional bool isCron\n 140: optional i64 (js.type = \"Long\") updateTime\n 
150: optional map partitionConfig\n}\n\nstruct WorkflowExecutionConfiguration {\n 10: optional TaskList taskList\n 20: optional i32 executionStartToCloseTimeoutSeconds\n 30: optional i32 taskStartToCloseTimeoutSeconds\n// 40: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n}\n\nstruct TransientDecisionInfo {\n 10: optional HistoryEvent scheduledEvent\n 20: optional HistoryEvent startedEvent\n}\n\nstruct ScheduleActivityTaskDecisionAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional Header header\n 90: optional bool requestLocalDispatch\n}\n\nstruct ActivityLocalDispatchInfo{\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") scheduledTimestamp\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 50: optional binary taskToken\n}\n\nstruct RequestCancelActivityTaskDecisionAttributes {\n 10: optional string activityId\n}\n\nstruct StartTimerDecisionAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n}\n\nstruct CompleteWorkflowExecutionDecisionAttributes {\n 10: optional binary result\n}\n\nstruct FailWorkflowExecutionDecisionAttributes {\n 10: optional string reason\n 20: optional binary details\n}\n\nstruct CancelTimerDecisionAttributes {\n 10: optional string timerId\n}\n\nstruct CancelWorkflowExecutionDecisionAttributes {\n 10: optional binary details\n}\n\nstruct RequestCancelExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional string runId\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional string signalName\n 40: optional binary input\n 50: optional binary control\n 60: optional bool childWorkflowOnly\n}\n\nstruct UpsertWorkflowSearchAttributesDecisionAttributes {\n 10: optional SearchAttributes searchAttributes\n}\n\nstruct RecordMarkerDecisionAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional Header header\n}\n\nstruct ContinueAsNewWorkflowExecutionDecisionAttributes {\n 10: optional WorkflowType workflowType\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n 60: optional i32 backoffStartIntervalInSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional ContinueAsNewInitiator initiator\n 90: optional string failureReason\n 100: optional binary failureDetails\n 110: optional binary lastCompletionResult\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n 160: optional i32 jitterStartSeconds\n}\n\nstruct StartChildWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 
executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional RetryPolicy retryPolicy\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct Decision {\n 10: optional DecisionType decisionType\n 20: optional ScheduleActivityTaskDecisionAttributes scheduleActivityTaskDecisionAttributes\n 25: optional StartTimerDecisionAttributes startTimerDecisionAttributes\n 30: optional CompleteWorkflowExecutionDecisionAttributes completeWorkflowExecutionDecisionAttributes\n 35: optional FailWorkflowExecutionDecisionAttributes failWorkflowExecutionDecisionAttributes\n 40: optional RequestCancelActivityTaskDecisionAttributes requestCancelActivityTaskDecisionAttributes\n 50: optional CancelTimerDecisionAttributes cancelTimerDecisionAttributes\n 60: optional CancelWorkflowExecutionDecisionAttributes cancelWorkflowExecutionDecisionAttributes\n 70: optional RequestCancelExternalWorkflowExecutionDecisionAttributes requestCancelExternalWorkflowExecutionDecisionAttributes\n 80: optional RecordMarkerDecisionAttributes recordMarkerDecisionAttributes\n 90: optional ContinueAsNewWorkflowExecutionDecisionAttributes continueAsNewWorkflowExecutionDecisionAttributes\n 100: optional StartChildWorkflowExecutionDecisionAttributes startChildWorkflowExecutionDecisionAttributes\n 110: optional SignalExternalWorkflowExecutionDecisionAttributes signalExternalWorkflowExecutionDecisionAttributes\n 120: optional UpsertWorkflowSearchAttributesDecisionAttributes upsertWorkflowSearchAttributesDecisionAttributes\n}\n\nstruct WorkflowExecutionStartedEventAttributes {\n 10: optional WorkflowType workflowType\n 12: optional string parentWorkflowDomain\n 14: optional WorkflowExecution parentWorkflowExecution\n 16: optional i64 (js.type = \"Long\") parentInitiatedEventId\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n// 52: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 54: optional string continuedExecutionRunId\n 55: optional ContinueAsNewInitiator initiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 59: optional string originalExecutionRunId // This is the runID when the WorkflowExecutionStarted event is written\n 60: optional string identity\n 61: optional string firstExecutionRunId // This is the very first runID along the chain of ContinueAsNew and Reset.\n 62: optional i64 (js.type = \"Long\") firstScheduledTimeNano\n 70: optional RetryPolicy retryPolicy\n 80: optional i32 attempt\n 90: optional i64 (js.type = \"Long\") expirationTimestamp\n 100: optional string cronSchedule\n 110: optional i32 firstDecisionTaskBackoffSeconds\n 120: optional Memo memo\n 121: optional SearchAttributes searchAttributes\n 130: optional ResetPoints prevAutoResetPoints\n 140: optional Header header\n 150: optional map partitionConfig\n 160: optional string requestId\n}\n\nstruct ResetPoints{\n 10: optional list points\n}\n\n struct ResetPointInfo{\n 10: optional string binaryChecksum\n 20: optional string runId\n 30: optional i64 firstDecisionCompletedId\n 
40: optional i64 (js.type = \"Long\") createdTimeNano\n 50: optional i64 (js.type = \"Long\") expiringTimeNano //the time that the run is deleted due to retention\n 60: optional bool resettable // false if the resset point has pending childWFs/reqCancels/signalExternals.\n}\n\nstruct WorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n}\n\nenum ContinueAsNewInitiator {\n Decider,\n RetryPolicy,\n CronSchedule,\n}\n\nstruct WorkflowExecutionContinuedAsNewEventAttributes {\n 10: optional string newExecutionRunId\n 20: optional WorkflowType workflowType\n 30: optional TaskList taskList\n 40: optional binary input\n 50: optional i32 executionStartToCloseTimeoutSeconds\n 60: optional i32 taskStartToCloseTimeoutSeconds\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 80: optional i32 backoffStartIntervalInSeconds\n 90: optional ContinueAsNewInitiator initiator\n 100: optional string failureReason\n 110: optional binary failureDetails\n 120: optional binary lastCompletionResult\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct DecisionTaskScheduledEventAttributes {\n 10: optional TaskList taskList\n 20: optional i32 startToCloseTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") attempt\n}\n\nstruct DecisionTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n}\n\nstruct DecisionTaskCompletedEventAttributes {\n 10: optional binary executionContext\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct DecisionTaskTimedOutEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n // for reset workflow\n 40: optional string baseRunId\n 50: optional string newRunId\n 60: optional i64 (js.type = \"Long\") forkEventVersion\n 70: optional string reason\n 80: optional DecisionTaskTimedOutCause cause\n 90: optional string requestId\n}\n\nstruct DecisionTaskFailedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional DecisionTaskFailedCause cause\n 35: optional binary details\n 40: optional string identity\n 50: optional string reason\n // for reset workflow\n 60: optional string baseRunId\n 70: optional string newRunId\n 80: optional i64 (js.type = \"Long\") forkEventVersion\n 90: optional string binaryChecksum\n 100: optional string requestId\n}\n\nstruct ActivityTaskScheduledEventAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional 
RetryPolicy retryPolicy\n 120: optional Header header\n}\n\nstruct ActivityTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n 40: optional i32 attempt\n 50: optional string lastFailureReason\n 60: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n}\n\nstruct ActivityTaskFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct ActivityTaskTimedOutEventAttributes {\n 05: optional binary details\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n // For retry activity, it may have a failure before timeout. It's important to keep those information for debug.\n // Client can also provide the info for making next decision\n 40: optional string lastFailureReason\n 50: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCancelRequestedEventAttributes {\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct RequestCancelActivityTaskFailedEventAttributes{\n 10: optional string activityId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ActivityTaskCanceledEventAttributes {\n 10: optional binary details\n 20: optional i64 (js.type = \"Long\") latestCancelRequestedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct TimerStartedEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct TimerFiredEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct TimerCanceledEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct CancelTimerFailedEventAttributes {\n 10: optional string timerId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct WorkflowExecutionCancelRequestedEventAttributes {\n 10: optional string cause\n 20: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 30: optional WorkflowExecution externalWorkflowExecution\n 40: optional string identity\n 50: optional string requestId\n}\n\nstruct WorkflowExecutionCanceledEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional binary details\n}\n\nstruct MarkerRecordedEventAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional Header header\n}\n\nstruct WorkflowExecutionSignaledEventAttributes {\n 10: optional string signalName\n 20: optional binary input\n 30: optional string identity\n 40: optional string requestId\n}\n\nstruct 
WorkflowExecutionTerminatedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RequestCancelExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct RequestCancelExternalWorkflowExecutionFailedEventAttributes {\n 10: optional CancelExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionCancelRequestedEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n}\n\nstruct SignalExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional string signalName\n 50: optional binary input\n 60: optional binary control\n 70: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionFailedEventAttributes {\n 10: optional SignalExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionSignaledEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n}\n\nstruct UpsertWorkflowSearchAttributesEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionInitiatedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Header header\n 150: optional Memo memo\n 160: optional SearchAttributes searchAttributes\n 170: optional i32 delayStartSeconds\n 180: optional i32 jitterStartSeconds\n 190: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n}\n\nstruct StartChildWorkflowExecutionFailedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional ChildWorkflowExecutionFailedCause cause\n 50: optional binary control\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ChildWorkflowExecutionStartedEventAttributes {\n 10: optional string 
domain\n 20: optional i64 (js.type = \"Long\") initiatedEventId\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional Header header\n}\n\nstruct ChildWorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional WorkflowType workflowType\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionCanceledEventAttributes {\n 10: optional binary details\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTerminatedEventAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") initiatedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct HistoryEvent {\n 10: optional i64 (js.type = \"Long\") eventId\n 20: optional i64 (js.type = \"Long\") timestamp\n 30: optional EventType eventType\n 35: optional i64 (js.type = \"Long\") version\n 36: optional i64 (js.type = \"Long\") taskId\n 40: optional WorkflowExecutionStartedEventAttributes workflowExecutionStartedEventAttributes\n 50: optional WorkflowExecutionCompletedEventAttributes workflowExecutionCompletedEventAttributes\n 60: optional WorkflowExecutionFailedEventAttributes workflowExecutionFailedEventAttributes\n 70: optional WorkflowExecutionTimedOutEventAttributes workflowExecutionTimedOutEventAttributes\n 80: optional DecisionTaskScheduledEventAttributes decisionTaskScheduledEventAttributes\n 90: optional DecisionTaskStartedEventAttributes decisionTaskStartedEventAttributes\n 100: optional DecisionTaskCompletedEventAttributes decisionTaskCompletedEventAttributes\n 110: optional DecisionTaskTimedOutEventAttributes decisionTaskTimedOutEventAttributes\n 120: optional DecisionTaskFailedEventAttributes decisionTaskFailedEventAttributes\n 130: optional ActivityTaskScheduledEventAttributes activityTaskScheduledEventAttributes\n 140: optional ActivityTaskStartedEventAttributes activityTaskStartedEventAttributes\n 150: optional ActivityTaskCompletedEventAttributes activityTaskCompletedEventAttributes\n 160: optional ActivityTaskFailedEventAttributes activityTaskFailedEventAttributes\n 170: optional ActivityTaskTimedOutEventAttributes activityTaskTimedOutEventAttributes\n 180: optional TimerStartedEventAttributes timerStartedEventAttributes\n 190: optional TimerFiredEventAttributes timerFiredEventAttributes\n 200: optional ActivityTaskCancelRequestedEventAttributes 
activityTaskCancelRequestedEventAttributes\n 210: optional RequestCancelActivityTaskFailedEventAttributes requestCancelActivityTaskFailedEventAttributes\n 220: optional ActivityTaskCanceledEventAttributes activityTaskCanceledEventAttributes\n 230: optional TimerCanceledEventAttributes timerCanceledEventAttributes\n 240: optional CancelTimerFailedEventAttributes cancelTimerFailedEventAttributes\n 250: optional MarkerRecordedEventAttributes markerRecordedEventAttributes\n 260: optional WorkflowExecutionSignaledEventAttributes workflowExecutionSignaledEventAttributes\n 270: optional WorkflowExecutionTerminatedEventAttributes workflowExecutionTerminatedEventAttributes\n 280: optional WorkflowExecutionCancelRequestedEventAttributes workflowExecutionCancelRequestedEventAttributes\n 290: optional WorkflowExecutionCanceledEventAttributes workflowExecutionCanceledEventAttributes\n 300: optional RequestCancelExternalWorkflowExecutionInitiatedEventAttributes requestCancelExternalWorkflowExecutionInitiatedEventAttributes\n 310: optional RequestCancelExternalWorkflowExecutionFailedEventAttributes requestCancelExternalWorkflowExecutionFailedEventAttributes\n 320: optional ExternalWorkflowExecutionCancelRequestedEventAttributes externalWorkflowExecutionCancelRequestedEventAttributes\n 330: optional WorkflowExecutionContinuedAsNewEventAttributes workflowExecutionContinuedAsNewEventAttributes\n 340: optional StartChildWorkflowExecutionInitiatedEventAttributes startChildWorkflowExecutionInitiatedEventAttributes\n 350: optional StartChildWorkflowExecutionFailedEventAttributes startChildWorkflowExecutionFailedEventAttributes\n 360: optional ChildWorkflowExecutionStartedEventAttributes childWorkflowExecutionStartedEventAttributes\n 370: optional ChildWorkflowExecutionCompletedEventAttributes childWorkflowExecutionCompletedEventAttributes\n 380: optional ChildWorkflowExecutionFailedEventAttributes childWorkflowExecutionFailedEventAttributes\n 390: optional ChildWorkflowExecutionCanceledEventAttributes childWorkflowExecutionCanceledEventAttributes\n 400: optional ChildWorkflowExecutionTimedOutEventAttributes childWorkflowExecutionTimedOutEventAttributes\n 410: optional ChildWorkflowExecutionTerminatedEventAttributes childWorkflowExecutionTerminatedEventAttributes\n 420: optional SignalExternalWorkflowExecutionInitiatedEventAttributes signalExternalWorkflowExecutionInitiatedEventAttributes\n 430: optional SignalExternalWorkflowExecutionFailedEventAttributes signalExternalWorkflowExecutionFailedEventAttributes\n 440: optional ExternalWorkflowExecutionSignaledEventAttributes externalWorkflowExecutionSignaledEventAttributes\n 450: optional UpsertWorkflowSearchAttributesEventAttributes upsertWorkflowSearchAttributesEventAttributes\n}\n\nstruct History {\n 10: optional list events\n}\n\nstruct WorkflowExecutionFilter {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct WorkflowTypeFilter {\n 10: optional string name\n}\n\nstruct StartTimeFilter {\n 10: optional i64 (js.type = \"Long\") earliestTime\n 20: optional i64 (js.type = \"Long\") latestTime\n}\n\nstruct DomainInfo {\n 10: optional string name\n 20: optional DomainStatus status\n 30: optional string description\n 40: optional string ownerEmail\n // A key-value map for any customized purpose\n 50: optional map data\n 60: optional string uuid\n}\n\nstruct DomainConfiguration {\n 10: optional i32 workflowExecutionRetentionPeriodInDays\n 20: optional bool emitMetric\n 60: optional IsolationGroupConfiguration isolationgroups\n 70: optional 
BadBinaries badBinaries\n 80: optional ArchivalStatus historyArchivalStatus\n 90: optional string historyArchivalURI\n 100: optional ArchivalStatus visibilityArchivalStatus\n 110: optional string visibilityArchivalURI\n 120: optional AsyncWorkflowConfiguration AsyncWorkflowConfiguration\n}\n\nstruct FailoverInfo {\n 10: optional i64 (js.type = \"Long\") failoverVersion\n 20: optional i64 (js.type = \"Long\") failoverStartTimestamp\n 30: optional i64 (js.type = \"Long\") failoverExpireTimestamp\n 40: optional i32 completedShardCount\n 50: optional list pendingShards\n}\n\nstruct BadBinaries{\n 10: optional map binaries\n}\n\nstruct BadBinaryInfo{\n 10: optional string reason\n 20: optional string operator\n 30: optional i64 (js.type = \"Long\") createdTimeNano\n}\n\nstruct UpdateDomainInfo {\n 10: optional string description\n 20: optional string ownerEmail\n // A key-value map for any customized purpose\n 30: optional map data\n}\n\nstruct ClusterReplicationConfiguration {\n 10: optional string clusterName\n}\n\nstruct DomainReplicationConfiguration {\n 10: optional string activeClusterName\n 20: optional list clusters\n}\n\nstruct RegisterDomainRequest {\n 10: optional string name\n 20: optional string description\n 30: optional string ownerEmail\n 40: optional i32 workflowExecutionRetentionPeriodInDays\n 50: optional bool emitMetric = true\n 60: optional list clusters\n 70: optional string activeClusterName\n // A key-value map for any customized purpose\n 80: optional map data\n 90: optional string securityToken\n 120: optional bool isGlobalDomain\n 130: optional ArchivalStatus historyArchivalStatus\n 140: optional string historyArchivalURI\n 150: optional ArchivalStatus visibilityArchivalStatus\n 160: optional string visibilityArchivalURI\n}\n\nstruct ListDomainsRequest {\n 10: optional i32 pageSize\n 20: optional binary nextPageToken\n}\n\nstruct ListDomainsResponse {\n 10: optional list domains\n 20: optional binary nextPageToken\n}\n\nstruct DescribeDomainRequest {\n 10: optional string name\n 20: optional string uuid\n}\n\nstruct DescribeDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n 60: optional FailoverInfo failoverInfo\n}\n\nstruct UpdateDomainRequest {\n 10: optional string name\n 20: optional UpdateDomainInfo updatedInfo\n 30: optional DomainConfiguration configuration\n 40: optional DomainReplicationConfiguration replicationConfiguration\n 50: optional string securityToken\n 60: optional string deleteBadBinary\n 70: optional i32 failoverTimeoutInSeconds\n}\n\nstruct UpdateDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct DeprecateDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy 
workflowIdReusePolicy\n// 110: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Memo memo\n 141: optional SearchAttributes searchAttributes\n 150: optional Header header\n 160: optional i32 delayStartSeconds\n 170: optional i32 jitterStartSeconds\n 180: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n}\n\nstruct StartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct StartWorkflowExecutionAsyncRequest {\n 10: optional StartWorkflowExecutionRequest request\n}\n\nstruct StartWorkflowExecutionAsyncResponse {\n}\n\nstruct RestartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct DiagnoseWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n}\n\nstruct DiagnoseWorkflowExecutionResponse {\n 10: optional string domain\n 20: optional WorkflowExecution diagnosticWorkflowExecution\n}\n\nstruct PollForDecisionTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional string binaryChecksum\n}\n\nstruct PollForDecisionTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") previousStartedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n 51: optional i64 (js.type = 'Long') attempt\n 54: optional i64 (js.type = \"Long\") backlogCountHint\n 60: optional History history\n 70: optional binary nextPageToken\n 80: optional WorkflowQuery query\n 90: optional TaskList WorkflowExecutionTaskList\n 100: optional i64 (js.type = \"Long\") scheduledTimestamp\n 110: optional i64 (js.type = \"Long\") startedTimestamp\n 120: optional map queries\n 130: optional i64 (js.type = 'Long') nextEventId\n 140: optional i64 (js.type = 'Long') totalHistoryBytes\n 150: optional AutoConfigHint autoConfigHint\n}\n\nstruct StickyExecutionAttributes {\n 10: optional TaskList workerTaskList\n 20: optional i32 scheduleToStartTimeoutSeconds\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional list decisions\n 30: optional binary executionContext\n 40: optional string identity\n 50: optional StickyExecutionAttributes stickyAttributes\n 60: optional bool returnNewDecisionTask\n 70: optional bool forceCreateNewDecisionTask\n 80: optional string binaryChecksum\n 90: optional map queryResults\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional PollForDecisionTaskResponse decisionTask\n 20: optional map activitiesToDispatchLocally\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional DecisionTaskFailedCause cause\n 30: optional binary details\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct PollForActivityTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional TaskListMetadata taskListMetadata\n}\n\nstruct PollForActivityTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional string activityId\n 40: optional ActivityType activityType\n 50: optional binary input\n 70: optional i64 (js.type = \"Long\") scheduledTimestamp\n 80: optional i32 scheduleToCloseTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") startedTimestamp\n 100: optional i32 
startToCloseTimeoutSeconds\n 110: optional i32 heartbeatTimeoutSeconds\n 120: optional i32 attempt\n 130: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 140: optional binary heartbeatDetails\n 150: optional WorkflowType workflowType\n 160: optional string workflowDomain\n 170: optional Header header\n 180: optional AutoConfigHint autoConfigHint\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatResponse {\n 10: optional bool cancelRequested\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional binary result\n 30: optional string identity\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional string reason\n 30: optional binary details\n 40: optional string identity\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RespondActivityTaskCompletedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary result\n 60: optional string identity\n}\n\nstruct RespondActivityTaskFailedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional string reason\n 60: optional binary details\n 70: optional string identity\n}\n\nstruct RespondActivityTaskCanceledByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n 40: optional string requestId\n 50: optional string cause\n 60: optional string firstExecutionRunID\n}\n\nstruct GetWorkflowExecutionHistoryRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional i32 maximumPageSize\n 40: optional binary nextPageToken\n 50: optional bool waitForNewEvent\n 60: optional HistoryEventFilterType HistoryEventFilterType\n 70: optional bool skipArchival\n}\n\nstruct GetWorkflowExecutionHistoryResponse {\n 10: optional History history\n 11: optional list rawHistory\n 20: optional binary nextPageToken\n 30: optional bool archived\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string signalName\n 40: optional binary input\n 50: optional string identity\n 60: optional string requestId\n 70: optional binary control\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional string 
signalName\n 120: optional binary signalInput\n 130: optional binary control\n 140: optional RetryPolicy retryPolicy\n 150: optional string cronSchedule\n 160: optional Memo memo\n 161: optional SearchAttributes searchAttributes\n 170: optional Header header\n 180: optional i32 delayStartSeconds\n 190: optional i32 jitterStartSeconds\n 200: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n}\n\nstruct SignalWithStartWorkflowExecutionAsyncRequest {\n 10: optional SignalWithStartWorkflowExecutionRequest request\n}\n\nstruct SignalWithStartWorkflowExecutionAsyncResponse {\n}\n\nstruct RestartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional string identity\n}\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional binary details\n 50: optional string identity\n 60: optional string firstExecutionRunID\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional i64 (js.type = \"Long\") decisionFinishEventId\n 50: optional string requestId\n 60: optional bool skipSignalReapply\n}\n\nstruct ResetWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct ListOpenWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n}\n\nstruct ListOpenWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListClosedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n 70: optional WorkflowExecutionCloseStatus statusFilter\n}\n\nstruct ListClosedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListArchivedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListArchivedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct CountWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional string query\n}\n\nstruct CountWorkflowExecutionsResponse {\n 10: optional i64 count\n}\n\nstruct GetSearchAttributesResponse {\n 10: optional map keys\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional WorkflowQuery query\n // QueryRejectCondition can used to reject the query if workflow state does not satisify condition\n 40: optional QueryRejectCondition queryRejectCondition\n 50: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct QueryRejected {\n 10: optional WorkflowExecutionCloseStatus closeStatus\n}\n\nstruct 
QueryWorkflowResponse {\n 10: optional binary queryResult\n 20: optional QueryRejected queryRejected\n}\n\nstruct WorkflowQuery {\n 10: optional string queryType\n 20: optional binary queryArgs\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondQueryTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional QueryTaskCompletedType completedType\n 30: optional binary queryResult\n 40: optional string errorMessage\n 50: optional WorkerVersionInfo workerVersionInfo\n}\n\nstruct WorkflowQueryResult {\n 10: optional QueryResultType resultType\n 20: optional binary answer\n 30: optional string errorMessage\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct PendingActivityInfo {\n 10: optional string activityID\n 20: optional ActivityType activityType\n 30: optional PendingActivityState state\n 40: optional binary heartbeatDetails\n 50: optional i64 (js.type = \"Long\") lastHeartbeatTimestamp\n 60: optional i64 (js.type = \"Long\") lastStartedTimestamp\n 70: optional i32 attempt\n 80: optional i32 maximumAttempts\n 90: optional i64 (js.type = \"Long\") scheduledTimestamp\n 100: optional i64 (js.type = \"Long\") expirationTimestamp\n 110: optional string lastFailureReason\n 120: optional string lastWorkerIdentity\n 130: optional binary lastFailureDetails\n 140: optional string startedWorkerIdentity\n 150: optional i64 (js.type = \"Long\") scheduleID\n}\n\nstruct PendingDecisionInfo {\n 10: optional PendingDecisionState state\n 20: optional i64 (js.type = \"Long\") scheduledTimestamp\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 attempt\n 50: optional i64 (js.type = \"Long\") originalScheduledTimestamp\n 60: optional i64 (js.type = \"Long\") scheduleID\n}\n\nstruct PendingChildExecutionInfo {\n 1: optional string domain\n 10: optional string workflowID\n 20: optional string runID\n 30: optional string workflowTypName\n 40: optional i64 (js.type = \"Long\") initiatedID\n 50: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct DescribeWorkflowExecutionResponse {\n 10: optional WorkflowExecutionConfiguration executionConfiguration\n 20: optional WorkflowExecutionInfo workflowExecutionInfo\n 30: optional list pendingActivities\n 40: optional list pendingChildren\n 50: optional PendingDecisionInfo pendingDecision\n}\n\nstruct DescribeTaskListRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional TaskListType taskListType\n 40: optional bool includeTaskListStatus\n}\n\nstruct DescribeTaskListResponse {\n 10: optional list pollers\n 20: optional TaskListStatus taskListStatus\n}\n\nstruct GetTaskListsByDomainRequest {\n 10: optional string domainName\n}\n\nstruct GetTaskListsByDomainResponse {\n 10: optional map decisionTaskListMap\n 20: optional map activityTaskListMap\n}\n\nstruct ListTaskListPartitionsRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n}\n\nstruct TaskListPartitionMetadata {\n 10: optional string key\n 20: optional string ownerHostName\n}\n\nstruct ListTaskListPartitionsResponse {\n 10: optional list activityTaskListPartitions\n 20: optional list decisionTaskListPartitions\n}\n\nstruct TaskListStatus {\n 10: optional i64 (js.type = \"Long\") backlogCountHint\n 20: optional i64 (js.type = 
\"Long\") readLevel\n 30: optional i64 (js.type = \"Long\") ackLevel\n 35: optional double ratePerSecond\n 40: optional TaskIDBlock taskIDBlock\n}\n\nstruct TaskIDBlock {\n 10: optional i64 (js.type = \"Long\") startID\n 20: optional i64 (js.type = \"Long\") endID\n}\n\n//At least one of the parameters needs to be provided\nstruct DescribeHistoryHostRequest {\n 10: optional string hostAddress //ip:port\n 20: optional i32 shardIdForHost\n 30: optional WorkflowExecution executionForHost\n}\n\nstruct RemoveTaskRequest {\n 10: optional i32 shardID\n 20: optional i32 type\n 30: optional i64 (js.type = \"Long\") taskID\n 40: optional i64 (js.type = \"Long\") visibilityTimestamp\n 50: optional string clusterName\n}\n\nstruct CloseShardRequest {\n 10: optional i32 shardID\n}\n\nstruct ResetQueueRequest {\n 10: optional i32 shardID\n 20: optional string clusterName\n 30: optional i32 type\n}\n\nstruct DescribeQueueRequest {\n 10: optional i32 shardID\n 20: optional string clusterName\n 30: optional i32 type\n}\n\nstruct DescribeQueueResponse {\n 10: optional list processingQueueStates\n}\n\nstruct DescribeShardDistributionRequest {\n 10: optional i32 pageSize\n 20: optional i32 pageID\n}\n\nstruct DescribeShardDistributionResponse {\n 10: optional i32 numberOfShards\n\n // ShardID to Address (ip:port) map\n 20: optional map shards\n}\n\nstruct DescribeHistoryHostResponse{\n 10: optional i32 numberOfShards\n 20: optional list shardIDs\n 30: optional DomainCacheInfo domainCache\n 40: optional string shardControllerStatus\n 50: optional string address\n}\n\nstruct DomainCacheInfo{\n 10: optional i64 numOfItemsInCacheByID\n 20: optional i64 numOfItemsInCacheByName\n}\n\nenum TaskListType {\n /*\n * Decision type of tasklist\n */\n Decision,\n /*\n * Activity type of tasklist\n */\n Activity,\n}\n\nstruct PollerInfo {\n // Unix Nano\n 10: optional i64 (js.type = \"Long\") lastAccessTime\n 20: optional string identity\n 30: optional double ratePerSecond\n}\n\nstruct RetryPolicy {\n // Interval of the first retry. If coefficient is 1.0 then it is used for all retries.\n 10: optional i32 initialIntervalInSeconds\n\n // Coefficient used to calculate the next retry interval.\n // The next retry interval is previous interval multiplied by the coefficient.\n // Must be 1 or larger.\n 20: optional double backoffCoefficient\n\n // Maximum interval between retries. Exponential backoff leads to interval increase.\n // This value is the cap of the increase. Default is 100x of initial interval.\n 30: optional i32 maximumIntervalInSeconds\n\n // Maximum number of attempts. When exceeded the retries stop even if not expired yet.\n // Must be 1 or bigger. Default is unlimited.\n 40: optional i32 maximumAttempts\n\n // Non-Retriable errors. 
Will stop retrying if error matches this list.\n 50: optional list nonRetriableErrorReasons\n\n // Expiration time for the whole retry process.\n 60: optional i32 expirationIntervalInSeconds\n}\n\n// HistoryBranchRange represents a piece of range for a branch.\nstruct HistoryBranchRange{\n // branchID of original branch forked from\n 10: optional string branchID\n // beinning node for the range, inclusive\n 20: optional i64 beginNodeID\n // ending node for the range, exclusive\n 30: optional i64 endNodeID\n}\n\n// For history persistence to serialize/deserialize branch details\nstruct HistoryBranch{\n 10: optional string treeID\n 20: optional string branchID\n 30: optional list ancestors\n}\n\n// VersionHistoryItem contains signal eventID and the corresponding version\nstruct VersionHistoryItem{\n 10: optional i64 (js.type = \"Long\") eventID\n 20: optional i64 (js.type = \"Long\") version\n}\n\n// VersionHistory contains the version history of a branch\nstruct VersionHistory{\n 10: optional binary branchToken\n 20: optional list items\n}\n\n// VersionHistories contains all version histories from all branches\nstruct VersionHistories{\n 10: optional i32 currentVersionHistoryIndex\n 20: optional list histories\n}\n\n// ReapplyEventsRequest is the request for reapply events API\nstruct ReapplyEventsRequest{\n 10: optional string domainName\n 20: optional WorkflowExecution workflowExecution\n 30: optional DataBlob events\n}\n\n// SupportedClientVersions contains the support versions for client library\nstruct SupportedClientVersions{\n 10: optional string goSdk\n 20: optional string javaSdk\n}\n\n// ClusterInfo contains information about cadence cluster\nstruct ClusterInfo{\n 10: optional SupportedClientVersions supportedClientVersions\n}\n\nstruct RefreshWorkflowTasksRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct FeatureFlags {\n\t10: optional bool WorkflowExecutionAlreadyCompletedErrorEnabled\n}\n\nenum CrossClusterTaskType {\n StartChildExecution\n CancelExecution\n SignalExecution\n RecordChildWorkflowExecutionComplete\n ApplyParentClosePolicy\n}\n\nenum CrossClusterTaskFailedCause {\n DOMAIN_NOT_ACTIVE\n DOMAIN_NOT_EXISTS\n WORKFLOW_ALREADY_RUNNING\n WORKFLOW_NOT_EXISTS\n WORKFLOW_ALREADY_COMPLETED\n UNCATEGORIZED\n}\n\nenum GetTaskFailedCause {\n SERVICE_BUSY\n TIMEOUT\n SHARD_OWNERSHIP_LOST\n UNCATEGORIZED\n}\n\nstruct CrossClusterTaskInfo {\n 10: optional string domainID\n 20: optional string workflowID\n 30: optional string runID\n 40: optional CrossClusterTaskType taskType\n 50: optional i16 taskState\n 60: optional i64 (js.type = \"Long\") taskID\n 70: optional i64 (js.type = \"Long\") visibilityTimestamp\n}\n\nstruct CrossClusterStartChildExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string requestID\n 30: optional i64 (js.type = \"Long\") initiatedEventID\n 40: optional StartChildWorkflowExecutionInitiatedEventAttributes initiatedEventAttributes\n // targetRunID is for scheduling first decision task\n // targetWorkflowID is available in initiatedEventAttributes\n 50: optional string targetRunID\n 60: optional map partitionConfig\n}\n\nstruct CrossClusterStartChildExecutionResponseAttributes {\n 10: optional string runID\n}\n\nstruct CrossClusterCancelExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional string requestID\n 50: optional i64 (js.type = \"Long\") initiatedEventID\n 60: optional bool 
childWorkflowOnly\n}\n\nstruct CrossClusterCancelExecutionResponseAttributes {\n}\n\nstruct CrossClusterSignalExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional string requestID\n 50: optional i64 (js.type = \"Long\") initiatedEventID\n 60: optional bool childWorkflowOnly\n 70: optional string signalName\n 80: optional binary signalInput\n 90: optional binary control\n}\n\nstruct CrossClusterSignalExecutionResponseAttributes {\n}\n\nstruct CrossClusterRecordChildWorkflowExecutionCompleteRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional i64 (js.type = \"Long\") initiatedEventID\n 50: optional HistoryEvent completionEvent\n}\n\nstruct CrossClusterRecordChildWorkflowExecutionCompleteResponseAttributes {\n}\n\nstruct ApplyParentClosePolicyAttributes {\n 10: optional string childDomainID\n 20: optional string childWorkflowID\n 30: optional string childRunID\n 40: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct ApplyParentClosePolicyStatus {\n 10: optional bool completed\n 20: optional CrossClusterTaskFailedCause failedCause\n}\n\nstruct ApplyParentClosePolicyRequest {\n 10: optional ApplyParentClosePolicyAttributes child\n 20: optional ApplyParentClosePolicyStatus status\n}\n\nstruct CrossClusterApplyParentClosePolicyRequestAttributes {\n 10: optional list children\n}\n\nstruct ApplyParentClosePolicyResult {\n 10: optional ApplyParentClosePolicyAttributes child\n 20: optional CrossClusterTaskFailedCause failedCause\n}\n\nstruct CrossClusterApplyParentClosePolicyResponseAttributes {\n 10: optional list childrenStatus\n}\n\nstruct CrossClusterTaskRequest {\n 10: optional CrossClusterTaskInfo taskInfo\n 20: optional CrossClusterStartChildExecutionRequestAttributes startChildExecutionAttributes\n 30: optional CrossClusterCancelExecutionRequestAttributes cancelExecutionAttributes\n 40: optional CrossClusterSignalExecutionRequestAttributes signalExecutionAttributes\n 50: optional CrossClusterRecordChildWorkflowExecutionCompleteRequestAttributes recordChildWorkflowExecutionCompleteAttributes\n 60: optional CrossClusterApplyParentClosePolicyRequestAttributes applyParentClosePolicyAttributes\n}\n\nstruct CrossClusterTaskResponse {\n 10: optional i64 (js.type = \"Long\") taskID\n 20: optional CrossClusterTaskType taskType\n 30: optional i16 taskState\n 40: optional CrossClusterTaskFailedCause failedCause\n 50: optional CrossClusterStartChildExecutionResponseAttributes startChildExecutionAttributes\n 60: optional CrossClusterCancelExecutionResponseAttributes cancelExecutionAttributes\n 70: optional CrossClusterSignalExecutionResponseAttributes signalExecutionAttributes\n 80: optional CrossClusterRecordChildWorkflowExecutionCompleteResponseAttributes recordChildWorkflowExecutionCompleteAttributes\n 90: optional CrossClusterApplyParentClosePolicyResponseAttributes applyParentClosePolicyAttributes\n}\n\nstruct GetCrossClusterTasksRequest {\n 10: optional list shardIDs\n 20: optional string targetCluster\n}\n\nstruct GetCrossClusterTasksResponse {\n 10: optional map> tasksByShard\n 20: optional map failedCauseByShard\n}\n\nstruct RespondCrossClusterTasksCompletedRequest {\n 10: optional i32 shardID\n 20: optional string targetCluster\n 30: optional list taskResponses\n 40: optional bool fetchNewTasks\n}\n\nstruct RespondCrossClusterTasksCompletedResponse {\n 10: optional list tasks\n}\n\nenum 
IsolationGroupState {\n INVALID,\n HEALTHY,\n DRAINED,\n}\n\nstruct IsolationGroupPartition {\n 10: optional string name\n 20: optional IsolationGroupState state\n}\n\nstruct IsolationGroupConfiguration {\n 10: optional list isolationGroups\n}\n\nstruct AsyncWorkflowConfiguration {\n 10: optional bool enabled\n // PredefinedQueueName is the name of the predefined queue in cadence server config's asyncWorkflowQueues\n 20: optional string predefinedQueueName\n // queueType is the type of the queue if predefined_queue_name is not used\n 30: optional string queueType\n // queueConfig is the configuration for the queue if predefined_queue_name is not used\n 40: optional DataBlob queueConfig\n}\n\n/**\n* Any is a logical duplicate of google.protobuf.Any.\n*\n* The intent of the type is the same, but it is not intended to be directly\n* compatible with google.protobuf.Any or any Thrift equivalent - this blob is\n* RPC-type agnostic by design (as the underlying data may be transported over\n* proto or thrift), and the data-bytes may be in any encoding.\n*\n* This is intentionally different from DataBlob, which supports only a handful\n* of known encodings so it can be interpreted everywhere. Any supports literally\n* any contents, and needs to be considered opaque until it is given to something\n* that is expecting it.\n*\n* See ValueType to interpret the contents.\n**/\nstruct Any {\n // Type-string describing value's contents, and intentionally avoiding the\n // name \"type\" as it is often a special term.\n // This should usually be a hard-coded string of some kind.\n 10: optional string ValueType\n // Arbitrarily-encoded bytes, to be deserialized by a runtime implementation.\n // The contents are described by ValueType.\n 20: optional binary Value\n}\n\nstruct AutoConfigHint {\n 10: optional bool enableAutoConfig\n 20: optional i64 pollerWaitTimeInMs\n}\n" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence\n\nexception BadRequestError {\n 1: required string message\n}\n\nexception InternalServiceError {\n 1: required string message\n}\n\nexception InternalDataInconsistencyError {\n 1: required string message\n}\n\nexception DomainAlreadyExistsError {\n 1: required string message\n}\n\nexception WorkflowExecutionAlreadyStartedError {\n 10: optional string message\n 20: optional string startRequestId\n 30: optional string runId\n}\n\nexception WorkflowExecutionAlreadyCompletedError {\n 1: required string message\n}\n\nexception EntityNotExistsError {\n 1: required string message\n 2: optional string currentCluster\n 3: optional string activeCluster\n // activeClusters is a list of active clusters for active-active domain\n 4: required list activeClusters\n}\n\nexception ServiceBusyError {\n 1: required string message\n 2: optional string reason\n}\n\nexception CancellationAlreadyRequestedError {\n 1: required string message\n}\n\nexception QueryFailedError {\n 1: required string message\n}\n\nexception DomainNotActiveError {\n 1: required string message\n 2: required string domainName\n 3: required string currentCluster\n 4: required string activeCluster\n // activeClusters is a list of active clusters for active-active domain\n 5: required list activeClusters\n}\n\nexception LimitExceededError {\n 1: required string message\n}\n\nexception AccessDeniedError {\n 1: required string message\n}\n\nexception RetryTaskV2Error {\n 1: required string message\n 2: optional string domainId\n 3: optional string workflowId\n 4: optional string runId\n 5: optional i64 (js.type = \"Long\") startEventId\n 6: optional i64 (js.type = \"Long\") startEventVersion\n 7: optional i64 (js.type = \"Long\") endEventId\n 8: optional i64 (js.type = \"Long\") endEventVersion\n}\n\nexception ClientVersionNotSupportedError {\n 1: required string featureVersion\n 2: required string clientImpl\n 3: required string supportedVersions\n}\n\nexception FeatureNotEnabledError {\n 1: required string featureFlag\n}\n\nexception CurrentBranchChangedError {\n 10: required string message\n 20: required binary currentBranchToken\n}\n\nexception RemoteSyncMatchedError {\n 10: required string message\n}\n\nexception StickyWorkerUnavailableError {\n 1: required string message\n}\n\nexception TaskListNotOwnedByHostError {\n 1: required string ownedByIdentity\n 2: required string myIdentity\n 3: required string tasklistName\n}\n\nenum WorkflowIdReusePolicy {\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running, and the last execution close state is in\n * [terminated, cancelled, timeouted, failed].\n */\n AllowDuplicateFailedOnly,\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running.\n */\n AllowDuplicate,\n /*\n * do not allow start a workflow execution using the same workflow ID at all\n */\n RejectDuplicate,\n /*\n * if a workflow is running using the same workflow ID, terminate it and start a new one\n */\n TerminateIfRunning,\n}\n\nenum DomainStatus {\n REGISTERED,\n DEPRECATED,\n DELETED,\n}\n\nenum TimeoutType {\n START_TO_CLOSE,\n SCHEDULE_TO_START,\n SCHEDULE_TO_CLOSE,\n HEARTBEAT,\n}\n\nenum ParentClosePolicy 
{\n\tABANDON,\n\tREQUEST_CANCEL,\n\tTERMINATE,\n}\n\n\n// whenever this list of decision is changed\n// do change the mutableStateBuilder.go\n// function shouldBufferEvent\n// to make sure wo do the correct event ordering\nenum DecisionType {\n ScheduleActivityTask,\n RequestCancelActivityTask,\n StartTimer,\n CompleteWorkflowExecution,\n FailWorkflowExecution,\n CancelTimer,\n CancelWorkflowExecution,\n RequestCancelExternalWorkflowExecution,\n RecordMarker,\n ContinueAsNewWorkflowExecution,\n StartChildWorkflowExecution,\n SignalExternalWorkflowExecution,\n UpsertWorkflowSearchAttributes,\n}\n\nenum EventType {\n WorkflowExecutionStarted,\n WorkflowExecutionCompleted,\n WorkflowExecutionFailed,\n WorkflowExecutionTimedOut,\n DecisionTaskScheduled,\n DecisionTaskStarted,\n DecisionTaskCompleted,\n DecisionTaskTimedOut\n DecisionTaskFailed,\n ActivityTaskScheduled,\n ActivityTaskStarted,\n ActivityTaskCompleted,\n ActivityTaskFailed,\n ActivityTaskTimedOut,\n ActivityTaskCancelRequested,\n RequestCancelActivityTaskFailed,\n ActivityTaskCanceled,\n TimerStarted,\n TimerFired,\n CancelTimerFailed,\n TimerCanceled,\n WorkflowExecutionCancelRequested,\n WorkflowExecutionCanceled,\n RequestCancelExternalWorkflowExecutionInitiated,\n RequestCancelExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionCancelRequested,\n MarkerRecorded,\n WorkflowExecutionSignaled,\n WorkflowExecutionTerminated,\n WorkflowExecutionContinuedAsNew,\n StartChildWorkflowExecutionInitiated,\n StartChildWorkflowExecutionFailed,\n ChildWorkflowExecutionStarted,\n ChildWorkflowExecutionCompleted,\n ChildWorkflowExecutionFailed,\n ChildWorkflowExecutionCanceled,\n ChildWorkflowExecutionTimedOut,\n ChildWorkflowExecutionTerminated,\n SignalExternalWorkflowExecutionInitiated,\n SignalExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionSignaled,\n UpsertWorkflowSearchAttributes,\n}\n\nenum DecisionTaskFailedCause {\n UNHANDLED_DECISION,\n BAD_SCHEDULE_ACTIVITY_ATTRIBUTES,\n BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES,\n BAD_START_TIMER_ATTRIBUTES,\n BAD_CANCEL_TIMER_ATTRIBUTES,\n BAD_RECORD_MARKER_ATTRIBUTES,\n BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CONTINUE_AS_NEW_ATTRIBUTES,\n START_TIMER_DUPLICATE_ID,\n RESET_STICKY_TASKLIST,\n WORKFLOW_WORKER_UNHANDLED_FAILURE,\n BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_START_CHILD_EXECUTION_ATTRIBUTES,\n FORCE_CLOSE_DECISION,\n FAILOVER_CLOSE_DECISION,\n BAD_SIGNAL_INPUT_SIZE,\n RESET_WORKFLOW,\n BAD_BINARY,\n SCHEDULE_ACTIVITY_DUPLICATE_ID,\n BAD_SEARCH_ATTRIBUTES,\n}\n\nenum DecisionTaskTimedOutCause {\n TIMEOUT,\n RESET,\n}\n\nenum CancelExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n WORKFLOW_ALREADY_COMPLETED,\n}\n\nenum SignalExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n WORKFLOW_ALREADY_COMPLETED,\n}\n\nenum ChildWorkflowExecutionFailedCause {\n WORKFLOW_ALREADY_RUNNING,\n}\n\n// TODO: when migrating to gRPC, add a running / none status,\n// currently, customer is using null / nil as an indication\n// that workflow is still running\nenum WorkflowExecutionCloseStatus {\n COMPLETED,\n FAILED,\n CANCELED,\n TERMINATED,\n CONTINUED_AS_NEW,\n TIMED_OUT,\n}\n\nenum QueryTaskCompletedType {\n COMPLETED,\n FAILED,\n}\n\nenum QueryResultType {\n ANSWERED,\n FAILED,\n}\n\nenum PendingActivityState {\n SCHEDULED,\n STARTED,\n CANCEL_REQUESTED,\n}\n\nenum 
PendingDecisionState {\n SCHEDULED,\n STARTED,\n}\n\nenum HistoryEventFilterType {\n ALL_EVENT,\n CLOSE_EVENT,\n}\n\nenum TaskListKind {\n NORMAL,\n STICKY,\n}\n\nenum ArchivalStatus {\n DISABLED,\n ENABLED,\n}\n\nenum CronOverlapPolicy {\n SKIPPED,\n BUFFERONE,\n}\n\nenum IndexedValueType {\n STRING,\n KEYWORD,\n INT,\n DOUBLE,\n BOOL,\n DATETIME,\n}\n\nstruct Header {\n 10: optional map fields\n}\n\nstruct WorkflowType {\n 10: optional string name\n}\n\nstruct ActivityType {\n 10: optional string name\n}\n\nstruct TaskList {\n 10: optional string name\n 20: optional TaskListKind kind\n}\n\nenum EncodingType {\n ThriftRW,\n JSON,\n}\n\nenum QueryRejectCondition {\n // NOT_OPEN indicates that query should be rejected if workflow is not open\n NOT_OPEN\n // NOT_COMPLETED_CLEANLY indicates that query should be rejected if workflow did not complete cleanly\n NOT_COMPLETED_CLEANLY\n}\n\nenum QueryConsistencyLevel {\n // EVENTUAL indicates that query should be eventually consistent\n EVENTUAL\n // STRONG indicates that any events that came before query should be reflected in workflow state before running query\n STRONG\n}\n\nstruct DataBlob {\n 10: optional EncodingType EncodingType\n 20: optional binary Data\n}\n\nstruct TaskListMetadata {\n 10: optional double maxTasksPerSecond\n}\n\nstruct WorkflowExecution {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct Memo {\n 10: optional map fields\n}\n\nstruct SearchAttributes {\n 10: optional map indexedFields\n}\n\nstruct WorkerVersionInfo {\n 10: optional string impl\n 20: optional string featureVersion\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional WorkflowExecution execution\n 20: optional WorkflowType type\n 30: optional i64 (js.type = \"Long\") startTime\n 40: optional i64 (js.type = \"Long\") closeTime\n 50: optional WorkflowExecutionCloseStatus closeStatus\n 60: optional i64 (js.type = \"Long\") historyLength\n 70: optional string parentDomainId\n 71: optional string parentDomainName\n 72: optional i64 parentInitatedId\n 80: optional WorkflowExecution parentExecution\n 90: optional i64 (js.type = \"Long\") executionTime\n 100: optional Memo memo\n 101: optional SearchAttributes searchAttributes\n 110: optional ResetPoints autoResetPoints\n 120: optional string taskList\n 130: optional bool isCron\n 140: optional i64 (js.type = \"Long\") updateTime\n 150: optional map partitionConfig\n}\n\nstruct WorkflowExecutionConfiguration {\n 10: optional TaskList taskList\n 20: optional i32 executionStartToCloseTimeoutSeconds\n 30: optional i32 taskStartToCloseTimeoutSeconds\n// 40: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n}\n\nstruct TransientDecisionInfo {\n 10: optional HistoryEvent scheduledEvent\n 20: optional HistoryEvent startedEvent\n}\n\nstruct ScheduleActivityTaskDecisionAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional Header header\n 90: optional bool requestLocalDispatch\n}\n\nstruct ActivityLocalDispatchInfo{\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") scheduledTimestamp\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 (js.type = \"Long\") 
scheduledTimestampOfThisAttempt\n 50: optional binary taskToken\n}\n\nstruct RequestCancelActivityTaskDecisionAttributes {\n 10: optional string activityId\n}\n\nstruct StartTimerDecisionAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n}\n\nstruct CompleteWorkflowExecutionDecisionAttributes {\n 10: optional binary result\n}\n\nstruct FailWorkflowExecutionDecisionAttributes {\n 10: optional string reason\n 20: optional binary details\n}\n\nstruct CancelTimerDecisionAttributes {\n 10: optional string timerId\n}\n\nstruct CancelWorkflowExecutionDecisionAttributes {\n 10: optional binary details\n}\n\nstruct RequestCancelExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional string runId\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional string signalName\n 40: optional binary input\n 50: optional binary control\n 60: optional bool childWorkflowOnly\n}\n\nstruct UpsertWorkflowSearchAttributesDecisionAttributes {\n 10: optional SearchAttributes searchAttributes\n}\n\nstruct RecordMarkerDecisionAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional Header header\n}\n\nstruct ContinueAsNewWorkflowExecutionDecisionAttributes {\n 10: optional WorkflowType workflowType\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n 60: optional i32 backoffStartIntervalInSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional ContinueAsNewInitiator initiator\n 90: optional string failureReason\n 100: optional binary failureDetails\n 110: optional binary lastCompletionResult\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n 160: optional i32 jitterStartSeconds\n 170: optional CronOverlapPolicy cronOverlapPolicy\n}\n\nstruct StartChildWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional RetryPolicy retryPolicy\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n 160: optional CronOverlapPolicy cronOverlapPolicy\n}\n\nstruct Decision {\n 10: optional DecisionType decisionType\n 20: optional ScheduleActivityTaskDecisionAttributes scheduleActivityTaskDecisionAttributes\n 25: optional StartTimerDecisionAttributes startTimerDecisionAttributes\n 30: optional CompleteWorkflowExecutionDecisionAttributes completeWorkflowExecutionDecisionAttributes\n 35: optional FailWorkflowExecutionDecisionAttributes failWorkflowExecutionDecisionAttributes\n 40: optional RequestCancelActivityTaskDecisionAttributes requestCancelActivityTaskDecisionAttributes\n 50: optional CancelTimerDecisionAttributes 
cancelTimerDecisionAttributes\n 60: optional CancelWorkflowExecutionDecisionAttributes cancelWorkflowExecutionDecisionAttributes\n 70: optional RequestCancelExternalWorkflowExecutionDecisionAttributes requestCancelExternalWorkflowExecutionDecisionAttributes\n 80: optional RecordMarkerDecisionAttributes recordMarkerDecisionAttributes\n 90: optional ContinueAsNewWorkflowExecutionDecisionAttributes continueAsNewWorkflowExecutionDecisionAttributes\n 100: optional StartChildWorkflowExecutionDecisionAttributes startChildWorkflowExecutionDecisionAttributes\n 110: optional SignalExternalWorkflowExecutionDecisionAttributes signalExternalWorkflowExecutionDecisionAttributes\n 120: optional UpsertWorkflowSearchAttributesDecisionAttributes upsertWorkflowSearchAttributesDecisionAttributes\n}\n\nstruct WorkflowExecutionStartedEventAttributes {\n 10: optional WorkflowType workflowType\n 12: optional string parentWorkflowDomain\n 14: optional WorkflowExecution parentWorkflowExecution\n 16: optional i64 (js.type = \"Long\") parentInitiatedEventId\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n// 52: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 54: optional string continuedExecutionRunId\n 55: optional ContinueAsNewInitiator initiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 59: optional string originalExecutionRunId // This is the runID when the WorkflowExecutionStarted event is written\n 60: optional string identity\n 61: optional string firstExecutionRunId // This is the very first runID along the chain of ContinueAsNew and Reset.\n 62: optional i64 (js.type = \"Long\") firstScheduledTimeNano\n 70: optional RetryPolicy retryPolicy\n 80: optional i32 attempt\n 90: optional i64 (js.type = \"Long\") expirationTimestamp\n 100: optional string cronSchedule\n 110: optional i32 firstDecisionTaskBackoffSeconds\n 120: optional Memo memo\n 121: optional SearchAttributes searchAttributes\n 130: optional ResetPoints prevAutoResetPoints\n 140: optional Header header\n 150: optional map partitionConfig\n 160: optional string requestId\n 170: optional CronOverlapPolicy cronOverlapPolicy\n}\n\nstruct ResetPoints{\n 10: optional list points\n}\n\n struct ResetPointInfo{\n 10: optional string binaryChecksum\n 20: optional string runId\n 30: optional i64 firstDecisionCompletedId\n 40: optional i64 (js.type = \"Long\") createdTimeNano\n 50: optional i64 (js.type = \"Long\") expiringTimeNano //the time that the run is deleted due to retention\n 60: optional bool resettable // false if the resset point has pending childWFs/reqCancels/signalExternals.\n}\n\nstruct WorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n}\n\nenum ContinueAsNewInitiator {\n Decider,\n RetryPolicy,\n CronSchedule,\n}\n\nstruct WorkflowExecutionContinuedAsNewEventAttributes {\n 10: optional string newExecutionRunId\n 20: optional WorkflowType workflowType\n 30: optional TaskList taskList\n 40: optional binary input\n 50: optional i32 
executionStartToCloseTimeoutSeconds\n 60: optional i32 taskStartToCloseTimeoutSeconds\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 80: optional i32 backoffStartIntervalInSeconds\n 90: optional ContinueAsNewInitiator initiator\n 100: optional string failureReason\n 110: optional binary failureDetails\n 120: optional binary lastCompletionResult\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct DecisionTaskScheduledEventAttributes {\n 10: optional TaskList taskList\n 20: optional i32 startToCloseTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") attempt\n}\n\nstruct DecisionTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n}\n\nstruct DecisionTaskCompletedEventAttributes {\n 10: optional binary executionContext\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct DecisionTaskTimedOutEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n // for reset workflow\n 40: optional string baseRunId\n 50: optional string newRunId\n 60: optional i64 (js.type = \"Long\") forkEventVersion\n 70: optional string reason\n 80: optional DecisionTaskTimedOutCause cause\n 90: optional string requestId\n}\n\nstruct DecisionTaskFailedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional DecisionTaskFailedCause cause\n 35: optional binary details\n 40: optional string identity\n 50: optional string reason\n // for reset workflow\n 60: optional string baseRunId\n 70: optional string newRunId\n 80: optional i64 (js.type = \"Long\") forkEventVersion\n 90: optional string binaryChecksum\n 100: optional string requestId\n}\n\nstruct ActivityTaskScheduledEventAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional RetryPolicy retryPolicy\n 120: optional Header header\n}\n\nstruct ActivityTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n 40: optional i32 attempt\n 50: optional string lastFailureReason\n 60: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n}\n\nstruct ActivityTaskFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct ActivityTaskTimedOutEventAttributes {\n 05: optional binary details\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: 
optional TimeoutType timeoutType\n // For retry activity, it may have a failure before timeout. It's important to keep those information for debug.\n // Client can also provide the info for making next decision\n 40: optional string lastFailureReason\n 50: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCancelRequestedEventAttributes {\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct RequestCancelActivityTaskFailedEventAttributes{\n 10: optional string activityId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ActivityTaskCanceledEventAttributes {\n 10: optional binary details\n 20: optional i64 (js.type = \"Long\") latestCancelRequestedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct TimerStartedEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct TimerFiredEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct TimerCanceledEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct CancelTimerFailedEventAttributes {\n 10: optional string timerId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct WorkflowExecutionCancelRequestedEventAttributes {\n 10: optional string cause\n 20: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 30: optional WorkflowExecution externalWorkflowExecution\n 40: optional string identity\n 50: optional string requestId\n}\n\nstruct WorkflowExecutionCanceledEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional binary details\n}\n\nstruct MarkerRecordedEventAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional Header header\n}\n\nstruct WorkflowExecutionSignaledEventAttributes {\n 10: optional string signalName\n 20: optional binary input\n 30: optional string identity\n 40: optional string requestId\n}\n\nstruct WorkflowExecutionTerminatedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RequestCancelExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct RequestCancelExternalWorkflowExecutionFailedEventAttributes {\n 10: optional CancelExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionCancelRequestedEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution 
workflowExecution\n}\n\nstruct SignalExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional string signalName\n 50: optional binary input\n 60: optional binary control\n 70: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionFailedEventAttributes {\n 10: optional SignalExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionSignaledEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n}\n\nstruct UpsertWorkflowSearchAttributesEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionInitiatedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Header header\n 150: optional Memo memo\n 160: optional SearchAttributes searchAttributes\n 170: optional i32 delayStartSeconds\n 180: optional i32 jitterStartSeconds\n 190: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 200: optional CronOverlapPolicy cronOverlapPolicy\n}\n\nstruct StartChildWorkflowExecutionFailedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional ChildWorkflowExecutionFailedCause cause\n 50: optional binary control\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ChildWorkflowExecutionStartedEventAttributes {\n 10: optional string domain\n 20: optional i64 (js.type = \"Long\") initiatedEventId\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional Header header\n}\n\nstruct ChildWorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional WorkflowType workflowType\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionCanceledEventAttributes {\n 10: optional binary 
details\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTerminatedEventAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") initiatedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct HistoryEvent {\n 10: optional i64 (js.type = \"Long\") eventId\n 20: optional i64 (js.type = \"Long\") timestamp\n 30: optional EventType eventType\n 35: optional i64 (js.type = \"Long\") version\n 36: optional i64 (js.type = \"Long\") taskId\n 40: optional WorkflowExecutionStartedEventAttributes workflowExecutionStartedEventAttributes\n 50: optional WorkflowExecutionCompletedEventAttributes workflowExecutionCompletedEventAttributes\n 60: optional WorkflowExecutionFailedEventAttributes workflowExecutionFailedEventAttributes\n 70: optional WorkflowExecutionTimedOutEventAttributes workflowExecutionTimedOutEventAttributes\n 80: optional DecisionTaskScheduledEventAttributes decisionTaskScheduledEventAttributes\n 90: optional DecisionTaskStartedEventAttributes decisionTaskStartedEventAttributes\n 100: optional DecisionTaskCompletedEventAttributes decisionTaskCompletedEventAttributes\n 110: optional DecisionTaskTimedOutEventAttributes decisionTaskTimedOutEventAttributes\n 120: optional DecisionTaskFailedEventAttributes decisionTaskFailedEventAttributes\n 130: optional ActivityTaskScheduledEventAttributes activityTaskScheduledEventAttributes\n 140: optional ActivityTaskStartedEventAttributes activityTaskStartedEventAttributes\n 150: optional ActivityTaskCompletedEventAttributes activityTaskCompletedEventAttributes\n 160: optional ActivityTaskFailedEventAttributes activityTaskFailedEventAttributes\n 170: optional ActivityTaskTimedOutEventAttributes activityTaskTimedOutEventAttributes\n 180: optional TimerStartedEventAttributes timerStartedEventAttributes\n 190: optional TimerFiredEventAttributes timerFiredEventAttributes\n 200: optional ActivityTaskCancelRequestedEventAttributes activityTaskCancelRequestedEventAttributes\n 210: optional RequestCancelActivityTaskFailedEventAttributes requestCancelActivityTaskFailedEventAttributes\n 220: optional ActivityTaskCanceledEventAttributes activityTaskCanceledEventAttributes\n 230: optional TimerCanceledEventAttributes timerCanceledEventAttributes\n 240: optional CancelTimerFailedEventAttributes cancelTimerFailedEventAttributes\n 250: optional MarkerRecordedEventAttributes markerRecordedEventAttributes\n 260: optional WorkflowExecutionSignaledEventAttributes workflowExecutionSignaledEventAttributes\n 270: optional WorkflowExecutionTerminatedEventAttributes workflowExecutionTerminatedEventAttributes\n 280: optional WorkflowExecutionCancelRequestedEventAttributes workflowExecutionCancelRequestedEventAttributes\n 290: optional WorkflowExecutionCanceledEventAttributes workflowExecutionCanceledEventAttributes\n 300: optional RequestCancelExternalWorkflowExecutionInitiatedEventAttributes 
requestCancelExternalWorkflowExecutionInitiatedEventAttributes\n 310: optional RequestCancelExternalWorkflowExecutionFailedEventAttributes requestCancelExternalWorkflowExecutionFailedEventAttributes\n 320: optional ExternalWorkflowExecutionCancelRequestedEventAttributes externalWorkflowExecutionCancelRequestedEventAttributes\n 330: optional WorkflowExecutionContinuedAsNewEventAttributes workflowExecutionContinuedAsNewEventAttributes\n 340: optional StartChildWorkflowExecutionInitiatedEventAttributes startChildWorkflowExecutionInitiatedEventAttributes\n 350: optional StartChildWorkflowExecutionFailedEventAttributes startChildWorkflowExecutionFailedEventAttributes\n 360: optional ChildWorkflowExecutionStartedEventAttributes childWorkflowExecutionStartedEventAttributes\n 370: optional ChildWorkflowExecutionCompletedEventAttributes childWorkflowExecutionCompletedEventAttributes\n 380: optional ChildWorkflowExecutionFailedEventAttributes childWorkflowExecutionFailedEventAttributes\n 390: optional ChildWorkflowExecutionCanceledEventAttributes childWorkflowExecutionCanceledEventAttributes\n 400: optional ChildWorkflowExecutionTimedOutEventAttributes childWorkflowExecutionTimedOutEventAttributes\n 410: optional ChildWorkflowExecutionTerminatedEventAttributes childWorkflowExecutionTerminatedEventAttributes\n 420: optional SignalExternalWorkflowExecutionInitiatedEventAttributes signalExternalWorkflowExecutionInitiatedEventAttributes\n 430: optional SignalExternalWorkflowExecutionFailedEventAttributes signalExternalWorkflowExecutionFailedEventAttributes\n 440: optional ExternalWorkflowExecutionSignaledEventAttributes externalWorkflowExecutionSignaledEventAttributes\n 450: optional UpsertWorkflowSearchAttributesEventAttributes upsertWorkflowSearchAttributesEventAttributes\n}\n\nstruct History {\n 10: optional list events\n}\n\nstruct WorkflowExecutionFilter {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct WorkflowTypeFilter {\n 10: optional string name\n}\n\nstruct StartTimeFilter {\n 10: optional i64 (js.type = \"Long\") earliestTime\n 20: optional i64 (js.type = \"Long\") latestTime\n}\n\nstruct DomainInfo {\n 10: optional string name\n 20: optional DomainStatus status\n 30: optional string description\n 40: optional string ownerEmail\n // A key-value map for any customized purpose\n 50: optional map data\n 60: optional string uuid\n}\n\nstruct DomainConfiguration {\n 10: optional i32 workflowExecutionRetentionPeriodInDays\n 20: optional bool emitMetric\n 60: optional IsolationGroupConfiguration isolationgroups\n 70: optional BadBinaries badBinaries\n 80: optional ArchivalStatus historyArchivalStatus\n 90: optional string historyArchivalURI\n 100: optional ArchivalStatus visibilityArchivalStatus\n 110: optional string visibilityArchivalURI\n 120: optional AsyncWorkflowConfiguration AsyncWorkflowConfiguration\n}\n\nstruct FailoverInfo {\n 10: optional i64 (js.type = \"Long\") failoverVersion\n 20: optional i64 (js.type = \"Long\") failoverStartTimestamp\n 30: optional i64 (js.type = \"Long\") failoverExpireTimestamp\n 40: optional i32 completedShardCount\n 50: optional list pendingShards\n}\n\nstruct BadBinaries{\n 10: optional map binaries\n}\n\nstruct BadBinaryInfo{\n 10: optional string reason\n 20: optional string operator\n 30: optional i64 (js.type = \"Long\") createdTimeNano\n}\n\nstruct UpdateDomainInfo {\n 10: optional string description\n 20: optional string ownerEmail\n // A key-value map for any customized purpose\n 30: optional map data\n}\n\nstruct 
ClusterReplicationConfiguration {\n 10: optional string clusterName\n}\n\nstruct DomainReplicationConfiguration {\n // activeClusterName is the name of the active cluster for active-passive domain\n 10: optional string activeClusterName\n\n // clusters is list of all active and passive clusters of domain\n 20: optional list clusters\n\n // activeClusters contains active cluster(s) information for active-active domain\n 30: optional ActiveClusters activeClusters\n}\n\nstruct ActiveClusters {\n // activeClustersByRegion is a map of region name to active cluster info for active-active domain\n 10: optional map activeClustersByRegion\n}\n\n// ActiveClusterInfo contains the configuration of active-active domain's active cluster & failover version for a specific region\nstruct ActiveClusterInfo {\n 10: optional string activeClusterName\n 20: optional i64 (js.type = \"Long\") failoverVersion\n}\n\nstruct RegisterDomainRequest {\n 10: optional string name\n 20: optional string description\n 30: optional string ownerEmail\n 40: optional i32 workflowExecutionRetentionPeriodInDays\n 50: optional bool emitMetric = true\n 60: optional list clusters\n 70: optional string activeClusterName\n // activeClusters is a map of region name to active cluster name for active-active domain\n 75: optional map activeClustersByRegion\n // A key-value map for any customized purpose\n 80: optional map data\n 90: optional string securityToken\n 120: optional bool isGlobalDomain\n 130: optional ArchivalStatus historyArchivalStatus\n 140: optional string historyArchivalURI\n 150: optional ArchivalStatus visibilityArchivalStatus\n 160: optional string visibilityArchivalURI\n}\n\nstruct ListDomainsRequest {\n 10: optional i32 pageSize\n 20: optional binary nextPageToken\n}\n\nstruct ListDomainsResponse {\n 10: optional list domains\n 20: optional binary nextPageToken\n}\n\nstruct DescribeDomainRequest {\n 10: optional string name\n 20: optional string uuid\n}\n\nstruct DescribeDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n 60: optional FailoverInfo failoverInfo\n}\n\nstruct UpdateDomainRequest {\n 10: optional string name\n 20: optional UpdateDomainInfo updatedInfo\n 30: optional DomainConfiguration configuration\n 40: optional DomainReplicationConfiguration replicationConfiguration\n 50: optional string securityToken\n 60: optional string deleteBadBinary\n 70: optional i32 failoverTimeoutInSeconds\n}\n\nstruct UpdateDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct DeprecateDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct DeleteDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n// 
110: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Memo memo\n 141: optional SearchAttributes searchAttributes\n 150: optional Header header\n 160: optional i32 delayStartSeconds\n 170: optional i32 jitterStartSeconds\n 180: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 190: optional CronOverlapPolicy cronOverlapPolicy\n}\n\nstruct StartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct StartWorkflowExecutionAsyncRequest {\n 10: optional StartWorkflowExecutionRequest request\n}\n\nstruct StartWorkflowExecutionAsyncResponse {\n}\n\nstruct RestartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct DiagnoseWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n}\n\nstruct DiagnoseWorkflowExecutionResponse {\n 10: optional string domain\n 20: optional WorkflowExecution diagnosticWorkflowExecution\n}\n\nstruct PollForDecisionTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional string binaryChecksum\n}\n\nstruct PollForDecisionTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") previousStartedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n 51: optional i64 (js.type = 'Long') attempt\n 54: optional i64 (js.type = \"Long\") backlogCountHint\n 60: optional History history\n 70: optional binary nextPageToken\n 80: optional WorkflowQuery query\n 90: optional TaskList WorkflowExecutionTaskList\n 100: optional i64 (js.type = \"Long\") scheduledTimestamp\n 110: optional i64 (js.type = \"Long\") startedTimestamp\n 120: optional map queries\n 130: optional i64 (js.type = 'Long') nextEventId\n 140: optional i64 (js.type = 'Long') totalHistoryBytes\n 150: optional AutoConfigHint autoConfigHint\n}\n\nstruct StickyExecutionAttributes {\n 10: optional TaskList workerTaskList\n 20: optional i32 scheduleToStartTimeoutSeconds\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional list decisions\n 30: optional binary executionContext\n 40: optional string identity\n 50: optional StickyExecutionAttributes stickyAttributes\n 60: optional bool returnNewDecisionTask\n 70: optional bool forceCreateNewDecisionTask\n 80: optional string binaryChecksum\n 90: optional map queryResults\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional PollForDecisionTaskResponse decisionTask\n 20: optional map activitiesToDispatchLocally\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional DecisionTaskFailedCause cause\n 30: optional binary details\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct PollForActivityTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional TaskListMetadata taskListMetadata\n}\n\nstruct PollForActivityTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional string activityId\n 40: optional ActivityType activityType\n 50: optional binary input\n 70: optional i64 (js.type = \"Long\") scheduledTimestamp\n 80: optional i32 scheduleToCloseTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") startedTimestamp\n 
100: optional i32 startToCloseTimeoutSeconds\n 110: optional i32 heartbeatTimeoutSeconds\n 120: optional i32 attempt\n 130: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 140: optional binary heartbeatDetails\n 150: optional WorkflowType workflowType\n 160: optional string workflowDomain\n 170: optional Header header\n 180: optional AutoConfigHint autoConfigHint\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatResponse {\n 10: optional bool cancelRequested\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional binary result\n 30: optional string identity\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional string reason\n 30: optional binary details\n 40: optional string identity\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RespondActivityTaskCompletedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary result\n 60: optional string identity\n}\n\nstruct RespondActivityTaskFailedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional string reason\n 60: optional binary details\n 70: optional string identity\n}\n\nstruct RespondActivityTaskCanceledByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n 40: optional string requestId\n 50: optional string cause\n 60: optional string firstExecutionRunID\n}\n\nstruct GetWorkflowExecutionHistoryRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional i32 maximumPageSize\n 40: optional binary nextPageToken\n 50: optional bool waitForNewEvent\n 60: optional HistoryEventFilterType HistoryEventFilterType\n 70: optional bool skipArchival\n 80: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct GetWorkflowExecutionHistoryResponse {\n 10: optional History history\n 11: optional list rawHistory\n 20: optional binary nextPageToken\n 30: optional bool archived\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string signalName\n 40: optional binary input\n 50: optional string identity\n 60: optional string requestId\n 70: optional binary control\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: 
optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional string signalName\n 120: optional binary signalInput\n 130: optional binary control\n 140: optional RetryPolicy retryPolicy\n 150: optional string cronSchedule\n 160: optional Memo memo\n 161: optional SearchAttributes searchAttributes\n 170: optional Header header\n 180: optional i32 delayStartSeconds\n 190: optional i32 jitterStartSeconds\n 200: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 210: optional CronOverlapPolicy cronOverlapPolicy\n}\n\nstruct SignalWithStartWorkflowExecutionAsyncRequest {\n 10: optional SignalWithStartWorkflowExecutionRequest request\n}\n\nstruct SignalWithStartWorkflowExecutionAsyncResponse {\n}\n\nstruct RestartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional string identity\n}\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional binary details\n 50: optional string identity\n 60: optional string firstExecutionRunID\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional i64 (js.type = \"Long\") decisionFinishEventId\n 50: optional string requestId\n 60: optional bool skipSignalReapply\n}\n\nstruct ResetWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct ListOpenWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n}\n\nstruct ListOpenWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListClosedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n 70: optional WorkflowExecutionCloseStatus statusFilter\n}\n\nstruct ListClosedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListArchivedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListArchivedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct CountWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional string query\n}\n\nstruct CountWorkflowExecutionsResponse {\n 10: optional i64 count\n}\n\nstruct GetSearchAttributesResponse {\n 10: optional map keys\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional WorkflowQuery query\n // QueryRejectCondition can used to reject the query if workflow state does not satisify condition\n 40: optional QueryRejectCondition queryRejectCondition\n 50: optional 
QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct QueryRejected {\n 10: optional WorkflowExecutionCloseStatus closeStatus\n}\n\nstruct QueryWorkflowResponse {\n 10: optional binary queryResult\n 20: optional QueryRejected queryRejected\n}\n\nstruct WorkflowQuery {\n 10: optional string queryType\n 20: optional binary queryArgs\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondQueryTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional QueryTaskCompletedType completedType\n 30: optional binary queryResult\n 40: optional string errorMessage\n 50: optional WorkerVersionInfo workerVersionInfo\n}\n\nstruct WorkflowQueryResult {\n 10: optional QueryResultType resultType\n 20: optional binary answer\n 30: optional string errorMessage\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct PendingActivityInfo {\n 10: optional string activityID\n 20: optional ActivityType activityType\n 30: optional PendingActivityState state\n 40: optional binary heartbeatDetails\n 50: optional i64 (js.type = \"Long\") lastHeartbeatTimestamp\n 60: optional i64 (js.type = \"Long\") lastStartedTimestamp\n 70: optional i32 attempt\n 80: optional i32 maximumAttempts\n 90: optional i64 (js.type = \"Long\") scheduledTimestamp\n 100: optional i64 (js.type = \"Long\") expirationTimestamp\n 110: optional string lastFailureReason\n 120: optional string lastWorkerIdentity\n 130: optional binary lastFailureDetails\n 140: optional string startedWorkerIdentity\n 150: optional i64 (js.type = \"Long\") scheduleID\n}\n\nstruct PendingDecisionInfo {\n 10: optional PendingDecisionState state\n 20: optional i64 (js.type = \"Long\") scheduledTimestamp\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 attempt\n 50: optional i64 (js.type = \"Long\") originalScheduledTimestamp\n 60: optional i64 (js.type = \"Long\") scheduleID\n}\n\nstruct PendingChildExecutionInfo {\n 1: optional string domain\n 10: optional string workflowID\n 20: optional string runID\n 30: optional string workflowTypName\n 40: optional i64 (js.type = \"Long\") initiatedID\n 50: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct DescribeWorkflowExecutionResponse {\n 10: optional WorkflowExecutionConfiguration executionConfiguration\n 20: optional WorkflowExecutionInfo workflowExecutionInfo\n 30: optional list pendingActivities\n 40: optional list pendingChildren\n 50: optional PendingDecisionInfo pendingDecision\n}\n\nstruct DescribeTaskListRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional TaskListType taskListType\n 40: optional bool includeTaskListStatus\n}\n\nstruct DescribeTaskListResponse {\n 10: optional list pollers\n 20: optional TaskListStatus taskListStatus\n}\n\nstruct GetTaskListsByDomainRequest {\n 10: optional string domainName\n}\n\nstruct GetTaskListsByDomainResponse {\n 10: optional map decisionTaskListMap\n 20: optional map activityTaskListMap\n}\n\nstruct ListTaskListPartitionsRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n}\n\nstruct TaskListPartitionMetadata {\n 10: optional string key\n 20: optional string ownerHostName\n}\n\nstruct ListTaskListPartitionsResponse {\n 10: optional 
list activityTaskListPartitions\n 20: optional list decisionTaskListPartitions\n}\n\nstruct IsolationGroupMetrics {\n 10: optional double newTasksPerSecond\n 20: optional i64 (js.type = \"Long\") pollerCount\n}\n\nstruct TaskListStatus {\n 10: optional i64 (js.type = \"Long\") backlogCountHint\n 20: optional i64 (js.type = \"Long\") readLevel\n 30: optional i64 (js.type = \"Long\") ackLevel\n 35: optional double ratePerSecond\n 40: optional TaskIDBlock taskIDBlock\n 50: optional map isolationGroupMetrics\n 60: optional double newTasksPerSecond\n}\n\nstruct TaskIDBlock {\n 10: optional i64 (js.type = \"Long\") startID\n 20: optional i64 (js.type = \"Long\") endID\n}\n\n//At least one of the parameters needs to be provided\nstruct DescribeHistoryHostRequest {\n 10: optional string hostAddress //ip:port\n 20: optional i32 shardIdForHost\n 30: optional WorkflowExecution executionForHost\n}\n\nstruct RemoveTaskRequest {\n 10: optional i32 shardID\n 20: optional i32 type\n 30: optional i64 (js.type = \"Long\") taskID\n 40: optional i64 (js.type = \"Long\") visibilityTimestamp\n 50: optional string clusterName\n}\n\nstruct CloseShardRequest {\n 10: optional i32 shardID\n}\n\nstruct ResetQueueRequest {\n 10: optional i32 shardID\n 20: optional string clusterName\n 30: optional i32 type\n}\n\nstruct DescribeQueueRequest {\n 10: optional i32 shardID\n 20: optional string clusterName\n 30: optional i32 type\n}\n\nstruct DescribeQueueResponse {\n 10: optional list processingQueueStates\n}\n\nstruct DescribeShardDistributionRequest {\n 10: optional i32 pageSize\n 20: optional i32 pageID\n}\n\nstruct DescribeShardDistributionResponse {\n 10: optional i32 numberOfShards\n\n // ShardID to Address (ip:port) map\n 20: optional map shards\n}\n\nstruct DescribeHistoryHostResponse{\n 10: optional i32 numberOfShards\n 20: optional list shardIDs\n 30: optional DomainCacheInfo domainCache\n 40: optional string shardControllerStatus\n 50: optional string address\n}\n\nstruct DomainCacheInfo{\n 10: optional i64 numOfItemsInCacheByID\n 20: optional i64 numOfItemsInCacheByName\n}\n\nenum TaskListType {\n /*\n * Decision type of tasklist\n */\n Decision,\n /*\n * Activity type of tasklist\n */\n Activity,\n}\n\nstruct PollerInfo {\n // Unix Nano\n 10: optional i64 (js.type = \"Long\") lastAccessTime\n 20: optional string identity\n 30: optional double ratePerSecond\n}\n\nstruct RetryPolicy {\n // Interval of the first retry. If coefficient is 1.0 then it is used for all retries.\n 10: optional i32 initialIntervalInSeconds\n\n // Coefficient used to calculate the next retry interval.\n // The next retry interval is previous interval multiplied by the coefficient.\n // Must be 1 or larger.\n 20: optional double backoffCoefficient\n\n // Maximum interval between retries. Exponential backoff leads to interval increase.\n // This value is the cap of the increase. Default is 100x of initial interval.\n 30: optional i32 maximumIntervalInSeconds\n\n // Maximum number of attempts. When exceeded the retries stop even if not expired yet.\n // Must be 1 or bigger. Default is unlimited.\n 40: optional i32 maximumAttempts\n\n // Non-Retriable errors. 
Will stop retrying if error matches this list.\n 50: optional list nonRetriableErrorReasons\n\n // Expiration time for the whole retry process.\n 60: optional i32 expirationIntervalInSeconds\n}\n\n// HistoryBranchRange represents a piece of range for a branch.\nstruct HistoryBranchRange{\n // branchID of original branch forked from\n 10: optional string branchID\n // beinning node for the range, inclusive\n 20: optional i64 beginNodeID\n // ending node for the range, exclusive\n 30: optional i64 endNodeID\n}\n\n// For history persistence to serialize/deserialize branch details\nstruct HistoryBranch{\n 10: optional string treeID\n 20: optional string branchID\n 30: optional list ancestors\n}\n\n// VersionHistoryItem contains signal eventID and the corresponding version\nstruct VersionHistoryItem{\n 10: optional i64 (js.type = \"Long\") eventID\n 20: optional i64 (js.type = \"Long\") version\n}\n\n// VersionHistory contains the version history of a branch\nstruct VersionHistory{\n 10: optional binary branchToken\n 20: optional list items\n}\n\n// VersionHistories contains all version histories from all branches\nstruct VersionHistories{\n 10: optional i32 currentVersionHistoryIndex\n 20: optional list histories\n}\n\n// ReapplyEventsRequest is the request for reapply events API\nstruct ReapplyEventsRequest{\n 10: optional string domainName\n 20: optional WorkflowExecution workflowExecution\n 30: optional DataBlob events\n}\n\n// SupportedClientVersions contains the support versions for client library\nstruct SupportedClientVersions{\n 10: optional string goSdk\n 20: optional string javaSdk\n}\n\n// ClusterInfo contains information about cadence cluster\nstruct ClusterInfo{\n 10: optional SupportedClientVersions supportedClientVersions\n}\n\nstruct RefreshWorkflowTasksRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct FeatureFlags {\n\t10: optional bool WorkflowExecutionAlreadyCompletedErrorEnabled\n}\n\nenum CrossClusterTaskType {\n StartChildExecution\n CancelExecution\n SignalExecution\n RecordChildWorkflowExecutionComplete\n ApplyParentClosePolicy\n}\n\nenum CrossClusterTaskFailedCause {\n DOMAIN_NOT_ACTIVE\n DOMAIN_NOT_EXISTS\n WORKFLOW_ALREADY_RUNNING\n WORKFLOW_NOT_EXISTS\n WORKFLOW_ALREADY_COMPLETED\n UNCATEGORIZED\n}\n\nenum GetTaskFailedCause {\n SERVICE_BUSY\n TIMEOUT\n SHARD_OWNERSHIP_LOST\n UNCATEGORIZED\n}\n\nstruct CrossClusterTaskInfo {\n 10: optional string domainID\n 20: optional string workflowID\n 30: optional string runID\n 40: optional CrossClusterTaskType taskType\n 50: optional i16 taskState\n 60: optional i64 (js.type = \"Long\") taskID\n 70: optional i64 (js.type = \"Long\") visibilityTimestamp\n}\n\nstruct CrossClusterStartChildExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string requestID\n 30: optional i64 (js.type = \"Long\") initiatedEventID\n 40: optional StartChildWorkflowExecutionInitiatedEventAttributes initiatedEventAttributes\n // targetRunID is for scheduling first decision task\n // targetWorkflowID is available in initiatedEventAttributes\n 50: optional string targetRunID\n 60: optional map partitionConfig\n}\n\nstruct CrossClusterStartChildExecutionResponseAttributes {\n 10: optional string runID\n}\n\nstruct CrossClusterCancelExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional string requestID\n 50: optional i64 (js.type = \"Long\") initiatedEventID\n 60: optional bool 
childWorkflowOnly\n}\n\nstruct CrossClusterCancelExecutionResponseAttributes {\n}\n\nstruct CrossClusterSignalExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional string requestID\n 50: optional i64 (js.type = \"Long\") initiatedEventID\n 60: optional bool childWorkflowOnly\n 70: optional string signalName\n 80: optional binary signalInput\n 90: optional binary control\n}\n\nstruct CrossClusterSignalExecutionResponseAttributes {\n}\n\nstruct CrossClusterRecordChildWorkflowExecutionCompleteRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional i64 (js.type = \"Long\") initiatedEventID\n 50: optional HistoryEvent completionEvent\n}\n\nstruct CrossClusterRecordChildWorkflowExecutionCompleteResponseAttributes {\n}\n\nstruct ApplyParentClosePolicyAttributes {\n 10: optional string childDomainID\n 20: optional string childWorkflowID\n 30: optional string childRunID\n 40: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct ApplyParentClosePolicyStatus {\n 10: optional bool completed\n 20: optional CrossClusterTaskFailedCause failedCause\n}\n\nstruct ApplyParentClosePolicyRequest {\n 10: optional ApplyParentClosePolicyAttributes child\n 20: optional ApplyParentClosePolicyStatus status\n}\n\nstruct CrossClusterApplyParentClosePolicyRequestAttributes {\n 10: optional list children\n}\n\nstruct ApplyParentClosePolicyResult {\n 10: optional ApplyParentClosePolicyAttributes child\n 20: optional CrossClusterTaskFailedCause failedCause\n}\n\nstruct CrossClusterApplyParentClosePolicyResponseAttributes {\n 10: optional list childrenStatus\n}\n\nstruct CrossClusterTaskRequest {\n 10: optional CrossClusterTaskInfo taskInfo\n 20: optional CrossClusterStartChildExecutionRequestAttributes startChildExecutionAttributes\n 30: optional CrossClusterCancelExecutionRequestAttributes cancelExecutionAttributes\n 40: optional CrossClusterSignalExecutionRequestAttributes signalExecutionAttributes\n 50: optional CrossClusterRecordChildWorkflowExecutionCompleteRequestAttributes recordChildWorkflowExecutionCompleteAttributes\n 60: optional CrossClusterApplyParentClosePolicyRequestAttributes applyParentClosePolicyAttributes\n}\n\nstruct CrossClusterTaskResponse {\n 10: optional i64 (js.type = \"Long\") taskID\n 20: optional CrossClusterTaskType taskType\n 30: optional i16 taskState\n 40: optional CrossClusterTaskFailedCause failedCause\n 50: optional CrossClusterStartChildExecutionResponseAttributes startChildExecutionAttributes\n 60: optional CrossClusterCancelExecutionResponseAttributes cancelExecutionAttributes\n 70: optional CrossClusterSignalExecutionResponseAttributes signalExecutionAttributes\n 80: optional CrossClusterRecordChildWorkflowExecutionCompleteResponseAttributes recordChildWorkflowExecutionCompleteAttributes\n 90: optional CrossClusterApplyParentClosePolicyResponseAttributes applyParentClosePolicyAttributes\n}\n\nstruct GetCrossClusterTasksRequest {\n 10: optional list shardIDs\n 20: optional string targetCluster\n}\n\nstruct GetCrossClusterTasksResponse {\n 10: optional map> tasksByShard\n 20: optional map failedCauseByShard\n}\n\nstruct RespondCrossClusterTasksCompletedRequest {\n 10: optional i32 shardID\n 20: optional string targetCluster\n 30: optional list taskResponses\n 40: optional bool fetchNewTasks\n}\n\nstruct RespondCrossClusterTasksCompletedResponse {\n 10: optional list tasks\n}\n\nenum 
IsolationGroupState {\n INVALID,\n HEALTHY,\n DRAINED,\n}\n\nstruct IsolationGroupPartition {\n 10: optional string name\n 20: optional IsolationGroupState state\n}\n\nstruct IsolationGroupConfiguration {\n 10: optional list isolationGroups\n}\n\nstruct AsyncWorkflowConfiguration {\n 10: optional bool enabled\n // PredefinedQueueName is the name of the predefined queue in cadence server config's asyncWorkflowQueues\n 20: optional string predefinedQueueName\n // queueType is the type of the queue if predefined_queue_name is not used\n 30: optional string queueType\n // queueConfig is the configuration for the queue if predefined_queue_name is not used\n 40: optional DataBlob queueConfig\n}\n\n/**\n* Any is a logical duplicate of google.protobuf.Any.\n*\n* The intent of the type is the same, but it is not intended to be directly\n* compatible with google.protobuf.Any or any Thrift equivalent - this blob is\n* RPC-type agnostic by design (as the underlying data may be transported over\n* proto or thrift), and the data-bytes may be in any encoding.\n*\n* This is intentionally different from DataBlob, which supports only a handful\n* of known encodings so it can be interpreted everywhere. Any supports literally\n* any contents, and needs to be considered opaque until it is given to something\n* that is expecting it.\n*\n* See ValueType to interpret the contents.\n**/\nstruct Any {\n // Type-string describing value's contents, and intentionally avoiding the\n // name \"type\" as it is often a special term.\n // This should usually be a hard-coded string of some kind.\n 10: optional string ValueType\n // Arbitrarily-encoded bytes, to be deserialized by a runtime implementation.\n // The contents are described by ValueType.\n 20: optional binary Value\n}\n\nstruct AutoConfigHint {\n 10: optional bool enableAutoConfig\n 20: optional i64 pollerWaitTimeInMs\n}\n\nstruct QueueState {\n 10: optional map virtualQueueStates\n 20: optional TaskKey exclusiveMaxReadLevel\n}\n\nstruct VirtualQueueState {\n 10: optional list virtualSliceStates\n}\n\nstruct VirtualSliceState {\n 10: optional TaskRange taskRange\n}\n\nstruct TaskRange {\n 10: optional TaskKey inclusiveMin\n 20: optional TaskKey exclusiveMax\n}\n\nstruct TaskKey {\n 10: optional i64 scheduledTimeNano\n 20: optional i64 taskID\n}\n" diff --git a/go.mod b/go.mod index b8c7bd3f6..966526d02 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/robfig/cron v1.2.0 github.com/stretchr/testify v1.9.0 github.com/uber-go/tally v3.3.15+incompatible - github.com/uber/cadence-idl v0.0.0-20241126065313-57bd6876d48f + github.com/uber/cadence-idl v0.0.0-20250604213822-24397860b164 github.com/uber/jaeger-client-go v2.22.1+incompatible github.com/uber/tchannel-go v1.32.1 go.uber.org/atomic v1.11.0 diff --git a/go.sum b/go.sum index 6767fd226..ce74d387d 100644 --- a/go.sum +++ b/go.sum @@ -201,8 +201,8 @@ github.com/uber-go/mapdecode v1.0.0/go.mod h1:b5nP15FwXTgpjTjeA9A2uTHXV5UJCl4arw github.com/uber-go/tally v3.3.12+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= github.com/uber-go/tally v3.3.15+incompatible h1:9hLSgNBP28CjIaDmAuRTq9qV+UZY+9PcvAkXO4nNMwg= github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= -github.com/uber/cadence-idl v0.0.0-20241126065313-57bd6876d48f h1:U2nI6IKh80rrueDb2G3wuhCkCHYCsLp9EFBazeTs7Dk= -github.com/uber/cadence-idl v0.0.0-20241126065313-57bd6876d48f/go.mod h1:oyUK7GCNCRHCCyWyzifSzXpVrRYVBbAMHAzF5dXiKws= +github.com/uber/cadence-idl 
v0.0.0-20250604213822-24397860b164 h1:Awn9X7dqzz3xH7N5ydEsrFrnlm9/tDW4srT9Sl+PISs= +github.com/uber/cadence-idl v0.0.0-20250604213822-24397860b164/go.mod h1:oyUK7GCNCRHCCyWyzifSzXpVrRYVBbAMHAzF5dXiKws= github.com/uber/jaeger-client-go v2.22.1+incompatible h1:NHcubEkVbahf9t3p75TOCR83gdUHXjRJvjoBh1yACsM= github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= diff --git a/idls b/idls index 57bd6876d..24397860b 160000 --- a/idls +++ b/idls @@ -1 +1 @@ -Subproject commit 57bd6876d48fe25d17d65a4035068366ba1ec174 +Subproject commit 24397860b1644c22d79cb88e05e9c32a6b479293 diff --git a/internal/client.go b/internal/client.go index c39382365..b0078c467 100644 --- a/internal/client.go +++ b/internal/client.go @@ -471,6 +471,13 @@ type ( // This will only be used and override DelayStart and JitterStart if provided in the first run // Optional: defaulted to Unix epoch time FirstRunAt time.Time + + // CronOverlapPolicy - Policy for handling cron workflow overlaps. + // Currently supported values are: + // - CronOverlapPolicySkip: skip the new execution if the previous one is still running + // - CronOverlapPolicyBufferOne: buffer one execution if the previous one is still running + // Optional: defaulted to CronOverlapPolicySkip + CronOverlapPolicy s.CronOverlapPolicy } // RetryPolicy defines the retry policy. diff --git a/internal/common/convert.go b/internal/common/convert.go index ffeb7501a..fb1cada29 100644 --- a/internal/common/convert.go +++ b/internal/common/convert.go @@ -91,6 +91,11 @@ func QueryResultTypePtr(t s.QueryResultType) *s.QueryResultType { return PtrOf(t) } +// CronOverlapPolicyPtr makes a copy and returns the pointer to a CronOverlapPolicy. +func CronOverlapPolicyPtr(t s.CronOverlapPolicy) *s.CronOverlapPolicy { + return PtrOf(t) +} + // PtrOf makes a copy and returns the pointer to a value. 
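A minimal usage sketch for the new option (not part of this patch): it assumes the public client package picks up the new field via its alias of internal.StartWorkflowOptions, and it uses the generated shared.CronOverlapPolicyBufferone constant exercised by the unit tests in this series; the workflow ID, task list, cron expression, and workflow name below are purely illustrative.

package main

import (
	"context"
	"time"

	"go.uber.org/cadence/.gen/go/shared"
	"go.uber.org/cadence/client"
)

// startBufferedCron starts a cron workflow that buffers one run (instead of
// skipping it) when the previous run is still in progress.
func startBufferedCron(ctx context.Context, c client.Client) error {
	opts := client.StartWorkflowOptions{
		ID:                              "cron-overlap-example", // illustrative workflow ID
		TaskList:                        "example-tasklist",     // illustrative task list
		ExecutionStartToCloseTimeout:    10 * time.Minute,
		DecisionTaskStartToCloseTimeout: 10 * time.Second,
		CronSchedule:                    "*/5 * * * *", // every 5 minutes
		// New option introduced by this patch; when unset it falls back to the
		// default "skipped" behaviour described in internal/client.go.
		CronOverlapPolicy: shared.CronOverlapPolicyBufferone,
	}
	_, err := c.StartWorkflow(ctx, opts, "exampleCronWorkflow")
	return err
}

The same option is threaded through getSignalWithStartRequest as well, so cron workflows started via SignalWithStartWorkflow carry the chosen overlap policy too.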
func PtrOf[T any](v T) *T { return &v diff --git a/internal/compatibility/enum_test.go b/internal/compatibility/enum_test.go index 3f383c3ab..e565d210c 100644 --- a/internal/compatibility/enum_test.go +++ b/internal/compatibility/enum_test.go @@ -335,3 +335,14 @@ func TestWorkflowIDReusePolicy(t *testing.T) { assert.Panics(t, func() { proto.WorkflowIdReusePolicy(shared.WorkflowIdReusePolicy(UnknownValue).Ptr()) }) assert.Panics(t, func() { thrift.WorkflowIdReusePolicy(apiv1.WorkflowIdReusePolicy(UnknownValue)) }) } + +func TestCronOverlapPolicy(t *testing.T) { + for _, v := range []apiv1.CronOverlapPolicy{ + apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_INVALID, + apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_SKIPPED, + apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_BUFFER_ONE, + } { + assert.Equal(t, v, proto.CronOverlapPolicy(thrift.CronOverlapPolicy(v))) + } + assert.Panics(t, func() { proto.CronOverlapPolicy(thrift.CronOverlapPolicy(999)) }) +} diff --git a/internal/compatibility/proto/enum.go b/internal/compatibility/proto/enum.go index 3b42a8992..e8a868830 100644 --- a/internal/compatibility/proto/enum.go +++ b/internal/compatibility/proto/enum.go @@ -393,3 +393,16 @@ func ChildWorkflowExecutionFailedCause(t *shared.ChildWorkflowExecutionFailedCau } panic("unexpected enum value") } + +func CronOverlapPolicy(t *shared.CronOverlapPolicy) apiv1.CronOverlapPolicy { + if t == nil { + return apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_INVALID + } + switch *t { + case shared.CronOverlapPolicySkipped: + return apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_SKIPPED + case shared.CronOverlapPolicyBufferone: + return apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_BUFFER_ONE + } + panic("unexpected enum value") +} diff --git a/internal/compatibility/thrift/enum.go b/internal/compatibility/thrift/enum.go index 71cb8a447..59b98c03c 100644 --- a/internal/compatibility/thrift/enum.go +++ b/internal/compatibility/thrift/enum.go @@ -381,3 +381,15 @@ func ChildWorkflowExecutionFailedCause(t apiv1.ChildWorkflowExecutionFailedCause } panic("unexpected enum value") } + +func CronOverlapPolicy(t apiv1.CronOverlapPolicy) *shared.CronOverlapPolicy { + switch t { + case apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_INVALID: + return nil + case apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_SKIPPED: + return shared.CronOverlapPolicySkipped.Ptr() + case apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_BUFFER_ONE: + return shared.CronOverlapPolicyBufferone.Ptr() + } + panic("unexpected enum value") +} diff --git a/internal/internal_workflow_client.go b/internal/internal_workflow_client.go index 58264df0a..4b7f00428 100644 --- a/internal/internal_workflow_client.go +++ b/internal/internal_workflow_client.go @@ -1092,6 +1092,7 @@ func (wc *workflowClient) getWorkflowStartRequest( DelayStartSeconds: common.Int32Ptr(delayStartSeconds), JitterStartSeconds: common.Int32Ptr(jitterStartSeconds), FirstRunAtTimestamp: common.Int64Ptr(firstRunAtTimestamp), + CronOverlapPolicy: common.CronOverlapPolicyPtr(options.CronOverlapPolicy), } return startRequest, nil @@ -1194,6 +1195,7 @@ func (wc *workflowClient) getSignalWithStartRequest( DelayStartSeconds: common.Int32Ptr(delayStartSeconds), JitterStartSeconds: common.Int32Ptr(jitterStartSeconds), FirstRunAtTimestamp: common.Int64Ptr(firstRunAtTimestamp), + CronOverlapPolicy: common.CronOverlapPolicyPtr(options.CronOverlapPolicy), } return signalWithStartRequest, nil From fa0418d76c7dd72719d2ddc11a4009f7e565211d Mon Sep 17 00:00:00 2001 From: Tim Li Date: Wed, 25 Jun 2025 14:01:57 -0700 Subject: 
[PATCH 2/8] update idl, fix unit test --- go.mod | 2 +- go.sum | 2 ++ idls | 2 +- internal/common/auth/service_wrapper.go | 5 +++++ internal/common/isolationgroup/service_wrapper.go | 5 +++++ internal/common/metrics/service_wrapper.go | 9 +++++++++ internal/compatibility/adapter.go | 5 +++++ internal/internal_workflow_client_test.go | 2 ++ 8 files changed, 30 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 966526d02..095765961 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/robfig/cron v1.2.0 github.com/stretchr/testify v1.9.0 github.com/uber-go/tally v3.3.15+incompatible - github.com/uber/cadence-idl v0.0.0-20250604213822-24397860b164 + github.com/uber/cadence-idl v0.0.0-20250616185004-cc6f52f87bc6 github.com/uber/jaeger-client-go v2.22.1+incompatible github.com/uber/tchannel-go v1.32.1 go.uber.org/atomic v1.11.0 diff --git a/go.sum b/go.sum index ce74d387d..189e9944c 100644 --- a/go.sum +++ b/go.sum @@ -203,6 +203,8 @@ github.com/uber-go/tally v3.3.15+incompatible h1:9hLSgNBP28CjIaDmAuRTq9qV+UZY+9P github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= github.com/uber/cadence-idl v0.0.0-20250604213822-24397860b164 h1:Awn9X7dqzz3xH7N5ydEsrFrnlm9/tDW4srT9Sl+PISs= github.com/uber/cadence-idl v0.0.0-20250604213822-24397860b164/go.mod h1:oyUK7GCNCRHCCyWyzifSzXpVrRYVBbAMHAzF5dXiKws= +github.com/uber/cadence-idl v0.0.0-20250616185004-cc6f52f87bc6 h1:YJlEu9Unzifwdn6SuE4rrl4zJ5lop5gBfSX8AyodTww= +github.com/uber/cadence-idl v0.0.0-20250616185004-cc6f52f87bc6/go.mod h1:oyUK7GCNCRHCCyWyzifSzXpVrRYVBbAMHAzF5dXiKws= github.com/uber/jaeger-client-go v2.22.1+incompatible h1:NHcubEkVbahf9t3p75TOCR83gdUHXjRJvjoBh1yACsM= github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= diff --git a/idls b/idls index 24397860b..cc6f52f87 160000 --- a/idls +++ b/idls @@ -1 +1 @@ -Subproject commit 24397860b1644c22d79cb88e05e9c32a6b479293 +Subproject commit cc6f52f87bc6e3b78906f518e3a993bcb88c51b5 diff --git a/internal/common/auth/service_wrapper.go b/internal/common/auth/service_wrapper.go index b4b352326..9e35eda2c 100644 --- a/internal/common/auth/service_wrapper.go +++ b/internal/common/auth/service_wrapper.go @@ -511,3 +511,8 @@ func (w *workflowServiceAuthWrapper) RestartWorkflowExecution(ctx context.Contex opts = append(opts, *tokenHeader) return w.service.RestartWorkflowExecution(ctx, request, opts...) } + +func (w *workflowServiceAuthWrapper) DeleteDomain(ctx context.Context, DeleteRequest *shared.DeleteDomainRequest, opts ...yarpc.CallOption) error { + //TODO implement me + return nil +} diff --git a/internal/common/isolationgroup/service_wrapper.go b/internal/common/isolationgroup/service_wrapper.go index b04bd8a9b..ee49d9a91 100644 --- a/internal/common/isolationgroup/service_wrapper.go +++ b/internal/common/isolationgroup/service_wrapper.go @@ -311,3 +311,8 @@ func (w *workflowServiceIsolationGroupWrapper) RestartWorkflowExecution(ctx cont opts = append(opts, w.getIsolationGroupIdentifier()) return w.service.RestartWorkflowExecution(ctx, request, opts...) 
} + +func (w *workflowServiceIsolationGroupWrapper) DeleteDomain(ctx context.Context, DeleteRequest *shared.DeleteDomainRequest, opts ...yarpc.CallOption) error { + //TODO implement me + return nil +} diff --git a/internal/common/metrics/service_wrapper.go b/internal/common/metrics/service_wrapper.go index 85486a6d0..3e725dfa3 100644 --- a/internal/common/metrics/service_wrapper.go +++ b/internal/common/metrics/service_wrapper.go @@ -93,6 +93,7 @@ const ( scopeRefreshWorkflowTasks = CadenceMetricsPrefix + "RefreshWorkflowTasks" scopeNameGetTaskListsByDomain = CadenceMetricsPrefix + "GetTaskListsByDomain" scopeRestartWorkflowExecution = CadenceMetricsPrefix + "RestartWorkflowExecution" + scopeDeleteDomain = CadenceMetricsPrefix + "DeleteDomain" ) // NewWorkflowServiceWrapper creates a new wrapper to WorkflowService that will emit metrics for each service call. @@ -444,3 +445,11 @@ func (w *workflowServiceMetricsWrapper) RestartWorkflowExecution(ctx context.Con scope.handleError(err) return resp, err } + +func (w *workflowServiceMetricsWrapper) DeleteDomain(ctx context.Context, request *shared.DeleteDomainRequest, opts ...yarpc.CallOption) error { + //TODO implement me + scope := w.getOperationScope(scopeDeleteDomain) + err := w.service.DeleteDomain(ctx, request, opts...) + scope.handleError(err) + return err +} diff --git a/internal/compatibility/adapter.go b/internal/compatibility/adapter.go index 62cbe2e71..305864723 100644 --- a/internal/compatibility/adapter.go +++ b/internal/compatibility/adapter.go @@ -268,6 +268,11 @@ func (a thrift2protoAdapter) RestartWorkflowExecution(ctx context.Context, reque return thrift.RestartWorkflowExecutionResponse(response), proto.Error(err) } +func (a thrift2protoAdapter) DeleteDomain(ctx context.Context, DeleteRequest *shared.DeleteDomainRequest, opts ...yarpc.CallOption) error { + //TODO implement me + return nil +} + type domainAPIthriftAdapter struct { service workflowserviceclient.Interface } diff --git a/internal/internal_workflow_client_test.go b/internal/internal_workflow_client_test.go index 024ec8a55..a2bb63731 100644 --- a/internal/internal_workflow_client_test.go +++ b/internal/internal_workflow_client_test.go @@ -2539,6 +2539,7 @@ func TestGetWorkflowStartRequest(t *testing.T) { DecisionTaskStartToCloseTimeout: 5 * time.Second, DelayStart: 0 * time.Second, JitterStart: 0 * time.Second, + CronOverlapPolicy: shared.CronOverlapPolicyBufferone, }, workflowFunc: func(ctx Context) {}, wantRequest: &shared.StartWorkflowExecutionRequest{ @@ -2558,6 +2559,7 @@ func TestGetWorkflowStartRequest(t *testing.T) { CronSchedule: common.StringPtr(""), Header: &shared.Header{Fields: map[string][]byte{}}, WorkflowIdReusePolicy: shared.WorkflowIdReusePolicyAllowDuplicateFailedOnly.Ptr(), + CronOverlapPolicy: shared.CronOverlapPolicyBufferone.Ptr(), }, }, { From 20cfac68f2b5f52e80c2d9e879b874c4c4d8cdc5 Mon Sep 17 00:00:00 2001 From: Tim Li Date: Wed, 25 Jun 2025 14:26:05 -0700 Subject: [PATCH 3/8] lint --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index 189e9944c..ae709a358 100644 --- a/go.sum +++ b/go.sum @@ -201,8 +201,6 @@ github.com/uber-go/mapdecode v1.0.0/go.mod h1:b5nP15FwXTgpjTjeA9A2uTHXV5UJCl4arw github.com/uber-go/tally v3.3.12+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= github.com/uber-go/tally v3.3.15+incompatible h1:9hLSgNBP28CjIaDmAuRTq9qV+UZY+9PcvAkXO4nNMwg= github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= -github.com/uber/cadence-idl 
v0.0.0-20250604213822-24397860b164 h1:Awn9X7dqzz3xH7N5ydEsrFrnlm9/tDW4srT9Sl+PISs= -github.com/uber/cadence-idl v0.0.0-20250604213822-24397860b164/go.mod h1:oyUK7GCNCRHCCyWyzifSzXpVrRYVBbAMHAzF5dXiKws= github.com/uber/cadence-idl v0.0.0-20250616185004-cc6f52f87bc6 h1:YJlEu9Unzifwdn6SuE4rrl4zJ5lop5gBfSX8AyodTww= github.com/uber/cadence-idl v0.0.0-20250616185004-cc6f52f87bc6/go.mod h1:oyUK7GCNCRHCCyWyzifSzXpVrRYVBbAMHAzF5dXiKws= github.com/uber/jaeger-client-go v2.22.1+incompatible h1:NHcubEkVbahf9t3p75TOCR83gdUHXjRJvjoBh1yACsM= From fbe78b9f96e168da91a5b35f753b19d1c4ea5dd6 Mon Sep 17 00:00:00 2001 From: Tim Li Date: Wed, 25 Jun 2025 14:39:17 -0700 Subject: [PATCH 4/8] unit tests --- internal/common/auth/service_wrapper.go | 9 +++++++-- internal/common/auth/service_wrapper_test.go | 16 ++++++++++++++++ internal/common/convert_test.go | 1 + .../common/isolationgroup/service_wrapper.go | 4 ++-- .../isolationgroup/service_wrapper_test.go | 8 ++++++++ 5 files changed, 34 insertions(+), 4 deletions(-) diff --git a/internal/common/auth/service_wrapper.go b/internal/common/auth/service_wrapper.go index 9e35eda2c..db00836fe 100644 --- a/internal/common/auth/service_wrapper.go +++ b/internal/common/auth/service_wrapper.go @@ -513,6 +513,11 @@ func (w *workflowServiceAuthWrapper) RestartWorkflowExecution(ctx context.Contex } func (w *workflowServiceAuthWrapper) DeleteDomain(ctx context.Context, DeleteRequest *shared.DeleteDomainRequest, opts ...yarpc.CallOption) error { - //TODO implement me - return nil + tokenHeader, err := w.getYarpcJWTHeader() + if err != nil { + return err + } + opts = append(opts, *tokenHeader) + err = w.service.DeleteDomain(ctx, DeleteRequest, opts...) + return err } diff --git a/internal/common/auth/service_wrapper_test.go b/internal/common/auth/service_wrapper_test.go index 6126e3186..24f636dca 100644 --- a/internal/common/auth/service_wrapper_test.go +++ b/internal/common/auth/service_wrapper_test.go @@ -731,3 +731,19 @@ func (s *serviceWrapperSuite) TestGetClusterInfoInvalidToken() { _, err := sw.GetClusterInfo(ctx) s.EqualError(err, "error") } + +func (s *serviceWrapperSuite) TestDeleteDomainValidToken() { + s.Service.EXPECT().DeleteDomain(gomock.Any(), gomock.Any(), gomock.Any()).Times(1) + sw := NewWorkflowServiceWrapper(s.Service, s.AuthProvider) + ctx, _ := thrift.NewContext(time.Minute) + err := sw.DeleteDomain(ctx, &shared.DeleteDomainRequest{}) + s.NoError(err) +} + +func (s *serviceWrapperSuite) TestDeleteDomainInvalidToken() { + s.AuthProvider = newJWTAuthIncorrect() + sw := NewWorkflowServiceWrapper(s.Service, s.AuthProvider) + ctx, _ := thrift.NewContext(time.Minute) + err := sw.DeleteDomain(ctx, &shared.DeleteDomainRequest{}) + s.EqualError(err, "error") +} diff --git a/internal/common/convert_test.go b/internal/common/convert_test.go index 94e6fa3bf..6db1d71b9 100644 --- a/internal/common/convert_test.go +++ b/internal/common/convert_test.go @@ -49,6 +49,7 @@ func TestPtrHelpers(t *testing.T) { assert.Equal(t, s.QueryTaskCompletedTypeCompleted, *QueryTaskCompletedTypePtr(s.QueryTaskCompletedTypeCompleted)) assert.Equal(t, s.TaskListKindNormal, *TaskListKindPtr(s.TaskListKindNormal)) assert.Equal(t, s.QueryResultTypeFailed, *QueryResultTypePtr(s.QueryResultTypeFailed)) + assert.Equal(t, s.CronOverlapPolicyBufferone, *CronOverlapPolicyPtr(s.CronOverlapPolicyBufferone)) } func TestCeilHelpers(t *testing.T) { diff --git a/internal/common/isolationgroup/service_wrapper.go b/internal/common/isolationgroup/service_wrapper.go index ee49d9a91..c4a572f0a 100644 --- 
a/internal/common/isolationgroup/service_wrapper.go +++ b/internal/common/isolationgroup/service_wrapper.go @@ -313,6 +313,6 @@ func (w *workflowServiceIsolationGroupWrapper) RestartWorkflowExecution(ctx cont } func (w *workflowServiceIsolationGroupWrapper) DeleteDomain(ctx context.Context, DeleteRequest *shared.DeleteDomainRequest, opts ...yarpc.CallOption) error { - //TODO implement me - return nil + opts = append(opts, w.getIsolationGroupIdentifier()) + return w.service.DeleteDomain(ctx, DeleteRequest, opts...) } diff --git a/internal/common/isolationgroup/service_wrapper_test.go b/internal/common/isolationgroup/service_wrapper_test.go index 8866c3fea..3d0533542 100644 --- a/internal/common/isolationgroup/service_wrapper_test.go +++ b/internal/common/isolationgroup/service_wrapper_test.go @@ -398,6 +398,14 @@ func TestAPICalls(t *testing.T) { }, expectedResponse: &shared.ClusterInfo{}, }, + "DeleteDomain": { + action: func(ctx context.Context, sw workflowserviceclient.Interface) (interface{}, error) { + return nil, sw.DeleteDomain(ctx, &shared.DeleteDomainRequest{}) + }, + affordance: func(m *workflowservicetest.MockClient) { + m.EXPECT().DeleteDomain(gomock.Any(), &shared.DeleteDomainRequest{}, gomock.Any()).Times(1).Return(nil) + }, + }, } for name, td := range tests { From 61ad3e282712296d9caf5a286a85dfe6019cf59f Mon Sep 17 00:00:00 2001 From: Tim Li Date: Wed, 25 Jun 2025 14:54:34 -0700 Subject: [PATCH 5/8] regenerate types file --- .gen/go/shared/shared.go | 1220 +++++++++++++++++++++++++++++++++----- 1 file changed, 1059 insertions(+), 161 deletions(-) diff --git a/.gen/go/shared/shared.go b/.gen/go/shared/shared.go index aaaeddbe1..aaff55f98 100644 --- a/.gen/go/shared/shared.go +++ b/.gen/go/shared/shared.go @@ -362,6 +362,463 @@ func (v *ActiveClusterInfo) IsSetFailoverVersion() bool { return v != nil && v.FailoverVersion != nil } +type ActiveClusterSelectionPolicy struct { + Strategy *ActiveClusterSelectionStrategy `json:"strategy,omitempty"` + StickyRegion *string `json:"stickyRegion,omitempty"` + ExternalEntityType *string `json:"externalEntityType,omitempty"` + ExternalEntityKey *string `json:"externalEntityKey,omitempty"` +} + +// ToWire translates a ActiveClusterSelectionPolicy struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *ActiveClusterSelectionPolicy) ToWire() (wire.Value, error) { + var ( + fields [4]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.Strategy != nil { + w, err = v.Strategy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.StickyRegion != nil { + w, err = wire.NewValueString(*(v.StickyRegion)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + if v.ExternalEntityType != nil { + w, err = wire.NewValueString(*(v.ExternalEntityType)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 30, Value: w} + i++ + } + if v.ExternalEntityKey != nil { + w, err = wire.NewValueString(*(v.ExternalEntityKey)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 40, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _ActiveClusterSelectionStrategy_Read(w wire.Value) (ActiveClusterSelectionStrategy, error) { + var v ActiveClusterSelectionStrategy + err := v.FromWire(w) + return v, err +} + +// FromWire deserializes a ActiveClusterSelectionPolicy struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a ActiveClusterSelectionPolicy struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v ActiveClusterSelectionPolicy +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *ActiveClusterSelectionPolicy) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TI32 { + var x ActiveClusterSelectionStrategy + x, err = _ActiveClusterSelectionStrategy_Read(field.Value) + v.Strategy = &x + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TBinary { + var x string + x, err = field.Value.GetString(), error(nil) + v.StickyRegion = &x + if err != nil { + return err + } + + } + case 30: + if field.Value.Type() == wire.TBinary { + var x string + x, err = field.Value.GetString(), error(nil) + v.ExternalEntityType = &x + if err != nil { + return err + } + + } + case 40: + if field.Value.Type() == wire.TBinary { + var x string + x, err = field.Value.GetString(), error(nil) + v.ExternalEntityKey = &x + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a ActiveClusterSelectionPolicy +// struct. 
+func (v *ActiveClusterSelectionPolicy) String() string { + if v == nil { + return "" + } + + var fields [4]string + i := 0 + if v.Strategy != nil { + fields[i] = fmt.Sprintf("Strategy: %v", *(v.Strategy)) + i++ + } + if v.StickyRegion != nil { + fields[i] = fmt.Sprintf("StickyRegion: %v", *(v.StickyRegion)) + i++ + } + if v.ExternalEntityType != nil { + fields[i] = fmt.Sprintf("ExternalEntityType: %v", *(v.ExternalEntityType)) + i++ + } + if v.ExternalEntityKey != nil { + fields[i] = fmt.Sprintf("ExternalEntityKey: %v", *(v.ExternalEntityKey)) + i++ + } + + return fmt.Sprintf("ActiveClusterSelectionPolicy{%v}", strings.Join(fields[:i], ", ")) +} + +func _ActiveClusterSelectionStrategy_EqualsPtr(lhs, rhs *ActiveClusterSelectionStrategy) bool { + if lhs != nil && rhs != nil { + + x := *lhs + y := *rhs + return x.Equals(y) + } + return lhs == nil && rhs == nil +} + +// Equals returns true if all the fields of this ActiveClusterSelectionPolicy match the +// provided ActiveClusterSelectionPolicy. +// +// This function performs a deep comparison. +func (v *ActiveClusterSelectionPolicy) Equals(rhs *ActiveClusterSelectionPolicy) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !_ActiveClusterSelectionStrategy_EqualsPtr(v.Strategy, rhs.Strategy) { + return false + } + if !_String_EqualsPtr(v.StickyRegion, rhs.StickyRegion) { + return false + } + if !_String_EqualsPtr(v.ExternalEntityType, rhs.ExternalEntityType) { + return false + } + if !_String_EqualsPtr(v.ExternalEntityKey, rhs.ExternalEntityKey) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of ActiveClusterSelectionPolicy. +func (v *ActiveClusterSelectionPolicy) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.Strategy != nil { + err = multierr.Append(err, enc.AddObject("strategy", *v.Strategy)) + } + if v.StickyRegion != nil { + enc.AddString("stickyRegion", *v.StickyRegion) + } + if v.ExternalEntityType != nil { + enc.AddString("externalEntityType", *v.ExternalEntityType) + } + if v.ExternalEntityKey != nil { + enc.AddString("externalEntityKey", *v.ExternalEntityKey) + } + return err +} + +// GetStrategy returns the value of Strategy if it is set or its +// zero value if it is unset. +func (v *ActiveClusterSelectionPolicy) GetStrategy() (o ActiveClusterSelectionStrategy) { + if v != nil && v.Strategy != nil { + return *v.Strategy + } + + return +} + +// IsSetStrategy returns true if Strategy is not nil. +func (v *ActiveClusterSelectionPolicy) IsSetStrategy() bool { + return v != nil && v.Strategy != nil +} + +// GetStickyRegion returns the value of StickyRegion if it is set or its +// zero value if it is unset. +func (v *ActiveClusterSelectionPolicy) GetStickyRegion() (o string) { + if v != nil && v.StickyRegion != nil { + return *v.StickyRegion + } + + return +} + +// IsSetStickyRegion returns true if StickyRegion is not nil. +func (v *ActiveClusterSelectionPolicy) IsSetStickyRegion() bool { + return v != nil && v.StickyRegion != nil +} + +// GetExternalEntityType returns the value of ExternalEntityType if it is set or its +// zero value if it is unset. +func (v *ActiveClusterSelectionPolicy) GetExternalEntityType() (o string) { + if v != nil && v.ExternalEntityType != nil { + return *v.ExternalEntityType + } + + return +} + +// IsSetExternalEntityType returns true if ExternalEntityType is not nil. 
+func (v *ActiveClusterSelectionPolicy) IsSetExternalEntityType() bool { + return v != nil && v.ExternalEntityType != nil +} + +// GetExternalEntityKey returns the value of ExternalEntityKey if it is set or its +// zero value if it is unset. +func (v *ActiveClusterSelectionPolicy) GetExternalEntityKey() (o string) { + if v != nil && v.ExternalEntityKey != nil { + return *v.ExternalEntityKey + } + + return +} + +// IsSetExternalEntityKey returns true if ExternalEntityKey is not nil. +func (v *ActiveClusterSelectionPolicy) IsSetExternalEntityKey() bool { + return v != nil && v.ExternalEntityKey != nil +} + +type ActiveClusterSelectionStrategy int32 + +const ( + ActiveClusterSelectionStrategyRegionSticky ActiveClusterSelectionStrategy = 0 + ActiveClusterSelectionStrategyExternalEntity ActiveClusterSelectionStrategy = 1 +) + +// ActiveClusterSelectionStrategy_Values returns all recognized values of ActiveClusterSelectionStrategy. +func ActiveClusterSelectionStrategy_Values() []ActiveClusterSelectionStrategy { + return []ActiveClusterSelectionStrategy{ + ActiveClusterSelectionStrategyRegionSticky, + ActiveClusterSelectionStrategyExternalEntity, + } +} + +// UnmarshalText tries to decode ActiveClusterSelectionStrategy from a byte slice +// containing its name. +// +// var v ActiveClusterSelectionStrategy +// err := v.UnmarshalText([]byte("REGION_STICKY")) +func (v *ActiveClusterSelectionStrategy) UnmarshalText(value []byte) error { + switch s := string(value); s { + case "REGION_STICKY": + *v = ActiveClusterSelectionStrategyRegionSticky + return nil + case "EXTERNAL_ENTITY": + *v = ActiveClusterSelectionStrategyExternalEntity + return nil + default: + val, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return fmt.Errorf("unknown enum value %q for %q: %v", s, "ActiveClusterSelectionStrategy", err) + } + *v = ActiveClusterSelectionStrategy(val) + return nil + } +} + +// MarshalText encodes ActiveClusterSelectionStrategy to text. +// +// If the enum value is recognized, its name is returned. Otherwise, +// its integer value is returned. +// +// This implements the TextMarshaler interface. +func (v ActiveClusterSelectionStrategy) MarshalText() ([]byte, error) { + switch int32(v) { + case 0: + return []byte("REGION_STICKY"), nil + case 1: + return []byte("EXTERNAL_ENTITY"), nil + } + return []byte(strconv.FormatInt(int64(v), 10)), nil +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of ActiveClusterSelectionStrategy. +// Enums are logged as objects, where the value is logged with key "value", and +// if this value's name is known, the name is logged with key "name". +func (v ActiveClusterSelectionStrategy) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt32("value", int32(v)) + switch int32(v) { + case 0: + enc.AddString("name", "REGION_STICKY") + case 1: + enc.AddString("name", "EXTERNAL_ENTITY") + } + return nil +} + +// Ptr returns a pointer to this enum value. +func (v ActiveClusterSelectionStrategy) Ptr() *ActiveClusterSelectionStrategy { + return &v +} + +// ToWire translates ActiveClusterSelectionStrategy into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// Enums are represented as 32-bit integers over the wire. 
+func (v ActiveClusterSelectionStrategy) ToWire() (wire.Value, error) { + return wire.NewValueI32(int32(v)), nil +} + +// FromWire deserializes ActiveClusterSelectionStrategy from its Thrift-level +// representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TI32) +// if err != nil { +// return ActiveClusterSelectionStrategy(0), err +// } +// +// var v ActiveClusterSelectionStrategy +// if err := v.FromWire(x); err != nil { +// return ActiveClusterSelectionStrategy(0), err +// } +// return v, nil +func (v *ActiveClusterSelectionStrategy) FromWire(w wire.Value) error { + *v = (ActiveClusterSelectionStrategy)(w.GetI32()) + return nil +} + +// String returns a readable string representation of ActiveClusterSelectionStrategy. +func (v ActiveClusterSelectionStrategy) String() string { + w := int32(v) + switch w { + case 0: + return "REGION_STICKY" + case 1: + return "EXTERNAL_ENTITY" + } + return fmt.Sprintf("ActiveClusterSelectionStrategy(%d)", w) +} + +// Equals returns true if this ActiveClusterSelectionStrategy value matches the provided +// value. +func (v ActiveClusterSelectionStrategy) Equals(rhs ActiveClusterSelectionStrategy) bool { + return v == rhs +} + +// MarshalJSON serializes ActiveClusterSelectionStrategy into JSON. +// +// If the enum value is recognized, its name is returned. Otherwise, +// its integer value is returned. +// +// This implements json.Marshaler. +func (v ActiveClusterSelectionStrategy) MarshalJSON() ([]byte, error) { + switch int32(v) { + case 0: + return ([]byte)("\"REGION_STICKY\""), nil + case 1: + return ([]byte)("\"EXTERNAL_ENTITY\""), nil + } + return ([]byte)(strconv.FormatInt(int64(v), 10)), nil +} + +// UnmarshalJSON attempts to decode ActiveClusterSelectionStrategy from its JSON +// representation. +// +// This implementation supports both, numeric and string inputs. If a +// string is provided, it must be a known enum name. +// +// This implements json.Unmarshaler. 
+func (v *ActiveClusterSelectionStrategy) UnmarshalJSON(text []byte) error { + d := json.NewDecoder(bytes.NewReader(text)) + d.UseNumber() + t, err := d.Token() + if err != nil { + return err + } + + switch w := t.(type) { + case json.Number: + x, err := w.Int64() + if err != nil { + return err + } + if x > math.MaxInt32 { + return fmt.Errorf("enum overflow from JSON %q for %q", text, "ActiveClusterSelectionStrategy") + } + if x < math.MinInt32 { + return fmt.Errorf("enum underflow from JSON %q for %q", text, "ActiveClusterSelectionStrategy") + } + *v = (ActiveClusterSelectionStrategy)(x) + return nil + case string: + return v.UnmarshalText([]byte(w)) + default: + return fmt.Errorf("invalid JSON value %q (%T) to unmarshal into %q", t, t, "ActiveClusterSelectionStrategy") + } +} + type ActiveClusters struct { ActiveClustersByRegion map[string]*ActiveClusterInfo `json:"activeClustersByRegion,omitempty"` } @@ -9828,23 +10285,24 @@ func (v *ContinueAsNewInitiator) UnmarshalJSON(text []byte) error { } type ContinueAsNewWorkflowExecutionDecisionAttributes struct { - WorkflowType *WorkflowType `json:"workflowType,omitempty"` - TaskList *TaskList `json:"taskList,omitempty"` - Input []byte `json:"input,omitempty"` - ExecutionStartToCloseTimeoutSeconds *int32 `json:"executionStartToCloseTimeoutSeconds,omitempty"` - TaskStartToCloseTimeoutSeconds *int32 `json:"taskStartToCloseTimeoutSeconds,omitempty"` - BackoffStartIntervalInSeconds *int32 `json:"backoffStartIntervalInSeconds,omitempty"` - RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` - Initiator *ContinueAsNewInitiator `json:"initiator,omitempty"` - FailureReason *string `json:"failureReason,omitempty"` - FailureDetails []byte `json:"failureDetails,omitempty"` - LastCompletionResult []byte `json:"lastCompletionResult,omitempty"` - CronSchedule *string `json:"cronSchedule,omitempty"` - Header *Header `json:"header,omitempty"` - Memo *Memo `json:"memo,omitempty"` - SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` - JitterStartSeconds *int32 `json:"jitterStartSeconds,omitempty"` - CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` + WorkflowType *WorkflowType `json:"workflowType,omitempty"` + TaskList *TaskList `json:"taskList,omitempty"` + Input []byte `json:"input,omitempty"` + ExecutionStartToCloseTimeoutSeconds *int32 `json:"executionStartToCloseTimeoutSeconds,omitempty"` + TaskStartToCloseTimeoutSeconds *int32 `json:"taskStartToCloseTimeoutSeconds,omitempty"` + BackoffStartIntervalInSeconds *int32 `json:"backoffStartIntervalInSeconds,omitempty"` + RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` + Initiator *ContinueAsNewInitiator `json:"initiator,omitempty"` + FailureReason *string `json:"failureReason,omitempty"` + FailureDetails []byte `json:"failureDetails,omitempty"` + LastCompletionResult []byte `json:"lastCompletionResult,omitempty"` + CronSchedule *string `json:"cronSchedule,omitempty"` + Header *Header `json:"header,omitempty"` + Memo *Memo `json:"memo,omitempty"` + SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` + JitterStartSeconds *int32 `json:"jitterStartSeconds,omitempty"` + CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` + ActiveClusterSelectionPolicy *ActiveClusterSelectionPolicy `json:"activeClusterSelectionPolicy,omitempty"` } // ToWire translates a ContinueAsNewWorkflowExecutionDecisionAttributes struct into a Thrift-level intermediate @@ -9864,7 +10322,7 @@ type ContinueAsNewWorkflowExecutionDecisionAttributes struct 
{ // } func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) ToWire() (wire.Value, error) { var ( - fields [17]wire.Field + fields [18]wire.Field i int = 0 w wire.Value err error @@ -10006,6 +10464,14 @@ func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) ToWire() (wire.Value, fields[i] = wire.Field{ID: 170, Value: w} i++ } + if v.ActiveClusterSelectionPolicy != nil { + w, err = v.ActiveClusterSelectionPolicy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 180, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -10034,6 +10500,12 @@ func _CronOverlapPolicy_Read(w wire.Value) (CronOverlapPolicy, error) { return v, err } +func _ActiveClusterSelectionPolicy_Read(w wire.Value) (*ActiveClusterSelectionPolicy, error) { + var v ActiveClusterSelectionPolicy + err := v.FromWire(w) + return &v, err +} + // FromWire deserializes a ContinueAsNewWorkflowExecutionDecisionAttributes struct from its Thrift-level // representation. The Thrift-level representation may be obtained // from a ThriftRW protocol implementation. @@ -10207,6 +10679,14 @@ func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) FromWire(w wire.Value return err } + } + case 180: + if field.Value.Type() == wire.TStruct { + v.ActiveClusterSelectionPolicy, err = _ActiveClusterSelectionPolicy_Read(field.Value) + if err != nil { + return err + } + } } } @@ -10221,7 +10701,7 @@ func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) String() string { return "" } - var fields [17]string + var fields [18]string i := 0 if v.WorkflowType != nil { fields[i] = fmt.Sprintf("WorkflowType: %v", v.WorkflowType) @@ -10291,6 +10771,10 @@ func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) String() string { fields[i] = fmt.Sprintf("CronOverlapPolicy: %v", *(v.CronOverlapPolicy)) i++ } + if v.ActiveClusterSelectionPolicy != nil { + fields[i] = fmt.Sprintf("ActiveClusterSelectionPolicy: %v", v.ActiveClusterSelectionPolicy) + i++ + } return fmt.Sprintf("ContinueAsNewWorkflowExecutionDecisionAttributes{%v}", strings.Join(fields[:i], ", ")) } @@ -10376,6 +10860,9 @@ func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) Equals(rhs *ContinueA if !_CronOverlapPolicy_EqualsPtr(v.CronOverlapPolicy, rhs.CronOverlapPolicy) { return false } + if !((v.ActiveClusterSelectionPolicy == nil && rhs.ActiveClusterSelectionPolicy == nil) || (v.ActiveClusterSelectionPolicy != nil && rhs.ActiveClusterSelectionPolicy != nil && v.ActiveClusterSelectionPolicy.Equals(rhs.ActiveClusterSelectionPolicy))) { + return false + } return true } @@ -10437,6 +10924,9 @@ func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) MarshalLogObject(enc if v.CronOverlapPolicy != nil { err = multierr.Append(err, enc.AddObject("cronOverlapPolicy", *v.CronOverlapPolicy)) } + if v.ActiveClusterSelectionPolicy != nil { + err = multierr.Append(err, enc.AddObject("activeClusterSelectionPolicy", v.ActiveClusterSelectionPolicy)) + } return err } @@ -10695,6 +11185,21 @@ func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) IsSetCronOverlapPolic return v != nil && v.CronOverlapPolicy != nil } +// GetActiveClusterSelectionPolicy returns the value of ActiveClusterSelectionPolicy if it is set or its +// zero value if it is unset. 
+func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) GetActiveClusterSelectionPolicy() (o *ActiveClusterSelectionPolicy) { + if v != nil && v.ActiveClusterSelectionPolicy != nil { + return v.ActiveClusterSelectionPolicy + } + + return +} + +// IsSetActiveClusterSelectionPolicy returns true if ActiveClusterSelectionPolicy is not nil. +func (v *ContinueAsNewWorkflowExecutionDecisionAttributes) IsSetActiveClusterSelectionPolicy() bool { + return v != nil && v.ActiveClusterSelectionPolicy != nil +} + type CountWorkflowExecutionsRequest struct { Domain *string `json:"domain,omitempty"` Query *string `json:"query,omitempty"` @@ -22051,6 +22556,7 @@ func (v *DescribeTaskListRequest) IsSetIncludeTaskListStatus() bool { type DescribeTaskListResponse struct { Pollers []*PollerInfo `json:"pollers,omitempty"` TaskListStatus *TaskListStatus `json:"taskListStatus,omitempty"` + TaskList *TaskList `json:"taskList,omitempty"` } type _List_PollerInfo_ValueList []*PollerInfo @@ -22099,7 +22605,7 @@ func (_List_PollerInfo_ValueList) Close() {} // } func (v *DescribeTaskListResponse) ToWire() (wire.Value, error) { var ( - fields [2]wire.Field + fields [3]wire.Field i int = 0 w wire.Value err error @@ -22121,6 +22627,14 @@ func (v *DescribeTaskListResponse) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 20, Value: w} i++ } + if v.TaskList != nil { + w, err = v.TaskList.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 30, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -22192,6 +22706,14 @@ func (v *DescribeTaskListResponse) FromWire(w wire.Value) error { return err } + } + case 30: + if field.Value.Type() == wire.TStruct { + v.TaskList, err = _TaskList_Read(field.Value) + if err != nil { + return err + } + } } } @@ -22206,7 +22728,7 @@ func (v *DescribeTaskListResponse) String() string { return "" } - var fields [2]string + var fields [3]string i := 0 if v.Pollers != nil { fields[i] = fmt.Sprintf("Pollers: %v", v.Pollers) @@ -22216,6 +22738,10 @@ func (v *DescribeTaskListResponse) String() string { fields[i] = fmt.Sprintf("TaskListStatus: %v", v.TaskListStatus) i++ } + if v.TaskList != nil { + fields[i] = fmt.Sprintf("TaskList: %v", v.TaskList) + i++ + } return fmt.Sprintf("DescribeTaskListResponse{%v}", strings.Join(fields[:i], ", ")) } @@ -22251,6 +22777,9 @@ func (v *DescribeTaskListResponse) Equals(rhs *DescribeTaskListResponse) bool { if !((v.TaskListStatus == nil && rhs.TaskListStatus == nil) || (v.TaskListStatus != nil && rhs.TaskListStatus != nil && v.TaskListStatus.Equals(rhs.TaskListStatus))) { return false } + if !((v.TaskList == nil && rhs.TaskList == nil) || (v.TaskList != nil && rhs.TaskList != nil && v.TaskList.Equals(rhs.TaskList))) { + return false + } return true } @@ -22278,6 +22807,9 @@ func (v *DescribeTaskListResponse) MarshalLogObject(enc zapcore.ObjectEncoder) ( if v.TaskListStatus != nil { err = multierr.Append(err, enc.AddObject("taskListStatus", v.TaskListStatus)) } + if v.TaskList != nil { + err = multierr.Append(err, enc.AddObject("taskList", v.TaskList)) + } return err } @@ -22311,6 +22843,21 @@ func (v *DescribeTaskListResponse) IsSetTaskListStatus() bool { return v != nil && v.TaskListStatus != nil } +// GetTaskList returns the value of TaskList if it is set or its +// zero value if it is unset. +func (v *DescribeTaskListResponse) GetTaskList() (o *TaskList) { + if v != nil && v.TaskList != nil { + return v.TaskList + } + + return +} + +// IsSetTaskList returns true if TaskList is not nil. 
+func (v *DescribeTaskListResponse) IsSetTaskList() bool { + return v != nil && v.TaskList != nil +} + type DescribeWorkflowExecutionRequest struct { Domain *string `json:"domain,omitempty"` Execution *WorkflowExecution `json:"execution,omitempty"` @@ -58123,28 +58670,29 @@ func (v *SignalWithStartWorkflowExecutionAsyncResponse) MarshalLogObject(enc zap } type SignalWithStartWorkflowExecutionRequest struct { - Domain *string `json:"domain,omitempty"` - WorkflowId *string `json:"workflowId,omitempty"` - WorkflowType *WorkflowType `json:"workflowType,omitempty"` - TaskList *TaskList `json:"taskList,omitempty"` - Input []byte `json:"input,omitempty"` - ExecutionStartToCloseTimeoutSeconds *int32 `json:"executionStartToCloseTimeoutSeconds,omitempty"` - TaskStartToCloseTimeoutSeconds *int32 `json:"taskStartToCloseTimeoutSeconds,omitempty"` - Identity *string `json:"identity,omitempty"` - RequestId *string `json:"requestId,omitempty"` - WorkflowIdReusePolicy *WorkflowIdReusePolicy `json:"workflowIdReusePolicy,omitempty"` - SignalName *string `json:"signalName,omitempty"` - SignalInput []byte `json:"signalInput,omitempty"` - Control []byte `json:"control,omitempty"` - RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` - CronSchedule *string `json:"cronSchedule,omitempty"` - Memo *Memo `json:"memo,omitempty"` - SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` - Header *Header `json:"header,omitempty"` - DelayStartSeconds *int32 `json:"delayStartSeconds,omitempty"` - JitterStartSeconds *int32 `json:"jitterStartSeconds,omitempty"` - FirstRunAtTimestamp *int64 `json:"firstRunAtTimestamp,omitempty"` - CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` + Domain *string `json:"domain,omitempty"` + WorkflowId *string `json:"workflowId,omitempty"` + WorkflowType *WorkflowType `json:"workflowType,omitempty"` + TaskList *TaskList `json:"taskList,omitempty"` + Input []byte `json:"input,omitempty"` + ExecutionStartToCloseTimeoutSeconds *int32 `json:"executionStartToCloseTimeoutSeconds,omitempty"` + TaskStartToCloseTimeoutSeconds *int32 `json:"taskStartToCloseTimeoutSeconds,omitempty"` + Identity *string `json:"identity,omitempty"` + RequestId *string `json:"requestId,omitempty"` + WorkflowIdReusePolicy *WorkflowIdReusePolicy `json:"workflowIdReusePolicy,omitempty"` + SignalName *string `json:"signalName,omitempty"` + SignalInput []byte `json:"signalInput,omitempty"` + Control []byte `json:"control,omitempty"` + RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` + CronSchedule *string `json:"cronSchedule,omitempty"` + Memo *Memo `json:"memo,omitempty"` + SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` + Header *Header `json:"header,omitempty"` + DelayStartSeconds *int32 `json:"delayStartSeconds,omitempty"` + JitterStartSeconds *int32 `json:"jitterStartSeconds,omitempty"` + FirstRunAtTimestamp *int64 `json:"firstRunAtTimestamp,omitempty"` + CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` + ActiveClusterSelectionPolicy *ActiveClusterSelectionPolicy `json:"activeClusterSelectionPolicy,omitempty"` } // ToWire translates a SignalWithStartWorkflowExecutionRequest struct into a Thrift-level intermediate @@ -58164,7 +58712,7 @@ type SignalWithStartWorkflowExecutionRequest struct { // } func (v *SignalWithStartWorkflowExecutionRequest) ToWire() (wire.Value, error) { var ( - fields [22]wire.Field + fields [23]wire.Field i int = 0 w wire.Value err error @@ -58346,6 +58894,14 @@ func (v 
*SignalWithStartWorkflowExecutionRequest) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 210, Value: w} i++ } + if v.ActiveClusterSelectionPolicy != nil { + w, err = v.ActiveClusterSelectionPolicy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 220, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -58579,6 +59135,14 @@ func (v *SignalWithStartWorkflowExecutionRequest) FromWire(w wire.Value) error { return err } + } + case 220: + if field.Value.Type() == wire.TStruct { + v.ActiveClusterSelectionPolicy, err = _ActiveClusterSelectionPolicy_Read(field.Value) + if err != nil { + return err + } + } } } @@ -58593,7 +59157,7 @@ func (v *SignalWithStartWorkflowExecutionRequest) String() string { return "" } - var fields [22]string + var fields [23]string i := 0 if v.Domain != nil { fields[i] = fmt.Sprintf("Domain: %v", *(v.Domain)) @@ -58683,6 +59247,10 @@ func (v *SignalWithStartWorkflowExecutionRequest) String() string { fields[i] = fmt.Sprintf("CronOverlapPolicy: %v", *(v.CronOverlapPolicy)) i++ } + if v.ActiveClusterSelectionPolicy != nil { + fields[i] = fmt.Sprintf("ActiveClusterSelectionPolicy: %v", v.ActiveClusterSelectionPolicy) + i++ + } return fmt.Sprintf("SignalWithStartWorkflowExecutionRequest{%v}", strings.Join(fields[:i], ", ")) } @@ -58773,6 +59341,9 @@ func (v *SignalWithStartWorkflowExecutionRequest) Equals(rhs *SignalWithStartWor if !_CronOverlapPolicy_EqualsPtr(v.CronOverlapPolicy, rhs.CronOverlapPolicy) { return false } + if !((v.ActiveClusterSelectionPolicy == nil && rhs.ActiveClusterSelectionPolicy == nil) || (v.ActiveClusterSelectionPolicy != nil && rhs.ActiveClusterSelectionPolicy != nil && v.ActiveClusterSelectionPolicy.Equals(rhs.ActiveClusterSelectionPolicy))) { + return false + } return true } @@ -58849,6 +59420,9 @@ func (v *SignalWithStartWorkflowExecutionRequest) MarshalLogObject(enc zapcore.O if v.CronOverlapPolicy != nil { err = multierr.Append(err, enc.AddObject("cronOverlapPolicy", *v.CronOverlapPolicy)) } + if v.ActiveClusterSelectionPolicy != nil { + err = multierr.Append(err, enc.AddObject("activeClusterSelectionPolicy", v.ActiveClusterSelectionPolicy)) + } return err } @@ -59182,6 +59756,21 @@ func (v *SignalWithStartWorkflowExecutionRequest) IsSetCronOverlapPolicy() bool return v != nil && v.CronOverlapPolicy != nil } +// GetActiveClusterSelectionPolicy returns the value of ActiveClusterSelectionPolicy if it is set or its +// zero value if it is unset. +func (v *SignalWithStartWorkflowExecutionRequest) GetActiveClusterSelectionPolicy() (o *ActiveClusterSelectionPolicy) { + if v != nil && v.ActiveClusterSelectionPolicy != nil { + return v.ActiveClusterSelectionPolicy + } + + return +} + +// IsSetActiveClusterSelectionPolicy returns true if ActiveClusterSelectionPolicy is not nil. 
+func (v *SignalWithStartWorkflowExecutionRequest) IsSetActiveClusterSelectionPolicy() bool { + return v != nil && v.ActiveClusterSelectionPolicy != nil +} + type SignalWorkflowExecutionRequest struct { Domain *string `json:"domain,omitempty"` WorkflowExecution *WorkflowExecution `json:"workflowExecution,omitempty"` @@ -59579,22 +60168,23 @@ func (v *SignalWorkflowExecutionRequest) IsSetControl() bool { } type StartChildWorkflowExecutionDecisionAttributes struct { - Domain *string `json:"domain,omitempty"` - WorkflowId *string `json:"workflowId,omitempty"` - WorkflowType *WorkflowType `json:"workflowType,omitempty"` - TaskList *TaskList `json:"taskList,omitempty"` - Input []byte `json:"input,omitempty"` - ExecutionStartToCloseTimeoutSeconds *int32 `json:"executionStartToCloseTimeoutSeconds,omitempty"` - TaskStartToCloseTimeoutSeconds *int32 `json:"taskStartToCloseTimeoutSeconds,omitempty"` - ParentClosePolicy *ParentClosePolicy `json:"parentClosePolicy,omitempty"` - Control []byte `json:"control,omitempty"` - WorkflowIdReusePolicy *WorkflowIdReusePolicy `json:"workflowIdReusePolicy,omitempty"` - RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` - CronSchedule *string `json:"cronSchedule,omitempty"` - Header *Header `json:"header,omitempty"` - Memo *Memo `json:"memo,omitempty"` - SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` - CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` + Domain *string `json:"domain,omitempty"` + WorkflowId *string `json:"workflowId,omitempty"` + WorkflowType *WorkflowType `json:"workflowType,omitempty"` + TaskList *TaskList `json:"taskList,omitempty"` + Input []byte `json:"input,omitempty"` + ExecutionStartToCloseTimeoutSeconds *int32 `json:"executionStartToCloseTimeoutSeconds,omitempty"` + TaskStartToCloseTimeoutSeconds *int32 `json:"taskStartToCloseTimeoutSeconds,omitempty"` + ParentClosePolicy *ParentClosePolicy `json:"parentClosePolicy,omitempty"` + Control []byte `json:"control,omitempty"` + WorkflowIdReusePolicy *WorkflowIdReusePolicy `json:"workflowIdReusePolicy,omitempty"` + RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` + CronSchedule *string `json:"cronSchedule,omitempty"` + Header *Header `json:"header,omitempty"` + Memo *Memo `json:"memo,omitempty"` + SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` + CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` + ActiveClusterSelectionPolicy *ActiveClusterSelectionPolicy `json:"activeClusterSelectionPolicy,omitempty"` } // ToWire translates a StartChildWorkflowExecutionDecisionAttributes struct into a Thrift-level intermediate @@ -59614,7 +60204,7 @@ type StartChildWorkflowExecutionDecisionAttributes struct { // } func (v *StartChildWorkflowExecutionDecisionAttributes) ToWire() (wire.Value, error) { var ( - fields [16]wire.Field + fields [17]wire.Field i int = 0 w wire.Value err error @@ -59748,6 +60338,14 @@ func (v *StartChildWorkflowExecutionDecisionAttributes) ToWire() (wire.Value, er fields[i] = wire.Field{ID: 160, Value: w} i++ } + if v.ActiveClusterSelectionPolicy != nil { + w, err = v.ActiveClusterSelectionPolicy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 170, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -59917,6 +60515,14 @@ func (v *StartChildWorkflowExecutionDecisionAttributes) FromWire(w wire.Value) e return err } + } + case 170: + if field.Value.Type() == wire.TStruct { + v.ActiveClusterSelectionPolicy, err = 
_ActiveClusterSelectionPolicy_Read(field.Value) + if err != nil { + return err + } + } } } @@ -59931,7 +60537,7 @@ func (v *StartChildWorkflowExecutionDecisionAttributes) String() string { return "" } - var fields [16]string + var fields [17]string i := 0 if v.Domain != nil { fields[i] = fmt.Sprintf("Domain: %v", *(v.Domain)) @@ -59997,6 +60603,10 @@ func (v *StartChildWorkflowExecutionDecisionAttributes) String() string { fields[i] = fmt.Sprintf("CronOverlapPolicy: %v", *(v.CronOverlapPolicy)) i++ } + if v.ActiveClusterSelectionPolicy != nil { + fields[i] = fmt.Sprintf("ActiveClusterSelectionPolicy: %v", v.ActiveClusterSelectionPolicy) + i++ + } return fmt.Sprintf("StartChildWorkflowExecutionDecisionAttributes{%v}", strings.Join(fields[:i], ", ")) } @@ -60059,6 +60669,9 @@ func (v *StartChildWorkflowExecutionDecisionAttributes) Equals(rhs *StartChildWo if !_CronOverlapPolicy_EqualsPtr(v.CronOverlapPolicy, rhs.CronOverlapPolicy) { return false } + if !((v.ActiveClusterSelectionPolicy == nil && rhs.ActiveClusterSelectionPolicy == nil) || (v.ActiveClusterSelectionPolicy != nil && rhs.ActiveClusterSelectionPolicy != nil && v.ActiveClusterSelectionPolicy.Equals(rhs.ActiveClusterSelectionPolicy))) { + return false + } return true } @@ -60117,6 +60730,9 @@ func (v *StartChildWorkflowExecutionDecisionAttributes) MarshalLogObject(enc zap if v.CronOverlapPolicy != nil { err = multierr.Append(err, enc.AddObject("cronOverlapPolicy", *v.CronOverlapPolicy)) } + if v.ActiveClusterSelectionPolicy != nil { + err = multierr.Append(err, enc.AddObject("activeClusterSelectionPolicy", v.ActiveClusterSelectionPolicy)) + } return err } @@ -60360,6 +60976,21 @@ func (v *StartChildWorkflowExecutionDecisionAttributes) IsSetCronOverlapPolicy() return v != nil && v.CronOverlapPolicy != nil } +// GetActiveClusterSelectionPolicy returns the value of ActiveClusterSelectionPolicy if it is set or its +// zero value if it is unset. +func (v *StartChildWorkflowExecutionDecisionAttributes) GetActiveClusterSelectionPolicy() (o *ActiveClusterSelectionPolicy) { + if v != nil && v.ActiveClusterSelectionPolicy != nil { + return v.ActiveClusterSelectionPolicy + } + + return +} + +// IsSetActiveClusterSelectionPolicy returns true if ActiveClusterSelectionPolicy is not nil. 
+func (v *StartChildWorkflowExecutionDecisionAttributes) IsSetActiveClusterSelectionPolicy() bool { + return v != nil && v.ActiveClusterSelectionPolicy != nil +} + type StartChildWorkflowExecutionFailedEventAttributes struct { Domain *string `json:"domain,omitempty"` WorkflowId *string `json:"workflowId,omitempty"` @@ -60775,26 +61406,27 @@ func (v *StartChildWorkflowExecutionFailedEventAttributes) IsSetDecisionTaskComp } type StartChildWorkflowExecutionInitiatedEventAttributes struct { - Domain *string `json:"domain,omitempty"` - WorkflowId *string `json:"workflowId,omitempty"` - WorkflowType *WorkflowType `json:"workflowType,omitempty"` - TaskList *TaskList `json:"taskList,omitempty"` - Input []byte `json:"input,omitempty"` - ExecutionStartToCloseTimeoutSeconds *int32 `json:"executionStartToCloseTimeoutSeconds,omitempty"` - TaskStartToCloseTimeoutSeconds *int32 `json:"taskStartToCloseTimeoutSeconds,omitempty"` - ParentClosePolicy *ParentClosePolicy `json:"parentClosePolicy,omitempty"` - Control []byte `json:"control,omitempty"` - DecisionTaskCompletedEventId *int64 `json:"decisionTaskCompletedEventId,omitempty"` - WorkflowIdReusePolicy *WorkflowIdReusePolicy `json:"workflowIdReusePolicy,omitempty"` - RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` - CronSchedule *string `json:"cronSchedule,omitempty"` - Header *Header `json:"header,omitempty"` - Memo *Memo `json:"memo,omitempty"` - SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` - DelayStartSeconds *int32 `json:"delayStartSeconds,omitempty"` - JitterStartSeconds *int32 `json:"jitterStartSeconds,omitempty"` - FirstRunAtTimestamp *int64 `json:"firstRunAtTimestamp,omitempty"` - CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` + Domain *string `json:"domain,omitempty"` + WorkflowId *string `json:"workflowId,omitempty"` + WorkflowType *WorkflowType `json:"workflowType,omitempty"` + TaskList *TaskList `json:"taskList,omitempty"` + Input []byte `json:"input,omitempty"` + ExecutionStartToCloseTimeoutSeconds *int32 `json:"executionStartToCloseTimeoutSeconds,omitempty"` + TaskStartToCloseTimeoutSeconds *int32 `json:"taskStartToCloseTimeoutSeconds,omitempty"` + ParentClosePolicy *ParentClosePolicy `json:"parentClosePolicy,omitempty"` + Control []byte `json:"control,omitempty"` + DecisionTaskCompletedEventId *int64 `json:"decisionTaskCompletedEventId,omitempty"` + WorkflowIdReusePolicy *WorkflowIdReusePolicy `json:"workflowIdReusePolicy,omitempty"` + RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` + CronSchedule *string `json:"cronSchedule,omitempty"` + Header *Header `json:"header,omitempty"` + Memo *Memo `json:"memo,omitempty"` + SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` + DelayStartSeconds *int32 `json:"delayStartSeconds,omitempty"` + JitterStartSeconds *int32 `json:"jitterStartSeconds,omitempty"` + FirstRunAtTimestamp *int64 `json:"firstRunAtTimestamp,omitempty"` + CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` + ActiveClusterSelectionPolicy *ActiveClusterSelectionPolicy `json:"activeClusterSelectionPolicy,omitempty"` } // ToWire translates a StartChildWorkflowExecutionInitiatedEventAttributes struct into a Thrift-level intermediate @@ -60814,7 +61446,7 @@ type StartChildWorkflowExecutionInitiatedEventAttributes struct { // } func (v *StartChildWorkflowExecutionInitiatedEventAttributes) ToWire() (wire.Value, error) { var ( - fields [20]wire.Field + fields [21]wire.Field i int = 0 w wire.Value err error @@ -60980,6 +61612,14 @@ 
func (v *StartChildWorkflowExecutionInitiatedEventAttributes) ToWire() (wire.Val fields[i] = wire.Field{ID: 200, Value: w} i++ } + if v.ActiveClusterSelectionPolicy != nil { + w, err = v.ActiveClusterSelectionPolicy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 210, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -61189,6 +61829,14 @@ func (v *StartChildWorkflowExecutionInitiatedEventAttributes) FromWire(w wire.Va return err } + } + case 210: + if field.Value.Type() == wire.TStruct { + v.ActiveClusterSelectionPolicy, err = _ActiveClusterSelectionPolicy_Read(field.Value) + if err != nil { + return err + } + } } } @@ -61203,7 +61851,7 @@ func (v *StartChildWorkflowExecutionInitiatedEventAttributes) String() string { return "" } - var fields [20]string + var fields [21]string i := 0 if v.Domain != nil { fields[i] = fmt.Sprintf("Domain: %v", *(v.Domain)) @@ -61285,6 +61933,10 @@ func (v *StartChildWorkflowExecutionInitiatedEventAttributes) String() string { fields[i] = fmt.Sprintf("CronOverlapPolicy: %v", *(v.CronOverlapPolicy)) i++ } + if v.ActiveClusterSelectionPolicy != nil { + fields[i] = fmt.Sprintf("ActiveClusterSelectionPolicy: %v", v.ActiveClusterSelectionPolicy) + i++ + } return fmt.Sprintf("StartChildWorkflowExecutionInitiatedEventAttributes{%v}", strings.Join(fields[:i], ", ")) } @@ -61359,6 +62011,9 @@ func (v *StartChildWorkflowExecutionInitiatedEventAttributes) Equals(rhs *StartC if !_CronOverlapPolicy_EqualsPtr(v.CronOverlapPolicy, rhs.CronOverlapPolicy) { return false } + if !((v.ActiveClusterSelectionPolicy == nil && rhs.ActiveClusterSelectionPolicy == nil) || (v.ActiveClusterSelectionPolicy != nil && rhs.ActiveClusterSelectionPolicy != nil && v.ActiveClusterSelectionPolicy.Equals(rhs.ActiveClusterSelectionPolicy))) { + return false + } return true } @@ -61429,6 +62084,9 @@ func (v *StartChildWorkflowExecutionInitiatedEventAttributes) MarshalLogObject(e if v.CronOverlapPolicy != nil { err = multierr.Append(err, enc.AddObject("cronOverlapPolicy", *v.CronOverlapPolicy)) } + if v.ActiveClusterSelectionPolicy != nil { + err = multierr.Append(err, enc.AddObject("activeClusterSelectionPolicy", v.ActiveClusterSelectionPolicy)) + } return err } @@ -61732,6 +62390,21 @@ func (v *StartChildWorkflowExecutionInitiatedEventAttributes) IsSetCronOverlapPo return v != nil && v.CronOverlapPolicy != nil } +// GetActiveClusterSelectionPolicy returns the value of ActiveClusterSelectionPolicy if it is set or its +// zero value if it is unset. +func (v *StartChildWorkflowExecutionInitiatedEventAttributes) GetActiveClusterSelectionPolicy() (o *ActiveClusterSelectionPolicy) { + if v != nil && v.ActiveClusterSelectionPolicy != nil { + return v.ActiveClusterSelectionPolicy + } + + return +} + +// IsSetActiveClusterSelectionPolicy returns true if ActiveClusterSelectionPolicy is not nil. 
+func (v *StartChildWorkflowExecutionInitiatedEventAttributes) IsSetActiveClusterSelectionPolicy() bool { + return v != nil && v.ActiveClusterSelectionPolicy != nil +} + type StartTimeFilter struct { EarliestTime *int64 `json:"earliestTime,omitempty"` LatestTime *int64 `json:"latestTime,omitempty"` @@ -62329,25 +63002,26 @@ func (v *StartWorkflowExecutionAsyncResponse) MarshalLogObject(enc zapcore.Objec } type StartWorkflowExecutionRequest struct { - Domain *string `json:"domain,omitempty"` - WorkflowId *string `json:"workflowId,omitempty"` - WorkflowType *WorkflowType `json:"workflowType,omitempty"` - TaskList *TaskList `json:"taskList,omitempty"` - Input []byte `json:"input,omitempty"` - ExecutionStartToCloseTimeoutSeconds *int32 `json:"executionStartToCloseTimeoutSeconds,omitempty"` - TaskStartToCloseTimeoutSeconds *int32 `json:"taskStartToCloseTimeoutSeconds,omitempty"` - Identity *string `json:"identity,omitempty"` - RequestId *string `json:"requestId,omitempty"` - WorkflowIdReusePolicy *WorkflowIdReusePolicy `json:"workflowIdReusePolicy,omitempty"` - RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` - CronSchedule *string `json:"cronSchedule,omitempty"` - Memo *Memo `json:"memo,omitempty"` - SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` - Header *Header `json:"header,omitempty"` - DelayStartSeconds *int32 `json:"delayStartSeconds,omitempty"` - JitterStartSeconds *int32 `json:"jitterStartSeconds,omitempty"` - FirstRunAtTimestamp *int64 `json:"firstRunAtTimestamp,omitempty"` - CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` + Domain *string `json:"domain,omitempty"` + WorkflowId *string `json:"workflowId,omitempty"` + WorkflowType *WorkflowType `json:"workflowType,omitempty"` + TaskList *TaskList `json:"taskList,omitempty"` + Input []byte `json:"input,omitempty"` + ExecutionStartToCloseTimeoutSeconds *int32 `json:"executionStartToCloseTimeoutSeconds,omitempty"` + TaskStartToCloseTimeoutSeconds *int32 `json:"taskStartToCloseTimeoutSeconds,omitempty"` + Identity *string `json:"identity,omitempty"` + RequestId *string `json:"requestId,omitempty"` + WorkflowIdReusePolicy *WorkflowIdReusePolicy `json:"workflowIdReusePolicy,omitempty"` + RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` + CronSchedule *string `json:"cronSchedule,omitempty"` + Memo *Memo `json:"memo,omitempty"` + SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` + Header *Header `json:"header,omitempty"` + DelayStartSeconds *int32 `json:"delayStartSeconds,omitempty"` + JitterStartSeconds *int32 `json:"jitterStartSeconds,omitempty"` + FirstRunAtTimestamp *int64 `json:"firstRunAtTimestamp,omitempty"` + CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` + ActiveClusterSelectionPolicy *ActiveClusterSelectionPolicy `json:"activeClusterSelectionPolicy,omitempty"` } // ToWire translates a StartWorkflowExecutionRequest struct into a Thrift-level intermediate @@ -62367,7 +63041,7 @@ type StartWorkflowExecutionRequest struct { // } func (v *StartWorkflowExecutionRequest) ToWire() (wire.Value, error) { var ( - fields [19]wire.Field + fields [20]wire.Field i int = 0 w wire.Value err error @@ -62525,6 +63199,14 @@ func (v *StartWorkflowExecutionRequest) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 190, Value: w} i++ } + if v.ActiveClusterSelectionPolicy != nil { + w, err = v.ActiveClusterSelectionPolicy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 200, Value: w} + i++ + } return 
wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -62726,6 +63408,14 @@ func (v *StartWorkflowExecutionRequest) FromWire(w wire.Value) error { return err } + } + case 200: + if field.Value.Type() == wire.TStruct { + v.ActiveClusterSelectionPolicy, err = _ActiveClusterSelectionPolicy_Read(field.Value) + if err != nil { + return err + } + } } } @@ -62740,7 +63430,7 @@ func (v *StartWorkflowExecutionRequest) String() string { return "" } - var fields [19]string + var fields [20]string i := 0 if v.Domain != nil { fields[i] = fmt.Sprintf("Domain: %v", *(v.Domain)) @@ -62818,6 +63508,10 @@ func (v *StartWorkflowExecutionRequest) String() string { fields[i] = fmt.Sprintf("CronOverlapPolicy: %v", *(v.CronOverlapPolicy)) i++ } + if v.ActiveClusterSelectionPolicy != nil { + fields[i] = fmt.Sprintf("ActiveClusterSelectionPolicy: %v", v.ActiveClusterSelectionPolicy) + i++ + } return fmt.Sprintf("StartWorkflowExecutionRequest{%v}", strings.Join(fields[:i], ", ")) } @@ -62889,6 +63583,9 @@ func (v *StartWorkflowExecutionRequest) Equals(rhs *StartWorkflowExecutionReques if !_CronOverlapPolicy_EqualsPtr(v.CronOverlapPolicy, rhs.CronOverlapPolicy) { return false } + if !((v.ActiveClusterSelectionPolicy == nil && rhs.ActiveClusterSelectionPolicy == nil) || (v.ActiveClusterSelectionPolicy != nil && rhs.ActiveClusterSelectionPolicy != nil && v.ActiveClusterSelectionPolicy.Equals(rhs.ActiveClusterSelectionPolicy))) { + return false + } return true } @@ -62956,6 +63653,9 @@ func (v *StartWorkflowExecutionRequest) MarshalLogObject(enc zapcore.ObjectEncod if v.CronOverlapPolicy != nil { err = multierr.Append(err, enc.AddObject("cronOverlapPolicy", *v.CronOverlapPolicy)) } + if v.ActiveClusterSelectionPolicy != nil { + err = multierr.Append(err, enc.AddObject("activeClusterSelectionPolicy", v.ActiveClusterSelectionPolicy)) + } return err } @@ -63244,6 +63944,21 @@ func (v *StartWorkflowExecutionRequest) IsSetCronOverlapPolicy() bool { return v != nil && v.CronOverlapPolicy != nil } +// GetActiveClusterSelectionPolicy returns the value of ActiveClusterSelectionPolicy if it is set or its +// zero value if it is unset. +func (v *StartWorkflowExecutionRequest) GetActiveClusterSelectionPolicy() (o *ActiveClusterSelectionPolicy) { + if v != nil && v.ActiveClusterSelectionPolicy != nil { + return v.ActiveClusterSelectionPolicy + } + + return +} + +// IsSetActiveClusterSelectionPolicy returns true if ActiveClusterSelectionPolicy is not nil. +func (v *StartWorkflowExecutionRequest) IsSetActiveClusterSelectionPolicy() bool { + return v != nil && v.ActiveClusterSelectionPolicy != nil +} + type StartWorkflowExecutionResponse struct { RunId *string `json:"runId,omitempty"` } @@ -64449,8 +65164,9 @@ func (v *TaskList) IsSetKind() bool { type TaskListKind int32 const ( - TaskListKindNormal TaskListKind = 0 - TaskListKindSticky TaskListKind = 1 + TaskListKindNormal TaskListKind = 0 + TaskListKindSticky TaskListKind = 1 + TaskListKindEphemeral TaskListKind = 2 ) // TaskListKind_Values returns all recognized values of TaskListKind. 
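Editorial aside, not part of the generated diff: a minimal sketch of how the regenerated shared types compose once this patch is applied. It is hypothetical usage assembled only from the fields and helpers added above (the new StartWorkflowExecutionRequest fields, CronOverlapPolicyBufferone.Ptr(), ActiveClusterSelectionStrategyRegionSticky.Ptr(), and the ToWire/FromWire/Equals methods); the snippet assumes it sits inside some function with go.uber.org/cadence/.gen/go/shared imported, and the domain, workflow ID, cron expression, and region values are made up for illustration.

	// Hypothetical sketch: attach the new cron-overlap and active-cluster-selection
	// policies to a StartWorkflowExecutionRequest and round-trip it through the
	// Thrift wire form, exactly as the generated ToWire/FromWire pair supports.
	domain, wfID, sched := "sample-domain", "cron-wf", "* * * * *" // assumed values
	region := "us-west-2"                                          // assumed value
	req := &shared.StartWorkflowExecutionRequest{
		Domain:            &domain,
		WorkflowId:        &wfID,
		CronSchedule:      &sched,
		CronOverlapPolicy: shared.CronOverlapPolicyBufferone.Ptr(),
		ActiveClusterSelectionPolicy: &shared.ActiveClusterSelectionPolicy{
			Strategy:     shared.ActiveClusterSelectionStrategyRegionSticky.Ptr(),
			StickyRegion: &region,
		},
	}
	w, err := req.ToWire() // struct -> wire.Value, ready for a ThriftRW protocol encoder
	if err != nil {
		panic(err)
	}
	var decoded shared.StartWorkflowExecutionRequest
	if err := decoded.FromWire(w); err != nil { // wire.Value -> struct
		panic(err)
	}
	// decoded.Equals(req) reports true, including the two newly added fields.
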
@@ -64458,6 +65174,7 @@ func TaskListKind_Values() []TaskListKind { return []TaskListKind{ TaskListKindNormal, TaskListKindSticky, + TaskListKindEphemeral, } } @@ -64474,6 +65191,9 @@ func (v *TaskListKind) UnmarshalText(value []byte) error { case "STICKY": *v = TaskListKindSticky return nil + case "EPHEMERAL": + *v = TaskListKindEphemeral + return nil default: val, err := strconv.ParseInt(s, 10, 32) if err != nil { @@ -64496,6 +65216,8 @@ func (v TaskListKind) MarshalText() ([]byte, error) { return []byte("NORMAL"), nil case 1: return []byte("STICKY"), nil + case 2: + return []byte("EPHEMERAL"), nil } return []byte(strconv.FormatInt(int64(v), 10)), nil } @@ -64511,6 +65233,8 @@ func (v TaskListKind) MarshalLogObject(enc zapcore.ObjectEncoder) error { enc.AddString("name", "NORMAL") case 1: enc.AddString("name", "STICKY") + case 2: + enc.AddString("name", "EPHEMERAL") } return nil } @@ -64555,6 +65279,8 @@ func (v TaskListKind) String() string { return "NORMAL" case 1: return "STICKY" + case 2: + return "EPHEMERAL" } return fmt.Sprintf("TaskListKind(%d)", w) } @@ -64577,6 +65303,8 @@ func (v TaskListKind) MarshalJSON() ([]byte, error) { return ([]byte)("\"NORMAL\""), nil case 1: return ([]byte)("\"STICKY\""), nil + case 2: + return ([]byte)("\"EPHEMERAL\""), nil } return ([]byte)(strconv.FormatInt(int64(v), 10)), nil } @@ -72718,24 +73446,27 @@ func (v *WorkflowExecutionFilter) IsSetRunId() bool { } type WorkflowExecutionInfo struct { - Execution *WorkflowExecution `json:"execution,omitempty"` - Type *WorkflowType `json:"type,omitempty"` - StartTime *int64 `json:"startTime,omitempty"` - CloseTime *int64 `json:"closeTime,omitempty"` - CloseStatus *WorkflowExecutionCloseStatus `json:"closeStatus,omitempty"` - HistoryLength *int64 `json:"historyLength,omitempty"` - ParentDomainId *string `json:"parentDomainId,omitempty"` - ParentDomainName *string `json:"parentDomainName,omitempty"` - ParentInitatedId *int64 `json:"parentInitatedId,omitempty"` - ParentExecution *WorkflowExecution `json:"parentExecution,omitempty"` - ExecutionTime *int64 `json:"executionTime,omitempty"` - Memo *Memo `json:"memo,omitempty"` - SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` - AutoResetPoints *ResetPoints `json:"autoResetPoints,omitempty"` - TaskList *string `json:"taskList,omitempty"` - IsCron *bool `json:"isCron,omitempty"` - UpdateTime *int64 `json:"updateTime,omitempty"` - PartitionConfig map[string]string `json:"partitionConfig,omitempty"` + Execution *WorkflowExecution `json:"execution,omitempty"` + Type *WorkflowType `json:"type,omitempty"` + StartTime *int64 `json:"startTime,omitempty"` + CloseTime *int64 `json:"closeTime,omitempty"` + CloseStatus *WorkflowExecutionCloseStatus `json:"closeStatus,omitempty"` + HistoryLength *int64 `json:"historyLength,omitempty"` + ParentDomainId *string `json:"parentDomainId,omitempty"` + ParentDomainName *string `json:"parentDomainName,omitempty"` + ParentInitatedId *int64 `json:"parentInitatedId,omitempty"` + ParentExecution *WorkflowExecution `json:"parentExecution,omitempty"` + ExecutionTime *int64 `json:"executionTime,omitempty"` + Memo *Memo `json:"memo,omitempty"` + SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` + AutoResetPoints *ResetPoints `json:"autoResetPoints,omitempty"` + TaskList *string `json:"taskList,omitempty"` + TaskListInfo *TaskList `json:"taskListInfo,omitempty"` + IsCron *bool `json:"isCron,omitempty"` + UpdateTime *int64 `json:"updateTime,omitempty"` + PartitionConfig map[string]string 
`json:"partitionConfig,omitempty"` + CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` + ActiveClusterSelectionPolicy *ActiveClusterSelectionPolicy `json:"activeClusterSelectionPolicy,omitempty"` } // ToWire translates a WorkflowExecutionInfo struct into a Thrift-level intermediate @@ -72755,7 +73486,7 @@ type WorkflowExecutionInfo struct { // } func (v *WorkflowExecutionInfo) ToWire() (wire.Value, error) { var ( - fields [18]wire.Field + fields [21]wire.Field i int = 0 w wire.Value err error @@ -72881,6 +73612,14 @@ func (v *WorkflowExecutionInfo) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 120, Value: w} i++ } + if v.TaskListInfo != nil { + w, err = v.TaskListInfo.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 121, Value: w} + i++ + } if v.IsCron != nil { w, err = wire.NewValueBool(*(v.IsCron)), error(nil) if err != nil { @@ -72905,6 +73644,22 @@ func (v *WorkflowExecutionInfo) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 150, Value: w} i++ } + if v.CronOverlapPolicy != nil { + w, err = v.CronOverlapPolicy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 160, Value: w} + i++ + } + if v.ActiveClusterSelectionPolicy != nil { + w, err = v.ActiveClusterSelectionPolicy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 170, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -73074,6 +73829,14 @@ func (v *WorkflowExecutionInfo) FromWire(w wire.Value) error { return err } + } + case 121: + if field.Value.Type() == wire.TStruct { + v.TaskListInfo, err = _TaskList_Read(field.Value) + if err != nil { + return err + } + } case 130: if field.Value.Type() == wire.TBool { @@ -73102,6 +73865,24 @@ func (v *WorkflowExecutionInfo) FromWire(w wire.Value) error { return err } + } + case 160: + if field.Value.Type() == wire.TI32 { + var x CronOverlapPolicy + x, err = _CronOverlapPolicy_Read(field.Value) + v.CronOverlapPolicy = &x + if err != nil { + return err + } + + } + case 170: + if field.Value.Type() == wire.TStruct { + v.ActiveClusterSelectionPolicy, err = _ActiveClusterSelectionPolicy_Read(field.Value) + if err != nil { + return err + } + } } } @@ -73116,7 +73897,7 @@ func (v *WorkflowExecutionInfo) String() string { return "" } - var fields [18]string + var fields [21]string i := 0 if v.Execution != nil { fields[i] = fmt.Sprintf("Execution: %v", v.Execution) @@ -73178,6 +73959,10 @@ func (v *WorkflowExecutionInfo) String() string { fields[i] = fmt.Sprintf("TaskList: %v", *(v.TaskList)) i++ } + if v.TaskListInfo != nil { + fields[i] = fmt.Sprintf("TaskListInfo: %v", v.TaskListInfo) + i++ + } if v.IsCron != nil { fields[i] = fmt.Sprintf("IsCron: %v", *(v.IsCron)) i++ @@ -73190,6 +73975,14 @@ func (v *WorkflowExecutionInfo) String() string { fields[i] = fmt.Sprintf("PartitionConfig: %v", v.PartitionConfig) i++ } + if v.CronOverlapPolicy != nil { + fields[i] = fmt.Sprintf("CronOverlapPolicy: %v", *(v.CronOverlapPolicy)) + i++ + } + if v.ActiveClusterSelectionPolicy != nil { + fields[i] = fmt.Sprintf("ActiveClusterSelectionPolicy: %v", v.ActiveClusterSelectionPolicy) + i++ + } return fmt.Sprintf("WorkflowExecutionInfo{%v}", strings.Join(fields[:i], ", ")) } @@ -73249,6 +74042,9 @@ func (v *WorkflowExecutionInfo) Equals(rhs *WorkflowExecutionInfo) bool { if !_String_EqualsPtr(v.TaskList, rhs.TaskList) { return false } + if !((v.TaskListInfo == nil && rhs.TaskListInfo == nil) || (v.TaskListInfo != nil && rhs.TaskListInfo != nil && 
v.TaskListInfo.Equals(rhs.TaskListInfo))) { + return false + } if !_Bool_EqualsPtr(v.IsCron, rhs.IsCron) { return false } @@ -73258,6 +74054,12 @@ func (v *WorkflowExecutionInfo) Equals(rhs *WorkflowExecutionInfo) bool { if !((v.PartitionConfig == nil && rhs.PartitionConfig == nil) || (v.PartitionConfig != nil && rhs.PartitionConfig != nil && _Map_String_String_Equals(v.PartitionConfig, rhs.PartitionConfig))) { return false } + if !_CronOverlapPolicy_EqualsPtr(v.CronOverlapPolicy, rhs.CronOverlapPolicy) { + return false + } + if !((v.ActiveClusterSelectionPolicy == nil && rhs.ActiveClusterSelectionPolicy == nil) || (v.ActiveClusterSelectionPolicy != nil && rhs.ActiveClusterSelectionPolicy != nil && v.ActiveClusterSelectionPolicy.Equals(rhs.ActiveClusterSelectionPolicy))) { + return false + } return true } @@ -73313,6 +74115,9 @@ func (v *WorkflowExecutionInfo) MarshalLogObject(enc zapcore.ObjectEncoder) (err if v.TaskList != nil { enc.AddString("taskList", *v.TaskList) } + if v.TaskListInfo != nil { + err = multierr.Append(err, enc.AddObject("taskListInfo", v.TaskListInfo)) + } if v.IsCron != nil { enc.AddBool("isCron", *v.IsCron) } @@ -73322,6 +74127,12 @@ func (v *WorkflowExecutionInfo) MarshalLogObject(enc zapcore.ObjectEncoder) (err if v.PartitionConfig != nil { err = multierr.Append(err, enc.AddObject("partitionConfig", (_Map_String_String_Zapper)(v.PartitionConfig))) } + if v.CronOverlapPolicy != nil { + err = multierr.Append(err, enc.AddObject("cronOverlapPolicy", *v.CronOverlapPolicy)) + } + if v.ActiveClusterSelectionPolicy != nil { + err = multierr.Append(err, enc.AddObject("activeClusterSelectionPolicy", v.ActiveClusterSelectionPolicy)) + } return err } @@ -73550,6 +74361,21 @@ func (v *WorkflowExecutionInfo) IsSetTaskList() bool { return v != nil && v.TaskList != nil } +// GetTaskListInfo returns the value of TaskListInfo if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetTaskListInfo() (o *TaskList) { + if v != nil && v.TaskListInfo != nil { + return v.TaskListInfo + } + + return +} + +// IsSetTaskListInfo returns true if TaskListInfo is not nil. +func (v *WorkflowExecutionInfo) IsSetTaskListInfo() bool { + return v != nil && v.TaskListInfo != nil +} + // GetIsCron returns the value of IsCron if it is set or its // zero value if it is unset. func (v *WorkflowExecutionInfo) GetIsCron() (o bool) { @@ -73595,6 +74421,36 @@ func (v *WorkflowExecutionInfo) IsSetPartitionConfig() bool { return v != nil && v.PartitionConfig != nil } +// GetCronOverlapPolicy returns the value of CronOverlapPolicy if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetCronOverlapPolicy() (o CronOverlapPolicy) { + if v != nil && v.CronOverlapPolicy != nil { + return *v.CronOverlapPolicy + } + + return +} + +// IsSetCronOverlapPolicy returns true if CronOverlapPolicy is not nil. +func (v *WorkflowExecutionInfo) IsSetCronOverlapPolicy() bool { + return v != nil && v.CronOverlapPolicy != nil +} + +// GetActiveClusterSelectionPolicy returns the value of ActiveClusterSelectionPolicy if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetActiveClusterSelectionPolicy() (o *ActiveClusterSelectionPolicy) { + if v != nil && v.ActiveClusterSelectionPolicy != nil { + return v.ActiveClusterSelectionPolicy + } + + return +} + +// IsSetActiveClusterSelectionPolicy returns true if ActiveClusterSelectionPolicy is not nil. 
+func (v *WorkflowExecutionInfo) IsSetActiveClusterSelectionPolicy() bool { + return v != nil && v.ActiveClusterSelectionPolicy != nil +} + type WorkflowExecutionSignaledEventAttributes struct { SignalName *string `json:"signalName,omitempty"` Input []byte `json:"input,omitempty"` @@ -73864,35 +74720,36 @@ func (v *WorkflowExecutionSignaledEventAttributes) IsSetRequestId() bool { } type WorkflowExecutionStartedEventAttributes struct { - WorkflowType *WorkflowType `json:"workflowType,omitempty"` - ParentWorkflowDomain *string `json:"parentWorkflowDomain,omitempty"` - ParentWorkflowExecution *WorkflowExecution `json:"parentWorkflowExecution,omitempty"` - ParentInitiatedEventId *int64 `json:"parentInitiatedEventId,omitempty"` - TaskList *TaskList `json:"taskList,omitempty"` - Input []byte `json:"input,omitempty"` - ExecutionStartToCloseTimeoutSeconds *int32 `json:"executionStartToCloseTimeoutSeconds,omitempty"` - TaskStartToCloseTimeoutSeconds *int32 `json:"taskStartToCloseTimeoutSeconds,omitempty"` - ContinuedExecutionRunId *string `json:"continuedExecutionRunId,omitempty"` - Initiator *ContinueAsNewInitiator `json:"initiator,omitempty"` - ContinuedFailureReason *string `json:"continuedFailureReason,omitempty"` - ContinuedFailureDetails []byte `json:"continuedFailureDetails,omitempty"` - LastCompletionResult []byte `json:"lastCompletionResult,omitempty"` - OriginalExecutionRunId *string `json:"originalExecutionRunId,omitempty"` - Identity *string `json:"identity,omitempty"` - FirstExecutionRunId *string `json:"firstExecutionRunId,omitempty"` - FirstScheduledTimeNano *int64 `json:"firstScheduledTimeNano,omitempty"` - RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` - Attempt *int32 `json:"attempt,omitempty"` - ExpirationTimestamp *int64 `json:"expirationTimestamp,omitempty"` - CronSchedule *string `json:"cronSchedule,omitempty"` - FirstDecisionTaskBackoffSeconds *int32 `json:"firstDecisionTaskBackoffSeconds,omitempty"` - Memo *Memo `json:"memo,omitempty"` - SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` - PrevAutoResetPoints *ResetPoints `json:"prevAutoResetPoints,omitempty"` - Header *Header `json:"header,omitempty"` - PartitionConfig map[string]string `json:"partitionConfig,omitempty"` - RequestId *string `json:"requestId,omitempty"` - CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` + WorkflowType *WorkflowType `json:"workflowType,omitempty"` + ParentWorkflowDomain *string `json:"parentWorkflowDomain,omitempty"` + ParentWorkflowExecution *WorkflowExecution `json:"parentWorkflowExecution,omitempty"` + ParentInitiatedEventId *int64 `json:"parentInitiatedEventId,omitempty"` + TaskList *TaskList `json:"taskList,omitempty"` + Input []byte `json:"input,omitempty"` + ExecutionStartToCloseTimeoutSeconds *int32 `json:"executionStartToCloseTimeoutSeconds,omitempty"` + TaskStartToCloseTimeoutSeconds *int32 `json:"taskStartToCloseTimeoutSeconds,omitempty"` + ContinuedExecutionRunId *string `json:"continuedExecutionRunId,omitempty"` + Initiator *ContinueAsNewInitiator `json:"initiator,omitempty"` + ContinuedFailureReason *string `json:"continuedFailureReason,omitempty"` + ContinuedFailureDetails []byte `json:"continuedFailureDetails,omitempty"` + LastCompletionResult []byte `json:"lastCompletionResult,omitempty"` + OriginalExecutionRunId *string `json:"originalExecutionRunId,omitempty"` + Identity *string `json:"identity,omitempty"` + FirstExecutionRunId *string `json:"firstExecutionRunId,omitempty"` + FirstScheduledTimeNano *int64 
`json:"firstScheduledTimeNano,omitempty"` + RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` + Attempt *int32 `json:"attempt,omitempty"` + ExpirationTimestamp *int64 `json:"expirationTimestamp,omitempty"` + CronSchedule *string `json:"cronSchedule,omitempty"` + FirstDecisionTaskBackoffSeconds *int32 `json:"firstDecisionTaskBackoffSeconds,omitempty"` + Memo *Memo `json:"memo,omitempty"` + SearchAttributes *SearchAttributes `json:"searchAttributes,omitempty"` + PrevAutoResetPoints *ResetPoints `json:"prevAutoResetPoints,omitempty"` + Header *Header `json:"header,omitempty"` + PartitionConfig map[string]string `json:"partitionConfig,omitempty"` + RequestId *string `json:"requestId,omitempty"` + CronOverlapPolicy *CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` + ActiveClusterSelectionPolicy *ActiveClusterSelectionPolicy `json:"activeClusterSelectionPolicy,omitempty"` } // ToWire translates a WorkflowExecutionStartedEventAttributes struct into a Thrift-level intermediate @@ -73912,7 +74769,7 @@ type WorkflowExecutionStartedEventAttributes struct { // } func (v *WorkflowExecutionStartedEventAttributes) ToWire() (wire.Value, error) { var ( - fields [29]wire.Field + fields [30]wire.Field i int = 0 w wire.Value err error @@ -74150,6 +75007,14 @@ func (v *WorkflowExecutionStartedEventAttributes) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 170, Value: w} i++ } + if v.ActiveClusterSelectionPolicy != nil { + w, err = v.ActiveClusterSelectionPolicy.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 180, Value: w} + i++ + } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -74441,6 +75306,14 @@ func (v *WorkflowExecutionStartedEventAttributes) FromWire(w wire.Value) error { return err } + } + case 180: + if field.Value.Type() == wire.TStruct { + v.ActiveClusterSelectionPolicy, err = _ActiveClusterSelectionPolicy_Read(field.Value) + if err != nil { + return err + } + } } } @@ -74455,7 +75328,7 @@ func (v *WorkflowExecutionStartedEventAttributes) String() string { return "" } - var fields [29]string + var fields [30]string i := 0 if v.WorkflowType != nil { fields[i] = fmt.Sprintf("WorkflowType: %v", v.WorkflowType) @@ -74573,6 +75446,10 @@ func (v *WorkflowExecutionStartedEventAttributes) String() string { fields[i] = fmt.Sprintf("CronOverlapPolicy: %v", *(v.CronOverlapPolicy)) i++ } + if v.ActiveClusterSelectionPolicy != nil { + fields[i] = fmt.Sprintf("ActiveClusterSelectionPolicy: %v", v.ActiveClusterSelectionPolicy) + i++ + } return fmt.Sprintf("WorkflowExecutionStartedEventAttributes{%v}", strings.Join(fields[:i], ", ")) } @@ -74674,6 +75551,9 @@ func (v *WorkflowExecutionStartedEventAttributes) Equals(rhs *WorkflowExecutionS if !_CronOverlapPolicy_EqualsPtr(v.CronOverlapPolicy, rhs.CronOverlapPolicy) { return false } + if !((v.ActiveClusterSelectionPolicy == nil && rhs.ActiveClusterSelectionPolicy == nil) || (v.ActiveClusterSelectionPolicy != nil && rhs.ActiveClusterSelectionPolicy != nil && v.ActiveClusterSelectionPolicy.Equals(rhs.ActiveClusterSelectionPolicy))) { + return false + } return true } @@ -74771,6 +75651,9 @@ func (v *WorkflowExecutionStartedEventAttributes) MarshalLogObject(enc zapcore.O if v.CronOverlapPolicy != nil { err = multierr.Append(err, enc.AddObject("cronOverlapPolicy", *v.CronOverlapPolicy)) } + if v.ActiveClusterSelectionPolicy != nil { + err = multierr.Append(err, enc.AddObject("activeClusterSelectionPolicy", v.ActiveClusterSelectionPolicy)) + } return err } @@ -75209,6 +76092,21 @@ func (v 
*WorkflowExecutionStartedEventAttributes) IsSetCronOverlapPolicy() bool return v != nil && v.CronOverlapPolicy != nil } +// GetActiveClusterSelectionPolicy returns the value of ActiveClusterSelectionPolicy if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionStartedEventAttributes) GetActiveClusterSelectionPolicy() (o *ActiveClusterSelectionPolicy) { + if v != nil && v.ActiveClusterSelectionPolicy != nil { + return v.ActiveClusterSelectionPolicy + } + + return +} + +// IsSetActiveClusterSelectionPolicy returns true if ActiveClusterSelectionPolicy is not nil. +func (v *WorkflowExecutionStartedEventAttributes) IsSetActiveClusterSelectionPolicy() bool { + return v != nil && v.ActiveClusterSelectionPolicy != nil +} + type WorkflowExecutionTerminatedEventAttributes struct { Reason *string `json:"reason,omitempty"` Details []byte `json:"details,omitempty"` @@ -76469,8 +77367,8 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "shared", Package: "go.uber.org/cadence/.gen/go/shared", FilePath: "shared.thrift", - SHA1: "6967d00b3b3f9ddf007dc5abe69903206ce97485", + SHA1: "6fef7aa6070aa7ef099f69b53f2741a379aafcb8", Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence\n\nexception BadRequestError {\n 1: required string message\n}\n\nexception InternalServiceError {\n 1: required string message\n}\n\nexception InternalDataInconsistencyError {\n 1: required string message\n}\n\nexception DomainAlreadyExistsError {\n 1: required string message\n}\n\nexception WorkflowExecutionAlreadyStartedError {\n 10: optional string message\n 20: optional string startRequestId\n 30: optional string runId\n}\n\nexception WorkflowExecutionAlreadyCompletedError {\n 1: required string message\n}\n\nexception EntityNotExistsError {\n 1: required string message\n 2: optional string currentCluster\n 3: optional string activeCluster\n // activeClusters is a list of active clusters for active-active domain\n 4: required list activeClusters\n}\n\nexception ServiceBusyError {\n 1: required string message\n 2: optional string reason\n}\n\nexception CancellationAlreadyRequestedError {\n 1: required string message\n}\n\nexception QueryFailedError {\n 1: required string message\n}\n\nexception DomainNotActiveError {\n 1: required string message\n 2: required string domainName\n 3: required string currentCluster\n 4: required string activeCluster\n // activeClusters is a list of active clusters for active-active domain\n 5: required list activeClusters\n}\n\nexception LimitExceededError {\n 1: required string message\n}\n\nexception AccessDeniedError {\n 1: required string message\n}\n\nexception RetryTaskV2Error {\n 1: required string message\n 2: optional string domainId\n 3: optional string workflowId\n 4: optional string runId\n 5: optional i64 (js.type = \"Long\") startEventId\n 6: optional i64 (js.type = \"Long\") startEventVersion\n 7: optional i64 (js.type = \"Long\") endEventId\n 8: optional i64 (js.type = \"Long\") endEventVersion\n}\n\nexception ClientVersionNotSupportedError {\n 1: required string featureVersion\n 2: required string clientImpl\n 3: required string supportedVersions\n}\n\nexception FeatureNotEnabledError {\n 1: required string featureFlag\n}\n\nexception CurrentBranchChangedError {\n 10: required string message\n 20: required binary currentBranchToken\n}\n\nexception RemoteSyncMatchedError {\n 10: required string message\n}\n\nexception StickyWorkerUnavailableError {\n 1: required string message\n}\n\nexception TaskListNotOwnedByHostError {\n 1: required string ownedByIdentity\n 2: required string myIdentity\n 3: required string tasklistName\n}\n\nenum WorkflowIdReusePolicy {\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running, and the last execution close state is in\n * [terminated, cancelled, timeouted, failed].\n */\n AllowDuplicateFailedOnly,\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running.\n */\n AllowDuplicate,\n /*\n * do not allow start a workflow execution using the same workflow ID at all\n */\n RejectDuplicate,\n /*\n * if a workflow is running using the same workflow ID, terminate it and start a new one\n */\n TerminateIfRunning,\n}\n\nenum DomainStatus {\n REGISTERED,\n DEPRECATED,\n DELETED,\n}\n\nenum TimeoutType {\n START_TO_CLOSE,\n SCHEDULE_TO_START,\n SCHEDULE_TO_CLOSE,\n HEARTBEAT,\n}\n\nenum ParentClosePolicy 
{\n\tABANDON,\n\tREQUEST_CANCEL,\n\tTERMINATE,\n}\n\n\n// whenever this list of decision is changed\n// do change the mutableStateBuilder.go\n// function shouldBufferEvent\n// to make sure wo do the correct event ordering\nenum DecisionType {\n ScheduleActivityTask,\n RequestCancelActivityTask,\n StartTimer,\n CompleteWorkflowExecution,\n FailWorkflowExecution,\n CancelTimer,\n CancelWorkflowExecution,\n RequestCancelExternalWorkflowExecution,\n RecordMarker,\n ContinueAsNewWorkflowExecution,\n StartChildWorkflowExecution,\n SignalExternalWorkflowExecution,\n UpsertWorkflowSearchAttributes,\n}\n\nenum EventType {\n WorkflowExecutionStarted,\n WorkflowExecutionCompleted,\n WorkflowExecutionFailed,\n WorkflowExecutionTimedOut,\n DecisionTaskScheduled,\n DecisionTaskStarted,\n DecisionTaskCompleted,\n DecisionTaskTimedOut\n DecisionTaskFailed,\n ActivityTaskScheduled,\n ActivityTaskStarted,\n ActivityTaskCompleted,\n ActivityTaskFailed,\n ActivityTaskTimedOut,\n ActivityTaskCancelRequested,\n RequestCancelActivityTaskFailed,\n ActivityTaskCanceled,\n TimerStarted,\n TimerFired,\n CancelTimerFailed,\n TimerCanceled,\n WorkflowExecutionCancelRequested,\n WorkflowExecutionCanceled,\n RequestCancelExternalWorkflowExecutionInitiated,\n RequestCancelExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionCancelRequested,\n MarkerRecorded,\n WorkflowExecutionSignaled,\n WorkflowExecutionTerminated,\n WorkflowExecutionContinuedAsNew,\n StartChildWorkflowExecutionInitiated,\n StartChildWorkflowExecutionFailed,\n ChildWorkflowExecutionStarted,\n ChildWorkflowExecutionCompleted,\n ChildWorkflowExecutionFailed,\n ChildWorkflowExecutionCanceled,\n ChildWorkflowExecutionTimedOut,\n ChildWorkflowExecutionTerminated,\n SignalExternalWorkflowExecutionInitiated,\n SignalExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionSignaled,\n UpsertWorkflowSearchAttributes,\n}\n\nenum DecisionTaskFailedCause {\n UNHANDLED_DECISION,\n BAD_SCHEDULE_ACTIVITY_ATTRIBUTES,\n BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES,\n BAD_START_TIMER_ATTRIBUTES,\n BAD_CANCEL_TIMER_ATTRIBUTES,\n BAD_RECORD_MARKER_ATTRIBUTES,\n BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CONTINUE_AS_NEW_ATTRIBUTES,\n START_TIMER_DUPLICATE_ID,\n RESET_STICKY_TASKLIST,\n WORKFLOW_WORKER_UNHANDLED_FAILURE,\n BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_START_CHILD_EXECUTION_ATTRIBUTES,\n FORCE_CLOSE_DECISION,\n FAILOVER_CLOSE_DECISION,\n BAD_SIGNAL_INPUT_SIZE,\n RESET_WORKFLOW,\n BAD_BINARY,\n SCHEDULE_ACTIVITY_DUPLICATE_ID,\n BAD_SEARCH_ATTRIBUTES,\n}\n\nenum DecisionTaskTimedOutCause {\n TIMEOUT,\n RESET,\n}\n\nenum CancelExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n WORKFLOW_ALREADY_COMPLETED,\n}\n\nenum SignalExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n WORKFLOW_ALREADY_COMPLETED,\n}\n\nenum ChildWorkflowExecutionFailedCause {\n WORKFLOW_ALREADY_RUNNING,\n}\n\n// TODO: when migrating to gRPC, add a running / none status,\n// currently, customer is using null / nil as an indication\n// that workflow is still running\nenum WorkflowExecutionCloseStatus {\n COMPLETED,\n FAILED,\n CANCELED,\n TERMINATED,\n CONTINUED_AS_NEW,\n TIMED_OUT,\n}\n\nenum QueryTaskCompletedType {\n COMPLETED,\n FAILED,\n}\n\nenum QueryResultType {\n ANSWERED,\n FAILED,\n}\n\nenum PendingActivityState {\n SCHEDULED,\n STARTED,\n CANCEL_REQUESTED,\n}\n\nenum 
PendingDecisionState {\n SCHEDULED,\n STARTED,\n}\n\nenum HistoryEventFilterType {\n ALL_EVENT,\n CLOSE_EVENT,\n}\n\nenum TaskListKind {\n NORMAL,\n STICKY,\n}\n\nenum ArchivalStatus {\n DISABLED,\n ENABLED,\n}\n\nenum CronOverlapPolicy {\n SKIPPED,\n BUFFERONE,\n}\n\nenum IndexedValueType {\n STRING,\n KEYWORD,\n INT,\n DOUBLE,\n BOOL,\n DATETIME,\n}\n\nstruct Header {\n 10: optional map fields\n}\n\nstruct WorkflowType {\n 10: optional string name\n}\n\nstruct ActivityType {\n 10: optional string name\n}\n\nstruct TaskList {\n 10: optional string name\n 20: optional TaskListKind kind\n}\n\nenum EncodingType {\n ThriftRW,\n JSON,\n}\n\nenum QueryRejectCondition {\n // NOT_OPEN indicates that query should be rejected if workflow is not open\n NOT_OPEN\n // NOT_COMPLETED_CLEANLY indicates that query should be rejected if workflow did not complete cleanly\n NOT_COMPLETED_CLEANLY\n}\n\nenum QueryConsistencyLevel {\n // EVENTUAL indicates that query should be eventually consistent\n EVENTUAL\n // STRONG indicates that any events that came before query should be reflected in workflow state before running query\n STRONG\n}\n\nstruct DataBlob {\n 10: optional EncodingType EncodingType\n 20: optional binary Data\n}\n\nstruct TaskListMetadata {\n 10: optional double maxTasksPerSecond\n}\n\nstruct WorkflowExecution {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct Memo {\n 10: optional map fields\n}\n\nstruct SearchAttributes {\n 10: optional map indexedFields\n}\n\nstruct WorkerVersionInfo {\n 10: optional string impl\n 20: optional string featureVersion\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional WorkflowExecution execution\n 20: optional WorkflowType type\n 30: optional i64 (js.type = \"Long\") startTime\n 40: optional i64 (js.type = \"Long\") closeTime\n 50: optional WorkflowExecutionCloseStatus closeStatus\n 60: optional i64 (js.type = \"Long\") historyLength\n 70: optional string parentDomainId\n 71: optional string parentDomainName\n 72: optional i64 parentInitatedId\n 80: optional WorkflowExecution parentExecution\n 90: optional i64 (js.type = \"Long\") executionTime\n 100: optional Memo memo\n 101: optional SearchAttributes searchAttributes\n 110: optional ResetPoints autoResetPoints\n 120: optional string taskList\n 130: optional bool isCron\n 140: optional i64 (js.type = \"Long\") updateTime\n 150: optional map partitionConfig\n}\n\nstruct WorkflowExecutionConfiguration {\n 10: optional TaskList taskList\n 20: optional i32 executionStartToCloseTimeoutSeconds\n 30: optional i32 taskStartToCloseTimeoutSeconds\n// 40: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n}\n\nstruct TransientDecisionInfo {\n 10: optional HistoryEvent scheduledEvent\n 20: optional HistoryEvent startedEvent\n}\n\nstruct ScheduleActivityTaskDecisionAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional Header header\n 90: optional bool requestLocalDispatch\n}\n\nstruct ActivityLocalDispatchInfo{\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") scheduledTimestamp\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 (js.type = \"Long\") 
scheduledTimestampOfThisAttempt\n 50: optional binary taskToken\n}\n\nstruct RequestCancelActivityTaskDecisionAttributes {\n 10: optional string activityId\n}\n\nstruct StartTimerDecisionAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n}\n\nstruct CompleteWorkflowExecutionDecisionAttributes {\n 10: optional binary result\n}\n\nstruct FailWorkflowExecutionDecisionAttributes {\n 10: optional string reason\n 20: optional binary details\n}\n\nstruct CancelTimerDecisionAttributes {\n 10: optional string timerId\n}\n\nstruct CancelWorkflowExecutionDecisionAttributes {\n 10: optional binary details\n}\n\nstruct RequestCancelExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional string runId\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional string signalName\n 40: optional binary input\n 50: optional binary control\n 60: optional bool childWorkflowOnly\n}\n\nstruct UpsertWorkflowSearchAttributesDecisionAttributes {\n 10: optional SearchAttributes searchAttributes\n}\n\nstruct RecordMarkerDecisionAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional Header header\n}\n\nstruct ContinueAsNewWorkflowExecutionDecisionAttributes {\n 10: optional WorkflowType workflowType\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n 60: optional i32 backoffStartIntervalInSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional ContinueAsNewInitiator initiator\n 90: optional string failureReason\n 100: optional binary failureDetails\n 110: optional binary lastCompletionResult\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n 160: optional i32 jitterStartSeconds\n 170: optional CronOverlapPolicy cronOverlapPolicy\n}\n\nstruct StartChildWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional RetryPolicy retryPolicy\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n 160: optional CronOverlapPolicy cronOverlapPolicy\n}\n\nstruct Decision {\n 10: optional DecisionType decisionType\n 20: optional ScheduleActivityTaskDecisionAttributes scheduleActivityTaskDecisionAttributes\n 25: optional StartTimerDecisionAttributes startTimerDecisionAttributes\n 30: optional CompleteWorkflowExecutionDecisionAttributes completeWorkflowExecutionDecisionAttributes\n 35: optional FailWorkflowExecutionDecisionAttributes failWorkflowExecutionDecisionAttributes\n 40: optional RequestCancelActivityTaskDecisionAttributes requestCancelActivityTaskDecisionAttributes\n 50: optional CancelTimerDecisionAttributes 
cancelTimerDecisionAttributes\n 60: optional CancelWorkflowExecutionDecisionAttributes cancelWorkflowExecutionDecisionAttributes\n 70: optional RequestCancelExternalWorkflowExecutionDecisionAttributes requestCancelExternalWorkflowExecutionDecisionAttributes\n 80: optional RecordMarkerDecisionAttributes recordMarkerDecisionAttributes\n 90: optional ContinueAsNewWorkflowExecutionDecisionAttributes continueAsNewWorkflowExecutionDecisionAttributes\n 100: optional StartChildWorkflowExecutionDecisionAttributes startChildWorkflowExecutionDecisionAttributes\n 110: optional SignalExternalWorkflowExecutionDecisionAttributes signalExternalWorkflowExecutionDecisionAttributes\n 120: optional UpsertWorkflowSearchAttributesDecisionAttributes upsertWorkflowSearchAttributesDecisionAttributes\n}\n\nstruct WorkflowExecutionStartedEventAttributes {\n 10: optional WorkflowType workflowType\n 12: optional string parentWorkflowDomain\n 14: optional WorkflowExecution parentWorkflowExecution\n 16: optional i64 (js.type = \"Long\") parentInitiatedEventId\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n// 52: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 54: optional string continuedExecutionRunId\n 55: optional ContinueAsNewInitiator initiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 59: optional string originalExecutionRunId // This is the runID when the WorkflowExecutionStarted event is written\n 60: optional string identity\n 61: optional string firstExecutionRunId // This is the very first runID along the chain of ContinueAsNew and Reset.\n 62: optional i64 (js.type = \"Long\") firstScheduledTimeNano\n 70: optional RetryPolicy retryPolicy\n 80: optional i32 attempt\n 90: optional i64 (js.type = \"Long\") expirationTimestamp\n 100: optional string cronSchedule\n 110: optional i32 firstDecisionTaskBackoffSeconds\n 120: optional Memo memo\n 121: optional SearchAttributes searchAttributes\n 130: optional ResetPoints prevAutoResetPoints\n 140: optional Header header\n 150: optional map partitionConfig\n 160: optional string requestId\n 170: optional CronOverlapPolicy cronOverlapPolicy\n}\n\nstruct ResetPoints{\n 10: optional list points\n}\n\n struct ResetPointInfo{\n 10: optional string binaryChecksum\n 20: optional string runId\n 30: optional i64 firstDecisionCompletedId\n 40: optional i64 (js.type = \"Long\") createdTimeNano\n 50: optional i64 (js.type = \"Long\") expiringTimeNano //the time that the run is deleted due to retention\n 60: optional bool resettable // false if the resset point has pending childWFs/reqCancels/signalExternals.\n}\n\nstruct WorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n}\n\nenum ContinueAsNewInitiator {\n Decider,\n RetryPolicy,\n CronSchedule,\n}\n\nstruct WorkflowExecutionContinuedAsNewEventAttributes {\n 10: optional string newExecutionRunId\n 20: optional WorkflowType workflowType\n 30: optional TaskList taskList\n 40: optional binary input\n 50: optional i32 
executionStartToCloseTimeoutSeconds\n 60: optional i32 taskStartToCloseTimeoutSeconds\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 80: optional i32 backoffStartIntervalInSeconds\n 90: optional ContinueAsNewInitiator initiator\n 100: optional string failureReason\n 110: optional binary failureDetails\n 120: optional binary lastCompletionResult\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct DecisionTaskScheduledEventAttributes {\n 10: optional TaskList taskList\n 20: optional i32 startToCloseTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") attempt\n}\n\nstruct DecisionTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n}\n\nstruct DecisionTaskCompletedEventAttributes {\n 10: optional binary executionContext\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct DecisionTaskTimedOutEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n // for reset workflow\n 40: optional string baseRunId\n 50: optional string newRunId\n 60: optional i64 (js.type = \"Long\") forkEventVersion\n 70: optional string reason\n 80: optional DecisionTaskTimedOutCause cause\n 90: optional string requestId\n}\n\nstruct DecisionTaskFailedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional DecisionTaskFailedCause cause\n 35: optional binary details\n 40: optional string identity\n 50: optional string reason\n // for reset workflow\n 60: optional string baseRunId\n 70: optional string newRunId\n 80: optional i64 (js.type = \"Long\") forkEventVersion\n 90: optional string binaryChecksum\n 100: optional string requestId\n}\n\nstruct ActivityTaskScheduledEventAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional RetryPolicy retryPolicy\n 120: optional Header header\n}\n\nstruct ActivityTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n 40: optional i32 attempt\n 50: optional string lastFailureReason\n 60: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n}\n\nstruct ActivityTaskFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct ActivityTaskTimedOutEventAttributes {\n 05: optional binary details\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: 
optional TimeoutType timeoutType\n // For retry activity, it may have a failure before timeout. It's important to keep those information for debug.\n // Client can also provide the info for making next decision\n 40: optional string lastFailureReason\n 50: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCancelRequestedEventAttributes {\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct RequestCancelActivityTaskFailedEventAttributes{\n 10: optional string activityId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ActivityTaskCanceledEventAttributes {\n 10: optional binary details\n 20: optional i64 (js.type = \"Long\") latestCancelRequestedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct TimerStartedEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct TimerFiredEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct TimerCanceledEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct CancelTimerFailedEventAttributes {\n 10: optional string timerId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct WorkflowExecutionCancelRequestedEventAttributes {\n 10: optional string cause\n 20: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 30: optional WorkflowExecution externalWorkflowExecution\n 40: optional string identity\n 50: optional string requestId\n}\n\nstruct WorkflowExecutionCanceledEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional binary details\n}\n\nstruct MarkerRecordedEventAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional Header header\n}\n\nstruct WorkflowExecutionSignaledEventAttributes {\n 10: optional string signalName\n 20: optional binary input\n 30: optional string identity\n 40: optional string requestId\n}\n\nstruct WorkflowExecutionTerminatedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RequestCancelExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct RequestCancelExternalWorkflowExecutionFailedEventAttributes {\n 10: optional CancelExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionCancelRequestedEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution 
workflowExecution\n}\n\nstruct SignalExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional string signalName\n 50: optional binary input\n 60: optional binary control\n 70: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionFailedEventAttributes {\n 10: optional SignalExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionSignaledEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n}\n\nstruct UpsertWorkflowSearchAttributesEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionInitiatedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Header header\n 150: optional Memo memo\n 160: optional SearchAttributes searchAttributes\n 170: optional i32 delayStartSeconds\n 180: optional i32 jitterStartSeconds\n 190: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 200: optional CronOverlapPolicy cronOverlapPolicy\n}\n\nstruct StartChildWorkflowExecutionFailedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional ChildWorkflowExecutionFailedCause cause\n 50: optional binary control\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ChildWorkflowExecutionStartedEventAttributes {\n 10: optional string domain\n 20: optional i64 (js.type = \"Long\") initiatedEventId\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional Header header\n}\n\nstruct ChildWorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional WorkflowType workflowType\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionCanceledEventAttributes {\n 10: optional binary 
details\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTerminatedEventAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") initiatedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct HistoryEvent {\n 10: optional i64 (js.type = \"Long\") eventId\n 20: optional i64 (js.type = \"Long\") timestamp\n 30: optional EventType eventType\n 35: optional i64 (js.type = \"Long\") version\n 36: optional i64 (js.type = \"Long\") taskId\n 40: optional WorkflowExecutionStartedEventAttributes workflowExecutionStartedEventAttributes\n 50: optional WorkflowExecutionCompletedEventAttributes workflowExecutionCompletedEventAttributes\n 60: optional WorkflowExecutionFailedEventAttributes workflowExecutionFailedEventAttributes\n 70: optional WorkflowExecutionTimedOutEventAttributes workflowExecutionTimedOutEventAttributes\n 80: optional DecisionTaskScheduledEventAttributes decisionTaskScheduledEventAttributes\n 90: optional DecisionTaskStartedEventAttributes decisionTaskStartedEventAttributes\n 100: optional DecisionTaskCompletedEventAttributes decisionTaskCompletedEventAttributes\n 110: optional DecisionTaskTimedOutEventAttributes decisionTaskTimedOutEventAttributes\n 120: optional DecisionTaskFailedEventAttributes decisionTaskFailedEventAttributes\n 130: optional ActivityTaskScheduledEventAttributes activityTaskScheduledEventAttributes\n 140: optional ActivityTaskStartedEventAttributes activityTaskStartedEventAttributes\n 150: optional ActivityTaskCompletedEventAttributes activityTaskCompletedEventAttributes\n 160: optional ActivityTaskFailedEventAttributes activityTaskFailedEventAttributes\n 170: optional ActivityTaskTimedOutEventAttributes activityTaskTimedOutEventAttributes\n 180: optional TimerStartedEventAttributes timerStartedEventAttributes\n 190: optional TimerFiredEventAttributes timerFiredEventAttributes\n 200: optional ActivityTaskCancelRequestedEventAttributes activityTaskCancelRequestedEventAttributes\n 210: optional RequestCancelActivityTaskFailedEventAttributes requestCancelActivityTaskFailedEventAttributes\n 220: optional ActivityTaskCanceledEventAttributes activityTaskCanceledEventAttributes\n 230: optional TimerCanceledEventAttributes timerCanceledEventAttributes\n 240: optional CancelTimerFailedEventAttributes cancelTimerFailedEventAttributes\n 250: optional MarkerRecordedEventAttributes markerRecordedEventAttributes\n 260: optional WorkflowExecutionSignaledEventAttributes workflowExecutionSignaledEventAttributes\n 270: optional WorkflowExecutionTerminatedEventAttributes workflowExecutionTerminatedEventAttributes\n 280: optional WorkflowExecutionCancelRequestedEventAttributes workflowExecutionCancelRequestedEventAttributes\n 290: optional WorkflowExecutionCanceledEventAttributes workflowExecutionCanceledEventAttributes\n 300: optional RequestCancelExternalWorkflowExecutionInitiatedEventAttributes 
requestCancelExternalWorkflowExecutionInitiatedEventAttributes\n 310: optional RequestCancelExternalWorkflowExecutionFailedEventAttributes requestCancelExternalWorkflowExecutionFailedEventAttributes\n 320: optional ExternalWorkflowExecutionCancelRequestedEventAttributes externalWorkflowExecutionCancelRequestedEventAttributes\n 330: optional WorkflowExecutionContinuedAsNewEventAttributes workflowExecutionContinuedAsNewEventAttributes\n 340: optional StartChildWorkflowExecutionInitiatedEventAttributes startChildWorkflowExecutionInitiatedEventAttributes\n 350: optional StartChildWorkflowExecutionFailedEventAttributes startChildWorkflowExecutionFailedEventAttributes\n 360: optional ChildWorkflowExecutionStartedEventAttributes childWorkflowExecutionStartedEventAttributes\n 370: optional ChildWorkflowExecutionCompletedEventAttributes childWorkflowExecutionCompletedEventAttributes\n 380: optional ChildWorkflowExecutionFailedEventAttributes childWorkflowExecutionFailedEventAttributes\n 390: optional ChildWorkflowExecutionCanceledEventAttributes childWorkflowExecutionCanceledEventAttributes\n 400: optional ChildWorkflowExecutionTimedOutEventAttributes childWorkflowExecutionTimedOutEventAttributes\n 410: optional ChildWorkflowExecutionTerminatedEventAttributes childWorkflowExecutionTerminatedEventAttributes\n 420: optional SignalExternalWorkflowExecutionInitiatedEventAttributes signalExternalWorkflowExecutionInitiatedEventAttributes\n 430: optional SignalExternalWorkflowExecutionFailedEventAttributes signalExternalWorkflowExecutionFailedEventAttributes\n 440: optional ExternalWorkflowExecutionSignaledEventAttributes externalWorkflowExecutionSignaledEventAttributes\n 450: optional UpsertWorkflowSearchAttributesEventAttributes upsertWorkflowSearchAttributesEventAttributes\n}\n\nstruct History {\n 10: optional list events\n}\n\nstruct WorkflowExecutionFilter {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct WorkflowTypeFilter {\n 10: optional string name\n}\n\nstruct StartTimeFilter {\n 10: optional i64 (js.type = \"Long\") earliestTime\n 20: optional i64 (js.type = \"Long\") latestTime\n}\n\nstruct DomainInfo {\n 10: optional string name\n 20: optional DomainStatus status\n 30: optional string description\n 40: optional string ownerEmail\n // A key-value map for any customized purpose\n 50: optional map data\n 60: optional string uuid\n}\n\nstruct DomainConfiguration {\n 10: optional i32 workflowExecutionRetentionPeriodInDays\n 20: optional bool emitMetric\n 60: optional IsolationGroupConfiguration isolationgroups\n 70: optional BadBinaries badBinaries\n 80: optional ArchivalStatus historyArchivalStatus\n 90: optional string historyArchivalURI\n 100: optional ArchivalStatus visibilityArchivalStatus\n 110: optional string visibilityArchivalURI\n 120: optional AsyncWorkflowConfiguration AsyncWorkflowConfiguration\n}\n\nstruct FailoverInfo {\n 10: optional i64 (js.type = \"Long\") failoverVersion\n 20: optional i64 (js.type = \"Long\") failoverStartTimestamp\n 30: optional i64 (js.type = \"Long\") failoverExpireTimestamp\n 40: optional i32 completedShardCount\n 50: optional list pendingShards\n}\n\nstruct BadBinaries{\n 10: optional map binaries\n}\n\nstruct BadBinaryInfo{\n 10: optional string reason\n 20: optional string operator\n 30: optional i64 (js.type = \"Long\") createdTimeNano\n}\n\nstruct UpdateDomainInfo {\n 10: optional string description\n 20: optional string ownerEmail\n // A key-value map for any customized purpose\n 30: optional map data\n}\n\nstruct 
ClusterReplicationConfiguration {\n 10: optional string clusterName\n}\n\nstruct DomainReplicationConfiguration {\n // activeClusterName is the name of the active cluster for active-passive domain\n 10: optional string activeClusterName\n\n // clusters is list of all active and passive clusters of domain\n 20: optional list clusters\n\n // activeClusters contains active cluster(s) information for active-active domain\n 30: optional ActiveClusters activeClusters\n}\n\nstruct ActiveClusters {\n // activeClustersByRegion is a map of region name to active cluster info for active-active domain\n 10: optional map activeClustersByRegion\n}\n\n// ActiveClusterInfo contains the configuration of active-active domain's active cluster & failover version for a specific region\nstruct ActiveClusterInfo {\n 10: optional string activeClusterName\n 20: optional i64 (js.type = \"Long\") failoverVersion\n}\n\nstruct RegisterDomainRequest {\n 10: optional string name\n 20: optional string description\n 30: optional string ownerEmail\n 40: optional i32 workflowExecutionRetentionPeriodInDays\n 50: optional bool emitMetric = true\n 60: optional list clusters\n 70: optional string activeClusterName\n // activeClusters is a map of region name to active cluster name for active-active domain\n 75: optional map activeClustersByRegion\n // A key-value map for any customized purpose\n 80: optional map data\n 90: optional string securityToken\n 120: optional bool isGlobalDomain\n 130: optional ArchivalStatus historyArchivalStatus\n 140: optional string historyArchivalURI\n 150: optional ArchivalStatus visibilityArchivalStatus\n 160: optional string visibilityArchivalURI\n}\n\nstruct ListDomainsRequest {\n 10: optional i32 pageSize\n 20: optional binary nextPageToken\n}\n\nstruct ListDomainsResponse {\n 10: optional list domains\n 20: optional binary nextPageToken\n}\n\nstruct DescribeDomainRequest {\n 10: optional string name\n 20: optional string uuid\n}\n\nstruct DescribeDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n 60: optional FailoverInfo failoverInfo\n}\n\nstruct UpdateDomainRequest {\n 10: optional string name\n 20: optional UpdateDomainInfo updatedInfo\n 30: optional DomainConfiguration configuration\n 40: optional DomainReplicationConfiguration replicationConfiguration\n 50: optional string securityToken\n 60: optional string deleteBadBinary\n 70: optional i32 failoverTimeoutInSeconds\n}\n\nstruct UpdateDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct DeprecateDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct DeleteDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n// 
110: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Memo memo\n 141: optional SearchAttributes searchAttributes\n 150: optional Header header\n 160: optional i32 delayStartSeconds\n 170: optional i32 jitterStartSeconds\n 180: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 190: optional CronOverlapPolicy cronOverlapPolicy\n}\n\nstruct StartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct StartWorkflowExecutionAsyncRequest {\n 10: optional StartWorkflowExecutionRequest request\n}\n\nstruct StartWorkflowExecutionAsyncResponse {\n}\n\nstruct RestartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct DiagnoseWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n}\n\nstruct DiagnoseWorkflowExecutionResponse {\n 10: optional string domain\n 20: optional WorkflowExecution diagnosticWorkflowExecution\n}\n\nstruct PollForDecisionTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional string binaryChecksum\n}\n\nstruct PollForDecisionTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") previousStartedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n 51: optional i64 (js.type = 'Long') attempt\n 54: optional i64 (js.type = \"Long\") backlogCountHint\n 60: optional History history\n 70: optional binary nextPageToken\n 80: optional WorkflowQuery query\n 90: optional TaskList WorkflowExecutionTaskList\n 100: optional i64 (js.type = \"Long\") scheduledTimestamp\n 110: optional i64 (js.type = \"Long\") startedTimestamp\n 120: optional map queries\n 130: optional i64 (js.type = 'Long') nextEventId\n 140: optional i64 (js.type = 'Long') totalHistoryBytes\n 150: optional AutoConfigHint autoConfigHint\n}\n\nstruct StickyExecutionAttributes {\n 10: optional TaskList workerTaskList\n 20: optional i32 scheduleToStartTimeoutSeconds\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional list decisions\n 30: optional binary executionContext\n 40: optional string identity\n 50: optional StickyExecutionAttributes stickyAttributes\n 60: optional bool returnNewDecisionTask\n 70: optional bool forceCreateNewDecisionTask\n 80: optional string binaryChecksum\n 90: optional map queryResults\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional PollForDecisionTaskResponse decisionTask\n 20: optional map activitiesToDispatchLocally\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional DecisionTaskFailedCause cause\n 30: optional binary details\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct PollForActivityTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional TaskListMetadata taskListMetadata\n}\n\nstruct PollForActivityTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional string activityId\n 40: optional ActivityType activityType\n 50: optional binary input\n 70: optional i64 (js.type = \"Long\") scheduledTimestamp\n 80: optional i32 scheduleToCloseTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") startedTimestamp\n 
100: optional i32 startToCloseTimeoutSeconds\n 110: optional i32 heartbeatTimeoutSeconds\n 120: optional i32 attempt\n 130: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 140: optional binary heartbeatDetails\n 150: optional WorkflowType workflowType\n 160: optional string workflowDomain\n 170: optional Header header\n 180: optional AutoConfigHint autoConfigHint\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatResponse {\n 10: optional bool cancelRequested\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional binary result\n 30: optional string identity\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional string reason\n 30: optional binary details\n 40: optional string identity\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RespondActivityTaskCompletedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary result\n 60: optional string identity\n}\n\nstruct RespondActivityTaskFailedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional string reason\n 60: optional binary details\n 70: optional string identity\n}\n\nstruct RespondActivityTaskCanceledByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n 40: optional string requestId\n 50: optional string cause\n 60: optional string firstExecutionRunID\n}\n\nstruct GetWorkflowExecutionHistoryRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional i32 maximumPageSize\n 40: optional binary nextPageToken\n 50: optional bool waitForNewEvent\n 60: optional HistoryEventFilterType HistoryEventFilterType\n 70: optional bool skipArchival\n 80: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct GetWorkflowExecutionHistoryResponse {\n 10: optional History history\n 11: optional list rawHistory\n 20: optional binary nextPageToken\n 30: optional bool archived\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string signalName\n 40: optional binary input\n 50: optional string identity\n 60: optional string requestId\n 70: optional binary control\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: 
optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional string signalName\n 120: optional binary signalInput\n 130: optional binary control\n 140: optional RetryPolicy retryPolicy\n 150: optional string cronSchedule\n 160: optional Memo memo\n 161: optional SearchAttributes searchAttributes\n 170: optional Header header\n 180: optional i32 delayStartSeconds\n 190: optional i32 jitterStartSeconds\n 200: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 210: optional CronOverlapPolicy cronOverlapPolicy\n}\n\nstruct SignalWithStartWorkflowExecutionAsyncRequest {\n 10: optional SignalWithStartWorkflowExecutionRequest request\n}\n\nstruct SignalWithStartWorkflowExecutionAsyncResponse {\n}\n\nstruct RestartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional string identity\n}\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional binary details\n 50: optional string identity\n 60: optional string firstExecutionRunID\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional i64 (js.type = \"Long\") decisionFinishEventId\n 50: optional string requestId\n 60: optional bool skipSignalReapply\n}\n\nstruct ResetWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct ListOpenWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n}\n\nstruct ListOpenWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListClosedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n 70: optional WorkflowExecutionCloseStatus statusFilter\n}\n\nstruct ListClosedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListArchivedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListArchivedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct CountWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional string query\n}\n\nstruct CountWorkflowExecutionsResponse {\n 10: optional i64 count\n}\n\nstruct GetSearchAttributesResponse {\n 10: optional map keys\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional WorkflowQuery query\n // QueryRejectCondition can used to reject the query if workflow state does not satisify condition\n 40: optional QueryRejectCondition queryRejectCondition\n 50: optional 
QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct QueryRejected {\n 10: optional WorkflowExecutionCloseStatus closeStatus\n}\n\nstruct QueryWorkflowResponse {\n 10: optional binary queryResult\n 20: optional QueryRejected queryRejected\n}\n\nstruct WorkflowQuery {\n 10: optional string queryType\n 20: optional binary queryArgs\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondQueryTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional QueryTaskCompletedType completedType\n 30: optional binary queryResult\n 40: optional string errorMessage\n 50: optional WorkerVersionInfo workerVersionInfo\n}\n\nstruct WorkflowQueryResult {\n 10: optional QueryResultType resultType\n 20: optional binary answer\n 30: optional string errorMessage\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct PendingActivityInfo {\n 10: optional string activityID\n 20: optional ActivityType activityType\n 30: optional PendingActivityState state\n 40: optional binary heartbeatDetails\n 50: optional i64 (js.type = \"Long\") lastHeartbeatTimestamp\n 60: optional i64 (js.type = \"Long\") lastStartedTimestamp\n 70: optional i32 attempt\n 80: optional i32 maximumAttempts\n 90: optional i64 (js.type = \"Long\") scheduledTimestamp\n 100: optional i64 (js.type = \"Long\") expirationTimestamp\n 110: optional string lastFailureReason\n 120: optional string lastWorkerIdentity\n 130: optional binary lastFailureDetails\n 140: optional string startedWorkerIdentity\n 150: optional i64 (js.type = \"Long\") scheduleID\n}\n\nstruct PendingDecisionInfo {\n 10: optional PendingDecisionState state\n 20: optional i64 (js.type = \"Long\") scheduledTimestamp\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 attempt\n 50: optional i64 (js.type = \"Long\") originalScheduledTimestamp\n 60: optional i64 (js.type = \"Long\") scheduleID\n}\n\nstruct PendingChildExecutionInfo {\n 1: optional string domain\n 10: optional string workflowID\n 20: optional string runID\n 30: optional string workflowTypName\n 40: optional i64 (js.type = \"Long\") initiatedID\n 50: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct DescribeWorkflowExecutionResponse {\n 10: optional WorkflowExecutionConfiguration executionConfiguration\n 20: optional WorkflowExecutionInfo workflowExecutionInfo\n 30: optional list pendingActivities\n 40: optional list pendingChildren\n 50: optional PendingDecisionInfo pendingDecision\n}\n\nstruct DescribeTaskListRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional TaskListType taskListType\n 40: optional bool includeTaskListStatus\n}\n\nstruct DescribeTaskListResponse {\n 10: optional list pollers\n 20: optional TaskListStatus taskListStatus\n}\n\nstruct GetTaskListsByDomainRequest {\n 10: optional string domainName\n}\n\nstruct GetTaskListsByDomainResponse {\n 10: optional map decisionTaskListMap\n 20: optional map activityTaskListMap\n}\n\nstruct ListTaskListPartitionsRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n}\n\nstruct TaskListPartitionMetadata {\n 10: optional string key\n 20: optional string ownerHostName\n}\n\nstruct ListTaskListPartitionsResponse {\n 10: optional 
list activityTaskListPartitions\n 20: optional list decisionTaskListPartitions\n}\n\nstruct IsolationGroupMetrics {\n 10: optional double newTasksPerSecond\n 20: optional i64 (js.type = \"Long\") pollerCount\n}\n\nstruct TaskListStatus {\n 10: optional i64 (js.type = \"Long\") backlogCountHint\n 20: optional i64 (js.type = \"Long\") readLevel\n 30: optional i64 (js.type = \"Long\") ackLevel\n 35: optional double ratePerSecond\n 40: optional TaskIDBlock taskIDBlock\n 50: optional map isolationGroupMetrics\n 60: optional double newTasksPerSecond\n}\n\nstruct TaskIDBlock {\n 10: optional i64 (js.type = \"Long\") startID\n 20: optional i64 (js.type = \"Long\") endID\n}\n\n//At least one of the parameters needs to be provided\nstruct DescribeHistoryHostRequest {\n 10: optional string hostAddress //ip:port\n 20: optional i32 shardIdForHost\n 30: optional WorkflowExecution executionForHost\n}\n\nstruct RemoveTaskRequest {\n 10: optional i32 shardID\n 20: optional i32 type\n 30: optional i64 (js.type = \"Long\") taskID\n 40: optional i64 (js.type = \"Long\") visibilityTimestamp\n 50: optional string clusterName\n}\n\nstruct CloseShardRequest {\n 10: optional i32 shardID\n}\n\nstruct ResetQueueRequest {\n 10: optional i32 shardID\n 20: optional string clusterName\n 30: optional i32 type\n}\n\nstruct DescribeQueueRequest {\n 10: optional i32 shardID\n 20: optional string clusterName\n 30: optional i32 type\n}\n\nstruct DescribeQueueResponse {\n 10: optional list processingQueueStates\n}\n\nstruct DescribeShardDistributionRequest {\n 10: optional i32 pageSize\n 20: optional i32 pageID\n}\n\nstruct DescribeShardDistributionResponse {\n 10: optional i32 numberOfShards\n\n // ShardID to Address (ip:port) map\n 20: optional map shards\n}\n\nstruct DescribeHistoryHostResponse{\n 10: optional i32 numberOfShards\n 20: optional list shardIDs\n 30: optional DomainCacheInfo domainCache\n 40: optional string shardControllerStatus\n 50: optional string address\n}\n\nstruct DomainCacheInfo{\n 10: optional i64 numOfItemsInCacheByID\n 20: optional i64 numOfItemsInCacheByName\n}\n\nenum TaskListType {\n /*\n * Decision type of tasklist\n */\n Decision,\n /*\n * Activity type of tasklist\n */\n Activity,\n}\n\nstruct PollerInfo {\n // Unix Nano\n 10: optional i64 (js.type = \"Long\") lastAccessTime\n 20: optional string identity\n 30: optional double ratePerSecond\n}\n\nstruct RetryPolicy {\n // Interval of the first retry. If coefficient is 1.0 then it is used for all retries.\n 10: optional i32 initialIntervalInSeconds\n\n // Coefficient used to calculate the next retry interval.\n // The next retry interval is previous interval multiplied by the coefficient.\n // Must be 1 or larger.\n 20: optional double backoffCoefficient\n\n // Maximum interval between retries. Exponential backoff leads to interval increase.\n // This value is the cap of the increase. Default is 100x of initial interval.\n 30: optional i32 maximumIntervalInSeconds\n\n // Maximum number of attempts. When exceeded the retries stop even if not expired yet.\n // Must be 1 or bigger. Default is unlimited.\n 40: optional i32 maximumAttempts\n\n // Non-Retriable errors. 
Will stop retrying if error matches this list.\n 50: optional list nonRetriableErrorReasons\n\n // Expiration time for the whole retry process.\n 60: optional i32 expirationIntervalInSeconds\n}\n\n// HistoryBranchRange represents a piece of range for a branch.\nstruct HistoryBranchRange{\n // branchID of original branch forked from\n 10: optional string branchID\n // beinning node for the range, inclusive\n 20: optional i64 beginNodeID\n // ending node for the range, exclusive\n 30: optional i64 endNodeID\n}\n\n// For history persistence to serialize/deserialize branch details\nstruct HistoryBranch{\n 10: optional string treeID\n 20: optional string branchID\n 30: optional list ancestors\n}\n\n// VersionHistoryItem contains signal eventID and the corresponding version\nstruct VersionHistoryItem{\n 10: optional i64 (js.type = \"Long\") eventID\n 20: optional i64 (js.type = \"Long\") version\n}\n\n// VersionHistory contains the version history of a branch\nstruct VersionHistory{\n 10: optional binary branchToken\n 20: optional list items\n}\n\n// VersionHistories contains all version histories from all branches\nstruct VersionHistories{\n 10: optional i32 currentVersionHistoryIndex\n 20: optional list histories\n}\n\n// ReapplyEventsRequest is the request for reapply events API\nstruct ReapplyEventsRequest{\n 10: optional string domainName\n 20: optional WorkflowExecution workflowExecution\n 30: optional DataBlob events\n}\n\n// SupportedClientVersions contains the support versions for client library\nstruct SupportedClientVersions{\n 10: optional string goSdk\n 20: optional string javaSdk\n}\n\n// ClusterInfo contains information about cadence cluster\nstruct ClusterInfo{\n 10: optional SupportedClientVersions supportedClientVersions\n}\n\nstruct RefreshWorkflowTasksRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct FeatureFlags {\n\t10: optional bool WorkflowExecutionAlreadyCompletedErrorEnabled\n}\n\nenum CrossClusterTaskType {\n StartChildExecution\n CancelExecution\n SignalExecution\n RecordChildWorkflowExecutionComplete\n ApplyParentClosePolicy\n}\n\nenum CrossClusterTaskFailedCause {\n DOMAIN_NOT_ACTIVE\n DOMAIN_NOT_EXISTS\n WORKFLOW_ALREADY_RUNNING\n WORKFLOW_NOT_EXISTS\n WORKFLOW_ALREADY_COMPLETED\n UNCATEGORIZED\n}\n\nenum GetTaskFailedCause {\n SERVICE_BUSY\n TIMEOUT\n SHARD_OWNERSHIP_LOST\n UNCATEGORIZED\n}\n\nstruct CrossClusterTaskInfo {\n 10: optional string domainID\n 20: optional string workflowID\n 30: optional string runID\n 40: optional CrossClusterTaskType taskType\n 50: optional i16 taskState\n 60: optional i64 (js.type = \"Long\") taskID\n 70: optional i64 (js.type = \"Long\") visibilityTimestamp\n}\n\nstruct CrossClusterStartChildExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string requestID\n 30: optional i64 (js.type = \"Long\") initiatedEventID\n 40: optional StartChildWorkflowExecutionInitiatedEventAttributes initiatedEventAttributes\n // targetRunID is for scheduling first decision task\n // targetWorkflowID is available in initiatedEventAttributes\n 50: optional string targetRunID\n 60: optional map partitionConfig\n}\n\nstruct CrossClusterStartChildExecutionResponseAttributes {\n 10: optional string runID\n}\n\nstruct CrossClusterCancelExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional string requestID\n 50: optional i64 (js.type = \"Long\") initiatedEventID\n 60: optional bool 
childWorkflowOnly\n}\n\nstruct CrossClusterCancelExecutionResponseAttributes {\n}\n\nstruct CrossClusterSignalExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional string requestID\n 50: optional i64 (js.type = \"Long\") initiatedEventID\n 60: optional bool childWorkflowOnly\n 70: optional string signalName\n 80: optional binary signalInput\n 90: optional binary control\n}\n\nstruct CrossClusterSignalExecutionResponseAttributes {\n}\n\nstruct CrossClusterRecordChildWorkflowExecutionCompleteRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional i64 (js.type = \"Long\") initiatedEventID\n 50: optional HistoryEvent completionEvent\n}\n\nstruct CrossClusterRecordChildWorkflowExecutionCompleteResponseAttributes {\n}\n\nstruct ApplyParentClosePolicyAttributes {\n 10: optional string childDomainID\n 20: optional string childWorkflowID\n 30: optional string childRunID\n 40: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct ApplyParentClosePolicyStatus {\n 10: optional bool completed\n 20: optional CrossClusterTaskFailedCause failedCause\n}\n\nstruct ApplyParentClosePolicyRequest {\n 10: optional ApplyParentClosePolicyAttributes child\n 20: optional ApplyParentClosePolicyStatus status\n}\n\nstruct CrossClusterApplyParentClosePolicyRequestAttributes {\n 10: optional list children\n}\n\nstruct ApplyParentClosePolicyResult {\n 10: optional ApplyParentClosePolicyAttributes child\n 20: optional CrossClusterTaskFailedCause failedCause\n}\n\nstruct CrossClusterApplyParentClosePolicyResponseAttributes {\n 10: optional list childrenStatus\n}\n\nstruct CrossClusterTaskRequest {\n 10: optional CrossClusterTaskInfo taskInfo\n 20: optional CrossClusterStartChildExecutionRequestAttributes startChildExecutionAttributes\n 30: optional CrossClusterCancelExecutionRequestAttributes cancelExecutionAttributes\n 40: optional CrossClusterSignalExecutionRequestAttributes signalExecutionAttributes\n 50: optional CrossClusterRecordChildWorkflowExecutionCompleteRequestAttributes recordChildWorkflowExecutionCompleteAttributes\n 60: optional CrossClusterApplyParentClosePolicyRequestAttributes applyParentClosePolicyAttributes\n}\n\nstruct CrossClusterTaskResponse {\n 10: optional i64 (js.type = \"Long\") taskID\n 20: optional CrossClusterTaskType taskType\n 30: optional i16 taskState\n 40: optional CrossClusterTaskFailedCause failedCause\n 50: optional CrossClusterStartChildExecutionResponseAttributes startChildExecutionAttributes\n 60: optional CrossClusterCancelExecutionResponseAttributes cancelExecutionAttributes\n 70: optional CrossClusterSignalExecutionResponseAttributes signalExecutionAttributes\n 80: optional CrossClusterRecordChildWorkflowExecutionCompleteResponseAttributes recordChildWorkflowExecutionCompleteAttributes\n 90: optional CrossClusterApplyParentClosePolicyResponseAttributes applyParentClosePolicyAttributes\n}\n\nstruct GetCrossClusterTasksRequest {\n 10: optional list shardIDs\n 20: optional string targetCluster\n}\n\nstruct GetCrossClusterTasksResponse {\n 10: optional map> tasksByShard\n 20: optional map failedCauseByShard\n}\n\nstruct RespondCrossClusterTasksCompletedRequest {\n 10: optional i32 shardID\n 20: optional string targetCluster\n 30: optional list taskResponses\n 40: optional bool fetchNewTasks\n}\n\nstruct RespondCrossClusterTasksCompletedResponse {\n 10: optional list tasks\n}\n\nenum 
IsolationGroupState {\n INVALID,\n HEALTHY,\n DRAINED,\n}\n\nstruct IsolationGroupPartition {\n 10: optional string name\n 20: optional IsolationGroupState state\n}\n\nstruct IsolationGroupConfiguration {\n 10: optional list isolationGroups\n}\n\nstruct AsyncWorkflowConfiguration {\n 10: optional bool enabled\n // PredefinedQueueName is the name of the predefined queue in cadence server config's asyncWorkflowQueues\n 20: optional string predefinedQueueName\n // queueType is the type of the queue if predefined_queue_name is not used\n 30: optional string queueType\n // queueConfig is the configuration for the queue if predefined_queue_name is not used\n 40: optional DataBlob queueConfig\n}\n\n/**\n* Any is a logical duplicate of google.protobuf.Any.\n*\n* The intent of the type is the same, but it is not intended to be directly\n* compatible with google.protobuf.Any or any Thrift equivalent - this blob is\n* RPC-type agnostic by design (as the underlying data may be transported over\n* proto or thrift), and the data-bytes may be in any encoding.\n*\n* This is intentionally different from DataBlob, which supports only a handful\n* of known encodings so it can be interpreted everywhere. Any supports literally\n* any contents, and needs to be considered opaque until it is given to something\n* that is expecting it.\n*\n* See ValueType to interpret the contents.\n**/\nstruct Any {\n // Type-string describing value's contents, and intentionally avoiding the\n // name \"type\" as it is often a special term.\n // This should usually be a hard-coded string of some kind.\n 10: optional string ValueType\n // Arbitrarily-encoded bytes, to be deserialized by a runtime implementation.\n // The contents are described by ValueType.\n 20: optional binary Value\n}\n\nstruct AutoConfigHint {\n 10: optional bool enableAutoConfig\n 20: optional i64 pollerWaitTimeInMs\n}\n\nstruct QueueState {\n 10: optional map virtualQueueStates\n 20: optional TaskKey exclusiveMaxReadLevel\n}\n\nstruct VirtualQueueState {\n 10: optional list virtualSliceStates\n}\n\nstruct VirtualSliceState {\n 10: optional TaskRange taskRange\n}\n\nstruct TaskRange {\n 10: optional TaskKey inclusiveMin\n 20: optional TaskKey exclusiveMax\n}\n\nstruct TaskKey {\n 10: optional i64 scheduledTimeNano\n 20: optional i64 taskID\n}\n" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence\n\nexception BadRequestError {\n 1: required string message\n}\n\nexception InternalServiceError {\n 1: required string message\n}\n\nexception InternalDataInconsistencyError {\n 1: required string message\n}\n\nexception DomainAlreadyExistsError {\n 1: required string message\n}\n\nexception WorkflowExecutionAlreadyStartedError {\n 10: optional string message\n 20: optional string startRequestId\n 30: optional string runId\n}\n\nexception WorkflowExecutionAlreadyCompletedError {\n 1: required string message\n}\n\nexception EntityNotExistsError {\n 1: required string message\n 2: optional string currentCluster\n 3: optional string activeCluster\n // activeClusters is a list of active clusters for active-active domain\n 4: required list activeClusters\n}\n\nexception ServiceBusyError {\n 1: required string message\n 2: optional string reason\n}\n\nexception CancellationAlreadyRequestedError {\n 1: required string message\n}\n\nexception QueryFailedError {\n 1: required string message\n}\n\nexception DomainNotActiveError {\n 1: required string message\n 2: required string domainName\n 3: required string currentCluster\n 4: required string activeCluster\n // activeClusters is a list of active clusters for active-active domain\n 5: required list activeClusters\n}\n\nexception LimitExceededError {\n 1: required string message\n}\n\nexception AccessDeniedError {\n 1: required string message\n}\n\nexception RetryTaskV2Error {\n 1: required string message\n 2: optional string domainId\n 3: optional string workflowId\n 4: optional string runId\n 5: optional i64 (js.type = \"Long\") startEventId\n 6: optional i64 (js.type = \"Long\") startEventVersion\n 7: optional i64 (js.type = \"Long\") endEventId\n 8: optional i64 (js.type = \"Long\") endEventVersion\n}\n\nexception ClientVersionNotSupportedError {\n 1: required string featureVersion\n 2: required string clientImpl\n 3: required string supportedVersions\n}\n\nexception FeatureNotEnabledError {\n 1: required string featureFlag\n}\n\nexception CurrentBranchChangedError {\n 10: required string message\n 20: required binary currentBranchToken\n}\n\nexception RemoteSyncMatchedError {\n 10: required string message\n}\n\nexception StickyWorkerUnavailableError {\n 1: required string message\n}\n\nexception TaskListNotOwnedByHostError {\n 1: required string ownedByIdentity\n 2: required string myIdentity\n 3: required string tasklistName\n}\n\nenum WorkflowIdReusePolicy {\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running, and the last execution close state is in\n * [terminated, cancelled, timeouted, failed].\n */\n AllowDuplicateFailedOnly,\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running.\n */\n AllowDuplicate,\n /*\n * do not allow start a workflow execution using the same workflow ID at all\n */\n RejectDuplicate,\n /*\n * if a workflow is running using the same workflow ID, terminate it and start a new one\n */\n TerminateIfRunning,\n}\n\nenum DomainStatus {\n REGISTERED,\n DEPRECATED,\n DELETED,\n}\n\nenum TimeoutType {\n START_TO_CLOSE,\n SCHEDULE_TO_START,\n SCHEDULE_TO_CLOSE,\n HEARTBEAT,\n}\n\nenum ParentClosePolicy 
{\n\tABANDON,\n\tREQUEST_CANCEL,\n\tTERMINATE,\n}\n\n\n// whenever this list of decision is changed\n// do change the mutableStateBuilder.go\n// function shouldBufferEvent\n// to make sure wo do the correct event ordering\nenum DecisionType {\n ScheduleActivityTask,\n RequestCancelActivityTask,\n StartTimer,\n CompleteWorkflowExecution,\n FailWorkflowExecution,\n CancelTimer,\n CancelWorkflowExecution,\n RequestCancelExternalWorkflowExecution,\n RecordMarker,\n ContinueAsNewWorkflowExecution,\n StartChildWorkflowExecution,\n SignalExternalWorkflowExecution,\n UpsertWorkflowSearchAttributes,\n}\n\nenum EventType {\n WorkflowExecutionStarted,\n WorkflowExecutionCompleted,\n WorkflowExecutionFailed,\n WorkflowExecutionTimedOut,\n DecisionTaskScheduled,\n DecisionTaskStarted,\n DecisionTaskCompleted,\n DecisionTaskTimedOut\n DecisionTaskFailed,\n ActivityTaskScheduled,\n ActivityTaskStarted,\n ActivityTaskCompleted,\n ActivityTaskFailed,\n ActivityTaskTimedOut,\n ActivityTaskCancelRequested,\n RequestCancelActivityTaskFailed,\n ActivityTaskCanceled,\n TimerStarted,\n TimerFired,\n CancelTimerFailed,\n TimerCanceled,\n WorkflowExecutionCancelRequested,\n WorkflowExecutionCanceled,\n RequestCancelExternalWorkflowExecutionInitiated,\n RequestCancelExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionCancelRequested,\n MarkerRecorded,\n WorkflowExecutionSignaled,\n WorkflowExecutionTerminated,\n WorkflowExecutionContinuedAsNew,\n StartChildWorkflowExecutionInitiated,\n StartChildWorkflowExecutionFailed,\n ChildWorkflowExecutionStarted,\n ChildWorkflowExecutionCompleted,\n ChildWorkflowExecutionFailed,\n ChildWorkflowExecutionCanceled,\n ChildWorkflowExecutionTimedOut,\n ChildWorkflowExecutionTerminated,\n SignalExternalWorkflowExecutionInitiated,\n SignalExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionSignaled,\n UpsertWorkflowSearchAttributes,\n}\n\nenum DecisionTaskFailedCause {\n UNHANDLED_DECISION,\n BAD_SCHEDULE_ACTIVITY_ATTRIBUTES,\n BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES,\n BAD_START_TIMER_ATTRIBUTES,\n BAD_CANCEL_TIMER_ATTRIBUTES,\n BAD_RECORD_MARKER_ATTRIBUTES,\n BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CONTINUE_AS_NEW_ATTRIBUTES,\n START_TIMER_DUPLICATE_ID,\n RESET_STICKY_TASKLIST,\n WORKFLOW_WORKER_UNHANDLED_FAILURE,\n BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_START_CHILD_EXECUTION_ATTRIBUTES,\n FORCE_CLOSE_DECISION,\n FAILOVER_CLOSE_DECISION,\n BAD_SIGNAL_INPUT_SIZE,\n RESET_WORKFLOW,\n BAD_BINARY,\n SCHEDULE_ACTIVITY_DUPLICATE_ID,\n BAD_SEARCH_ATTRIBUTES,\n}\n\nenum DecisionTaskTimedOutCause {\n TIMEOUT,\n RESET,\n}\n\nenum CancelExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n WORKFLOW_ALREADY_COMPLETED,\n}\n\nenum SignalExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n WORKFLOW_ALREADY_COMPLETED,\n}\n\nenum ChildWorkflowExecutionFailedCause {\n WORKFLOW_ALREADY_RUNNING,\n}\n\n// TODO: when migrating to gRPC, add a running / none status,\n// currently, customer is using null / nil as an indication\n// that workflow is still running\nenum WorkflowExecutionCloseStatus {\n COMPLETED,\n FAILED,\n CANCELED,\n TERMINATED,\n CONTINUED_AS_NEW,\n TIMED_OUT,\n}\n\nenum QueryTaskCompletedType {\n COMPLETED,\n FAILED,\n}\n\nenum QueryResultType {\n ANSWERED,\n FAILED,\n}\n\nenum PendingActivityState {\n SCHEDULED,\n STARTED,\n CANCEL_REQUESTED,\n}\n\nenum 
PendingDecisionState {\n SCHEDULED,\n STARTED,\n}\n\nenum HistoryEventFilterType {\n ALL_EVENT,\n CLOSE_EVENT,\n}\n\nenum TaskListKind {\n NORMAL,\n STICKY,\n EPHEMERAL,\n}\n\nenum ArchivalStatus {\n DISABLED,\n ENABLED,\n}\n\nenum CronOverlapPolicy {\n SKIPPED,\n BUFFERONE,\n}\n\nenum IndexedValueType {\n STRING,\n KEYWORD,\n INT,\n DOUBLE,\n BOOL,\n DATETIME,\n}\n\nstruct Header {\n 10: optional map fields\n}\n\nstruct WorkflowType {\n 10: optional string name\n}\n\nstruct ActivityType {\n 10: optional string name\n}\n\nstruct TaskList {\n 10: optional string name\n 20: optional TaskListKind kind\n}\n\nenum EncodingType {\n ThriftRW,\n JSON,\n}\n\nenum QueryRejectCondition {\n // NOT_OPEN indicates that query should be rejected if workflow is not open\n NOT_OPEN\n // NOT_COMPLETED_CLEANLY indicates that query should be rejected if workflow did not complete cleanly\n NOT_COMPLETED_CLEANLY\n}\n\nenum QueryConsistencyLevel {\n // EVENTUAL indicates that query should be eventually consistent\n EVENTUAL\n // STRONG indicates that any events that came before query should be reflected in workflow state before running query\n STRONG\n}\n\nstruct DataBlob {\n 10: optional EncodingType EncodingType\n 20: optional binary Data\n}\n\nstruct TaskListMetadata {\n 10: optional double maxTasksPerSecond\n}\n\nstruct WorkflowExecution {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct Memo {\n 10: optional map fields\n}\n\nstruct SearchAttributes {\n 10: optional map indexedFields\n}\n\nstruct WorkerVersionInfo {\n 10: optional string impl\n 20: optional string featureVersion\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional WorkflowExecution execution\n 20: optional WorkflowType type\n 30: optional i64 (js.type = \"Long\") startTime\n 40: optional i64 (js.type = \"Long\") closeTime\n 50: optional WorkflowExecutionCloseStatus closeStatus\n 60: optional i64 (js.type = \"Long\") historyLength\n 70: optional string parentDomainId\n 71: optional string parentDomainName\n 72: optional i64 parentInitatedId\n 80: optional WorkflowExecution parentExecution\n 90: optional i64 (js.type = \"Long\") executionTime\n 100: optional Memo memo\n 101: optional SearchAttributes searchAttributes\n 110: optional ResetPoints autoResetPoints\n 120: optional string taskList\n 121: optional TaskList taskListInfo\n 130: optional bool isCron\n 140: optional i64 (js.type = \"Long\") updateTime\n 150: optional map partitionConfig\n 160: optional CronOverlapPolicy cronOverlapPolicy\n 170: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct WorkflowExecutionConfiguration {\n 10: optional TaskList taskList\n 20: optional i32 executionStartToCloseTimeoutSeconds\n 30: optional i32 taskStartToCloseTimeoutSeconds\n// 40: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n}\n\nstruct TransientDecisionInfo {\n 10: optional HistoryEvent scheduledEvent\n 20: optional HistoryEvent startedEvent\n}\n\nstruct ScheduleActivityTaskDecisionAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional Header header\n 90: optional bool requestLocalDispatch\n}\n\nstruct ActivityLocalDispatchInfo{\n 10: optional string 
activityId\n 20: optional i64 (js.type = \"Long\") scheduledTimestamp\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 50: optional binary taskToken\n}\n\nstruct RequestCancelActivityTaskDecisionAttributes {\n 10: optional string activityId\n}\n\nstruct StartTimerDecisionAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n}\n\nstruct CompleteWorkflowExecutionDecisionAttributes {\n 10: optional binary result\n}\n\nstruct FailWorkflowExecutionDecisionAttributes {\n 10: optional string reason\n 20: optional binary details\n}\n\nstruct CancelTimerDecisionAttributes {\n 10: optional string timerId\n}\n\nstruct CancelWorkflowExecutionDecisionAttributes {\n 10: optional binary details\n}\n\nstruct RequestCancelExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional string runId\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional string signalName\n 40: optional binary input\n 50: optional binary control\n 60: optional bool childWorkflowOnly\n}\n\nstruct UpsertWorkflowSearchAttributesDecisionAttributes {\n 10: optional SearchAttributes searchAttributes\n}\n\nstruct RecordMarkerDecisionAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional Header header\n}\n\nstruct ContinueAsNewWorkflowExecutionDecisionAttributes {\n 10: optional WorkflowType workflowType\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n 60: optional i32 backoffStartIntervalInSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional ContinueAsNewInitiator initiator\n 90: optional string failureReason\n 100: optional binary failureDetails\n 110: optional binary lastCompletionResult\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n 160: optional i32 jitterStartSeconds\n 170: optional CronOverlapPolicy cronOverlapPolicy\n 180: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct StartChildWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional RetryPolicy retryPolicy\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n 160: optional CronOverlapPolicy cronOverlapPolicy\n 170: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct Decision {\n 10: optional DecisionType decisionType\n 20: optional ScheduleActivityTaskDecisionAttributes scheduleActivityTaskDecisionAttributes\n 25: optional StartTimerDecisionAttributes startTimerDecisionAttributes\n 30: optional 
CompleteWorkflowExecutionDecisionAttributes completeWorkflowExecutionDecisionAttributes\n 35: optional FailWorkflowExecutionDecisionAttributes failWorkflowExecutionDecisionAttributes\n 40: optional RequestCancelActivityTaskDecisionAttributes requestCancelActivityTaskDecisionAttributes\n 50: optional CancelTimerDecisionAttributes cancelTimerDecisionAttributes\n 60: optional CancelWorkflowExecutionDecisionAttributes cancelWorkflowExecutionDecisionAttributes\n 70: optional RequestCancelExternalWorkflowExecutionDecisionAttributes requestCancelExternalWorkflowExecutionDecisionAttributes\n 80: optional RecordMarkerDecisionAttributes recordMarkerDecisionAttributes\n 90: optional ContinueAsNewWorkflowExecutionDecisionAttributes continueAsNewWorkflowExecutionDecisionAttributes\n 100: optional StartChildWorkflowExecutionDecisionAttributes startChildWorkflowExecutionDecisionAttributes\n 110: optional SignalExternalWorkflowExecutionDecisionAttributes signalExternalWorkflowExecutionDecisionAttributes\n 120: optional UpsertWorkflowSearchAttributesDecisionAttributes upsertWorkflowSearchAttributesDecisionAttributes\n}\n\nstruct WorkflowExecutionStartedEventAttributes {\n 10: optional WorkflowType workflowType\n 12: optional string parentWorkflowDomain\n 14: optional WorkflowExecution parentWorkflowExecution\n 16: optional i64 (js.type = \"Long\") parentInitiatedEventId\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n// 52: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 54: optional string continuedExecutionRunId\n 55: optional ContinueAsNewInitiator initiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 59: optional string originalExecutionRunId // This is the runID when the WorkflowExecutionStarted event is written\n 60: optional string identity\n 61: optional string firstExecutionRunId // This is the very first runID along the chain of ContinueAsNew and Reset.\n 62: optional i64 (js.type = \"Long\") firstScheduledTimeNano\n 70: optional RetryPolicy retryPolicy\n 80: optional i32 attempt\n 90: optional i64 (js.type = \"Long\") expirationTimestamp\n 100: optional string cronSchedule\n 110: optional i32 firstDecisionTaskBackoffSeconds\n 120: optional Memo memo\n 121: optional SearchAttributes searchAttributes\n 130: optional ResetPoints prevAutoResetPoints\n 140: optional Header header\n 150: optional map partitionConfig\n 160: optional string requestId\n 170: optional CronOverlapPolicy cronOverlapPolicy\n 180: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct ResetPoints{\n 10: optional list points\n}\n\n struct ResetPointInfo{\n 10: optional string binaryChecksum\n 20: optional string runId\n 30: optional i64 firstDecisionCompletedId\n 40: optional i64 (js.type = \"Long\") createdTimeNano\n 50: optional i64 (js.type = \"Long\") expiringTimeNano //the time that the run is deleted due to retention\n 60: optional bool resettable // false if the resset point has pending childWFs/reqCancels/signalExternals.\n}\n\nstruct WorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") 
decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n}\n\nenum ContinueAsNewInitiator {\n Decider,\n RetryPolicy,\n CronSchedule,\n}\n\nstruct WorkflowExecutionContinuedAsNewEventAttributes {\n 10: optional string newExecutionRunId\n 20: optional WorkflowType workflowType\n 30: optional TaskList taskList\n 40: optional binary input\n 50: optional i32 executionStartToCloseTimeoutSeconds\n 60: optional i32 taskStartToCloseTimeoutSeconds\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 80: optional i32 backoffStartIntervalInSeconds\n 90: optional ContinueAsNewInitiator initiator\n 100: optional string failureReason\n 110: optional binary failureDetails\n 120: optional binary lastCompletionResult\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct DecisionTaskScheduledEventAttributes {\n 10: optional TaskList taskList\n 20: optional i32 startToCloseTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") attempt\n}\n\nstruct DecisionTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n}\n\nstruct DecisionTaskCompletedEventAttributes {\n 10: optional binary executionContext\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct DecisionTaskTimedOutEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n // for reset workflow\n 40: optional string baseRunId\n 50: optional string newRunId\n 60: optional i64 (js.type = \"Long\") forkEventVersion\n 70: optional string reason\n 80: optional DecisionTaskTimedOutCause cause\n 90: optional string requestId\n}\n\nstruct DecisionTaskFailedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional DecisionTaskFailedCause cause\n 35: optional binary details\n 40: optional string identity\n 50: optional string reason\n // for reset workflow\n 60: optional string baseRunId\n 70: optional string newRunId\n 80: optional i64 (js.type = \"Long\") forkEventVersion\n 90: optional string binaryChecksum\n 100: optional string requestId\n}\n\nstruct ActivityTaskScheduledEventAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional RetryPolicy retryPolicy\n 120: optional Header header\n}\n\nstruct ActivityTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n 40: optional i32 attempt\n 50: optional string lastFailureReason\n 60: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n}\n\nstruct 
ActivityTaskFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct ActivityTaskTimedOutEventAttributes {\n 05: optional binary details\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n // For retry activity, it may have a failure before timeout. It's important to keep those information for debug.\n // Client can also provide the info for making next decision\n 40: optional string lastFailureReason\n 50: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCancelRequestedEventAttributes {\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct RequestCancelActivityTaskFailedEventAttributes{\n 10: optional string activityId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ActivityTaskCanceledEventAttributes {\n 10: optional binary details\n 20: optional i64 (js.type = \"Long\") latestCancelRequestedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct TimerStartedEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct TimerFiredEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct TimerCanceledEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct CancelTimerFailedEventAttributes {\n 10: optional string timerId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct WorkflowExecutionCancelRequestedEventAttributes {\n 10: optional string cause\n 20: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 30: optional WorkflowExecution externalWorkflowExecution\n 40: optional string identity\n 50: optional string requestId\n}\n\nstruct WorkflowExecutionCanceledEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional binary details\n}\n\nstruct MarkerRecordedEventAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional Header header\n}\n\nstruct WorkflowExecutionSignaledEventAttributes {\n 10: optional string signalName\n 20: optional binary input\n 30: optional string identity\n 40: optional string requestId\n}\n\nstruct WorkflowExecutionTerminatedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RequestCancelExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct RequestCancelExternalWorkflowExecutionFailedEventAttributes {\n 10: optional CancelExternalWorkflowExecutionFailedCause cause\n 20: 
optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionCancelRequestedEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n}\n\nstruct SignalExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional string signalName\n 50: optional binary input\n 60: optional binary control\n 70: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionFailedEventAttributes {\n 10: optional SignalExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionSignaledEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n}\n\nstruct UpsertWorkflowSearchAttributesEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionInitiatedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Header header\n 150: optional Memo memo\n 160: optional SearchAttributes searchAttributes\n 170: optional i32 delayStartSeconds\n 180: optional i32 jitterStartSeconds\n 190: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 200: optional CronOverlapPolicy cronOverlapPolicy\n 210: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct StartChildWorkflowExecutionFailedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional ChildWorkflowExecutionFailedCause cause\n 50: optional binary control\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ChildWorkflowExecutionStartedEventAttributes {\n 10: optional string domain\n 20: optional i64 (js.type = \"Long\") initiatedEventId\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional Header header\n}\n\nstruct ChildWorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") 
initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional WorkflowType workflowType\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionCanceledEventAttributes {\n 10: optional binary details\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTerminatedEventAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") initiatedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct HistoryEvent {\n 10: optional i64 (js.type = \"Long\") eventId\n 20: optional i64 (js.type = \"Long\") timestamp\n 30: optional EventType eventType\n 35: optional i64 (js.type = \"Long\") version\n 36: optional i64 (js.type = \"Long\") taskId\n 40: optional WorkflowExecutionStartedEventAttributes workflowExecutionStartedEventAttributes\n 50: optional WorkflowExecutionCompletedEventAttributes workflowExecutionCompletedEventAttributes\n 60: optional WorkflowExecutionFailedEventAttributes workflowExecutionFailedEventAttributes\n 70: optional WorkflowExecutionTimedOutEventAttributes workflowExecutionTimedOutEventAttributes\n 80: optional DecisionTaskScheduledEventAttributes decisionTaskScheduledEventAttributes\n 90: optional DecisionTaskStartedEventAttributes decisionTaskStartedEventAttributes\n 100: optional DecisionTaskCompletedEventAttributes decisionTaskCompletedEventAttributes\n 110: optional DecisionTaskTimedOutEventAttributes decisionTaskTimedOutEventAttributes\n 120: optional DecisionTaskFailedEventAttributes decisionTaskFailedEventAttributes\n 130: optional ActivityTaskScheduledEventAttributes activityTaskScheduledEventAttributes\n 140: optional ActivityTaskStartedEventAttributes activityTaskStartedEventAttributes\n 150: optional ActivityTaskCompletedEventAttributes activityTaskCompletedEventAttributes\n 160: optional ActivityTaskFailedEventAttributes activityTaskFailedEventAttributes\n 170: optional ActivityTaskTimedOutEventAttributes activityTaskTimedOutEventAttributes\n 180: optional TimerStartedEventAttributes timerStartedEventAttributes\n 190: optional TimerFiredEventAttributes timerFiredEventAttributes\n 200: optional ActivityTaskCancelRequestedEventAttributes activityTaskCancelRequestedEventAttributes\n 210: optional RequestCancelActivityTaskFailedEventAttributes requestCancelActivityTaskFailedEventAttributes\n 220: optional ActivityTaskCanceledEventAttributes activityTaskCanceledEventAttributes\n 230: optional TimerCanceledEventAttributes timerCanceledEventAttributes\n 240: optional CancelTimerFailedEventAttributes cancelTimerFailedEventAttributes\n 250: optional MarkerRecordedEventAttributes 
markerRecordedEventAttributes\n 260: optional WorkflowExecutionSignaledEventAttributes workflowExecutionSignaledEventAttributes\n 270: optional WorkflowExecutionTerminatedEventAttributes workflowExecutionTerminatedEventAttributes\n 280: optional WorkflowExecutionCancelRequestedEventAttributes workflowExecutionCancelRequestedEventAttributes\n 290: optional WorkflowExecutionCanceledEventAttributes workflowExecutionCanceledEventAttributes\n 300: optional RequestCancelExternalWorkflowExecutionInitiatedEventAttributes requestCancelExternalWorkflowExecutionInitiatedEventAttributes\n 310: optional RequestCancelExternalWorkflowExecutionFailedEventAttributes requestCancelExternalWorkflowExecutionFailedEventAttributes\n 320: optional ExternalWorkflowExecutionCancelRequestedEventAttributes externalWorkflowExecutionCancelRequestedEventAttributes\n 330: optional WorkflowExecutionContinuedAsNewEventAttributes workflowExecutionContinuedAsNewEventAttributes\n 340: optional StartChildWorkflowExecutionInitiatedEventAttributes startChildWorkflowExecutionInitiatedEventAttributes\n 350: optional StartChildWorkflowExecutionFailedEventAttributes startChildWorkflowExecutionFailedEventAttributes\n 360: optional ChildWorkflowExecutionStartedEventAttributes childWorkflowExecutionStartedEventAttributes\n 370: optional ChildWorkflowExecutionCompletedEventAttributes childWorkflowExecutionCompletedEventAttributes\n 380: optional ChildWorkflowExecutionFailedEventAttributes childWorkflowExecutionFailedEventAttributes\n 390: optional ChildWorkflowExecutionCanceledEventAttributes childWorkflowExecutionCanceledEventAttributes\n 400: optional ChildWorkflowExecutionTimedOutEventAttributes childWorkflowExecutionTimedOutEventAttributes\n 410: optional ChildWorkflowExecutionTerminatedEventAttributes childWorkflowExecutionTerminatedEventAttributes\n 420: optional SignalExternalWorkflowExecutionInitiatedEventAttributes signalExternalWorkflowExecutionInitiatedEventAttributes\n 430: optional SignalExternalWorkflowExecutionFailedEventAttributes signalExternalWorkflowExecutionFailedEventAttributes\n 440: optional ExternalWorkflowExecutionSignaledEventAttributes externalWorkflowExecutionSignaledEventAttributes\n 450: optional UpsertWorkflowSearchAttributesEventAttributes upsertWorkflowSearchAttributesEventAttributes\n}\n\nstruct History {\n 10: optional list events\n}\n\nstruct WorkflowExecutionFilter {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct WorkflowTypeFilter {\n 10: optional string name\n}\n\nstruct StartTimeFilter {\n 10: optional i64 (js.type = \"Long\") earliestTime\n 20: optional i64 (js.type = \"Long\") latestTime\n}\n\nstruct DomainInfo {\n 10: optional string name\n 20: optional DomainStatus status\n 30: optional string description\n 40: optional string ownerEmail\n // A key-value map for any customized purpose\n 50: optional map data\n 60: optional string uuid\n}\n\nstruct DomainConfiguration {\n 10: optional i32 workflowExecutionRetentionPeriodInDays\n 20: optional bool emitMetric\n 60: optional IsolationGroupConfiguration isolationgroups\n 70: optional BadBinaries badBinaries\n 80: optional ArchivalStatus historyArchivalStatus\n 90: optional string historyArchivalURI\n 100: optional ArchivalStatus visibilityArchivalStatus\n 110: optional string visibilityArchivalURI\n 120: optional AsyncWorkflowConfiguration AsyncWorkflowConfiguration\n}\n\nstruct FailoverInfo {\n 10: optional i64 (js.type = \"Long\") failoverVersion\n 20: optional i64 (js.type = \"Long\") failoverStartTimestamp\n 30: 
optional i64 (js.type = \"Long\") failoverExpireTimestamp\n 40: optional i32 completedShardCount\n 50: optional list pendingShards\n}\n\nstruct BadBinaries{\n 10: optional map binaries\n}\n\nstruct BadBinaryInfo{\n 10: optional string reason\n 20: optional string operator\n 30: optional i64 (js.type = \"Long\") createdTimeNano\n}\n\nstruct UpdateDomainInfo {\n 10: optional string description\n 20: optional string ownerEmail\n // A key-value map for any customized purpose\n 30: optional map data\n}\n\nstruct ClusterReplicationConfiguration {\n 10: optional string clusterName\n}\n\nstruct DomainReplicationConfiguration {\n // activeClusterName is the name of the active cluster for active-passive domain\n 10: optional string activeClusterName\n\n // clusters is list of all active and passive clusters of domain\n 20: optional list clusters\n\n // activeClusters contains active cluster(s) information for active-active domain\n 30: optional ActiveClusters activeClusters\n}\n\nstruct ActiveClusters {\n // activeClustersByRegion is a map of region name to active cluster info for active-active domain\n 10: optional map activeClustersByRegion\n}\n\n// ActiveClusterInfo contains the configuration of active-active domain's active cluster & failover version for a specific region\nstruct ActiveClusterInfo {\n 10: optional string activeClusterName\n 20: optional i64 (js.type = \"Long\") failoverVersion\n}\n\nstruct RegisterDomainRequest {\n 10: optional string name\n 20: optional string description\n 30: optional string ownerEmail\n 40: optional i32 workflowExecutionRetentionPeriodInDays\n 50: optional bool emitMetric = true\n 60: optional list clusters\n 70: optional string activeClusterName\n // activeClusters is a map of region name to active cluster name for active-active domain\n 75: optional map activeClustersByRegion\n // A key-value map for any customized purpose\n 80: optional map data\n 90: optional string securityToken\n 120: optional bool isGlobalDomain\n 130: optional ArchivalStatus historyArchivalStatus\n 140: optional string historyArchivalURI\n 150: optional ArchivalStatus visibilityArchivalStatus\n 160: optional string visibilityArchivalURI\n}\n\nstruct ListDomainsRequest {\n 10: optional i32 pageSize\n 20: optional binary nextPageToken\n}\n\nstruct ListDomainsResponse {\n 10: optional list domains\n 20: optional binary nextPageToken\n}\n\nstruct DescribeDomainRequest {\n 10: optional string name\n 20: optional string uuid\n}\n\nstruct DescribeDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n 60: optional FailoverInfo failoverInfo\n}\n\nstruct UpdateDomainRequest {\n 10: optional string name\n 20: optional UpdateDomainInfo updatedInfo\n 30: optional DomainConfiguration configuration\n 40: optional DomainReplicationConfiguration replicationConfiguration\n 50: optional string securityToken\n 60: optional string deleteBadBinary\n 70: optional i32 failoverTimeoutInSeconds\n}\n\nstruct UpdateDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct DeprecateDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct 
DeleteDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n// 110: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Memo memo\n 141: optional SearchAttributes searchAttributes\n 150: optional Header header\n 160: optional i32 delayStartSeconds\n 170: optional i32 jitterStartSeconds\n 180: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 190: optional CronOverlapPolicy cronOverlapPolicy\n 200: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct StartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct StartWorkflowExecutionAsyncRequest {\n 10: optional StartWorkflowExecutionRequest request\n}\n\nstruct StartWorkflowExecutionAsyncResponse {\n}\n\nstruct RestartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct DiagnoseWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n}\n\nstruct DiagnoseWorkflowExecutionResponse {\n 10: optional string domain\n 20: optional WorkflowExecution diagnosticWorkflowExecution\n}\n\nstruct PollForDecisionTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional string binaryChecksum\n}\n\nstruct PollForDecisionTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") previousStartedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n 51: optional i64 (js.type = 'Long') attempt\n 54: optional i64 (js.type = \"Long\") backlogCountHint\n 60: optional History history\n 70: optional binary nextPageToken\n 80: optional WorkflowQuery query\n 90: optional TaskList WorkflowExecutionTaskList\n 100: optional i64 (js.type = \"Long\") scheduledTimestamp\n 110: optional i64 (js.type = \"Long\") startedTimestamp\n 120: optional map queries\n 130: optional i64 (js.type = 'Long') nextEventId\n 140: optional i64 (js.type = 'Long') totalHistoryBytes\n 150: optional AutoConfigHint autoConfigHint\n}\n\nstruct StickyExecutionAttributes {\n 10: optional TaskList workerTaskList\n 20: optional i32 scheduleToStartTimeoutSeconds\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional list decisions\n 30: optional binary executionContext\n 40: optional string identity\n 50: optional StickyExecutionAttributes stickyAttributes\n 60: optional bool returnNewDecisionTask\n 70: optional bool forceCreateNewDecisionTask\n 80: optional string binaryChecksum\n 90: optional map queryResults\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional PollForDecisionTaskResponse decisionTask\n 20: optional map activitiesToDispatchLocally\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional DecisionTaskFailedCause cause\n 30: optional binary details\n 40: optional string identity\n 50: optional 
string binaryChecksum\n}\n\nstruct PollForActivityTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional TaskListMetadata taskListMetadata\n}\n\nstruct PollForActivityTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional string activityId\n 40: optional ActivityType activityType\n 50: optional binary input\n 70: optional i64 (js.type = \"Long\") scheduledTimestamp\n 80: optional i32 scheduleToCloseTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") startedTimestamp\n 100: optional i32 startToCloseTimeoutSeconds\n 110: optional i32 heartbeatTimeoutSeconds\n 120: optional i32 attempt\n 130: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 140: optional binary heartbeatDetails\n 150: optional WorkflowType workflowType\n 160: optional string workflowDomain\n 170: optional Header header\n 180: optional AutoConfigHint autoConfigHint\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatResponse {\n 10: optional bool cancelRequested\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional binary result\n 30: optional string identity\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional string reason\n 30: optional binary details\n 40: optional string identity\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RespondActivityTaskCompletedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary result\n 60: optional string identity\n}\n\nstruct RespondActivityTaskFailedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional string reason\n 60: optional binary details\n 70: optional string identity\n}\n\nstruct RespondActivityTaskCanceledByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n 40: optional string requestId\n 50: optional string cause\n 60: optional string firstExecutionRunID\n}\n\nstruct GetWorkflowExecutionHistoryRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional i32 maximumPageSize\n 40: optional binary nextPageToken\n 50: optional bool waitForNewEvent\n 60: optional HistoryEventFilterType HistoryEventFilterType\n 70: optional bool skipArchival\n 80: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct GetWorkflowExecutionHistoryResponse {\n 10: optional History history\n 11: optional list rawHistory\n 20: optional binary nextPageToken\n 30: optional bool archived\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domain\n 
20: optional WorkflowExecution workflowExecution\n 30: optional string signalName\n 40: optional binary input\n 50: optional string identity\n 60: optional string requestId\n 70: optional binary control\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional string signalName\n 120: optional binary signalInput\n 130: optional binary control\n 140: optional RetryPolicy retryPolicy\n 150: optional string cronSchedule\n 160: optional Memo memo\n 161: optional SearchAttributes searchAttributes\n 170: optional Header header\n 180: optional i32 delayStartSeconds\n 190: optional i32 jitterStartSeconds\n 200: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 210: optional CronOverlapPolicy cronOverlapPolicy\n 220: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct SignalWithStartWorkflowExecutionAsyncRequest {\n 10: optional SignalWithStartWorkflowExecutionRequest request\n}\n\nstruct SignalWithStartWorkflowExecutionAsyncResponse {\n}\n\nstruct RestartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional string identity\n}\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional binary details\n 50: optional string identity\n 60: optional string firstExecutionRunID\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional i64 (js.type = \"Long\") decisionFinishEventId\n 50: optional string requestId\n 60: optional bool skipSignalReapply\n}\n\nstruct ResetWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct ListOpenWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n}\n\nstruct ListOpenWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListClosedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n 70: optional WorkflowExecutionCloseStatus statusFilter\n}\n\nstruct ListClosedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListArchivedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct 
ListArchivedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct CountWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional string query\n}\n\nstruct CountWorkflowExecutionsResponse {\n 10: optional i64 count\n}\n\nstruct GetSearchAttributesResponse {\n 10: optional map keys\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional WorkflowQuery query\n // QueryRejectCondition can used to reject the query if workflow state does not satisify condition\n 40: optional QueryRejectCondition queryRejectCondition\n 50: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct QueryRejected {\n 10: optional WorkflowExecutionCloseStatus closeStatus\n}\n\nstruct QueryWorkflowResponse {\n 10: optional binary queryResult\n 20: optional QueryRejected queryRejected\n}\n\nstruct WorkflowQuery {\n 10: optional string queryType\n 20: optional binary queryArgs\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondQueryTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional QueryTaskCompletedType completedType\n 30: optional binary queryResult\n 40: optional string errorMessage\n 50: optional WorkerVersionInfo workerVersionInfo\n}\n\nstruct WorkflowQueryResult {\n 10: optional QueryResultType resultType\n 20: optional binary answer\n 30: optional string errorMessage\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct PendingActivityInfo {\n 10: optional string activityID\n 20: optional ActivityType activityType\n 30: optional PendingActivityState state\n 40: optional binary heartbeatDetails\n 50: optional i64 (js.type = \"Long\") lastHeartbeatTimestamp\n 60: optional i64 (js.type = \"Long\") lastStartedTimestamp\n 70: optional i32 attempt\n 80: optional i32 maximumAttempts\n 90: optional i64 (js.type = \"Long\") scheduledTimestamp\n 100: optional i64 (js.type = \"Long\") expirationTimestamp\n 110: optional string lastFailureReason\n 120: optional string lastWorkerIdentity\n 130: optional binary lastFailureDetails\n 140: optional string startedWorkerIdentity\n 150: optional i64 (js.type = \"Long\") scheduleID\n}\n\nstruct PendingDecisionInfo {\n 10: optional PendingDecisionState state\n 20: optional i64 (js.type = \"Long\") scheduledTimestamp\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 attempt\n 50: optional i64 (js.type = \"Long\") originalScheduledTimestamp\n 60: optional i64 (js.type = \"Long\") scheduleID\n}\n\nstruct PendingChildExecutionInfo {\n 1: optional string domain\n 10: optional string workflowID\n 20: optional string runID\n 30: optional string workflowTypName\n 40: optional i64 (js.type = \"Long\") initiatedID\n 50: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct DescribeWorkflowExecutionResponse {\n 10: optional WorkflowExecutionConfiguration executionConfiguration\n 20: optional WorkflowExecutionInfo workflowExecutionInfo\n 30: optional list pendingActivities\n 40: optional list pendingChildren\n 50: optional PendingDecisionInfo pendingDecision\n}\n\nstruct DescribeTaskListRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 
30: optional TaskListType taskListType\n 40: optional bool includeTaskListStatus\n}\n\nstruct DescribeTaskListResponse {\n 10: optional list pollers\n 20: optional TaskListStatus taskListStatus\n // The TaskList being described\n 30: optional TaskList taskList\n}\n\nstruct GetTaskListsByDomainRequest {\n 10: optional string domainName\n}\n\nstruct GetTaskListsByDomainResponse {\n 10: optional map decisionTaskListMap\n 20: optional map activityTaskListMap\n}\n\nstruct ListTaskListPartitionsRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n}\n\nstruct TaskListPartitionMetadata {\n 10: optional string key\n 20: optional string ownerHostName\n}\n\nstruct ListTaskListPartitionsResponse {\n 10: optional list activityTaskListPartitions\n 20: optional list decisionTaskListPartitions\n}\n\nstruct IsolationGroupMetrics {\n 10: optional double newTasksPerSecond\n 20: optional i64 (js.type = \"Long\") pollerCount\n}\n\nstruct TaskListStatus {\n 10: optional i64 (js.type = \"Long\") backlogCountHint\n 20: optional i64 (js.type = \"Long\") readLevel\n 30: optional i64 (js.type = \"Long\") ackLevel\n 35: optional double ratePerSecond\n 40: optional TaskIDBlock taskIDBlock\n 50: optional map isolationGroupMetrics\n 60: optional double newTasksPerSecond\n}\n\nstruct TaskIDBlock {\n 10: optional i64 (js.type = \"Long\") startID\n 20: optional i64 (js.type = \"Long\") endID\n}\n\n//At least one of the parameters needs to be provided\nstruct DescribeHistoryHostRequest {\n 10: optional string hostAddress //ip:port\n 20: optional i32 shardIdForHost\n 30: optional WorkflowExecution executionForHost\n}\n\nstruct RemoveTaskRequest {\n 10: optional i32 shardID\n 20: optional i32 type\n 30: optional i64 (js.type = \"Long\") taskID\n 40: optional i64 (js.type = \"Long\") visibilityTimestamp\n 50: optional string clusterName\n}\n\nstruct CloseShardRequest {\n 10: optional i32 shardID\n}\n\nstruct ResetQueueRequest {\n 10: optional i32 shardID\n 20: optional string clusterName\n 30: optional i32 type\n}\n\nstruct DescribeQueueRequest {\n 10: optional i32 shardID\n 20: optional string clusterName\n 30: optional i32 type\n}\n\nstruct DescribeQueueResponse {\n 10: optional list processingQueueStates\n}\n\nstruct DescribeShardDistributionRequest {\n 10: optional i32 pageSize\n 20: optional i32 pageID\n}\n\nstruct DescribeShardDistributionResponse {\n 10: optional i32 numberOfShards\n\n // ShardID to Address (ip:port) map\n 20: optional map shards\n}\n\nstruct DescribeHistoryHostResponse{\n 10: optional i32 numberOfShards\n 20: optional list shardIDs\n 30: optional DomainCacheInfo domainCache\n 40: optional string shardControllerStatus\n 50: optional string address\n}\n\nstruct DomainCacheInfo{\n 10: optional i64 numOfItemsInCacheByID\n 20: optional i64 numOfItemsInCacheByName\n}\n\nenum TaskListType {\n /*\n * Decision type of tasklist\n */\n Decision,\n /*\n * Activity type of tasklist\n */\n Activity,\n}\n\nstruct PollerInfo {\n // Unix Nano\n 10: optional i64 (js.type = \"Long\") lastAccessTime\n 20: optional string identity\n 30: optional double ratePerSecond\n}\n\nstruct RetryPolicy {\n // Interval of the first retry. If coefficient is 1.0 then it is used for all retries.\n 10: optional i32 initialIntervalInSeconds\n\n // Coefficient used to calculate the next retry interval.\n // The next retry interval is previous interval multiplied by the coefficient.\n // Must be 1 or larger.\n 20: optional double backoffCoefficient\n\n // Maximum interval between retries. 
Exponential backoff leads to interval increase.\n // This value is the cap of the increase. Default is 100x of initial interval.\n 30: optional i32 maximumIntervalInSeconds\n\n // Maximum number of attempts. When exceeded the retries stop even if not expired yet.\n // Must be 1 or bigger. Default is unlimited.\n 40: optional i32 maximumAttempts\n\n // Non-Retriable errors. Will stop retrying if error matches this list.\n 50: optional list nonRetriableErrorReasons\n\n // Expiration time for the whole retry process.\n 60: optional i32 expirationIntervalInSeconds\n}\n\n// HistoryBranchRange represents a piece of range for a branch.\nstruct HistoryBranchRange{\n // branchID of original branch forked from\n 10: optional string branchID\n // beinning node for the range, inclusive\n 20: optional i64 beginNodeID\n // ending node for the range, exclusive\n 30: optional i64 endNodeID\n}\n\n// For history persistence to serialize/deserialize branch details\nstruct HistoryBranch{\n 10: optional string treeID\n 20: optional string branchID\n 30: optional list ancestors\n}\n\n// VersionHistoryItem contains signal eventID and the corresponding version\nstruct VersionHistoryItem{\n 10: optional i64 (js.type = \"Long\") eventID\n 20: optional i64 (js.type = \"Long\") version\n}\n\n// VersionHistory contains the version history of a branch\nstruct VersionHistory{\n 10: optional binary branchToken\n 20: optional list items\n}\n\n// VersionHistories contains all version histories from all branches\nstruct VersionHistories{\n 10: optional i32 currentVersionHistoryIndex\n 20: optional list histories\n}\n\n// ReapplyEventsRequest is the request for reapply events API\nstruct ReapplyEventsRequest{\n 10: optional string domainName\n 20: optional WorkflowExecution workflowExecution\n 30: optional DataBlob events\n}\n\n// SupportedClientVersions contains the support versions for client library\nstruct SupportedClientVersions{\n 10: optional string goSdk\n 20: optional string javaSdk\n}\n\n// ClusterInfo contains information about cadence cluster\nstruct ClusterInfo{\n 10: optional SupportedClientVersions supportedClientVersions\n}\n\nstruct RefreshWorkflowTasksRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct FeatureFlags {\n\t10: optional bool WorkflowExecutionAlreadyCompletedErrorEnabled\n}\n\nenum CrossClusterTaskType {\n StartChildExecution\n CancelExecution\n SignalExecution\n RecordChildWorkflowExecutionComplete\n ApplyParentClosePolicy\n}\n\nenum CrossClusterTaskFailedCause {\n DOMAIN_NOT_ACTIVE\n DOMAIN_NOT_EXISTS\n WORKFLOW_ALREADY_RUNNING\n WORKFLOW_NOT_EXISTS\n WORKFLOW_ALREADY_COMPLETED\n UNCATEGORIZED\n}\n\nenum GetTaskFailedCause {\n SERVICE_BUSY\n TIMEOUT\n SHARD_OWNERSHIP_LOST\n UNCATEGORIZED\n}\n\nstruct CrossClusterTaskInfo {\n 10: optional string domainID\n 20: optional string workflowID\n 30: optional string runID\n 40: optional CrossClusterTaskType taskType\n 50: optional i16 taskState\n 60: optional i64 (js.type = \"Long\") taskID\n 70: optional i64 (js.type = \"Long\") visibilityTimestamp\n}\n\nstruct CrossClusterStartChildExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string requestID\n 30: optional i64 (js.type = \"Long\") initiatedEventID\n 40: optional StartChildWorkflowExecutionInitiatedEventAttributes initiatedEventAttributes\n // targetRunID is for scheduling first decision task\n // targetWorkflowID is available in initiatedEventAttributes\n 50: optional string targetRunID\n 60: optional map 
partitionConfig\n}\n\nstruct CrossClusterStartChildExecutionResponseAttributes {\n 10: optional string runID\n}\n\nstruct CrossClusterCancelExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional string requestID\n 50: optional i64 (js.type = \"Long\") initiatedEventID\n 60: optional bool childWorkflowOnly\n}\n\nstruct CrossClusterCancelExecutionResponseAttributes {\n}\n\nstruct CrossClusterSignalExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional string requestID\n 50: optional i64 (js.type = \"Long\") initiatedEventID\n 60: optional bool childWorkflowOnly\n 70: optional string signalName\n 80: optional binary signalInput\n 90: optional binary control\n}\n\nstruct CrossClusterSignalExecutionResponseAttributes {\n}\n\nstruct CrossClusterRecordChildWorkflowExecutionCompleteRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional i64 (js.type = \"Long\") initiatedEventID\n 50: optional HistoryEvent completionEvent\n}\n\nstruct CrossClusterRecordChildWorkflowExecutionCompleteResponseAttributes {\n}\n\nstruct ApplyParentClosePolicyAttributes {\n 10: optional string childDomainID\n 20: optional string childWorkflowID\n 30: optional string childRunID\n 40: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct ApplyParentClosePolicyStatus {\n 10: optional bool completed\n 20: optional CrossClusterTaskFailedCause failedCause\n}\n\nstruct ApplyParentClosePolicyRequest {\n 10: optional ApplyParentClosePolicyAttributes child\n 20: optional ApplyParentClosePolicyStatus status\n}\n\nstruct CrossClusterApplyParentClosePolicyRequestAttributes {\n 10: optional list children\n}\n\nstruct ApplyParentClosePolicyResult {\n 10: optional ApplyParentClosePolicyAttributes child\n 20: optional CrossClusterTaskFailedCause failedCause\n}\n\nstruct CrossClusterApplyParentClosePolicyResponseAttributes {\n 10: optional list childrenStatus\n}\n\nstruct CrossClusterTaskRequest {\n 10: optional CrossClusterTaskInfo taskInfo\n 20: optional CrossClusterStartChildExecutionRequestAttributes startChildExecutionAttributes\n 30: optional CrossClusterCancelExecutionRequestAttributes cancelExecutionAttributes\n 40: optional CrossClusterSignalExecutionRequestAttributes signalExecutionAttributes\n 50: optional CrossClusterRecordChildWorkflowExecutionCompleteRequestAttributes recordChildWorkflowExecutionCompleteAttributes\n 60: optional CrossClusterApplyParentClosePolicyRequestAttributes applyParentClosePolicyAttributes\n}\n\nstruct CrossClusterTaskResponse {\n 10: optional i64 (js.type = \"Long\") taskID\n 20: optional CrossClusterTaskType taskType\n 30: optional i16 taskState\n 40: optional CrossClusterTaskFailedCause failedCause\n 50: optional CrossClusterStartChildExecutionResponseAttributes startChildExecutionAttributes\n 60: optional CrossClusterCancelExecutionResponseAttributes cancelExecutionAttributes\n 70: optional CrossClusterSignalExecutionResponseAttributes signalExecutionAttributes\n 80: optional CrossClusterRecordChildWorkflowExecutionCompleteResponseAttributes recordChildWorkflowExecutionCompleteAttributes\n 90: optional CrossClusterApplyParentClosePolicyResponseAttributes applyParentClosePolicyAttributes\n}\n\nstruct GetCrossClusterTasksRequest {\n 10: optional list shardIDs\n 20: optional string targetCluster\n}\n\nstruct 
GetCrossClusterTasksResponse {\n 10: optional map> tasksByShard\n 20: optional map failedCauseByShard\n}\n\nstruct RespondCrossClusterTasksCompletedRequest {\n 10: optional i32 shardID\n 20: optional string targetCluster\n 30: optional list taskResponses\n 40: optional bool fetchNewTasks\n}\n\nstruct RespondCrossClusterTasksCompletedResponse {\n 10: optional list tasks\n}\n\nenum IsolationGroupState {\n INVALID,\n HEALTHY,\n DRAINED,\n}\n\nstruct IsolationGroupPartition {\n 10: optional string name\n 20: optional IsolationGroupState state\n}\n\nstruct IsolationGroupConfiguration {\n 10: optional list isolationGroups\n}\n\nstruct AsyncWorkflowConfiguration {\n 10: optional bool enabled\n // PredefinedQueueName is the name of the predefined queue in cadence server config's asyncWorkflowQueues\n 20: optional string predefinedQueueName\n // queueType is the type of the queue if predefined_queue_name is not used\n 30: optional string queueType\n // queueConfig is the configuration for the queue if predefined_queue_name is not used\n 40: optional DataBlob queueConfig\n}\n\n/**\n* Any is a logical duplicate of google.protobuf.Any.\n*\n* The intent of the type is the same, but it is not intended to be directly\n* compatible with google.protobuf.Any or any Thrift equivalent - this blob is\n* RPC-type agnostic by design (as the underlying data may be transported over\n* proto or thrift), and the data-bytes may be in any encoding.\n*\n* This is intentionally different from DataBlob, which supports only a handful\n* of known encodings so it can be interpreted everywhere. Any supports literally\n* any contents, and needs to be considered opaque until it is given to something\n* that is expecting it.\n*\n* See ValueType to interpret the contents.\n**/\nstruct Any {\n // Type-string describing value's contents, and intentionally avoiding the\n // name \"type\" as it is often a special term.\n // This should usually be a hard-coded string of some kind.\n 10: optional string ValueType\n // Arbitrarily-encoded bytes, to be deserialized by a runtime implementation.\n // The contents are described by ValueType.\n 20: optional binary Value\n}\n\nstruct AutoConfigHint {\n 10: optional bool enableAutoConfig\n 20: optional i64 pollerWaitTimeInMs\n}\n\nstruct QueueState {\n 10: optional map virtualQueueStates\n 20: optional TaskKey exclusiveMaxReadLevel\n}\n\nstruct VirtualQueueState {\n 10: optional list virtualSliceStates\n}\n\nstruct VirtualSliceState {\n 10: optional TaskRange taskRange\n}\n\nstruct TaskRange {\n 10: optional TaskKey inclusiveMin\n 20: optional TaskKey exclusiveMax\n}\n\nstruct TaskKey {\n 10: optional i64 scheduledTimeNano\n 20: optional i64 taskID\n}\n\nstruct ActiveClusterSelectionPolicy {\n 10: optional ActiveClusterSelectionStrategy strategy\n\n // sticky_region is the region sticky if strategy is ACTIVE_CLUSTER_SELECTION_STRATEGY_REGION_STICKY\n // This is the default strategy for active-active domains and region would be set to receiver cluster's region if not specified.\n 20: optional string stickyRegion\n\n // external_entity_type/external_entity_key is the type/key of the external entity if strategy is ACTIVE_CLUSTER_SELECTION_STRATEGY_EXTERNAL_ENTITY\n // external entity type must be one of the supported types in active cluster manager. 
Custom ones can be added by implementing the corresponding interface.\n 30: optional string externalEntityType\n 40: optional string externalEntityKey\n}\n\nenum ActiveClusterSelectionStrategy {\n REGION_STICKY,\n EXTERNAL_ENTITY,\n}\n" From eda0da0950ad789b705e7659b28af97e5466576a Mon Sep 17 00:00:00 2001 From: Tim Li Date: Thu, 26 Jun 2025 13:15:00 -0700 Subject: [PATCH 6/8] respond to comments --- internal/client.go | 18 +++++++++++++++--- internal/common/metrics/service_wrapper.go | 1 - internal/compatibility/adapter.go | 4 ++-- internal/compatibility/proto/request.go | 10 ++++++++++ 4 files changed, 27 insertions(+), 6 deletions(-) diff --git a/internal/client.go b/internal/client.go index b0078c467..50f13a6ac 100644 --- a/internal/client.go +++ b/internal/client.go @@ -472,10 +472,22 @@ type ( // Optional: defaulted to Unix epoch time FirstRunAt time.Time - // CronOverlapPolicy - Policy for handling cron workflow overlaps. + // CronOverlapPolicy - Policy for handling cron workflow overlaps when a previous execution is still running at the + // start of the next scheduled execution. + // + // Example scenario: A workflow scheduled to run every 5 minutes takes 8 minutes to complete in one of the runs. + // By default (no CronOverlapPolicy specified), if that run r1 starts at t0, the next scheduled run r2 that is + // supposed to start at t0+5 is skipped, and the next run r3 is scheduled at t0+10. + // // Currently supported values are: - // - CronOverlapPolicySkip: skip the new execution if the previous one is still running - // - CronOverlapPolicyBufferOne: buffer one execution if the previous one is still running + // - CronOverlapPolicySkip: skip the new execution if the previous one is still running at its scheduled start time. + // This is the default behavior and the same as not specifying this option. + // Run r2 is skipped and the next run is scheduled at t0+10. + // - CronOverlapPolicyBufferOne: buffer one execution and start it immediately after the overrunning previous run completes. + // In this case, run r2 starts at t0+8, and the next run r3 is scheduled at t0+10 + // (assuming r2 finishes by then; otherwise r3 starts immediately after r2 completes). + // + // Note: JitterStart still applies when determining the actual start time of executions. // Optional: defaulted to CronOverlapPolicySkip CronOverlapPolicy s.CronOverlapPolicy } diff --git a/internal/common/metrics/service_wrapper.go b/internal/common/metrics/service_wrapper.go index 3e725dfa3..9247b8fcc 100644 --- a/internal/common/metrics/service_wrapper.go +++ b/internal/common/metrics/service_wrapper.go @@ -447,7 +447,6 @@ func (w *workflowServiceMetricsWrapper) RestartWorkflowExecution(ctx context.Con } func (w *workflowServiceMetricsWrapper) DeleteDomain(ctx context.Context, request *shared.DeleteDomainRequest, opts ...yarpc.CallOption) error { - //TODO implement me scope := w.getOperationScope(scopeDeleteDomain) err := w.service.DeleteDomain(ctx, request, opts...)
scope.handleError(err) diff --git a/internal/compatibility/adapter.go b/internal/compatibility/adapter.go index 043e75212..e70374add 100644 --- a/internal/compatibility/adapter.go +++ b/internal/compatibility/adapter.go @@ -269,8 +269,8 @@ func (a thrift2protoAdapter) RestartWorkflowExecution(ctx context.Context, reque } func (a thrift2protoAdapter) DeleteDomain(ctx context.Context, DeleteRequest *shared.DeleteDomainRequest, opts ...yarpc.CallOption) error { - //TODO implement me - return nil + _, err := a.domain.DeleteDomain(ctx, proto.DeleteDomainRequest(DeleteRequest), opts...) + return err } type domainAPIthriftAdapter struct { diff --git a/internal/compatibility/proto/request.go b/internal/compatibility/proto/request.go index 984244436..574e6990f 100644 --- a/internal/compatibility/proto/request.go +++ b/internal/compatibility/proto/request.go @@ -670,3 +670,13 @@ func RestartWorkflowExecutionRequest(r *shared.RestartWorkflowExecutionRequest) return &request } + +func DeleteDomainRequest(r *shared.DeleteDomainRequest) *apiv1.DeleteDomainRequest { + if r == nil { + return nil + } + return &apiv1.DeleteDomainRequest{ + Name: r.GetName(), + SecurityToken: r.GetSecurityToken(), + } +} From b321e47d2ec1af08a0e3ebbefa86093e01accd21 Mon Sep 17 00:00:00 2001 From: Tim Li Date: Thu, 26 Jun 2025 13:19:46 -0700 Subject: [PATCH 7/8] respond to comments: do not panic, return invalid/skip instead --- internal/compatibility/proto/enum.go | 2 +- internal/compatibility/thrift/enum.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/compatibility/proto/enum.go b/internal/compatibility/proto/enum.go index e4385be4e..90a11bcdf 100644 --- a/internal/compatibility/proto/enum.go +++ b/internal/compatibility/proto/enum.go @@ -404,5 +404,5 @@ func CronOverlapPolicy(t *shared.CronOverlapPolicy) apiv1.CronOverlapPolicy { case shared.CronOverlapPolicyBufferone: return apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_BUFFER_ONE } - panic("unexpected enum value") + return apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_INVALID } diff --git a/internal/compatibility/thrift/enum.go b/internal/compatibility/thrift/enum.go index 92bfa025f..42fbbe23d 100644 --- a/internal/compatibility/thrift/enum.go +++ b/internal/compatibility/thrift/enum.go @@ -391,5 +391,5 @@ func CronOverlapPolicy(t apiv1.CronOverlapPolicy) *shared.CronOverlapPolicy { case apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_BUFFER_ONE: return shared.CronOverlapPolicyBufferone.Ptr() } - panic("unexpected enum value") + return shared.CronOverlapPolicySkipped.Ptr() } From d87e37614fd771142c66e63cee51f0dd5b3e3a21 Mon Sep 17 00:00:00 2001 From: Tim Li Date: Thu, 26 Jun 2025 13:29:09 -0700 Subject: [PATCH 8/8] fix test --- internal/compatibility/enum_test.go | 2 +- internal/compatibility/thrift/enum.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/compatibility/enum_test.go b/internal/compatibility/enum_test.go index 777f14d24..2baa1631b 100644 --- a/internal/compatibility/enum_test.go +++ b/internal/compatibility/enum_test.go @@ -344,5 +344,5 @@ func TestCronOverlapPolicy(t *testing.T) { } { assert.Equal(t, v, proto.CronOverlapPolicy(thrift.CronOverlapPolicy(v))) } - assert.Panics(t, func() { proto.CronOverlapPolicy(thrift.CronOverlapPolicy(999)) }) + assert.Equal(t, apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_INVALID, proto.CronOverlapPolicy(thrift.CronOverlapPolicy(999))) } diff --git a/internal/compatibility/thrift/enum.go b/internal/compatibility/thrift/enum.go index 42fbbe23d..0fcc3e006 100644 --- 
a/internal/compatibility/thrift/enum.go +++ b/internal/compatibility/thrift/enum.go @@ -391,5 +391,6 @@ func CronOverlapPolicy(t apiv1.CronOverlapPolicy) *shared.CronOverlapPolicy { case apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_BUFFER_ONE: return shared.CronOverlapPolicyBufferone.Ptr() } - return shared.CronOverlapPolicySkipped.Ptr() + // treat any unknown value as invalid: return nil rather than panicking + return nil }
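
For anyone trying the new option end to end, here is a minimal sketch of how a caller could set CronOverlapPolicy when starting a cron workflow with the Go client. The workflow name, task list, timeouts, and cron expression are placeholders; the CronOverlapPolicy field and the generated constant CronOverlapPolicyBufferone come from this patch and the .gen/go/shared package, while client.StartWorkflowOptions and Client.StartWorkflow are the existing public API and assumed unchanged here.

package example

import (
	"context"
	"time"

	"go.uber.org/cadence/.gen/go/shared"
	"go.uber.org/cadence/client"
)

// startNightlyReport starts a cron workflow that buffers at most one overlapped
// run instead of skipping it when a run overshoots its schedule.
func startNightlyReport(ctx context.Context, c client.Client) error {
	opts := client.StartWorkflowOptions{
		ID:                              "nightly-report",  // placeholder workflow ID
		TaskList:                        "report-tasklist", // placeholder task list
		ExecutionStartToCloseTimeout:    time.Hour,
		DecisionTaskStartToCloseTimeout: time.Minute,
		CronSchedule:                    "0 2 * * *", // every day at 02:00
		// New in this change: buffer one execution when the previous run is still
		// running at the next scheduled start, instead of skipping it (the default).
		CronOverlapPolicy: shared.CronOverlapPolicyBufferone,
	}
	_, err := c.StartWorkflow(ctx, opts, "ReportWorkflow")
	return err
}

Leaving CronOverlapPolicy unset keeps the skip behavior, so existing callers are unaffected.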
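
And a condensed, illustrative view of the mapping behavior the last two patches settle on (not the actual enum_test.go): known CronOverlapPolicy values round-trip between the proto and thrift representations, an out-of-range thrift value surfaces as CRON_OVERLAP_POLICY_INVALID, and an out-of-range proto value maps to a nil thrift pointer instead of panicking. Import paths are assumed to match the ones already used in internal/compatibility.

package compatibility

import (
	"testing"

	"github.com/stretchr/testify/assert"
	apiv1 "github.com/uber/cadence-idl/go/proto/api/v1"

	"go.uber.org/cadence/.gen/go/shared"
	"go.uber.org/cadence/internal/compatibility/proto"
	"go.uber.org/cadence/internal/compatibility/thrift"
)

func TestCronOverlapPolicyUnknownValues(t *testing.T) {
	// A known value survives the proto -> thrift -> proto round trip.
	v := apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_BUFFER_ONE
	assert.Equal(t, v, proto.CronOverlapPolicy(thrift.CronOverlapPolicy(v)))

	// An out-of-range thrift value no longer panics; it maps to the INVALID proto value.
	unknown := shared.CronOverlapPolicy(999)
	assert.Equal(t, apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_INVALID, proto.CronOverlapPolicy(&unknown))

	// An out-of-range proto value no longer panics either; it maps to a nil thrift pointer.
	assert.Nil(t, thrift.CronOverlapPolicy(apiv1.CronOverlapPolicy(999)))
}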