From afa65c2916eb29b0b1cebfecf000ec8e2b9be7fe Mon Sep 17 00:00:00 2001 From: Shaddoll Date: Mon, 27 Oct 2025 13:05:00 -0700 Subject: [PATCH 1/7] feat: Introduce ListFailoverHistory and active-active refactoring from IDL --- .gen/go/cadence/cadence.go | 647 +++++- .../cadence/workflowserviceclient/client.go | 29 + .../cadence/workflowserviceserver/server.go | 48 +- .gen/go/cadence/workflowservicetest/client.go | 33 + .gen/go/shared/shared.go | 2026 +++++++++++++---- go.mod | 2 +- go.sum | 2 + idls | 2 +- internal/client.go | 15 +- internal/common/auth/service_wrapper.go | 10 + .../common/isolationgroup/service_wrapper.go | 6 + internal/common/metrics/service_wrapper.go | 8 + internal/compatibility/adapter.go | 5 + internal/compatibility/enum_test.go | 12 - internal/compatibility/proto/enum.go | 13 - internal/compatibility/proto/request.go | 33 +- internal/compatibility/proto/types.go | 80 +- internal/compatibility/testdata/common.go | 22 +- internal/compatibility/testdata/decision.go | 4 +- internal/compatibility/testdata/domain.go | 26 +- internal/compatibility/testdata/service.go | 6 +- internal/compatibility/thrift/enum.go | 17 +- internal/compatibility/thrift/request.go | 23 +- internal/compatibility/thrift/response.go | 10 + internal/compatibility/thrift/types.go | 89 +- internal/convert.go | 32 +- internal/convert_test.go | 48 +- internal/internal_workflow_client_test.go | 31 +- 28 files changed, 2678 insertions(+), 601 deletions(-) diff --git a/.gen/go/cadence/cadence.go b/.gen/go/cadence/cadence.go index 51fdc55e1..a95144a4d 100644 --- a/.gen/go/cadence/cadence.go +++ b/.gen/go/cadence/cadence.go @@ -21,14 +21,14 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "cadence", Package: "go.uber.org/cadence/.gen/go/cadence", FilePath: "cadence.thrift", - SHA1: "4ad1f024ec41ab1b9d2c2f79c53b5857bc14b868", + SHA1: "8c644a4a8acae7e865a84d625bc845ffae7ff693", Includes: []*thriftreflect.ThriftModule{ shared.ThriftModule, }, Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\ninclude \"shared.thrift\"\n\nnamespace java com.uber.cadence\n\n/**\n* WorkflowService API is exposed to provide support for long running applications. Application is expected to call\n* StartWorkflowExecution to create an instance for each instance of long running workflow. Such applications are expected\n* to have a worker which regularly polls for DecisionTask and ActivityTask from the WorkflowService. 
For each\n* DecisionTask, application is expected to process the history of events for that session and respond back with next\n* decisions. For each ActivityTask, application is expected to execute the actual logic for that task and respond back\n* with completion or failure. Worker is expected to regularly heartbeat while activity task is running.\n**/\nservice WorkflowService {\n /**\n * RegisterDomain creates a new domain which can be used as a container for all resources. Domain is a top level\n * entity within Cadence, used as a container for all resources like workflow executions, tasklists, etc. Domain\n * acts as a sandbox and provides isolation for all resources within the domain. All resources belongs to exactly one\n * domain.\n **/\n void RegisterDomain(1: shared.RegisterDomainRequest registerRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.DomainAlreadyExistsError domainExistsError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DescribeDomain returns the information and configuration for a registered domain.\n **/\n shared.DescribeDomainResponse DescribeDomain(1: shared.DescribeDomainRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListDomains returns the information and configuration for all domains.\n **/\n shared.ListDomainsResponse ListDomains(1: shared.ListDomainsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * UpdateDomain is used to update the information and configuration for a registered domain.\n **/\n shared.UpdateDomainResponse UpdateDomain(1: shared.UpdateDomainRequest updateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * FailoverDomain is used to failover a registered domain to different cluster.\n **/\n shared.FailoverDomainResponse FailoverDomain(1: shared.FailoverDomainRequest failoverRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DeprecateDomain us used to update status of a registered domain to DEPRECATED. Once the domain is deprecated\n * it cannot be used to start new workflow executions. 
Existing workflow executions will continue to run on\n * deprecated domains.\n **/\n void DeprecateDomain(1: shared.DeprecateDomainRequest deprecateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DeleteDomain permanently removes a domain record. This operation:\n * - Requires domain to be in DEPRECATED status\n * - Cannot be performed on domains with running workflows\n * - Is irreversible and removes all domain data\n * - Requires proper permissions and security token\n **/\n void DeleteDomain(1: shared.DeleteDomainRequest deleteRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 4: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RestartWorkflowExecution restarts a previous workflow\n * If the workflow is currently running it will terminate and restart\n **/\n shared.RestartWorkflowExecutionResponse RestartWorkflowExecution(1: shared.RestartWorkflowExecutionRequest restartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.DomainNotActiveError domainNotActiveError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DiagnoseWorkflowExecution diagnoses a previous workflow execution\n **/\n shared.DiagnoseWorkflowExecutionResponse DiagnoseWorkflowExecution(1: shared.DiagnoseWorkflowExecutionRequest diagnoseRequest)\n throws (\n 1: shared.DomainNotActiveError domainNotActiveError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 5: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * StartWorkflowExecution starts a new long running workflow instance. It will create the instance with\n * 'WorkflowExecutionStarted' event in history and also schedule the first DecisionTask for the worker to make the\n * first decision for this instance. It will return 'WorkflowExecutionAlreadyStartedError', if an instance already\n * exists with same workflowId.\n **/\n shared.StartWorkflowExecutionResponse StartWorkflowExecution(1: shared.StartWorkflowExecutionRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n /**\n * StartWorkflowExecutionAsync starts a new long running workflow instance asynchronously. It will push a StartWorkflowExecutionRequest to a queue\n * and immediately return a response. 
The request will be processed by a separate consumer eventually.\n **/\n shared.StartWorkflowExecutionAsyncResponse StartWorkflowExecutionAsync(1: shared.StartWorkflowExecutionAsyncRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n /**\n * Returns the history of specified workflow execution. It fails with 'EntityNotExistError' if speficied workflow\n * execution in unknown to the service.\n **/\n shared.GetWorkflowExecutionHistoryResponse GetWorkflowExecutionHistory(1: shared.GetWorkflowExecutionHistoryRequest getRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * PollForDecisionTask is called by application worker to process DecisionTask from a specific taskList. A\n * DecisionTask is dispatched to callers for active workflow executions, with pending decisions.\n * Application is then expected to call 'RespondDecisionTaskCompleted' API when it is done processing the DecisionTask.\n * It will also create a 'DecisionTaskStarted' event in the history for that session before handing off DecisionTask to\n * application worker.\n **/\n shared.PollForDecisionTaskResponse PollForDecisionTask(1: shared.PollForDecisionTaskRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondDecisionTaskCompleted is called by application worker to complete a DecisionTask handed as a result of\n * 'PollForDecisionTask' API call. Completing a DecisionTask will result in new events for the workflow execution and\n * potentially new ActivityTask being created for corresponding decisions. It will also create a DecisionTaskCompleted\n * event in the history for that session. 
Use the 'taskToken' provided as response of PollForDecisionTask API call\n * for completing the DecisionTask.\n * The response could contain a new decision task if there is one or if the request asking for one.\n **/\n shared.RespondDecisionTaskCompletedResponse RespondDecisionTaskCompleted(1: shared.RespondDecisionTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondDecisionTaskFailed is called by application worker to indicate failure. This results in\n * DecisionTaskFailedEvent written to the history and a new DecisionTask created. This API can be used by client to\n * either clear sticky tasklist or report any panics during DecisionTask processing. Cadence will only append first\n * DecisionTaskFailed event to the history of workflow execution for consecutive failures.\n **/\n void RespondDecisionTaskFailed(1: shared.RespondDecisionTaskFailedRequest failedRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * PollForActivityTask is called by application worker to process ActivityTask from a specific taskList. ActivityTask\n * is dispatched to callers whenever a ScheduleTask decision is made for a workflow execution.\n * Application is expected to call 'RespondActivityTaskCompleted' or 'RespondActivityTaskFailed' once it is done\n * processing the task.\n * Application also needs to call 'RecordActivityTaskHeartbeat' API within 'heartbeatTimeoutSeconds' interval to\n * prevent the task from getting timed out. An event 'ActivityTaskStarted' event is also written to workflow execution\n * history before the ActivityTask is dispatched to application worker.\n **/\n shared.PollForActivityTaskResponse PollForActivityTask(1: shared.PollForActivityTaskRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will\n * fail with 'EntityNotExistsError' in such situations. 
Use the 'taskToken' provided as response of\n * PollForActivityTask API call for heartbeating.\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeat(1: shared.RecordActivityTaskHeartbeatRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RecordActivityTaskHeartbeatByID is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeatByID' will\n * fail with 'EntityNotExistsError' in such situations. Instead of using 'taskToken' like in RecordActivityTaskHeartbeat,\n * use Domain, WorkflowID and ActivityID\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeatByID(1: shared.RecordActivityTaskHeartbeatByIDRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCompleted(1: shared.RespondActivityTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCompletedByID is called by application worker when it is done processing an ActivityTask.\n * It will result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Similar to RespondActivityTaskCompleted but use Domain,\n * WorkflowID and ActivityID instead of 'taskToken' for completion. 
It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskCompletedByID(1: shared.RespondActivityTaskCompletedByIDRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskFailed(1: shared.RespondActivityTaskFailedRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskFailedByID is called by application worker when it is done processing an ActivityTask.\n * It will result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Similar to RespondActivityTaskFailed but use\n * Domain, WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskFailedByID(1: shared.RespondActivityTaskFailedByIDRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCanceled is called by application worker when it is successfully canceled an ActivityTask. It will\n * result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. 
It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceled(1: shared.RespondActivityTaskCanceledRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCanceledByID is called by application worker when it is successfully canceled an ActivityTask.\n * It will result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Similar to RespondActivityTaskCanceled but use\n * Domain, WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceledByID(1: shared.RespondActivityTaskCanceledByIDRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.\n * It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. It fails with 'EntityNotExistsError' if the workflow is not valid\n * anymore due to completion or doesn't exist.\n **/\n void RequestCancelWorkflowExecution(1: shared.RequestCancelWorkflowExecutionRequest cancelRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.CancellationAlreadyRequestedError cancellationAlreadyRequestedError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 10: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * SignalWorkflowExecution is used to send a signal event to running workflow execution. 
This results in\n * WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.\n **/\n void SignalWorkflowExecution(1: shared.SignalWorkflowExecutionRequest signalRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * SignalWithStartWorkflowExecution is used to ensure sending signal to a workflow.\n * If the workflow is running, this results in WorkflowExecutionSignaled event being recorded in the history\n * and a decision task being created for the execution.\n * If the workflow is not running or not found, this results in WorkflowExecutionStarted and WorkflowExecutionSignaled\n * events being recorded in history, and a decision task being created for the execution\n **/\n shared.StartWorkflowExecutionResponse SignalWithStartWorkflowExecution(1: shared.SignalWithStartWorkflowExecutionRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.WorkflowExecutionAlreadyStartedError workflowAlreadyStartedError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * SignalWithStartWorkflowExecutionAsync is used to ensure sending signal to a workflow asynchronously. It will push a SignalWithStartWorkflowExecutionRequest to a queue\n * and immediately return a response. 
The request will be processed by a separate consumer eventually.\n **/\n shared.SignalWithStartWorkflowExecutionAsyncResponse SignalWithStartWorkflowExecutionAsync(1: shared.SignalWithStartWorkflowExecutionAsyncRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n /**\n * ResetWorkflowExecution reset an existing workflow execution to DecisionTaskCompleted event(exclusive).\n * And it will immediately terminating the current execution instance.\n **/\n shared.ResetWorkflowExecutionResponse ResetWorkflowExecution(1: shared.ResetWorkflowExecutionRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event\n * in the history and immediately terminating the execution instance.\n **/\n void TerminateWorkflowExecution(1: shared.TerminateWorkflowExecutionRequest terminateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListOpenWorkflowExecutions is a visibility API to list the open executions in a specific domain.\n **/\n shared.ListOpenWorkflowExecutionsResponse ListOpenWorkflowExecutions(1: shared.ListOpenWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListClosedWorkflowExecutions is a visibility API to list the closed executions in a specific domain.\n **/\n shared.ListClosedWorkflowExecutionsResponse ListClosedWorkflowExecutions(1: shared.ListClosedWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListWorkflowExecutions is a visibility API to list workflow executions in a specific domain.\n **/\n shared.ListWorkflowExecutionsResponse ListWorkflowExecutions(1: shared.ListWorkflowExecutionsRequest listRequest)\n throws (\n 1: 
shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListArchivedWorkflowExecutions is a visibility API to list archived workflow executions in a specific domain.\n **/\n shared.ListArchivedWorkflowExecutionsResponse ListArchivedWorkflowExecutions(1: shared.ListArchivedWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ScanWorkflowExecutions is a visibility API to list large amount of workflow executions in a specific domain without order.\n **/\n shared.ListWorkflowExecutionsResponse ScanWorkflowExecutions(1: shared.ListWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * CountWorkflowExecutions is a visibility API to count of workflow executions in a specific domain.\n **/\n shared.CountWorkflowExecutionsResponse CountWorkflowExecutions(1: shared.CountWorkflowExecutionsRequest countRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * GetSearchAttributes is a visibility API to get all legal keys that could be used in list APIs\n **/\n shared.GetSearchAttributesResponse GetSearchAttributes()\n throws (\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 4: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondQueryTaskCompleted is called by application worker to complete a QueryTask (which is a DecisionTask for query)\n * as a result of 'PollForDecisionTask' API call. Completing a QueryTask will unblock the client call to 'QueryWorkflow'\n * API and return the query result to client as a response to 'QueryWorkflow' API call.\n **/\n void RespondQueryTaskCompleted(1: shared.RespondQueryTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * Reset the sticky tasklist related information in mutable state of a given workflow.\n * Things cleared are:\n * 1. StickyTaskList\n * 2. StickyScheduleToStartTimeout\n * 3. ClientLibraryVersion\n * 4. ClientFeatureVersion\n * 5. 
ClientImpl\n **/\n shared.ResetStickyTaskListResponse ResetStickyTaskList(1: shared.ResetStickyTaskListRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * QueryWorkflow returns query result for a specified workflow execution\n **/\n shared.QueryWorkflowResponse QueryWorkflow(1: shared.QueryWorkflowRequest queryRequest)\n\tthrows (\n\t 1: shared.BadRequestError badRequestError,\n\t 3: shared.EntityNotExistsError entityNotExistError,\n\t 4: shared.QueryFailedError queryFailedError,\n\t 5: shared.LimitExceededError limitExceededError,\n\t 6: shared.ServiceBusyError serviceBusyError,\n\t 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n\t)\n\n /**\n * DescribeWorkflowExecution returns information about the specified workflow execution.\n **/\n shared.DescribeWorkflowExecutionResponse DescribeWorkflowExecution(1: shared.DescribeWorkflowExecutionRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DescribeTaskList returns information about the target tasklist, right now this API returns the\n * pollers which polled this tasklist in last few minutes.\n **/\n shared.DescribeTaskListResponse DescribeTaskList(1: shared.DescribeTaskListRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * GetClusterInfo returns information about cadence cluster\n **/\n shared.ClusterInfo GetClusterInfo()\n throws (\n 1: shared.InternalServiceError internalServiceError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * GetTaskListsByDomain returns the list of all the task lists for a domainName.\n **/\n shared.GetTaskListsByDomainResponse GetTaskListsByDomain(1: shared.GetTaskListsByDomainRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.EntityNotExistsError entityNotExistError,\n 3: shared.LimitExceededError limitExceededError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ReapplyEvents applies stale events to the current workflow and current run\n **/\n shared.ListTaskListPartitionsResponse ListTaskListPartitions(1: shared.ListTaskListPartitionsRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: 
shared.ServiceBusyError serviceBusyError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RefreshWorkflowTasks refreshes all tasks of a workflow\n **/\n void RefreshWorkflowTasks(1: shared.RefreshWorkflowTasksRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.DomainNotActiveError domainNotActiveError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: shared.AccessDeniedError accessDeniedError,\n )\n}\n" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\ninclude \"shared.thrift\"\n\nnamespace java com.uber.cadence\n\n/**\n* WorkflowService API is exposed to provide support for long running applications. Application is expected to call\n* StartWorkflowExecution to create an instance for each instance of long running workflow. Such applications are expected\n* to have a worker which regularly polls for DecisionTask and ActivityTask from the WorkflowService. For each\n* DecisionTask, application is expected to process the history of events for that session and respond back with next\n* decisions. For each ActivityTask, application is expected to execute the actual logic for that task and respond back\n* with completion or failure. Worker is expected to regularly heartbeat while activity task is running.\n**/\nservice WorkflowService {\n /**\n * RegisterDomain creates a new domain which can be used as a container for all resources. Domain is a top level\n * entity within Cadence, used as a container for all resources like workflow executions, tasklists, etc. Domain\n * acts as a sandbox and provides isolation for all resources within the domain. 
All resources belongs to exactly one\n * domain.\n **/\n void RegisterDomain(1: shared.RegisterDomainRequest registerRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.DomainAlreadyExistsError domainExistsError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DescribeDomain returns the information and configuration for a registered domain.\n **/\n shared.DescribeDomainResponse DescribeDomain(1: shared.DescribeDomainRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListDomains returns the information and configuration for all domains.\n **/\n shared.ListDomainsResponse ListDomains(1: shared.ListDomainsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * UpdateDomain is used to update the information and configuration for a registered domain.\n **/\n shared.UpdateDomainResponse UpdateDomain(1: shared.UpdateDomainRequest updateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * FailoverDomain is used to failover a registered domain to different cluster.\n **/\n shared.FailoverDomainResponse FailoverDomain(1: shared.FailoverDomainRequest failoverRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DeprecateDomain us used to update status of a registered domain to DEPRECATED. Once the domain is deprecated\n * it cannot be used to start new workflow executions. Existing workflow executions will continue to run on\n * deprecated domains.\n **/\n void DeprecateDomain(1: shared.DeprecateDomainRequest deprecateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DeleteDomain permanently removes a domain record. 
This operation:\n * - Requires domain to be in DEPRECATED status\n * - Cannot be performed on domains with running workflows\n * - Is irreversible and removes all domain data\n * - Requires proper permissions and security token\n **/\n void DeleteDomain(1: shared.DeleteDomainRequest deleteRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 4: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListFailoverHistory returns the history of failover events for a domain.\n **/\n shared.ListFailoverHistoryResponse ListFailoverHistory(1: shared.ListFailoverHistoryRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 4: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RestartWorkflowExecution restarts a previous workflow\n * If the workflow is currently running it will terminate and restart\n **/\n shared.RestartWorkflowExecutionResponse RestartWorkflowExecution(1: shared.RestartWorkflowExecutionRequest restartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.DomainNotActiveError domainNotActiveError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DiagnoseWorkflowExecution diagnoses a previous workflow execution\n **/\n shared.DiagnoseWorkflowExecutionResponse DiagnoseWorkflowExecution(1: shared.DiagnoseWorkflowExecutionRequest diagnoseRequest)\n throws (\n 1: shared.DomainNotActiveError domainNotActiveError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 5: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * StartWorkflowExecution starts a new long running workflow instance. It will create the instance with\n * 'WorkflowExecutionStarted' event in history and also schedule the first DecisionTask for the worker to make the\n * first decision for this instance. It will return 'WorkflowExecutionAlreadyStartedError', if an instance already\n * exists with same workflowId.\n **/\n shared.StartWorkflowExecutionResponse StartWorkflowExecution(1: shared.StartWorkflowExecutionRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n /**\n * StartWorkflowExecutionAsync starts a new long running workflow instance asynchronously. It will push a StartWorkflowExecutionRequest to a queue\n * and immediately return a response. 
The request will be processed by a separate consumer eventually.\n **/\n shared.StartWorkflowExecutionAsyncResponse StartWorkflowExecutionAsync(1: shared.StartWorkflowExecutionAsyncRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n /**\n * Returns the history of specified workflow execution. It fails with 'EntityNotExistError' if speficied workflow\n * execution in unknown to the service.\n **/\n shared.GetWorkflowExecutionHistoryResponse GetWorkflowExecutionHistory(1: shared.GetWorkflowExecutionHistoryRequest getRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * PollForDecisionTask is called by application worker to process DecisionTask from a specific taskList. A\n * DecisionTask is dispatched to callers for active workflow executions, with pending decisions.\n * Application is then expected to call 'RespondDecisionTaskCompleted' API when it is done processing the DecisionTask.\n * It will also create a 'DecisionTaskStarted' event in the history for that session before handing off DecisionTask to\n * application worker.\n **/\n shared.PollForDecisionTaskResponse PollForDecisionTask(1: shared.PollForDecisionTaskRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondDecisionTaskCompleted is called by application worker to complete a DecisionTask handed as a result of\n * 'PollForDecisionTask' API call. Completing a DecisionTask will result in new events for the workflow execution and\n * potentially new ActivityTask being created for corresponding decisions. It will also create a DecisionTaskCompleted\n * event in the history for that session. 
Use the 'taskToken' provided as response of PollForDecisionTask API call\n * for completing the DecisionTask.\n * The response could contain a new decision task if there is one or if the request asking for one.\n **/\n shared.RespondDecisionTaskCompletedResponse RespondDecisionTaskCompleted(1: shared.RespondDecisionTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondDecisionTaskFailed is called by application worker to indicate failure. This results in\n * DecisionTaskFailedEvent written to the history and a new DecisionTask created. This API can be used by client to\n * either clear sticky tasklist or report any panics during DecisionTask processing. Cadence will only append first\n * DecisionTaskFailed event to the history of workflow execution for consecutive failures.\n **/\n void RespondDecisionTaskFailed(1: shared.RespondDecisionTaskFailedRequest failedRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * PollForActivityTask is called by application worker to process ActivityTask from a specific taskList. ActivityTask\n * is dispatched to callers whenever a ScheduleTask decision is made for a workflow execution.\n * Application is expected to call 'RespondActivityTaskCompleted' or 'RespondActivityTaskFailed' once it is done\n * processing the task.\n * Application also needs to call 'RecordActivityTaskHeartbeat' API within 'heartbeatTimeoutSeconds' interval to\n * prevent the task from getting timed out. An event 'ActivityTaskStarted' event is also written to workflow execution\n * history before the ActivityTask is dispatched to application worker.\n **/\n shared.PollForActivityTaskResponse PollForActivityTask(1: shared.PollForActivityTaskRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.EntityNotExistsError entityNotExistError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will\n * fail with 'EntityNotExistsError' in such situations. 
Use the 'taskToken' provided as response of\n * PollForActivityTask API call for heartbeating.\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeat(1: shared.RecordActivityTaskHeartbeatRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RecordActivityTaskHeartbeatByID is called by application worker while it is processing an ActivityTask. If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeatByID' will\n * fail with 'EntityNotExistsError' in such situations. Instead of using 'taskToken' like in RecordActivityTaskHeartbeat,\n * use Domain, WorkflowID and ActivityID\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeatByID(1: shared.RecordActivityTaskHeartbeatByIDRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCompleted(1: shared.RespondActivityTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCompletedByID is called by application worker when it is done processing an ActivityTask.\n * It will result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Similar to RespondActivityTaskCompleted but use Domain,\n * WorkflowID and ActivityID instead of 'taskToken' for completion. 
It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskCompletedByID(1: shared.RespondActivityTaskCompletedByIDRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskFailed(1: shared.RespondActivityTaskFailedRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskFailedByID is called by application worker when it is done processing an ActivityTask.\n * It will result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Similar to RespondActivityTaskFailed but use\n * Domain, WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskFailedByID(1: shared.RespondActivityTaskFailedByIDRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCanceled is called by application worker when it is successfully canceled an ActivityTask. It will\n * result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. 
It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceled(1: shared.RespondActivityTaskCanceledRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondActivityTaskCanceledByID is called by application worker when it is successfully canceled an ActivityTask.\n * It will result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Similar to RespondActivityTaskCanceled but use\n * Domain, WorkflowID and ActivityID instead of 'taskToken' for completion. It fails with 'EntityNotExistsError'\n * if the these IDs are not valid anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceledByID(1: shared.RespondActivityTaskCanceledByIDRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.\n * It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. It fails with 'EntityNotExistsError' if the workflow is not valid\n * anymore due to completion or doesn't exist.\n **/\n void RequestCancelWorkflowExecution(1: shared.RequestCancelWorkflowExecutionRequest cancelRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.CancellationAlreadyRequestedError cancellationAlreadyRequestedError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 10: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * SignalWorkflowExecution is used to send a signal event to running workflow execution. 
This results in\n * WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.\n **/\n void SignalWorkflowExecution(1: shared.SignalWorkflowExecutionRequest signalRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * SignalWithStartWorkflowExecution is used to ensure sending signal to a workflow.\n * If the workflow is running, this results in WorkflowExecutionSignaled event being recorded in the history\n * and a decision task being created for the execution.\n * If the workflow is not running or not found, this results in WorkflowExecutionStarted and WorkflowExecutionSignaled\n * events being recorded in history, and a decision task being created for the execution\n **/\n shared.StartWorkflowExecutionResponse SignalWithStartWorkflowExecution(1: shared.SignalWithStartWorkflowExecutionRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.WorkflowExecutionAlreadyStartedError workflowAlreadyStartedError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * SignalWithStartWorkflowExecutionAsync is used to ensure sending signal to a workflow asynchronously. It will push a SignalWithStartWorkflowExecutionRequest to a queue\n * and immediately return a response. 
The request will be processed by a separate consumer eventually.\n **/\n shared.SignalWithStartWorkflowExecutionAsyncResponse SignalWithStartWorkflowExecutionAsync(1: shared.SignalWithStartWorkflowExecutionAsyncRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.EntityNotExistsError entityNotExistError,\n 8: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n /**\n * ResetWorkflowExecution reset an existing workflow execution to DecisionTaskCompleted event(exclusive).\n * And it will immediately terminating the current execution instance.\n **/\n shared.ResetWorkflowExecutionResponse ResetWorkflowExecution(1: shared.ResetWorkflowExecutionRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event\n * in the history and immediately terminating the execution instance.\n **/\n void TerminateWorkflowExecution(1: shared.TerminateWorkflowExecutionRequest terminateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListOpenWorkflowExecutions is a visibility API to list the open executions in a specific domain.\n **/\n shared.ListOpenWorkflowExecutionsResponse ListOpenWorkflowExecutions(1: shared.ListOpenWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListClosedWorkflowExecutions is a visibility API to list the closed executions in a specific domain.\n **/\n shared.ListClosedWorkflowExecutionsResponse ListClosedWorkflowExecutions(1: shared.ListClosedWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListWorkflowExecutions is a visibility API to list workflow executions in a specific domain.\n **/\n shared.ListWorkflowExecutionsResponse ListWorkflowExecutions(1: shared.ListWorkflowExecutionsRequest listRequest)\n throws (\n 1: 
shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ListArchivedWorkflowExecutions is a visibility API to list archived workflow executions in a specific domain.\n **/\n shared.ListArchivedWorkflowExecutionsResponse ListArchivedWorkflowExecutions(1: shared.ListArchivedWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ScanWorkflowExecutions is a visibility API to list large amount of workflow executions in a specific domain without order.\n **/\n shared.ListWorkflowExecutionsResponse ScanWorkflowExecutions(1: shared.ListWorkflowExecutionsRequest listRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * CountWorkflowExecutions is a visibility API to count of workflow executions in a specific domain.\n **/\n shared.CountWorkflowExecutionsResponse CountWorkflowExecutions(1: shared.CountWorkflowExecutionsRequest countRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * GetSearchAttributes is a visibility API to get all legal keys that could be used in list APIs\n **/\n shared.GetSearchAttributesResponse GetSearchAttributes()\n throws (\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 4: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RespondQueryTaskCompleted is called by application worker to complete a QueryTask (which is a DecisionTask for query)\n * as a result of 'PollForDecisionTask' API call. Completing a QueryTask will unblock the client call to 'QueryWorkflow'\n * API and return the query result to client as a response to 'QueryWorkflow' API call.\n **/\n void RespondQueryTaskCompleted(1: shared.RespondQueryTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * Reset the sticky tasklist related information in mutable state of a given workflow.\n * Things cleared are:\n * 1. StickyTaskList\n * 2. StickyScheduleToStartTimeout\n * 3. ClientLibraryVersion\n * 4. ClientFeatureVersion\n * 5. 
ClientImpl\n **/\n shared.ResetStickyTaskListResponse ResetStickyTaskList(1: shared.ResetStickyTaskListRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.WorkflowExecutionAlreadyCompletedError workflowExecutionAlreadyCompletedError,\n 9: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * QueryWorkflow returns query result for a specified workflow execution\n **/\n shared.QueryWorkflowResponse QueryWorkflow(1: shared.QueryWorkflowRequest queryRequest)\n\tthrows (\n\t 1: shared.BadRequestError badRequestError,\n\t 3: shared.EntityNotExistsError entityNotExistError,\n\t 4: shared.QueryFailedError queryFailedError,\n\t 5: shared.LimitExceededError limitExceededError,\n\t 6: shared.ServiceBusyError serviceBusyError,\n\t 7: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 8: shared.AccessDeniedError accessDeniedError,\n\t)\n\n /**\n * DescribeWorkflowExecution returns information about the specified workflow execution.\n **/\n shared.DescribeWorkflowExecutionResponse DescribeWorkflowExecution(1: shared.DescribeWorkflowExecutionRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * DescribeTaskList returns information about the target tasklist, right now this API returns the\n * pollers which polled this tasklist in last few minutes.\n **/\n shared.DescribeTaskListResponse DescribeTaskList(1: shared.DescribeTaskListRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 7: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * GetClusterInfo returns information about cadence cluster\n **/\n shared.ClusterInfo GetClusterInfo()\n throws (\n 1: shared.InternalServiceError internalServiceError,\n 2: shared.ServiceBusyError serviceBusyError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * GetTaskListsByDomain returns the list of all the task lists for a domainName.\n **/\n shared.GetTaskListsByDomainResponse GetTaskListsByDomain(1: shared.GetTaskListsByDomainRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.EntityNotExistsError entityNotExistError,\n 3: shared.LimitExceededError limitExceededError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * ReapplyEvents applies stale events to the current workflow and current run\n **/\n shared.ListTaskListPartitionsResponse ListTaskListPartitions(1: shared.ListTaskListPartitionsRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.LimitExceededError limitExceededError,\n 5: 
shared.ServiceBusyError serviceBusyError,\n 6: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RefreshWorkflowTasks refreshes all tasks of a workflow\n **/\n void RefreshWorkflowTasks(1: shared.RefreshWorkflowTasksRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.DomainNotActiveError domainNotActiveError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: shared.AccessDeniedError accessDeniedError,\n )\n}\n" // WorkflowService_CountWorkflowExecutions_Args represents the arguments for the WorkflowService.CountWorkflowExecutions function. // @@ -10327,6 +10327,649 @@ func (v *WorkflowService_ListDomains_Result) EnvelopeType() wire.EnvelopeType { return wire.Reply } +// WorkflowService_ListFailoverHistory_Args represents the arguments for the WorkflowService.ListFailoverHistory function. +// +// The arguments for ListFailoverHistory are sent and received over the wire as this struct. +type WorkflowService_ListFailoverHistory_Args struct { + ListRequest *shared.ListFailoverHistoryRequest `json:"listRequest,omitempty"` +} + +// ToWire translates a WorkflowService_ListFailoverHistory_Args struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *WorkflowService_ListFailoverHistory_Args) ToWire() (wire.Value, error) { + var ( + fields [1]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.ListRequest != nil { + w, err = v.ListRequest.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 1, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _ListFailoverHistoryRequest_Read(w wire.Value) (*shared.ListFailoverHistoryRequest, error) { + var v shared.ListFailoverHistoryRequest + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a WorkflowService_ListFailoverHistory_Args struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a WorkflowService_ListFailoverHistory_Args struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v WorkflowService_ListFailoverHistory_Args +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *WorkflowService_ListFailoverHistory_Args) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 1: + if field.Value.Type() == wire.TStruct { + v.ListRequest, err = _ListFailoverHistoryRequest_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a WorkflowService_ListFailoverHistory_Args +// struct. 
+func (v *WorkflowService_ListFailoverHistory_Args) String() string { + if v == nil { + return "" + } + + var fields [1]string + i := 0 + if v.ListRequest != nil { + fields[i] = fmt.Sprintf("ListRequest: %v", v.ListRequest) + i++ + } + + return fmt.Sprintf("WorkflowService_ListFailoverHistory_Args{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this WorkflowService_ListFailoverHistory_Args match the +// provided WorkflowService_ListFailoverHistory_Args. +// +// This function performs a deep comparison. +func (v *WorkflowService_ListFailoverHistory_Args) Equals(rhs *WorkflowService_ListFailoverHistory_Args) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.ListRequest == nil && rhs.ListRequest == nil) || (v.ListRequest != nil && rhs.ListRequest != nil && v.ListRequest.Equals(rhs.ListRequest))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of WorkflowService_ListFailoverHistory_Args. +func (v *WorkflowService_ListFailoverHistory_Args) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.ListRequest != nil { + err = multierr.Append(err, enc.AddObject("listRequest", v.ListRequest)) + } + return err +} + +// GetListRequest returns the value of ListRequest if it is set or its +// zero value if it is unset. +func (v *WorkflowService_ListFailoverHistory_Args) GetListRequest() (o *shared.ListFailoverHistoryRequest) { + if v != nil && v.ListRequest != nil { + return v.ListRequest + } + + return +} + +// IsSetListRequest returns true if ListRequest is not nil. +func (v *WorkflowService_ListFailoverHistory_Args) IsSetListRequest() bool { + return v != nil && v.ListRequest != nil +} + +// MethodName returns the name of the Thrift function as specified in +// the IDL, for which this struct represent the arguments. +// +// This will always be "ListFailoverHistory" for this struct. +func (v *WorkflowService_ListFailoverHistory_Args) MethodName() string { + return "ListFailoverHistory" +} + +// EnvelopeType returns the kind of value inside this struct. +// +// This will always be Call for this struct. +func (v *WorkflowService_ListFailoverHistory_Args) EnvelopeType() wire.EnvelopeType { + return wire.Call +} + +// WorkflowService_ListFailoverHistory_Helper provides functions that aid in handling the +// parameters and return values of the WorkflowService.ListFailoverHistory +// function. +var WorkflowService_ListFailoverHistory_Helper = struct { + // Args accepts the parameters of ListFailoverHistory in-order and returns + // the arguments struct for the function. + Args func( + listRequest *shared.ListFailoverHistoryRequest, + ) *WorkflowService_ListFailoverHistory_Args + + // IsException returns true if the given error can be thrown + // by ListFailoverHistory. + // + // An error can be thrown by ListFailoverHistory only if the + // corresponding exception type was mentioned in the 'throws' + // section for it in the Thrift file. + IsException func(error) bool + + // WrapResponse returns the result struct for ListFailoverHistory + // given its return value and error. + // + // This allows mapping values and errors returned by + // ListFailoverHistory into a serializable result struct. 
+ // WrapResponse returns a non-nil error if the provided + // error cannot be thrown by ListFailoverHistory + // + // value, err := ListFailoverHistory(args) + // result, err := WorkflowService_ListFailoverHistory_Helper.WrapResponse(value, err) + // if err != nil { + // return fmt.Errorf("unexpected error from ListFailoverHistory: %v", err) + // } + // serialize(result) + WrapResponse func(*shared.ListFailoverHistoryResponse, error) (*WorkflowService_ListFailoverHistory_Result, error) + + // UnwrapResponse takes the result struct for ListFailoverHistory + // and returns the value or error returned by it. + // + // The error is non-nil only if ListFailoverHistory threw an + // exception. + // + // result := deserialize(bytes) + // value, err := WorkflowService_ListFailoverHistory_Helper.UnwrapResponse(result) + UnwrapResponse func(*WorkflowService_ListFailoverHistory_Result) (*shared.ListFailoverHistoryResponse, error) +}{} + +func init() { + WorkflowService_ListFailoverHistory_Helper.Args = func( + listRequest *shared.ListFailoverHistoryRequest, + ) *WorkflowService_ListFailoverHistory_Args { + return &WorkflowService_ListFailoverHistory_Args{ + ListRequest: listRequest, + } + } + + WorkflowService_ListFailoverHistory_Helper.IsException = func(err error) bool { + switch err.(type) { + case *shared.BadRequestError: + return true + case *shared.ServiceBusyError: + return true + case *shared.ClientVersionNotSupportedError: + return true + case *shared.AccessDeniedError: + return true + default: + return false + } + } + + WorkflowService_ListFailoverHistory_Helper.WrapResponse = func(success *shared.ListFailoverHistoryResponse, err error) (*WorkflowService_ListFailoverHistory_Result, error) { + if err == nil { + return &WorkflowService_ListFailoverHistory_Result{Success: success}, nil + } + + switch e := err.(type) { + case *shared.BadRequestError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for WorkflowService_ListFailoverHistory_Result.BadRequestError") + } + return &WorkflowService_ListFailoverHistory_Result{BadRequestError: e}, nil + case *shared.ServiceBusyError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for WorkflowService_ListFailoverHistory_Result.ServiceBusyError") + } + return &WorkflowService_ListFailoverHistory_Result{ServiceBusyError: e}, nil + case *shared.ClientVersionNotSupportedError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for WorkflowService_ListFailoverHistory_Result.ClientVersionNotSupportedError") + } + return &WorkflowService_ListFailoverHistory_Result{ClientVersionNotSupportedError: e}, nil + case *shared.AccessDeniedError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for WorkflowService_ListFailoverHistory_Result.AccessDeniedError") + } + return &WorkflowService_ListFailoverHistory_Result{AccessDeniedError: e}, nil + } + + return nil, err + } + WorkflowService_ListFailoverHistory_Helper.UnwrapResponse = func(result *WorkflowService_ListFailoverHistory_Result) (success *shared.ListFailoverHistoryResponse, err error) { + if result.BadRequestError != nil { + err = result.BadRequestError + return + } + if result.ServiceBusyError != nil { + err = result.ServiceBusyError + return + } + if result.ClientVersionNotSupportedError != nil { + err = result.ClientVersionNotSupportedError + return + } + if result.AccessDeniedError != nil { + err = 
result.AccessDeniedError + return + } + + if result.Success != nil { + success = result.Success + return + } + + err = errors.New("expected a non-void result") + return + } + +} + +// WorkflowService_ListFailoverHistory_Result represents the result of a WorkflowService.ListFailoverHistory function call. +// +// The result of a ListFailoverHistory execution is sent and received over the wire as this struct. +// +// Success is set only if the function did not throw an exception. +type WorkflowService_ListFailoverHistory_Result struct { + // Value returned by ListFailoverHistory after a successful execution. + Success *shared.ListFailoverHistoryResponse `json:"success,omitempty"` + BadRequestError *shared.BadRequestError `json:"badRequestError,omitempty"` + ServiceBusyError *shared.ServiceBusyError `json:"serviceBusyError,omitempty"` + ClientVersionNotSupportedError *shared.ClientVersionNotSupportedError `json:"clientVersionNotSupportedError,omitempty"` + AccessDeniedError *shared.AccessDeniedError `json:"accessDeniedError,omitempty"` +} + +// ToWire translates a WorkflowService_ListFailoverHistory_Result struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *WorkflowService_ListFailoverHistory_Result) ToWire() (wire.Value, error) { + var ( + fields [5]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.Success != nil { + w, err = v.Success.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 0, Value: w} + i++ + } + if v.BadRequestError != nil { + w, err = v.BadRequestError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 1, Value: w} + i++ + } + if v.ServiceBusyError != nil { + w, err = v.ServiceBusyError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 2, Value: w} + i++ + } + if v.ClientVersionNotSupportedError != nil { + w, err = v.ClientVersionNotSupportedError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 3, Value: w} + i++ + } + if v.AccessDeniedError != nil { + w, err = v.AccessDeniedError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 4, Value: w} + i++ + } + + if i != 1 { + return wire.Value{}, fmt.Errorf("WorkflowService_ListFailoverHistory_Result should have exactly one field: got %v fields", i) + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _ListFailoverHistoryResponse_Read(w wire.Value) (*shared.ListFailoverHistoryResponse, error) { + var v shared.ListFailoverHistoryResponse + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a WorkflowService_ListFailoverHistory_Result struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a WorkflowService_ListFailoverHistory_Result struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v WorkflowService_ListFailoverHistory_Result +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *WorkflowService_ListFailoverHistory_Result) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 0: + if field.Value.Type() == wire.TStruct { + v.Success, err = _ListFailoverHistoryResponse_Read(field.Value) + if err != nil { + return err + } + + } + case 1: + if field.Value.Type() == wire.TStruct { + v.BadRequestError, err = _BadRequestError_Read(field.Value) + if err != nil { + return err + } + + } + case 2: + if field.Value.Type() == wire.TStruct { + v.ServiceBusyError, err = _ServiceBusyError_Read(field.Value) + if err != nil { + return err + } + + } + case 3: + if field.Value.Type() == wire.TStruct { + v.ClientVersionNotSupportedError, err = _ClientVersionNotSupportedError_Read(field.Value) + if err != nil { + return err + } + + } + case 4: + if field.Value.Type() == wire.TStruct { + v.AccessDeniedError, err = _AccessDeniedError_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + count := 0 + if v.Success != nil { + count++ + } + if v.BadRequestError != nil { + count++ + } + if v.ServiceBusyError != nil { + count++ + } + if v.ClientVersionNotSupportedError != nil { + count++ + } + if v.AccessDeniedError != nil { + count++ + } + if count != 1 { + return fmt.Errorf("WorkflowService_ListFailoverHistory_Result should have exactly one field: got %v fields", count) + } + + return nil +} + +// String returns a readable string representation of a WorkflowService_ListFailoverHistory_Result +// struct. +func (v *WorkflowService_ListFailoverHistory_Result) String() string { + if v == nil { + return "" + } + + var fields [5]string + i := 0 + if v.Success != nil { + fields[i] = fmt.Sprintf("Success: %v", v.Success) + i++ + } + if v.BadRequestError != nil { + fields[i] = fmt.Sprintf("BadRequestError: %v", v.BadRequestError) + i++ + } + if v.ServiceBusyError != nil { + fields[i] = fmt.Sprintf("ServiceBusyError: %v", v.ServiceBusyError) + i++ + } + if v.ClientVersionNotSupportedError != nil { + fields[i] = fmt.Sprintf("ClientVersionNotSupportedError: %v", v.ClientVersionNotSupportedError) + i++ + } + if v.AccessDeniedError != nil { + fields[i] = fmt.Sprintf("AccessDeniedError: %v", v.AccessDeniedError) + i++ + } + + return fmt.Sprintf("WorkflowService_ListFailoverHistory_Result{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this WorkflowService_ListFailoverHistory_Result match the +// provided WorkflowService_ListFailoverHistory_Result. +// +// This function performs a deep comparison. 
+func (v *WorkflowService_ListFailoverHistory_Result) Equals(rhs *WorkflowService_ListFailoverHistory_Result) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.Success == nil && rhs.Success == nil) || (v.Success != nil && rhs.Success != nil && v.Success.Equals(rhs.Success))) { + return false + } + if !((v.BadRequestError == nil && rhs.BadRequestError == nil) || (v.BadRequestError != nil && rhs.BadRequestError != nil && v.BadRequestError.Equals(rhs.BadRequestError))) { + return false + } + if !((v.ServiceBusyError == nil && rhs.ServiceBusyError == nil) || (v.ServiceBusyError != nil && rhs.ServiceBusyError != nil && v.ServiceBusyError.Equals(rhs.ServiceBusyError))) { + return false + } + if !((v.ClientVersionNotSupportedError == nil && rhs.ClientVersionNotSupportedError == nil) || (v.ClientVersionNotSupportedError != nil && rhs.ClientVersionNotSupportedError != nil && v.ClientVersionNotSupportedError.Equals(rhs.ClientVersionNotSupportedError))) { + return false + } + if !((v.AccessDeniedError == nil && rhs.AccessDeniedError == nil) || (v.AccessDeniedError != nil && rhs.AccessDeniedError != nil && v.AccessDeniedError.Equals(rhs.AccessDeniedError))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of WorkflowService_ListFailoverHistory_Result. +func (v *WorkflowService_ListFailoverHistory_Result) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.Success != nil { + err = multierr.Append(err, enc.AddObject("success", v.Success)) + } + if v.BadRequestError != nil { + err = multierr.Append(err, enc.AddObject("badRequestError", v.BadRequestError)) + } + if v.ServiceBusyError != nil { + err = multierr.Append(err, enc.AddObject("serviceBusyError", v.ServiceBusyError)) + } + if v.ClientVersionNotSupportedError != nil { + err = multierr.Append(err, enc.AddObject("clientVersionNotSupportedError", v.ClientVersionNotSupportedError)) + } + if v.AccessDeniedError != nil { + err = multierr.Append(err, enc.AddObject("accessDeniedError", v.AccessDeniedError)) + } + return err +} + +// GetSuccess returns the value of Success if it is set or its +// zero value if it is unset. +func (v *WorkflowService_ListFailoverHistory_Result) GetSuccess() (o *shared.ListFailoverHistoryResponse) { + if v != nil && v.Success != nil { + return v.Success + } + + return +} + +// IsSetSuccess returns true if Success is not nil. +func (v *WorkflowService_ListFailoverHistory_Result) IsSetSuccess() bool { + return v != nil && v.Success != nil +} + +// GetBadRequestError returns the value of BadRequestError if it is set or its +// zero value if it is unset. +func (v *WorkflowService_ListFailoverHistory_Result) GetBadRequestError() (o *shared.BadRequestError) { + if v != nil && v.BadRequestError != nil { + return v.BadRequestError + } + + return +} + +// IsSetBadRequestError returns true if BadRequestError is not nil. +func (v *WorkflowService_ListFailoverHistory_Result) IsSetBadRequestError() bool { + return v != nil && v.BadRequestError != nil +} + +// GetServiceBusyError returns the value of ServiceBusyError if it is set or its +// zero value if it is unset. +func (v *WorkflowService_ListFailoverHistory_Result) GetServiceBusyError() (o *shared.ServiceBusyError) { + if v != nil && v.ServiceBusyError != nil { + return v.ServiceBusyError + } + + return +} + +// IsSetServiceBusyError returns true if ServiceBusyError is not nil. 
+func (v *WorkflowService_ListFailoverHistory_Result) IsSetServiceBusyError() bool { + return v != nil && v.ServiceBusyError != nil +} + +// GetClientVersionNotSupportedError returns the value of ClientVersionNotSupportedError if it is set or its +// zero value if it is unset. +func (v *WorkflowService_ListFailoverHistory_Result) GetClientVersionNotSupportedError() (o *shared.ClientVersionNotSupportedError) { + if v != nil && v.ClientVersionNotSupportedError != nil { + return v.ClientVersionNotSupportedError + } + + return +} + +// IsSetClientVersionNotSupportedError returns true if ClientVersionNotSupportedError is not nil. +func (v *WorkflowService_ListFailoverHistory_Result) IsSetClientVersionNotSupportedError() bool { + return v != nil && v.ClientVersionNotSupportedError != nil +} + +// GetAccessDeniedError returns the value of AccessDeniedError if it is set or its +// zero value if it is unset. +func (v *WorkflowService_ListFailoverHistory_Result) GetAccessDeniedError() (o *shared.AccessDeniedError) { + if v != nil && v.AccessDeniedError != nil { + return v.AccessDeniedError + } + + return +} + +// IsSetAccessDeniedError returns true if AccessDeniedError is not nil. +func (v *WorkflowService_ListFailoverHistory_Result) IsSetAccessDeniedError() bool { + return v != nil && v.AccessDeniedError != nil +} + +// MethodName returns the name of the Thrift function as specified in +// the IDL, for which this struct represent the result. +// +// This will always be "ListFailoverHistory" for this struct. +func (v *WorkflowService_ListFailoverHistory_Result) MethodName() string { + return "ListFailoverHistory" +} + +// EnvelopeType returns the kind of value inside this struct. +// +// This will always be Reply for this struct. +func (v *WorkflowService_ListFailoverHistory_Result) EnvelopeType() wire.EnvelopeType { + return wire.Reply +} + // WorkflowService_ListOpenWorkflowExecutions_Args represents the arguments for the WorkflowService.ListOpenWorkflowExecutions function. // // The arguments for ListOpenWorkflowExecutions are sent and received over the wire as this struct. diff --git a/.gen/go/cadence/workflowserviceclient/client.go b/.gen/go/cadence/workflowserviceclient/client.go index 421063993..b0c0f6c19 100644 --- a/.gen/go/cadence/workflowserviceclient/client.go +++ b/.gen/go/cadence/workflowserviceclient/client.go @@ -106,6 +106,12 @@ type Interface interface { opts ...yarpc.CallOption, ) (*shared.ListDomainsResponse, error) + ListFailoverHistory( + ctx context.Context, + ListRequest *shared.ListFailoverHistoryRequest, + opts ...yarpc.CallOption, + ) (*shared.ListFailoverHistoryResponse, error) + ListOpenWorkflowExecutions( ctx context.Context, ListRequest *shared.ListOpenWorkflowExecutionsRequest, @@ -660,6 +666,29 @@ func (c client) ListDomains( return } +func (c client) ListFailoverHistory( + ctx context.Context, + _ListRequest *shared.ListFailoverHistoryRequest, + opts ...yarpc.CallOption, +) (success *shared.ListFailoverHistoryResponse, err error) { + + args := cadence.WorkflowService_ListFailoverHistory_Helper.Args(_ListRequest) + + var body wire.Value + body, err = c.c.Call(ctx, args, opts...) 
+ if err != nil { + return + } + + var result cadence.WorkflowService_ListFailoverHistory_Result + if err = result.FromWire(body); err != nil { + return + } + + success, err = cadence.WorkflowService_ListFailoverHistory_Helper.UnwrapResponse(&result) + return +} + func (c client) ListOpenWorkflowExecutions( ctx context.Context, _ListRequest *shared.ListOpenWorkflowExecutionsRequest, diff --git a/.gen/go/cadence/workflowserviceserver/server.go b/.gen/go/cadence/workflowserviceserver/server.go index 92325acdb..2ecbae9f2 100644 --- a/.gen/go/cadence/workflowserviceserver/server.go +++ b/.gen/go/cadence/workflowserviceserver/server.go @@ -90,6 +90,11 @@ type Interface interface { ListRequest *shared.ListDomainsRequest, ) (*shared.ListDomainsResponse, error) + ListFailoverHistory( + ctx context.Context, + ListRequest *shared.ListFailoverHistoryRequest, + ) (*shared.ListFailoverHistoryResponse, error) + ListOpenWorkflowExecutions( ctx context.Context, ListRequest *shared.ListOpenWorkflowExecutionsRequest, @@ -422,6 +427,17 @@ func New(impl Interface, opts ...thrift.RegisterOption) []transport.Procedure { ThriftModule: cadence.ThriftModule, }, + thrift.Method{ + Name: "ListFailoverHistory", + HandlerSpec: thrift.HandlerSpec{ + + Type: transport.Unary, + Unary: thrift.UnaryHandler(h.ListFailoverHistory), + }, + Signature: "ListFailoverHistory(ListRequest *shared.ListFailoverHistoryRequest) (*shared.ListFailoverHistoryResponse)", + ThriftModule: cadence.ThriftModule, + }, + thrift.Method{ Name: "ListOpenWorkflowExecutions", HandlerSpec: thrift.HandlerSpec{ @@ -765,7 +781,7 @@ func New(impl Interface, opts ...thrift.RegisterOption) []transport.Procedure { }, } - procedures := make([]transport.Procedure, 0, 46) + procedures := make([]transport.Procedure, 0, 47) procedures = append(procedures, thrift.BuildProcedures(service, opts...)...) 
return procedures } @@ -1226,6 +1242,36 @@ func (h handler) ListDomains(ctx context.Context, body wire.Value) (thrift.Respo return response, err } +func (h handler) ListFailoverHistory(ctx context.Context, body wire.Value) (thrift.Response, error) { + var args cadence.WorkflowService_ListFailoverHistory_Args + if err := args.FromWire(body); err != nil { + return thrift.Response{}, yarpcerrors.InvalidArgumentErrorf( + "could not decode Thrift request for service 'WorkflowService' procedure 'ListFailoverHistory': %w", err) + } + + success, appErr := h.impl.ListFailoverHistory(ctx, args.ListRequest) + + hadError := appErr != nil + result, err := cadence.WorkflowService_ListFailoverHistory_Helper.WrapResponse(success, appErr) + + var response thrift.Response + if err == nil { + response.IsApplicationError = hadError + response.Body = result + if namer, ok := appErr.(yarpcErrorNamer); ok { + response.ApplicationErrorName = namer.YARPCErrorName() + } + if extractor, ok := appErr.(yarpcErrorCoder); ok { + response.ApplicationErrorCode = extractor.YARPCErrorCode() + } + if appErr != nil { + response.ApplicationErrorDetails = appErr.Error() + } + } + + return response, err +} + func (h handler) ListOpenWorkflowExecutions(ctx context.Context, body wire.Value) (thrift.Response, error) { var args cadence.WorkflowService_ListOpenWorkflowExecutions_Args if err := args.FromWire(body); err != nil { diff --git a/.gen/go/cadence/workflowservicetest/client.go b/.gen/go/cadence/workflowservicetest/client.go index b379eeb83..640c5df61 100644 --- a/.gen/go/cadence/workflowservicetest/client.go +++ b/.gen/go/cadence/workflowservicetest/client.go @@ -531,6 +531,39 @@ func (mr *_MockClientRecorder) ListDomains( return mr.mock.ctrl.RecordCall(mr.mock, "ListDomains", args...) } +// ListFailoverHistory responds to a ListFailoverHistory call based on the mock expectations. This +// call will fail if the mock does not expect this call. Use EXPECT to expect +// a call to this function. +// +// client.EXPECT().ListFailoverHistory(gomock.Any(), ...).Return(...) +// ... := client.ListFailoverHistory(...) +func (m *MockClient) ListFailoverHistory( + ctx context.Context, + _ListRequest *shared.ListFailoverHistoryRequest, + opts ...yarpc.CallOption, +) (success *shared.ListFailoverHistoryResponse, err error) { + + args := []interface{}{ctx, _ListRequest} + for _, o := range opts { + args = append(args, o) + } + i := 0 + ret := m.ctrl.Call(m, "ListFailoverHistory", args...) + success, _ = ret[i].(*shared.ListFailoverHistoryResponse) + i++ + err, _ = ret[i].(error) + return +} + +func (mr *_MockClientRecorder) ListFailoverHistory( + ctx interface{}, + _ListRequest interface{}, + opts ...interface{}, +) *gomock.Call { + args := append([]interface{}{ctx, _ListRequest}, opts...) + return mr.mock.ctrl.RecordCall(mr.mock, "ListFailoverHistory", args...) +} + // ListOpenWorkflowExecutions responds to a ListOpenWorkflowExecutions call based on the mock expectations. This // call will fail if the mock does not expect this call. Use EXPECT to expect // a call to this function. 
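The generated mock above follows the same pattern as the other WorkflowService methods, so a unit test can stub ListFailoverHistory in the usual way. A minimal sketch, assuming the workflowservicetest package exposes NewMockClient like the other generated mocks in this repository, and leaving the request/response payloads as zero values since their fields are defined elsewhere in shared.go:

package example_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	"go.uber.org/cadence/.gen/go/cadence/workflowservicetest"
	"go.uber.org/cadence/.gen/go/shared"
)

func TestListFailoverHistoryMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Expect a single ListFailoverHistory call and hand back an empty response.
	// A real test would populate the request/response fields, which are not
	// shown in this diff hunk.
	client := workflowservicetest.NewMockClient(ctrl)
	client.EXPECT().
		ListFailoverHistory(gomock.Any(), gomock.Any()).
		Return(&shared.ListFailoverHistoryResponse{}, nil)

	resp, err := client.ListFailoverHistory(context.Background(), &shared.ListFailoverHistoryRequest{})
	if err != nil {
		t.Fatalf("ListFailoverHistory returned an unexpected error: %v", err)
	}
	if resp == nil {
		t.Fatal("expected a non-nil ListFailoverHistoryResponse")
	}
}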
diff --git a/.gen/go/shared/shared.go b/.gen/go/shared/shared.go index 314ed5587..0d630e01b 100644 --- a/.gen/go/shared/shared.go +++ b/.gen/go/shared/shared.go @@ -363,11 +363,7 @@ func (v *ActiveClusterInfo) IsSetFailoverVersion() bool { } type ActiveClusterSelectionPolicy struct { - ClusterAttribute *ClusterAttribute `json:"clusterAttribute,omitempty"` - Strategy *ActiveClusterSelectionStrategy `json:"strategy,omitempty"` - StickyRegion *string `json:"stickyRegion,omitempty"` - ExternalEntityType *string `json:"externalEntityType,omitempty"` - ExternalEntityKey *string `json:"externalEntityKey,omitempty"` + ClusterAttribute *ClusterAttribute `json:"clusterAttribute,omitempty"` } // ToWire translates a ActiveClusterSelectionPolicy struct into a Thrift-level intermediate @@ -387,7 +383,7 @@ type ActiveClusterSelectionPolicy struct { // } func (v *ActiveClusterSelectionPolicy) ToWire() (wire.Value, error) { var ( - fields [5]wire.Field + fields [1]wire.Field i int = 0 w wire.Value err error @@ -401,38 +397,6 @@ func (v *ActiveClusterSelectionPolicy) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 1, Value: w} i++ } - if v.Strategy != nil { - w, err = v.Strategy.ToWire() - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 10, Value: w} - i++ - } - if v.StickyRegion != nil { - w, err = wire.NewValueString(*(v.StickyRegion)), error(nil) - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 20, Value: w} - i++ - } - if v.ExternalEntityType != nil { - w, err = wire.NewValueString(*(v.ExternalEntityType)), error(nil) - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 30, Value: w} - i++ - } - if v.ExternalEntityKey != nil { - w, err = wire.NewValueString(*(v.ExternalEntityKey)), error(nil) - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 40, Value: w} - i++ - } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } @@ -443,12 +407,6 @@ func _ClusterAttribute_Read(w wire.Value) (*ClusterAttribute, error) { return &v, err } -func _ActiveClusterSelectionStrategy_Read(w wire.Value) (ActiveClusterSelectionStrategy, error) { - var v ActiveClusterSelectionStrategy - err := v.FromWire(w) - return v, err -} - // FromWire deserializes a ActiveClusterSelectionPolicy struct from its Thrift-level // representation. The Thrift-level representation may be obtained // from a ThriftRW protocol implementation. 
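The hunks above narrow ActiveClusterSelectionPolicy to a single ClusterAttribute field; the strategy, sticky-region and external-entity fields are removed from both the struct and its wire mapping. A minimal round-trip sketch against the slimmed-down struct, leaving ClusterAttribute as a zero value because its fields are defined elsewhere in shared.go:

package example

import (
	"fmt"

	"go.uber.org/cadence/.gen/go/shared"
)

func roundTripPolicy() error {
	// After this refactor the only remaining knob on the policy is the cluster
	// attribute; Strategy, StickyRegion, ExternalEntityType and ExternalEntityKey
	// no longer exist on the struct.
	policy := &shared.ActiveClusterSelectionPolicy{
		ClusterAttribute: &shared.ClusterAttribute{},
	}

	// Encode to the Thrift-level intermediate representation and decode it back.
	w, err := policy.ToWire()
	if err != nil {
		return fmt.Errorf("encode: %w", err)
	}
	var decoded shared.ActiveClusterSelectionPolicy
	if err := decoded.FromWire(w); err != nil {
		return fmt.Errorf("decode: %w", err)
	}

	if !policy.Equals(&decoded) {
		return fmt.Errorf("round-trip mismatch: %v vs %v", policy, &decoded)
	}
	return nil
}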
@@ -478,46 +436,6 @@ func (v *ActiveClusterSelectionPolicy) FromWire(w wire.Value) error { return err } - } - case 10: - if field.Value.Type() == wire.TI32 { - var x ActiveClusterSelectionStrategy - x, err = _ActiveClusterSelectionStrategy_Read(field.Value) - v.Strategy = &x - if err != nil { - return err - } - - } - case 20: - if field.Value.Type() == wire.TBinary { - var x string - x, err = field.Value.GetString(), error(nil) - v.StickyRegion = &x - if err != nil { - return err - } - - } - case 30: - if field.Value.Type() == wire.TBinary { - var x string - x, err = field.Value.GetString(), error(nil) - v.ExternalEntityType = &x - if err != nil { - return err - } - - } - case 40: - if field.Value.Type() == wire.TBinary { - var x string - x, err = field.Value.GetString(), error(nil) - v.ExternalEntityKey = &x - if err != nil { - return err - } - } } } @@ -532,42 +450,16 @@ func (v *ActiveClusterSelectionPolicy) String() string { return "" } - var fields [5]string + var fields [1]string i := 0 if v.ClusterAttribute != nil { fields[i] = fmt.Sprintf("ClusterAttribute: %v", v.ClusterAttribute) i++ } - if v.Strategy != nil { - fields[i] = fmt.Sprintf("Strategy: %v", *(v.Strategy)) - i++ - } - if v.StickyRegion != nil { - fields[i] = fmt.Sprintf("StickyRegion: %v", *(v.StickyRegion)) - i++ - } - if v.ExternalEntityType != nil { - fields[i] = fmt.Sprintf("ExternalEntityType: %v", *(v.ExternalEntityType)) - i++ - } - if v.ExternalEntityKey != nil { - fields[i] = fmt.Sprintf("ExternalEntityKey: %v", *(v.ExternalEntityKey)) - i++ - } return fmt.Sprintf("ActiveClusterSelectionPolicy{%v}", strings.Join(fields[:i], ", ")) } -func _ActiveClusterSelectionStrategy_EqualsPtr(lhs, rhs *ActiveClusterSelectionStrategy) bool { - if lhs != nil && rhs != nil { - - x := *lhs - y := *rhs - return x.Equals(y) - } - return lhs == nil && rhs == nil -} - // Equals returns true if all the fields of this ActiveClusterSelectionPolicy match the // provided ActiveClusterSelectionPolicy. // @@ -581,18 +473,6 @@ func (v *ActiveClusterSelectionPolicy) Equals(rhs *ActiveClusterSelectionPolicy) if !((v.ClusterAttribute == nil && rhs.ClusterAttribute == nil) || (v.ClusterAttribute != nil && rhs.ClusterAttribute != nil && v.ClusterAttribute.Equals(rhs.ClusterAttribute))) { return false } - if !_ActiveClusterSelectionStrategy_EqualsPtr(v.Strategy, rhs.Strategy) { - return false - } - if !_String_EqualsPtr(v.StickyRegion, rhs.StickyRegion) { - return false - } - if !_String_EqualsPtr(v.ExternalEntityType, rhs.ExternalEntityType) { - return false - } - if !_String_EqualsPtr(v.ExternalEntityKey, rhs.ExternalEntityKey) { - return false - } return true } @@ -606,18 +486,6 @@ func (v *ActiveClusterSelectionPolicy) MarshalLogObject(enc zapcore.ObjectEncode if v.ClusterAttribute != nil { err = multierr.Append(err, enc.AddObject("clusterAttribute", v.ClusterAttribute)) } - if v.Strategy != nil { - err = multierr.Append(err, enc.AddObject("strategy", *v.Strategy)) - } - if v.StickyRegion != nil { - enc.AddString("stickyRegion", *v.StickyRegion) - } - if v.ExternalEntityType != nil { - enc.AddString("externalEntityType", *v.ExternalEntityType) - } - if v.ExternalEntityKey != nil { - enc.AddString("externalEntityKey", *v.ExternalEntityKey) - } return err } @@ -636,66 +504,6 @@ func (v *ActiveClusterSelectionPolicy) IsSetClusterAttribute() bool { return v != nil && v.ClusterAttribute != nil } -// GetStrategy returns the value of Strategy if it is set or its -// zero value if it is unset. 
-func (v *ActiveClusterSelectionPolicy) GetStrategy() (o ActiveClusterSelectionStrategy) { - if v != nil && v.Strategy != nil { - return *v.Strategy - } - - return -} - -// IsSetStrategy returns true if Strategy is not nil. -func (v *ActiveClusterSelectionPolicy) IsSetStrategy() bool { - return v != nil && v.Strategy != nil -} - -// GetStickyRegion returns the value of StickyRegion if it is set or its -// zero value if it is unset. -func (v *ActiveClusterSelectionPolicy) GetStickyRegion() (o string) { - if v != nil && v.StickyRegion != nil { - return *v.StickyRegion - } - - return -} - -// IsSetStickyRegion returns true if StickyRegion is not nil. -func (v *ActiveClusterSelectionPolicy) IsSetStickyRegion() bool { - return v != nil && v.StickyRegion != nil -} - -// GetExternalEntityType returns the value of ExternalEntityType if it is set or its -// zero value if it is unset. -func (v *ActiveClusterSelectionPolicy) GetExternalEntityType() (o string) { - if v != nil && v.ExternalEntityType != nil { - return *v.ExternalEntityType - } - - return -} - -// IsSetExternalEntityType returns true if ExternalEntityType is not nil. -func (v *ActiveClusterSelectionPolicy) IsSetExternalEntityType() bool { - return v != nil && v.ExternalEntityType != nil -} - -// GetExternalEntityKey returns the value of ExternalEntityKey if it is set or its -// zero value if it is unset. -func (v *ActiveClusterSelectionPolicy) GetExternalEntityKey() (o string) { - if v != nil && v.ExternalEntityKey != nil { - return *v.ExternalEntityKey - } - - return -} - -// IsSetExternalEntityKey returns true if ExternalEntityKey is not nil. -func (v *ActiveClusterSelectionPolicy) IsSetExternalEntityKey() bool { - return v != nil && v.ExternalEntityKey != nil -} - type ActiveClusterSelectionStrategy int32 const ( @@ -868,48 +676,9 @@ func (v *ActiveClusterSelectionStrategy) UnmarshalJSON(text []byte) error { } type ActiveClusters struct { - ActiveClustersByRegion map[string]*ActiveClusterInfo `json:"activeClustersByRegion,omitempty"` ActiveClustersByClusterAttribute map[string]*ClusterAttributeScope `json:"activeClustersByClusterAttribute,omitempty"` } -type _Map_String_ActiveClusterInfo_MapItemList map[string]*ActiveClusterInfo - -func (m _Map_String_ActiveClusterInfo_MapItemList) ForEach(f func(wire.MapItem) error) error { - for k, v := range m { - if v == nil { - return fmt.Errorf("invalid [%v]: value is nil", k) - } - kw, err := wire.NewValueString(k), error(nil) - if err != nil { - return err - } - - vw, err := v.ToWire() - if err != nil { - return err - } - err = f(wire.MapItem{Key: kw, Value: vw}) - if err != nil { - return err - } - } - return nil -} - -func (m _Map_String_ActiveClusterInfo_MapItemList) Size() int { - return len(m) -} - -func (_Map_String_ActiveClusterInfo_MapItemList) KeyType() wire.Type { - return wire.TBinary -} - -func (_Map_String_ActiveClusterInfo_MapItemList) ValueType() wire.Type { - return wire.TStruct -} - -func (_Map_String_ActiveClusterInfo_MapItemList) Close() {} - type _Map_String_ClusterAttributeScope_MapItemList map[string]*ClusterAttributeScope func (m _Map_String_ClusterAttributeScope_MapItemList) ForEach(f func(wire.MapItem) error) error { @@ -965,20 +734,12 @@ func (_Map_String_ClusterAttributeScope_MapItemList) Close() {} // } func (v *ActiveClusters) ToWire() (wire.Value, error) { var ( - fields [2]wire.Field + fields [1]wire.Field i int = 0 w wire.Value err error ) - if v.ActiveClustersByRegion != nil { - w, err = 
wire.NewValueMap(_Map_String_ActiveClusterInfo_MapItemList(v.ActiveClustersByRegion)), error(nil) - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 10, Value: w} - i++ - } if v.ActiveClustersByClusterAttribute != nil { w, err = wire.NewValueMap(_Map_String_ClusterAttributeScope_MapItemList(v.ActiveClustersByClusterAttribute)), error(nil) if err != nil { @@ -991,40 +752,6 @@ func (v *ActiveClusters) ToWire() (wire.Value, error) { return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } -func _ActiveClusterInfo_Read(w wire.Value) (*ActiveClusterInfo, error) { - var v ActiveClusterInfo - err := v.FromWire(w) - return &v, err -} - -func _Map_String_ActiveClusterInfo_Read(m wire.MapItemList) (map[string]*ActiveClusterInfo, error) { - if m.KeyType() != wire.TBinary { - return nil, nil - } - - if m.ValueType() != wire.TStruct { - return nil, nil - } - - o := make(map[string]*ActiveClusterInfo, m.Size()) - err := m.ForEach(func(x wire.MapItem) error { - k, err := x.Key.GetString(), error(nil) - if err != nil { - return err - } - - v, err := _ActiveClusterInfo_Read(x.Value) - if err != nil { - return err - } - - o[k] = v - return nil - }) - m.Close() - return o, err -} - func _ClusterAttributeScope_Read(w wire.Value) (*ClusterAttributeScope, error) { var v ClusterAttributeScope err := v.FromWire(w) @@ -1081,14 +808,6 @@ func (v *ActiveClusters) FromWire(w wire.Value) error { for _, field := range w.GetStruct().Fields { switch field.ID { - case 10: - if field.Value.Type() == wire.TMap { - v.ActiveClustersByRegion, err = _Map_String_ActiveClusterInfo_Read(field.Value.GetMap()) - if err != nil { - return err - } - - } case 11: if field.Value.Type() == wire.TMap { v.ActiveClustersByClusterAttribute, err = _Map_String_ClusterAttributeScope_Read(field.Value.GetMap()) @@ -1110,12 +829,8 @@ func (v *ActiveClusters) String() string { return "" } - var fields [2]string + var fields [1]string i := 0 - if v.ActiveClustersByRegion != nil { - fields[i] = fmt.Sprintf("ActiveClustersByRegion: %v", v.ActiveClustersByRegion) - i++ - } if v.ActiveClustersByClusterAttribute != nil { fields[i] = fmt.Sprintf("ActiveClustersByClusterAttribute: %v", v.ActiveClustersByClusterAttribute) i++ @@ -1124,23 +839,6 @@ func (v *ActiveClusters) String() string { return fmt.Sprintf("ActiveClusters{%v}", strings.Join(fields[:i], ", ")) } -func _Map_String_ActiveClusterInfo_Equals(lhs, rhs map[string]*ActiveClusterInfo) bool { - if len(lhs) != len(rhs) { - return false - } - - for lk, lv := range lhs { - rv, ok := rhs[lk] - if !ok { - return false - } - if !lv.Equals(rv) { - return false - } - } - return true -} - func _Map_String_ClusterAttributeScope_Equals(lhs, rhs map[string]*ClusterAttributeScope) bool { if len(lhs) != len(rhs) { return false @@ -1168,9 +866,6 @@ func (v *ActiveClusters) Equals(rhs *ActiveClusters) bool { } else if rhs == nil { return false } - if !((v.ActiveClustersByRegion == nil && rhs.ActiveClustersByRegion == nil) || (v.ActiveClustersByRegion != nil && rhs.ActiveClustersByRegion != nil && _Map_String_ActiveClusterInfo_Equals(v.ActiveClustersByRegion, rhs.ActiveClustersByRegion))) { - return false - } if !((v.ActiveClustersByClusterAttribute == nil && rhs.ActiveClustersByClusterAttribute == nil) || (v.ActiveClustersByClusterAttribute != nil && rhs.ActiveClustersByClusterAttribute != nil && _Map_String_ClusterAttributeScope_Equals(v.ActiveClustersByClusterAttribute, rhs.ActiveClustersByClusterAttribute))) { return false } @@ -1178,17 +873,6 @@ func (v *ActiveClusters) Equals(rhs 
*ActiveClusters) bool { return true } -type _Map_String_ActiveClusterInfo_Zapper map[string]*ActiveClusterInfo - -// MarshalLogObject implements zapcore.ObjectMarshaler, enabling -// fast logging of _Map_String_ActiveClusterInfo_Zapper. -func (m _Map_String_ActiveClusterInfo_Zapper) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { - for k, v := range m { - err = multierr.Append(err, enc.AddObject((string)(k), v)) - } - return err -} - type _Map_String_ClusterAttributeScope_Zapper map[string]*ClusterAttributeScope // MarshalLogObject implements zapcore.ObjectMarshaler, enabling @@ -1206,30 +890,12 @@ func (v *ActiveClusters) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) if v == nil { return nil } - if v.ActiveClustersByRegion != nil { - err = multierr.Append(err, enc.AddObject("activeClustersByRegion", (_Map_String_ActiveClusterInfo_Zapper)(v.ActiveClustersByRegion))) - } if v.ActiveClustersByClusterAttribute != nil { err = multierr.Append(err, enc.AddObject("activeClustersByClusterAttribute", (_Map_String_ClusterAttributeScope_Zapper)(v.ActiveClustersByClusterAttribute))) } return err } -// GetActiveClustersByRegion returns the value of ActiveClustersByRegion if it is set or its -// zero value if it is unset. -func (v *ActiveClusters) GetActiveClustersByRegion() (o map[string]*ActiveClusterInfo) { - if v != nil && v.ActiveClustersByRegion != nil { - return v.ActiveClustersByRegion - } - - return -} - -// IsSetActiveClustersByRegion returns true if ActiveClustersByRegion is not nil. -func (v *ActiveClusters) IsSetActiveClustersByRegion() bool { - return v != nil && v.ActiveClustersByRegion != nil -} - // GetActiveClustersByClusterAttribute returns the value of ActiveClustersByClusterAttribute if it is set or its // zero value if it is unset. func (v *ActiveClusters) GetActiveClustersByClusterAttribute() (o map[string]*ClusterAttributeScope) { @@ -10060,6 +9726,44 @@ type ClusterAttributeScope struct { ClusterAttributes map[string]*ActiveClusterInfo `json:"clusterAttributes,omitempty"` } +type _Map_String_ActiveClusterInfo_MapItemList map[string]*ActiveClusterInfo + +func (m _Map_String_ActiveClusterInfo_MapItemList) ForEach(f func(wire.MapItem) error) error { + for k, v := range m { + if v == nil { + return fmt.Errorf("invalid [%v]: value is nil", k) + } + kw, err := wire.NewValueString(k), error(nil) + if err != nil { + return err + } + + vw, err := v.ToWire() + if err != nil { + return err + } + err = f(wire.MapItem{Key: kw, Value: vw}) + if err != nil { + return err + } + } + return nil +} + +func (m _Map_String_ActiveClusterInfo_MapItemList) Size() int { + return len(m) +} + +func (_Map_String_ActiveClusterInfo_MapItemList) KeyType() wire.Type { + return wire.TBinary +} + +func (_Map_String_ActiveClusterInfo_MapItemList) ValueType() wire.Type { + return wire.TStruct +} + +func (_Map_String_ActiveClusterInfo_MapItemList) Close() {} + // ToWire translates a ClusterAttributeScope struct into a Thrift-level intermediate // representation. This intermediate representation may be serialized // into bytes using a ThriftRW protocol implementation. 
@@ -10095,6 +9799,40 @@ func (v *ClusterAttributeScope) ToWire() (wire.Value, error) { return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } +func _ActiveClusterInfo_Read(w wire.Value) (*ActiveClusterInfo, error) { + var v ActiveClusterInfo + err := v.FromWire(w) + return &v, err +} + +func _Map_String_ActiveClusterInfo_Read(m wire.MapItemList) (map[string]*ActiveClusterInfo, error) { + if m.KeyType() != wire.TBinary { + return nil, nil + } + + if m.ValueType() != wire.TStruct { + return nil, nil + } + + o := make(map[string]*ActiveClusterInfo, m.Size()) + err := m.ForEach(func(x wire.MapItem) error { + k, err := x.Key.GetString(), error(nil) + if err != nil { + return err + } + + v, err := _ActiveClusterInfo_Read(x.Value) + if err != nil { + return err + } + + o[k] = v + return nil + }) + m.Close() + return o, err +} + // FromWire deserializes a ClusterAttributeScope struct from its Thrift-level // representation. The Thrift-level representation may be obtained // from a ThriftRW protocol implementation. @@ -10148,6 +9886,23 @@ func (v *ClusterAttributeScope) String() string { return fmt.Sprintf("ClusterAttributeScope{%v}", strings.Join(fields[:i], ", ")) } +func _Map_String_ActiveClusterInfo_Equals(lhs, rhs map[string]*ActiveClusterInfo) bool { + if len(lhs) != len(rhs) { + return false + } + + for lk, lv := range lhs { + rv, ok := rhs[lk] + if !ok { + return false + } + if !lv.Equals(rv) { + return false + } + } + return true +} + // Equals returns true if all the fields of this ClusterAttributeScope match the // provided ClusterAttributeScope. // @@ -10165,6 +9920,17 @@ func (v *ClusterAttributeScope) Equals(rhs *ClusterAttributeScope) bool { return true } +type _Map_String_ActiveClusterInfo_Zapper map[string]*ActiveClusterInfo + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of _Map_String_ActiveClusterInfo_Zapper. +func (m _Map_String_ActiveClusterInfo_Zapper) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + for k, v := range m { + err = multierr.Append(err, enc.AddObject((string)(k), v)) + } + return err +} + // MarshalLogObject implements zapcore.ObjectMarshaler, enabling // fast logging of ClusterAttributeScope. func (v *ClusterAttributeScope) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { @@ -10192,6 +9958,226 @@ func (v *ClusterAttributeScope) IsSetClusterAttributes() bool { return v != nil && v.ClusterAttributes != nil } +type ClusterFailover struct { + FromCluster *ActiveClusterInfo `json:"fromCluster,omitempty"` + ToCluster *ActiveClusterInfo `json:"toCluster,omitempty"` + ClusterAttribute *ClusterAttribute `json:"clusterAttribute,omitempty"` +} + +// ToWire translates a ClusterFailover struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *ClusterFailover) ToWire() (wire.Value, error) { + var ( + fields [3]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.FromCluster != nil { + w, err = v.FromCluster.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.ToCluster != nil { + w, err = v.ToCluster.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + if v.ClusterAttribute != nil { + w, err = v.ClusterAttribute.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 30, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +// FromWire deserializes a ClusterFailover struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a ClusterFailover struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v ClusterFailover +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *ClusterFailover) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TStruct { + v.FromCluster, err = _ActiveClusterInfo_Read(field.Value) + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TStruct { + v.ToCluster, err = _ActiveClusterInfo_Read(field.Value) + if err != nil { + return err + } + + } + case 30: + if field.Value.Type() == wire.TStruct { + v.ClusterAttribute, err = _ClusterAttribute_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a ClusterFailover +// struct. +func (v *ClusterFailover) String() string { + if v == nil { + return "" + } + + var fields [3]string + i := 0 + if v.FromCluster != nil { + fields[i] = fmt.Sprintf("FromCluster: %v", v.FromCluster) + i++ + } + if v.ToCluster != nil { + fields[i] = fmt.Sprintf("ToCluster: %v", v.ToCluster) + i++ + } + if v.ClusterAttribute != nil { + fields[i] = fmt.Sprintf("ClusterAttribute: %v", v.ClusterAttribute) + i++ + } + + return fmt.Sprintf("ClusterFailover{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this ClusterFailover match the +// provided ClusterFailover. +// +// This function performs a deep comparison. +func (v *ClusterFailover) Equals(rhs *ClusterFailover) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.FromCluster == nil && rhs.FromCluster == nil) || (v.FromCluster != nil && rhs.FromCluster != nil && v.FromCluster.Equals(rhs.FromCluster))) { + return false + } + if !((v.ToCluster == nil && rhs.ToCluster == nil) || (v.ToCluster != nil && rhs.ToCluster != nil && v.ToCluster.Equals(rhs.ToCluster))) { + return false + } + if !((v.ClusterAttribute == nil && rhs.ClusterAttribute == nil) || (v.ClusterAttribute != nil && rhs.ClusterAttribute != nil && v.ClusterAttribute.Equals(rhs.ClusterAttribute))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of ClusterFailover. 
+func (v *ClusterFailover) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.FromCluster != nil { + err = multierr.Append(err, enc.AddObject("fromCluster", v.FromCluster)) + } + if v.ToCluster != nil { + err = multierr.Append(err, enc.AddObject("toCluster", v.ToCluster)) + } + if v.ClusterAttribute != nil { + err = multierr.Append(err, enc.AddObject("clusterAttribute", v.ClusterAttribute)) + } + return err +} + +// GetFromCluster returns the value of FromCluster if it is set or its +// zero value if it is unset. +func (v *ClusterFailover) GetFromCluster() (o *ActiveClusterInfo) { + if v != nil && v.FromCluster != nil { + return v.FromCluster + } + + return +} + +// IsSetFromCluster returns true if FromCluster is not nil. +func (v *ClusterFailover) IsSetFromCluster() bool { + return v != nil && v.FromCluster != nil +} + +// GetToCluster returns the value of ToCluster if it is set or its +// zero value if it is unset. +func (v *ClusterFailover) GetToCluster() (o *ActiveClusterInfo) { + if v != nil && v.ToCluster != nil { + return v.ToCluster + } + + return +} + +// IsSetToCluster returns true if ToCluster is not nil. +func (v *ClusterFailover) IsSetToCluster() bool { + return v != nil && v.ToCluster != nil +} + +// GetClusterAttribute returns the value of ClusterAttribute if it is set or its +// zero value if it is unset. +func (v *ClusterFailover) GetClusterAttribute() (o *ClusterAttribute) { + if v != nil && v.ClusterAttribute != nil { + return v.ClusterAttribute + } + + return +} + +// IsSetClusterAttribute returns true if ClusterAttribute is not nil. +func (v *ClusterFailover) IsSetClusterAttribute() bool { + return v != nil && v.ClusterAttribute != nil +} + type ClusterInfo struct { SupportedClientVersions *SupportedClientVersions `json:"supportedClientVersions,omitempty"` } @@ -29086,6 +29072,369 @@ func (v *FailoverDomainResponse) IsSetIsGlobalDomain() bool { return v != nil && v.IsGlobalDomain != nil } +type FailoverEvent struct { + ID *string `json:"id,omitempty"` + CreatedTime *int64 `json:"createdTime,omitempty"` + FailoverType *FailoverType `json:"failoverType,omitempty"` + ClusterFailovers []*ClusterFailover `json:"clusterFailovers,omitempty"` +} + +type _List_ClusterFailover_ValueList []*ClusterFailover + +func (v _List_ClusterFailover_ValueList) ForEach(f func(wire.Value) error) error { + for i, x := range v { + if x == nil { + return fmt.Errorf("invalid [%v]: value is nil", i) + } + w, err := x.ToWire() + if err != nil { + return err + } + err = f(w) + if err != nil { + return err + } + } + return nil +} + +func (v _List_ClusterFailover_ValueList) Size() int { + return len(v) +} + +func (_List_ClusterFailover_ValueList) ValueType() wire.Type { + return wire.TStruct +} + +func (_List_ClusterFailover_ValueList) Close() {} + +// ToWire translates a FailoverEvent struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *FailoverEvent) ToWire() (wire.Value, error) { + var ( + fields [4]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.ID != nil { + w, err = wire.NewValueString(*(v.ID)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.CreatedTime != nil { + w, err = wire.NewValueI64(*(v.CreatedTime)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + if v.FailoverType != nil { + w, err = v.FailoverType.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 30, Value: w} + i++ + } + if v.ClusterFailovers != nil { + w, err = wire.NewValueList(_List_ClusterFailover_ValueList(v.ClusterFailovers)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 40, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _FailoverType_Read(w wire.Value) (FailoverType, error) { + var v FailoverType + err := v.FromWire(w) + return v, err +} + +func _ClusterFailover_Read(w wire.Value) (*ClusterFailover, error) { + var v ClusterFailover + err := v.FromWire(w) + return &v, err +} + +func _List_ClusterFailover_Read(l wire.ValueList) ([]*ClusterFailover, error) { + if l.ValueType() != wire.TStruct { + return nil, nil + } + + o := make([]*ClusterFailover, 0, l.Size()) + err := l.ForEach(func(x wire.Value) error { + i, err := _ClusterFailover_Read(x) + if err != nil { + return err + } + o = append(o, i) + return nil + }) + l.Close() + return o, err +} + +// FromWire deserializes a FailoverEvent struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a FailoverEvent struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v FailoverEvent +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *FailoverEvent) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TBinary { + var x string + x, err = field.Value.GetString(), error(nil) + v.ID = &x + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TI64 { + var x int64 + x, err = field.Value.GetI64(), error(nil) + v.CreatedTime = &x + if err != nil { + return err + } + + } + case 30: + if field.Value.Type() == wire.TI32 { + var x FailoverType + x, err = _FailoverType_Read(field.Value) + v.FailoverType = &x + if err != nil { + return err + } + + } + case 40: + if field.Value.Type() == wire.TList { + v.ClusterFailovers, err = _List_ClusterFailover_Read(field.Value.GetList()) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a FailoverEvent +// struct. 
+func (v *FailoverEvent) String() string { + if v == nil { + return "" + } + + var fields [4]string + i := 0 + if v.ID != nil { + fields[i] = fmt.Sprintf("ID: %v", *(v.ID)) + i++ + } + if v.CreatedTime != nil { + fields[i] = fmt.Sprintf("CreatedTime: %v", *(v.CreatedTime)) + i++ + } + if v.FailoverType != nil { + fields[i] = fmt.Sprintf("FailoverType: %v", *(v.FailoverType)) + i++ + } + if v.ClusterFailovers != nil { + fields[i] = fmt.Sprintf("ClusterFailovers: %v", v.ClusterFailovers) + i++ + } + + return fmt.Sprintf("FailoverEvent{%v}", strings.Join(fields[:i], ", ")) +} + +func _FailoverType_EqualsPtr(lhs, rhs *FailoverType) bool { + if lhs != nil && rhs != nil { + + x := *lhs + y := *rhs + return x.Equals(y) + } + return lhs == nil && rhs == nil +} + +func _List_ClusterFailover_Equals(lhs, rhs []*ClusterFailover) bool { + if len(lhs) != len(rhs) { + return false + } + + for i, lv := range lhs { + rv := rhs[i] + if !lv.Equals(rv) { + return false + } + } + + return true +} + +// Equals returns true if all the fields of this FailoverEvent match the +// provided FailoverEvent. +// +// This function performs a deep comparison. +func (v *FailoverEvent) Equals(rhs *FailoverEvent) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !_String_EqualsPtr(v.ID, rhs.ID) { + return false + } + if !_I64_EqualsPtr(v.CreatedTime, rhs.CreatedTime) { + return false + } + if !_FailoverType_EqualsPtr(v.FailoverType, rhs.FailoverType) { + return false + } + if !((v.ClusterFailovers == nil && rhs.ClusterFailovers == nil) || (v.ClusterFailovers != nil && rhs.ClusterFailovers != nil && _List_ClusterFailover_Equals(v.ClusterFailovers, rhs.ClusterFailovers))) { + return false + } + + return true +} + +type _List_ClusterFailover_Zapper []*ClusterFailover + +// MarshalLogArray implements zapcore.ArrayMarshaler, enabling +// fast logging of _List_ClusterFailover_Zapper. +func (l _List_ClusterFailover_Zapper) MarshalLogArray(enc zapcore.ArrayEncoder) (err error) { + for _, v := range l { + err = multierr.Append(err, enc.AppendObject(v)) + } + return err +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of FailoverEvent. +func (v *FailoverEvent) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.ID != nil { + enc.AddString("id", *v.ID) + } + if v.CreatedTime != nil { + enc.AddInt64("createdTime", *v.CreatedTime) + } + if v.FailoverType != nil { + err = multierr.Append(err, enc.AddObject("failoverType", *v.FailoverType)) + } + if v.ClusterFailovers != nil { + err = multierr.Append(err, enc.AddArray("clusterFailovers", (_List_ClusterFailover_Zapper)(v.ClusterFailovers))) + } + return err +} + +// GetID returns the value of ID if it is set or its +// zero value if it is unset. +func (v *FailoverEvent) GetID() (o string) { + if v != nil && v.ID != nil { + return *v.ID + } + + return +} + +// IsSetID returns true if ID is not nil. +func (v *FailoverEvent) IsSetID() bool { + return v != nil && v.ID != nil +} + +// GetCreatedTime returns the value of CreatedTime if it is set or its +// zero value if it is unset. +func (v *FailoverEvent) GetCreatedTime() (o int64) { + if v != nil && v.CreatedTime != nil { + return *v.CreatedTime + } + + return +} + +// IsSetCreatedTime returns true if CreatedTime is not nil. 
+func (v *FailoverEvent) IsSetCreatedTime() bool { + return v != nil && v.CreatedTime != nil +} + +// GetFailoverType returns the value of FailoverType if it is set or its +// zero value if it is unset. +func (v *FailoverEvent) GetFailoverType() (o FailoverType) { + if v != nil && v.FailoverType != nil { + return *v.FailoverType + } + + return +} + +// IsSetFailoverType returns true if FailoverType is not nil. +func (v *FailoverEvent) IsSetFailoverType() bool { + return v != nil && v.FailoverType != nil +} + +// GetClusterFailovers returns the value of ClusterFailovers if it is set or its +// zero value if it is unset. +func (v *FailoverEvent) GetClusterFailovers() (o []*ClusterFailover) { + if v != nil && v.ClusterFailovers != nil { + return v.ClusterFailovers + } + + return +} + +// IsSetClusterFailovers returns true if ClusterFailovers is not nil. +func (v *FailoverEvent) IsSetClusterFailovers() bool { + return v != nil && v.ClusterFailovers != nil +} + type FailoverInfo struct { FailoverVersion *int64 `json:"failoverVersion,omitempty"` FailoverStartTimestamp *int64 `json:"failoverStartTimestamp,omitempty"` @@ -29398,6 +29747,190 @@ func (v *FailoverInfo) IsSetPendingShards() bool { return v != nil && v.PendingShards != nil } +type FailoverType int32 + +const ( + FailoverTypeInvalid FailoverType = 0 + FailoverTypeForce FailoverType = 1 + FailoverTypeGraceful FailoverType = 2 +) + +// FailoverType_Values returns all recognized values of FailoverType. +func FailoverType_Values() []FailoverType { + return []FailoverType{ + FailoverTypeInvalid, + FailoverTypeForce, + FailoverTypeGraceful, + } +} + +// UnmarshalText tries to decode FailoverType from a byte slice +// containing its name. +// +// var v FailoverType +// err := v.UnmarshalText([]byte("INVALID")) +func (v *FailoverType) UnmarshalText(value []byte) error { + switch s := string(value); s { + case "INVALID": + *v = FailoverTypeInvalid + return nil + case "FORCE": + *v = FailoverTypeForce + return nil + case "GRACEFUL": + *v = FailoverTypeGraceful + return nil + default: + val, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return fmt.Errorf("unknown enum value %q for %q: %v", s, "FailoverType", err) + } + *v = FailoverType(val) + return nil + } +} + +// MarshalText encodes FailoverType to text. +// +// If the enum value is recognized, its name is returned. Otherwise, +// its integer value is returned. +// +// This implements the TextMarshaler interface. +func (v FailoverType) MarshalText() ([]byte, error) { + switch int32(v) { + case 0: + return []byte("INVALID"), nil + case 1: + return []byte("FORCE"), nil + case 2: + return []byte("GRACEFUL"), nil + } + return []byte(strconv.FormatInt(int64(v), 10)), nil +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of FailoverType. +// Enums are logged as objects, where the value is logged with key "value", and +// if this value's name is known, the name is logged with key "name". +func (v FailoverType) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt32("value", int32(v)) + switch int32(v) { + case 0: + enc.AddString("name", "INVALID") + case 1: + enc.AddString("name", "FORCE") + case 2: + enc.AddString("name", "GRACEFUL") + } + return nil +} + +// Ptr returns a pointer to this enum value. +func (v FailoverType) Ptr() *FailoverType { + return &v +} + +// ToWire translates FailoverType into a Thrift-level intermediate +// representation. 
This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// Enums are represented as 32-bit integers over the wire. +func (v FailoverType) ToWire() (wire.Value, error) { + return wire.NewValueI32(int32(v)), nil +} + +// FromWire deserializes FailoverType from its Thrift-level +// representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TI32) +// if err != nil { +// return FailoverType(0), err +// } +// +// var v FailoverType +// if err := v.FromWire(x); err != nil { +// return FailoverType(0), err +// } +// return v, nil +func (v *FailoverType) FromWire(w wire.Value) error { + *v = (FailoverType)(w.GetI32()) + return nil +} + +// String returns a readable string representation of FailoverType. +func (v FailoverType) String() string { + w := int32(v) + switch w { + case 0: + return "INVALID" + case 1: + return "FORCE" + case 2: + return "GRACEFUL" + } + return fmt.Sprintf("FailoverType(%d)", w) +} + +// Equals returns true if this FailoverType value matches the provided +// value. +func (v FailoverType) Equals(rhs FailoverType) bool { + return v == rhs +} + +// MarshalJSON serializes FailoverType into JSON. +// +// If the enum value is recognized, its name is returned. Otherwise, +// its integer value is returned. +// +// This implements json.Marshaler. +func (v FailoverType) MarshalJSON() ([]byte, error) { + switch int32(v) { + case 0: + return ([]byte)("\"INVALID\""), nil + case 1: + return ([]byte)("\"FORCE\""), nil + case 2: + return ([]byte)("\"GRACEFUL\""), nil + } + return ([]byte)(strconv.FormatInt(int64(v), 10)), nil +} + +// UnmarshalJSON attempts to decode FailoverType from its JSON +// representation. +// +// This implementation supports both, numeric and string inputs. If a +// string is provided, it must be a known enum name. +// +// This implements json.Unmarshaler. +func (v *FailoverType) UnmarshalJSON(text []byte) error { + d := json.NewDecoder(bytes.NewReader(text)) + d.UseNumber() + t, err := d.Token() + if err != nil { + return err + } + + switch w := t.(type) { + case json.Number: + x, err := w.Int64() + if err != nil { + return err + } + if x > math.MaxInt32 { + return fmt.Errorf("enum overflow from JSON %q for %q", text, "FailoverType") + } + if x < math.MinInt32 { + return fmt.Errorf("enum underflow from JSON %q for %q", text, "FailoverType") + } + *v = (FailoverType)(x) + return nil + case string: + return v.UnmarshalText([]byte(w)) + default: + return fmt.Errorf("invalid JSON value %q (%T) to unmarshal into %q", t, t, "FailoverType") + } +} + type FeatureFlags struct { WorkflowExecutionAlreadyCompletedErrorEnabled *bool `json:"WorkflowExecutionAlreadyCompletedErrorEnabled,omitempty"` } @@ -38450,6 +38983,591 @@ func (v *ListDomainsResponse) IsSetNextPageToken() bool { return v != nil && v.NextPageToken != nil } +type ListFailoverHistoryRequest struct { + Filters *ListFailoverHistoryRequestFilters `json:"filters,omitempty"` + Pagination *PaginationOptions `json:"pagination,omitempty"` +} + +// ToWire translates a ListFailoverHistoryRequest struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *ListFailoverHistoryRequest) ToWire() (wire.Value, error) { + var ( + fields [2]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.Filters != nil { + w, err = v.Filters.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.Pagination != nil { + w, err = v.Pagination.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _ListFailoverHistoryRequestFilters_Read(w wire.Value) (*ListFailoverHistoryRequestFilters, error) { + var v ListFailoverHistoryRequestFilters + err := v.FromWire(w) + return &v, err +} + +func _PaginationOptions_Read(w wire.Value) (*PaginationOptions, error) { + var v PaginationOptions + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a ListFailoverHistoryRequest struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a ListFailoverHistoryRequest struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v ListFailoverHistoryRequest +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *ListFailoverHistoryRequest) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TStruct { + v.Filters, err = _ListFailoverHistoryRequestFilters_Read(field.Value) + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TStruct { + v.Pagination, err = _PaginationOptions_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a ListFailoverHistoryRequest +// struct. +func (v *ListFailoverHistoryRequest) String() string { + if v == nil { + return "" + } + + var fields [2]string + i := 0 + if v.Filters != nil { + fields[i] = fmt.Sprintf("Filters: %v", v.Filters) + i++ + } + if v.Pagination != nil { + fields[i] = fmt.Sprintf("Pagination: %v", v.Pagination) + i++ + } + + return fmt.Sprintf("ListFailoverHistoryRequest{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this ListFailoverHistoryRequest match the +// provided ListFailoverHistoryRequest. +// +// This function performs a deep comparison. +func (v *ListFailoverHistoryRequest) Equals(rhs *ListFailoverHistoryRequest) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.Filters == nil && rhs.Filters == nil) || (v.Filters != nil && rhs.Filters != nil && v.Filters.Equals(rhs.Filters))) { + return false + } + if !((v.Pagination == nil && rhs.Pagination == nil) || (v.Pagination != nil && rhs.Pagination != nil && v.Pagination.Equals(rhs.Pagination))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of ListFailoverHistoryRequest. 
+func (v *ListFailoverHistoryRequest) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.Filters != nil { + err = multierr.Append(err, enc.AddObject("filters", v.Filters)) + } + if v.Pagination != nil { + err = multierr.Append(err, enc.AddObject("pagination", v.Pagination)) + } + return err +} + +// GetFilters returns the value of Filters if it is set or its +// zero value if it is unset. +func (v *ListFailoverHistoryRequest) GetFilters() (o *ListFailoverHistoryRequestFilters) { + if v != nil && v.Filters != nil { + return v.Filters + } + + return +} + +// IsSetFilters returns true if Filters is not nil. +func (v *ListFailoverHistoryRequest) IsSetFilters() bool { + return v != nil && v.Filters != nil +} + +// GetPagination returns the value of Pagination if it is set or its +// zero value if it is unset. +func (v *ListFailoverHistoryRequest) GetPagination() (o *PaginationOptions) { + if v != nil && v.Pagination != nil { + return v.Pagination + } + + return +} + +// IsSetPagination returns true if Pagination is not nil. +func (v *ListFailoverHistoryRequest) IsSetPagination() bool { + return v != nil && v.Pagination != nil +} + +type ListFailoverHistoryRequestFilters struct { + DomainID *string `json:"domainID,omitempty"` +} + +// ToWire translates a ListFailoverHistoryRequestFilters struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *ListFailoverHistoryRequestFilters) ToWire() (wire.Value, error) { + var ( + fields [1]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.DomainID != nil { + w, err = wire.NewValueString(*(v.DomainID)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +// FromWire deserializes a ListFailoverHistoryRequestFilters struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a ListFailoverHistoryRequestFilters struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v ListFailoverHistoryRequestFilters +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *ListFailoverHistoryRequestFilters) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TBinary { + var x string + x, err = field.Value.GetString(), error(nil) + v.DomainID = &x + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a ListFailoverHistoryRequestFilters +// struct. 
+func (v *ListFailoverHistoryRequestFilters) String() string { + if v == nil { + return "" + } + + var fields [1]string + i := 0 + if v.DomainID != nil { + fields[i] = fmt.Sprintf("DomainID: %v", *(v.DomainID)) + i++ + } + + return fmt.Sprintf("ListFailoverHistoryRequestFilters{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this ListFailoverHistoryRequestFilters match the +// provided ListFailoverHistoryRequestFilters. +// +// This function performs a deep comparison. +func (v *ListFailoverHistoryRequestFilters) Equals(rhs *ListFailoverHistoryRequestFilters) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !_String_EqualsPtr(v.DomainID, rhs.DomainID) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of ListFailoverHistoryRequestFilters. +func (v *ListFailoverHistoryRequestFilters) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.DomainID != nil { + enc.AddString("domainID", *v.DomainID) + } + return err +} + +// GetDomainID returns the value of DomainID if it is set or its +// zero value if it is unset. +func (v *ListFailoverHistoryRequestFilters) GetDomainID() (o string) { + if v != nil && v.DomainID != nil { + return *v.DomainID + } + + return +} + +// IsSetDomainID returns true if DomainID is not nil. +func (v *ListFailoverHistoryRequestFilters) IsSetDomainID() bool { + return v != nil && v.DomainID != nil +} + +type ListFailoverHistoryResponse struct { + FailoverEvents []*FailoverEvent `json:"failoverEvents,omitempty"` + NextPageToken []byte `json:"nextPageToken,omitempty"` +} + +type _List_FailoverEvent_ValueList []*FailoverEvent + +func (v _List_FailoverEvent_ValueList) ForEach(f func(wire.Value) error) error { + for i, x := range v { + if x == nil { + return fmt.Errorf("invalid [%v]: value is nil", i) + } + w, err := x.ToWire() + if err != nil { + return err + } + err = f(w) + if err != nil { + return err + } + } + return nil +} + +func (v _List_FailoverEvent_ValueList) Size() int { + return len(v) +} + +func (_List_FailoverEvent_ValueList) ValueType() wire.Type { + return wire.TStruct +} + +func (_List_FailoverEvent_ValueList) Close() {} + +// ToWire translates a ListFailoverHistoryResponse struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *ListFailoverHistoryResponse) ToWire() (wire.Value, error) { + var ( + fields [2]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.FailoverEvents != nil { + w, err = wire.NewValueList(_List_FailoverEvent_ValueList(v.FailoverEvents)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.NextPageToken != nil { + w, err = wire.NewValueBinary(v.NextPageToken), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _FailoverEvent_Read(w wire.Value) (*FailoverEvent, error) { + var v FailoverEvent + err := v.FromWire(w) + return &v, err +} + +func _List_FailoverEvent_Read(l wire.ValueList) ([]*FailoverEvent, error) { + if l.ValueType() != wire.TStruct { + return nil, nil + } + + o := make([]*FailoverEvent, 0, l.Size()) + err := l.ForEach(func(x wire.Value) error { + i, err := _FailoverEvent_Read(x) + if err != nil { + return err + } + o = append(o, i) + return nil + }) + l.Close() + return o, err +} + +// FromWire deserializes a ListFailoverHistoryResponse struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a ListFailoverHistoryResponse struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v ListFailoverHistoryResponse +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *ListFailoverHistoryResponse) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TList { + v.FailoverEvents, err = _List_FailoverEvent_Read(field.Value.GetList()) + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TBinary { + v.NextPageToken, err = field.Value.GetBinary(), error(nil) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a ListFailoverHistoryResponse +// struct. +func (v *ListFailoverHistoryResponse) String() string { + if v == nil { + return "" + } + + var fields [2]string + i := 0 + if v.FailoverEvents != nil { + fields[i] = fmt.Sprintf("FailoverEvents: %v", v.FailoverEvents) + i++ + } + if v.NextPageToken != nil { + fields[i] = fmt.Sprintf("NextPageToken: %v", v.NextPageToken) + i++ + } + + return fmt.Sprintf("ListFailoverHistoryResponse{%v}", strings.Join(fields[:i], ", ")) +} + +func _List_FailoverEvent_Equals(lhs, rhs []*FailoverEvent) bool { + if len(lhs) != len(rhs) { + return false + } + + for i, lv := range lhs { + rv := rhs[i] + if !lv.Equals(rv) { + return false + } + } + + return true +} + +// Equals returns true if all the fields of this ListFailoverHistoryResponse match the +// provided ListFailoverHistoryResponse. +// +// This function performs a deep comparison. 
+func (v *ListFailoverHistoryResponse) Equals(rhs *ListFailoverHistoryResponse) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.FailoverEvents == nil && rhs.FailoverEvents == nil) || (v.FailoverEvents != nil && rhs.FailoverEvents != nil && _List_FailoverEvent_Equals(v.FailoverEvents, rhs.FailoverEvents))) { + return false + } + if !((v.NextPageToken == nil && rhs.NextPageToken == nil) || (v.NextPageToken != nil && rhs.NextPageToken != nil && bytes.Equal(v.NextPageToken, rhs.NextPageToken))) { + return false + } + + return true +} + +type _List_FailoverEvent_Zapper []*FailoverEvent + +// MarshalLogArray implements zapcore.ArrayMarshaler, enabling +// fast logging of _List_FailoverEvent_Zapper. +func (l _List_FailoverEvent_Zapper) MarshalLogArray(enc zapcore.ArrayEncoder) (err error) { + for _, v := range l { + err = multierr.Append(err, enc.AppendObject(v)) + } + return err +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of ListFailoverHistoryResponse. +func (v *ListFailoverHistoryResponse) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.FailoverEvents != nil { + err = multierr.Append(err, enc.AddArray("failoverEvents", (_List_FailoverEvent_Zapper)(v.FailoverEvents))) + } + if v.NextPageToken != nil { + enc.AddString("nextPageToken", base64.StdEncoding.EncodeToString(v.NextPageToken)) + } + return err +} + +// GetFailoverEvents returns the value of FailoverEvents if it is set or its +// zero value if it is unset. +func (v *ListFailoverHistoryResponse) GetFailoverEvents() (o []*FailoverEvent) { + if v != nil && v.FailoverEvents != nil { + return v.FailoverEvents + } + + return +} + +// IsSetFailoverEvents returns true if FailoverEvents is not nil. +func (v *ListFailoverHistoryResponse) IsSetFailoverEvents() bool { + return v != nil && v.FailoverEvents != nil +} + +// GetNextPageToken returns the value of NextPageToken if it is set or its +// zero value if it is unset. +func (v *ListFailoverHistoryResponse) GetNextPageToken() (o []byte) { + if v != nil && v.NextPageToken != nil { + return v.NextPageToken + } + + return +} + +// IsSetNextPageToken returns true if NextPageToken is not nil. +func (v *ListFailoverHistoryResponse) IsSetNextPageToken() bool { + return v != nil && v.NextPageToken != nil +} + type ListOpenWorkflowExecutionsRequest struct { Domain *string `json:"domain,omitempty"` MaximumPageSize *int32 `json:"maximumPageSize,omitempty"` @@ -40263,6 +41381,186 @@ func (v *Memo) IsSetFields() bool { return v != nil && v.Fields != nil } +type PaginationOptions struct { + PageSize *int32 `json:"pageSize,omitempty"` + NextPageToken []byte `json:"nextPageToken,omitempty"` +} + +// ToWire translates a PaginationOptions struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *PaginationOptions) ToWire() (wire.Value, error) { + var ( + fields [2]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.PageSize != nil { + w, err = wire.NewValueI32(*(v.PageSize)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.NextPageToken != nil { + w, err = wire.NewValueBinary(v.NextPageToken), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +// FromWire deserializes a PaginationOptions struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a PaginationOptions struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v PaginationOptions +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *PaginationOptions) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TI32 { + var x int32 + x, err = field.Value.GetI32(), error(nil) + v.PageSize = &x + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TBinary { + v.NextPageToken, err = field.Value.GetBinary(), error(nil) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a PaginationOptions +// struct. +func (v *PaginationOptions) String() string { + if v == nil { + return "" + } + + var fields [2]string + i := 0 + if v.PageSize != nil { + fields[i] = fmt.Sprintf("PageSize: %v", *(v.PageSize)) + i++ + } + if v.NextPageToken != nil { + fields[i] = fmt.Sprintf("NextPageToken: %v", v.NextPageToken) + i++ + } + + return fmt.Sprintf("PaginationOptions{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this PaginationOptions match the +// provided PaginationOptions. +// +// This function performs a deep comparison. +func (v *PaginationOptions) Equals(rhs *PaginationOptions) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !_I32_EqualsPtr(v.PageSize, rhs.PageSize) { + return false + } + if !((v.NextPageToken == nil && rhs.NextPageToken == nil) || (v.NextPageToken != nil && rhs.NextPageToken != nil && bytes.Equal(v.NextPageToken, rhs.NextPageToken))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of PaginationOptions. +func (v *PaginationOptions) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.PageSize != nil { + enc.AddInt32("pageSize", *v.PageSize) + } + if v.NextPageToken != nil { + enc.AddString("nextPageToken", base64.StdEncoding.EncodeToString(v.NextPageToken)) + } + return err +} + +// GetPageSize returns the value of PageSize if it is set or its +// zero value if it is unset. +func (v *PaginationOptions) GetPageSize() (o int32) { + if v != nil && v.PageSize != nil { + return *v.PageSize + } + + return +} + +// IsSetPageSize returns true if PageSize is not nil. 
+func (v *PaginationOptions) IsSetPageSize() bool { + return v != nil && v.PageSize != nil +} + +// GetNextPageToken returns the value of NextPageToken if it is set or its +// zero value if it is unset. +func (v *PaginationOptions) GetNextPageToken() (o []byte) { + if v != nil && v.NextPageToken != nil { + return v.NextPageToken + } + + return +} + +// IsSetNextPageToken returns true if NextPageToken is not nil. +func (v *PaginationOptions) IsSetNextPageToken() bool { + return v != nil && v.NextPageToken != nil +} + type ParentClosePolicy int32 const ( @@ -48419,7 +49717,6 @@ type RegisterDomainRequest struct { EmitMetric *bool `json:"emitMetric,omitempty"` Clusters []*ClusterReplicationConfiguration `json:"clusters,omitempty"` ActiveClusterName *string `json:"activeClusterName,omitempty"` - ActiveClustersByRegion map[string]string `json:"activeClustersByRegion,omitempty"` ActiveClusters *ActiveClusters `json:"activeClusters,omitempty"` Data map[string]string `json:"data,omitempty"` SecurityToken *string `json:"securityToken,omitempty"` @@ -48455,7 +49752,7 @@ func Default_RegisterDomainRequest() *RegisterDomainRequest { // } func (v *RegisterDomainRequest) ToWire() (wire.Value, error) { var ( - fields [16]wire.Field + fields [15]wire.Field i int = 0 w wire.Value err error @@ -48521,14 +49818,6 @@ func (v *RegisterDomainRequest) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 70, Value: w} i++ } - if v.ActiveClustersByRegion != nil { - w, err = wire.NewValueMap(_Map_String_String_MapItemList(v.ActiveClustersByRegion)), error(nil) - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 75, Value: w} - i++ - } if v.ActiveClusters != nil { w, err = v.ActiveClusters.ToWire() if err != nil { @@ -48686,14 +49975,6 @@ func (v *RegisterDomainRequest) FromWire(w wire.Value) error { return err } - } - case 75: - if field.Value.Type() == wire.TMap { - v.ActiveClustersByRegion, err = _Map_String_String_Read(field.Value.GetMap()) - if err != nil { - return err - } - } case 76: if field.Value.Type() == wire.TStruct { @@ -48788,7 +50069,7 @@ func (v *RegisterDomainRequest) String() string { return "" } - var fields [16]string + var fields [15]string i := 0 if v.Name != nil { fields[i] = fmt.Sprintf("Name: %v", *(v.Name)) @@ -48818,10 +50099,6 @@ func (v *RegisterDomainRequest) String() string { fields[i] = fmt.Sprintf("ActiveClusterName: %v", *(v.ActiveClusterName)) i++ } - if v.ActiveClustersByRegion != nil { - fields[i] = fmt.Sprintf("ActiveClustersByRegion: %v", v.ActiveClustersByRegion) - i++ - } if v.ActiveClusters != nil { fields[i] = fmt.Sprintf("ActiveClusters: %v", v.ActiveClusters) i++ @@ -48889,9 +50166,6 @@ func (v *RegisterDomainRequest) Equals(rhs *RegisterDomainRequest) bool { if !_String_EqualsPtr(v.ActiveClusterName, rhs.ActiveClusterName) { return false } - if !((v.ActiveClustersByRegion == nil && rhs.ActiveClustersByRegion == nil) || (v.ActiveClustersByRegion != nil && rhs.ActiveClustersByRegion != nil && _Map_String_String_Equals(v.ActiveClustersByRegion, rhs.ActiveClustersByRegion))) { - return false - } if !((v.ActiveClusters == nil && rhs.ActiveClusters == nil) || (v.ActiveClusters != nil && rhs.ActiveClusters != nil && v.ActiveClusters.Equals(rhs.ActiveClusters))) { return false } @@ -48947,9 +50221,6 @@ func (v *RegisterDomainRequest) MarshalLogObject(enc zapcore.ObjectEncoder) (err if v.ActiveClusterName != nil { enc.AddString("activeClusterName", *v.ActiveClusterName) } - if v.ActiveClustersByRegion != nil { - err = multierr.Append(err, 
enc.AddObject("activeClustersByRegion", (_Map_String_String_Zapper)(v.ActiveClustersByRegion))) - } if v.ActiveClusters != nil { err = multierr.Append(err, enc.AddObject("activeClusters", v.ActiveClusters)) } @@ -49082,21 +50353,6 @@ func (v *RegisterDomainRequest) IsSetActiveClusterName() bool { return v != nil && v.ActiveClusterName != nil } -// GetActiveClustersByRegion returns the value of ActiveClustersByRegion if it is set or its -// zero value if it is unset. -func (v *RegisterDomainRequest) GetActiveClustersByRegion() (o map[string]string) { - if v != nil && v.ActiveClustersByRegion != nil { - return v.ActiveClustersByRegion - } - - return -} - -// IsSetActiveClustersByRegion returns true if ActiveClustersByRegion is not nil. -func (v *RegisterDomainRequest) IsSetActiveClustersByRegion() bool { - return v != nil && v.ActiveClustersByRegion != nil -} - // GetActiveClusters returns the value of ActiveClusters if it is set or its // zero value if it is unset. func (v *RegisterDomainRequest) GetActiveClusters() (o *ActiveClusters) { @@ -79469,8 +80725,8 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "shared", Package: "go.uber.org/cadence/.gen/go/shared", FilePath: "shared.thrift", - SHA1: "f6ba7719fb3d987d5173da1b1a6a19f0ce1927c2", + SHA1: "0410114248ab90c8d000ffecc18edbbe3d0819e6", Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence\n\nexception BadRequestError {\n 1: required string message\n}\n\nexception InternalServiceError {\n 1: required string message\n}\n\nexception InternalDataInconsistencyError {\n 1: required string message\n}\n\nexception DomainAlreadyExistsError {\n 1: required string message\n}\n\nexception WorkflowExecutionAlreadyStartedError {\n 10: optional string message\n 20: optional string startRequestId\n 30: optional string runId\n}\n\nexception WorkflowExecutionAlreadyCompletedError {\n 1: required string message\n}\n\nexception EntityNotExistsError {\n 1: required string message\n 2: optional string currentCluster\n 3: optional string activeCluster\n 4: required list activeClusters // todo(david.porter) remove as its disused\n}\n\nexception ServiceBusyError {\n 1: required string message\n 2: optional string reason\n}\n\nexception CancellationAlreadyRequestedError {\n 1: required string message\n}\n\nexception QueryFailedError {\n 1: required string message\n}\n\nexception DomainNotActiveError {\n 1: required string message\n 2: required string domainName\n 3: required string currentCluster\n 4: required string activeCluster\n 5: required list activeClusters // todo (david.porter) remove this field as it's disused\n}\n\nexception LimitExceededError {\n 1: required string message\n}\n\nexception AccessDeniedError {\n 1: required string message\n}\n\nexception RetryTaskV2Error {\n 1: required string message\n 2: optional string domainId\n 3: optional string workflowId\n 4: optional string runId\n 5: optional i64 (js.type = \"Long\") startEventId\n 6: optional i64 (js.type = \"Long\") startEventVersion\n 7: optional i64 (js.type = \"Long\") endEventId\n 8: optional i64 (js.type = \"Long\") endEventVersion\n}\n\nexception ClientVersionNotSupportedError {\n 1: required string featureVersion\n 2: required string clientImpl\n 3: required string supportedVersions\n}\n\nexception FeatureNotEnabledError {\n 1: required string featureFlag\n}\n\nexception CurrentBranchChangedError {\n 10: required string message\n 20: required binary currentBranchToken\n}\n\nexception RemoteSyncMatchedError {\n 10: required string message\n}\n\nexception StickyWorkerUnavailableError {\n 1: required string message\n}\n\nexception TaskListNotOwnedByHostError {\n 1: required string ownedByIdentity\n 2: required string myIdentity\n 3: required string tasklistName\n}\n\nenum WorkflowIdReusePolicy {\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running, and the last execution close state is in\n * [terminated, cancelled, timeouted, failed].\n */\n AllowDuplicateFailedOnly,\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running.\n */\n AllowDuplicate,\n /*\n * do not allow start a workflow execution using the same workflow ID at all\n */\n RejectDuplicate,\n /*\n * if a workflow is running using the same workflow ID, terminate it and start a new one\n */\n TerminateIfRunning,\n}\n\nenum DomainStatus {\n REGISTERED,\n DEPRECATED,\n DELETED,\n}\n\nenum TimeoutType {\n START_TO_CLOSE,\n SCHEDULE_TO_START,\n SCHEDULE_TO_CLOSE,\n HEARTBEAT,\n}\n\nenum ParentClosePolicy {\n\tABANDON,\n\tREQUEST_CANCEL,\n\tTERMINATE,\n}\n\n\n// whenever this 
list of decision is changed\n// do change the mutableStateBuilder.go\n// function shouldBufferEvent\n// to make sure wo do the correct event ordering\nenum DecisionType {\n ScheduleActivityTask,\n RequestCancelActivityTask,\n StartTimer,\n CompleteWorkflowExecution,\n FailWorkflowExecution,\n CancelTimer,\n CancelWorkflowExecution,\n RequestCancelExternalWorkflowExecution,\n RecordMarker,\n ContinueAsNewWorkflowExecution,\n StartChildWorkflowExecution,\n SignalExternalWorkflowExecution,\n UpsertWorkflowSearchAttributes,\n}\n\nenum EventType {\n WorkflowExecutionStarted,\n WorkflowExecutionCompleted,\n WorkflowExecutionFailed,\n WorkflowExecutionTimedOut,\n DecisionTaskScheduled,\n DecisionTaskStarted,\n DecisionTaskCompleted,\n DecisionTaskTimedOut\n DecisionTaskFailed,\n ActivityTaskScheduled,\n ActivityTaskStarted,\n ActivityTaskCompleted,\n ActivityTaskFailed,\n ActivityTaskTimedOut,\n ActivityTaskCancelRequested,\n RequestCancelActivityTaskFailed,\n ActivityTaskCanceled,\n TimerStarted,\n TimerFired,\n CancelTimerFailed,\n TimerCanceled,\n WorkflowExecutionCancelRequested,\n WorkflowExecutionCanceled,\n RequestCancelExternalWorkflowExecutionInitiated,\n RequestCancelExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionCancelRequested,\n MarkerRecorded,\n WorkflowExecutionSignaled,\n WorkflowExecutionTerminated,\n WorkflowExecutionContinuedAsNew,\n StartChildWorkflowExecutionInitiated,\n StartChildWorkflowExecutionFailed,\n ChildWorkflowExecutionStarted,\n ChildWorkflowExecutionCompleted,\n ChildWorkflowExecutionFailed,\n ChildWorkflowExecutionCanceled,\n ChildWorkflowExecutionTimedOut,\n ChildWorkflowExecutionTerminated,\n SignalExternalWorkflowExecutionInitiated,\n SignalExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionSignaled,\n UpsertWorkflowSearchAttributes,\n}\n\nenum DecisionTaskFailedCause {\n UNHANDLED_DECISION,\n BAD_SCHEDULE_ACTIVITY_ATTRIBUTES,\n BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES,\n BAD_START_TIMER_ATTRIBUTES,\n BAD_CANCEL_TIMER_ATTRIBUTES,\n BAD_RECORD_MARKER_ATTRIBUTES,\n BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CONTINUE_AS_NEW_ATTRIBUTES,\n START_TIMER_DUPLICATE_ID,\n RESET_STICKY_TASKLIST,\n WORKFLOW_WORKER_UNHANDLED_FAILURE,\n BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_START_CHILD_EXECUTION_ATTRIBUTES,\n FORCE_CLOSE_DECISION,\n FAILOVER_CLOSE_DECISION,\n BAD_SIGNAL_INPUT_SIZE,\n RESET_WORKFLOW,\n BAD_BINARY,\n SCHEDULE_ACTIVITY_DUPLICATE_ID,\n BAD_SEARCH_ATTRIBUTES,\n}\n\nenum DecisionTaskTimedOutCause {\n TIMEOUT,\n RESET,\n}\n\nenum CancelExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n WORKFLOW_ALREADY_COMPLETED,\n}\n\nenum SignalExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n WORKFLOW_ALREADY_COMPLETED,\n}\n\nenum ChildWorkflowExecutionFailedCause {\n WORKFLOW_ALREADY_RUNNING,\n}\n\n// TODO: when migrating to gRPC, add a running / none status,\n// currently, customer is using null / nil as an indication\n// that workflow is still running\nenum WorkflowExecutionCloseStatus {\n COMPLETED,\n FAILED,\n CANCELED,\n TERMINATED,\n CONTINUED_AS_NEW,\n TIMED_OUT,\n}\n\nenum QueryTaskCompletedType {\n COMPLETED,\n FAILED,\n}\n\nenum QueryResultType {\n ANSWERED,\n FAILED,\n}\n\nenum PendingActivityState {\n SCHEDULED,\n STARTED,\n CANCEL_REQUESTED,\n}\n\nenum PendingDecisionState {\n SCHEDULED,\n STARTED,\n}\n\nenum 
HistoryEventFilterType {\n ALL_EVENT,\n CLOSE_EVENT,\n}\n\nenum TaskListKind {\n NORMAL,\n STICKY,\n EPHEMERAL,\n}\n\nenum ArchivalStatus {\n DISABLED,\n ENABLED,\n}\n\nenum CronOverlapPolicy {\n SKIPPED,\n BUFFERONE,\n}\n\nenum IndexedValueType {\n STRING,\n KEYWORD,\n INT,\n DOUBLE,\n BOOL,\n DATETIME,\n}\n\nstruct Header {\n 10: optional map fields\n}\n\nstruct WorkflowType {\n 10: optional string name\n}\n\nstruct ActivityType {\n 10: optional string name\n}\n\nstruct TaskList {\n 10: optional string name\n 20: optional TaskListKind kind\n}\n\nenum EncodingType {\n ThriftRW,\n JSON,\n}\n\nenum QueryRejectCondition {\n // NOT_OPEN indicates that query should be rejected if workflow is not open\n NOT_OPEN\n // NOT_COMPLETED_CLEANLY indicates that query should be rejected if workflow did not complete cleanly\n NOT_COMPLETED_CLEANLY\n}\n\nenum QueryConsistencyLevel {\n // EVENTUAL indicates that query should be eventually consistent\n EVENTUAL\n // STRONG indicates that any events that came before query should be reflected in workflow state before running query\n STRONG\n}\n\nstruct DataBlob {\n 10: optional EncodingType EncodingType\n 20: optional binary Data\n}\n\nstruct TaskListMetadata {\n 10: optional double maxTasksPerSecond\n}\n\nstruct WorkflowExecution {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct Memo {\n 10: optional map fields\n}\n\nstruct SearchAttributes {\n 10: optional map indexedFields\n}\n\nstruct WorkerVersionInfo {\n 10: optional string impl\n 20: optional string featureVersion\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional WorkflowExecution execution\n 20: optional WorkflowType type\n 30: optional i64 (js.type = \"Long\") startTime\n 40: optional i64 (js.type = \"Long\") closeTime\n 50: optional WorkflowExecutionCloseStatus closeStatus\n 60: optional i64 (js.type = \"Long\") historyLength\n 70: optional string parentDomainId\n 71: optional string parentDomainName\n 72: optional i64 parentInitatedId\n 80: optional WorkflowExecution parentExecution\n 90: optional i64 (js.type = \"Long\") executionTime\n 100: optional Memo memo\n 101: optional SearchAttributes searchAttributes\n 110: optional ResetPoints autoResetPoints\n 120: optional string taskList\n 121: optional TaskList taskListInfo\n 130: optional bool isCron\n 140: optional i64 (js.type = \"Long\") updateTime\n 150: optional map partitionConfig\n 160: optional CronOverlapPolicy cronOverlapPolicy\n 170: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct WorkflowExecutionConfiguration {\n 10: optional TaskList taskList\n 20: optional i32 executionStartToCloseTimeoutSeconds\n 30: optional i32 taskStartToCloseTimeoutSeconds\n// 40: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n}\n\nstruct TransientDecisionInfo {\n 10: optional HistoryEvent scheduledEvent\n 20: optional HistoryEvent startedEvent\n}\n\nstruct ScheduleActivityTaskDecisionAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional Header header\n 90: optional bool requestLocalDispatch\n}\n\nstruct ActivityLocalDispatchInfo{\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") 
scheduledTimestamp\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 50: optional binary taskToken\n}\n\nstruct RequestCancelActivityTaskDecisionAttributes {\n 10: optional string activityId\n}\n\nstruct StartTimerDecisionAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n}\n\nstruct CompleteWorkflowExecutionDecisionAttributes {\n 10: optional binary result\n}\n\nstruct FailWorkflowExecutionDecisionAttributes {\n 10: optional string reason\n 20: optional binary details\n}\n\nstruct CancelTimerDecisionAttributes {\n 10: optional string timerId\n}\n\nstruct CancelWorkflowExecutionDecisionAttributes {\n 10: optional binary details\n}\n\nstruct RequestCancelExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional string runId\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional string signalName\n 40: optional binary input\n 50: optional binary control\n 60: optional bool childWorkflowOnly\n}\n\nstruct UpsertWorkflowSearchAttributesDecisionAttributes {\n 10: optional SearchAttributes searchAttributes\n}\n\nstruct RecordMarkerDecisionAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional Header header\n}\n\nstruct ContinueAsNewWorkflowExecutionDecisionAttributes {\n 10: optional WorkflowType workflowType\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n 60: optional i32 backoffStartIntervalInSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional ContinueAsNewInitiator initiator\n 90: optional string failureReason\n 100: optional binary failureDetails\n 110: optional binary lastCompletionResult\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n 160: optional i32 jitterStartSeconds\n 170: optional CronOverlapPolicy cronOverlapPolicy\n 180: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct StartChildWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional RetryPolicy retryPolicy\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n 160: optional CronOverlapPolicy cronOverlapPolicy\n 170: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct Decision {\n 10: optional DecisionType decisionType\n 20: optional ScheduleActivityTaskDecisionAttributes scheduleActivityTaskDecisionAttributes\n 25: optional StartTimerDecisionAttributes startTimerDecisionAttributes\n 30: optional CompleteWorkflowExecutionDecisionAttributes completeWorkflowExecutionDecisionAttributes\n 
35: optional FailWorkflowExecutionDecisionAttributes failWorkflowExecutionDecisionAttributes\n 40: optional RequestCancelActivityTaskDecisionAttributes requestCancelActivityTaskDecisionAttributes\n 50: optional CancelTimerDecisionAttributes cancelTimerDecisionAttributes\n 60: optional CancelWorkflowExecutionDecisionAttributes cancelWorkflowExecutionDecisionAttributes\n 70: optional RequestCancelExternalWorkflowExecutionDecisionAttributes requestCancelExternalWorkflowExecutionDecisionAttributes\n 80: optional RecordMarkerDecisionAttributes recordMarkerDecisionAttributes\n 90: optional ContinueAsNewWorkflowExecutionDecisionAttributes continueAsNewWorkflowExecutionDecisionAttributes\n 100: optional StartChildWorkflowExecutionDecisionAttributes startChildWorkflowExecutionDecisionAttributes\n 110: optional SignalExternalWorkflowExecutionDecisionAttributes signalExternalWorkflowExecutionDecisionAttributes\n 120: optional UpsertWorkflowSearchAttributesDecisionAttributes upsertWorkflowSearchAttributesDecisionAttributes\n}\n\nstruct WorkflowExecutionStartedEventAttributes {\n 10: optional WorkflowType workflowType\n 12: optional string parentWorkflowDomain\n 14: optional WorkflowExecution parentWorkflowExecution\n 16: optional i64 (js.type = \"Long\") parentInitiatedEventId\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n// 52: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 54: optional string continuedExecutionRunId\n 55: optional ContinueAsNewInitiator initiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 59: optional string originalExecutionRunId // This is the runID when the WorkflowExecutionStarted event is written\n 60: optional string identity\n 61: optional string firstExecutionRunId // This is the very first runID along the chain of ContinueAsNew and Reset.\n 62: optional i64 (js.type = \"Long\") firstScheduledTimeNano\n 70: optional RetryPolicy retryPolicy\n 80: optional i32 attempt\n 90: optional i64 (js.type = \"Long\") expirationTimestamp\n 100: optional string cronSchedule\n 110: optional i32 firstDecisionTaskBackoffSeconds\n 120: optional Memo memo\n 121: optional SearchAttributes searchAttributes\n 130: optional ResetPoints prevAutoResetPoints\n 140: optional Header header\n 150: optional map partitionConfig\n 160: optional string requestId\n 170: optional CronOverlapPolicy cronOverlapPolicy\n 180: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct ResetPoints{\n 10: optional list points\n}\n\n struct ResetPointInfo{\n 10: optional string binaryChecksum\n 20: optional string runId\n 30: optional i64 firstDecisionCompletedId\n 40: optional i64 (js.type = \"Long\") createdTimeNano\n 50: optional i64 (js.type = \"Long\") expiringTimeNano //the time that the run is deleted due to retention\n 60: optional bool resettable // false if the reset point has pending childWFs/reqCancels/signalExternals.\n}\n\nstruct WorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType 
timeoutType\n}\n\nenum ContinueAsNewInitiator {\n Decider,\n RetryPolicy,\n CronSchedule,\n}\n\nstruct WorkflowExecutionContinuedAsNewEventAttributes {\n 10: optional string newExecutionRunId\n 20: optional WorkflowType workflowType\n 30: optional TaskList taskList\n 40: optional binary input\n 50: optional i32 executionStartToCloseTimeoutSeconds\n 60: optional i32 taskStartToCloseTimeoutSeconds\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 80: optional i32 backoffStartIntervalInSeconds\n 90: optional ContinueAsNewInitiator initiator\n 100: optional string failureReason\n 110: optional binary failureDetails\n 120: optional binary lastCompletionResult\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n 160: optional CronOverlapPolicy cronOverlapPolicy\n 170: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct DecisionTaskScheduledEventAttributes {\n 10: optional TaskList taskList\n 20: optional i32 startToCloseTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") attempt\n}\n\nstruct DecisionTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n}\n\nstruct DecisionTaskCompletedEventAttributes {\n 10: optional binary executionContext\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct DecisionTaskTimedOutEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n // for reset workflow\n 40: optional string baseRunId\n 50: optional string newRunId\n 60: optional i64 (js.type = \"Long\") forkEventVersion\n 70: optional string reason\n 80: optional DecisionTaskTimedOutCause cause\n 90: optional string requestId\n}\n\nstruct DecisionTaskFailedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional DecisionTaskFailedCause cause\n 35: optional binary details\n 40: optional string identity\n 50: optional string reason\n // for reset workflow\n 60: optional string baseRunId\n 70: optional string newRunId\n 80: optional i64 (js.type = \"Long\") forkEventVersion\n 90: optional string binaryChecksum\n 100: optional string requestId\n}\n\nstruct ActivityTaskScheduledEventAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional RetryPolicy retryPolicy\n 120: optional Header header\n}\n\nstruct ActivityTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n 40: optional i32 attempt\n 50: optional string lastFailureReason\n 60: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string 
identity\n}\n\nstruct ActivityTaskFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct ActivityTaskTimedOutEventAttributes {\n 05: optional binary details\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n // For a retried activity, it may have a failure before the timeout. It's important to keep this information for debugging.\n // Client can also provide the info for making the next decision\n 40: optional string lastFailureReason\n 50: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCancelRequestedEventAttributes {\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct RequestCancelActivityTaskFailedEventAttributes{\n 10: optional string activityId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ActivityTaskCanceledEventAttributes {\n 10: optional binary details\n 20: optional i64 (js.type = \"Long\") latestCancelRequestedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct TimerStartedEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct TimerFiredEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct TimerCanceledEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct CancelTimerFailedEventAttributes {\n 10: optional string timerId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct WorkflowExecutionCancelRequestedEventAttributes {\n 10: optional string cause\n 20: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 30: optional WorkflowExecution externalWorkflowExecution\n 40: optional string identity\n 50: optional string requestId\n}\n\nstruct WorkflowExecutionCanceledEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional binary details\n}\n\nstruct MarkerRecordedEventAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional Header header\n}\n\nstruct WorkflowExecutionSignaledEventAttributes {\n 10: optional string signalName\n 20: optional binary input\n 30: optional string identity\n 40: optional string requestId\n}\n\nstruct WorkflowExecutionTerminatedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RequestCancelExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct RequestCancelExternalWorkflowExecutionFailedEventAttributes {\n 10: optional 
CancelExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionCancelRequestedEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n}\n\nstruct SignalExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional string signalName\n 50: optional binary input\n 60: optional binary control\n 70: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionFailedEventAttributes {\n 10: optional SignalExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionSignaledEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n}\n\nstruct UpsertWorkflowSearchAttributesEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionInitiatedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Header header\n 150: optional Memo memo\n 160: optional SearchAttributes searchAttributes\n 170: optional i32 delayStartSeconds\n 180: optional i32 jitterStartSeconds\n 190: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 200: optional CronOverlapPolicy cronOverlapPolicy\n 210: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct StartChildWorkflowExecutionFailedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional ChildWorkflowExecutionFailedCause cause\n 50: optional binary control\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ChildWorkflowExecutionStartedEventAttributes {\n 10: optional string domain\n 20: optional i64 (js.type = \"Long\") initiatedEventId\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional Header header\n}\n\nstruct ChildWorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType 
workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional WorkflowType workflowType\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionCanceledEventAttributes {\n 10: optional binary details\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTerminatedEventAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") initiatedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct HistoryEvent {\n 10: optional i64 (js.type = \"Long\") eventId\n 20: optional i64 (js.type = \"Long\") timestamp\n 30: optional EventType eventType\n 35: optional i64 (js.type = \"Long\") version\n 36: optional i64 (js.type = \"Long\") taskId\n 40: optional WorkflowExecutionStartedEventAttributes workflowExecutionStartedEventAttributes\n 50: optional WorkflowExecutionCompletedEventAttributes workflowExecutionCompletedEventAttributes\n 60: optional WorkflowExecutionFailedEventAttributes workflowExecutionFailedEventAttributes\n 70: optional WorkflowExecutionTimedOutEventAttributes workflowExecutionTimedOutEventAttributes\n 80: optional DecisionTaskScheduledEventAttributes decisionTaskScheduledEventAttributes\n 90: optional DecisionTaskStartedEventAttributes decisionTaskStartedEventAttributes\n 100: optional DecisionTaskCompletedEventAttributes decisionTaskCompletedEventAttributes\n 110: optional DecisionTaskTimedOutEventAttributes decisionTaskTimedOutEventAttributes\n 120: optional DecisionTaskFailedEventAttributes decisionTaskFailedEventAttributes\n 130: optional ActivityTaskScheduledEventAttributes activityTaskScheduledEventAttributes\n 140: optional ActivityTaskStartedEventAttributes activityTaskStartedEventAttributes\n 150: optional ActivityTaskCompletedEventAttributes activityTaskCompletedEventAttributes\n 160: optional ActivityTaskFailedEventAttributes activityTaskFailedEventAttributes\n 170: optional ActivityTaskTimedOutEventAttributes activityTaskTimedOutEventAttributes\n 180: optional TimerStartedEventAttributes timerStartedEventAttributes\n 190: optional TimerFiredEventAttributes timerFiredEventAttributes\n 200: optional ActivityTaskCancelRequestedEventAttributes activityTaskCancelRequestedEventAttributes\n 210: optional RequestCancelActivityTaskFailedEventAttributes requestCancelActivityTaskFailedEventAttributes\n 220: optional ActivityTaskCanceledEventAttributes activityTaskCanceledEventAttributes\n 230: optional TimerCanceledEventAttributes timerCanceledEventAttributes\n 240: optional CancelTimerFailedEventAttributes cancelTimerFailedEventAttributes\n 250: optional 
MarkerRecordedEventAttributes markerRecordedEventAttributes\n 260: optional WorkflowExecutionSignaledEventAttributes workflowExecutionSignaledEventAttributes\n 270: optional WorkflowExecutionTerminatedEventAttributes workflowExecutionTerminatedEventAttributes\n 280: optional WorkflowExecutionCancelRequestedEventAttributes workflowExecutionCancelRequestedEventAttributes\n 290: optional WorkflowExecutionCanceledEventAttributes workflowExecutionCanceledEventAttributes\n 300: optional RequestCancelExternalWorkflowExecutionInitiatedEventAttributes requestCancelExternalWorkflowExecutionInitiatedEventAttributes\n 310: optional RequestCancelExternalWorkflowExecutionFailedEventAttributes requestCancelExternalWorkflowExecutionFailedEventAttributes\n 320: optional ExternalWorkflowExecutionCancelRequestedEventAttributes externalWorkflowExecutionCancelRequestedEventAttributes\n 330: optional WorkflowExecutionContinuedAsNewEventAttributes workflowExecutionContinuedAsNewEventAttributes\n 340: optional StartChildWorkflowExecutionInitiatedEventAttributes startChildWorkflowExecutionInitiatedEventAttributes\n 350: optional StartChildWorkflowExecutionFailedEventAttributes startChildWorkflowExecutionFailedEventAttributes\n 360: optional ChildWorkflowExecutionStartedEventAttributes childWorkflowExecutionStartedEventAttributes\n 370: optional ChildWorkflowExecutionCompletedEventAttributes childWorkflowExecutionCompletedEventAttributes\n 380: optional ChildWorkflowExecutionFailedEventAttributes childWorkflowExecutionFailedEventAttributes\n 390: optional ChildWorkflowExecutionCanceledEventAttributes childWorkflowExecutionCanceledEventAttributes\n 400: optional ChildWorkflowExecutionTimedOutEventAttributes childWorkflowExecutionTimedOutEventAttributes\n 410: optional ChildWorkflowExecutionTerminatedEventAttributes childWorkflowExecutionTerminatedEventAttributes\n 420: optional SignalExternalWorkflowExecutionInitiatedEventAttributes signalExternalWorkflowExecutionInitiatedEventAttributes\n 430: optional SignalExternalWorkflowExecutionFailedEventAttributes signalExternalWorkflowExecutionFailedEventAttributes\n 440: optional ExternalWorkflowExecutionSignaledEventAttributes externalWorkflowExecutionSignaledEventAttributes\n 450: optional UpsertWorkflowSearchAttributesEventAttributes upsertWorkflowSearchAttributesEventAttributes\n}\n\nstruct History {\n 10: optional list events\n}\n\nstruct WorkflowExecutionFilter {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct WorkflowTypeFilter {\n 10: optional string name\n}\n\nstruct StartTimeFilter {\n 10: optional i64 (js.type = \"Long\") earliestTime\n 20: optional i64 (js.type = \"Long\") latestTime\n}\n\nstruct DomainInfo {\n 10: optional string name\n 20: optional DomainStatus status\n 30: optional string description\n 40: optional string ownerEmail\n // A key-value map for any customized purpose\n 50: optional map data\n 60: optional string uuid\n}\n\nstruct DomainConfiguration {\n 10: optional i32 workflowExecutionRetentionPeriodInDays\n 20: optional bool emitMetric\n 60: optional IsolationGroupConfiguration isolationgroups\n 70: optional BadBinaries badBinaries\n 80: optional ArchivalStatus historyArchivalStatus\n 90: optional string historyArchivalURI\n 100: optional ArchivalStatus visibilityArchivalStatus\n 110: optional string visibilityArchivalURI\n 120: optional AsyncWorkflowConfiguration AsyncWorkflowConfiguration\n}\n\nstruct FailoverInfo {\n 10: optional i64 (js.type = \"Long\") failoverVersion\n 20: optional i64 (js.type = \"Long\") 
failoverStartTimestamp\n 30: optional i64 (js.type = \"Long\") failoverExpireTimestamp\n 40: optional i32 completedShardCount\n 50: optional list pendingShards\n}\n\nstruct BadBinaries{\n 10: optional map binaries\n}\n\nstruct BadBinaryInfo{\n 10: optional string reason\n 20: optional string operator\n 30: optional i64 (js.type = \"Long\") createdTimeNano\n}\n\nstruct UpdateDomainInfo {\n 10: optional string description\n 20: optional string ownerEmail\n // A key-value map for any customized purpose\n 30: optional map data\n}\n\nstruct ClusterReplicationConfiguration {\n 10: optional string clusterName\n}\n\nstruct DomainReplicationConfiguration {\n // activeClusterName is the name of the active cluster for active-passive domain\n 10: optional string activeClusterName\n\n // clusters is list of all active and passive clusters of domain\n 20: optional list clusters\n\n // activeClusters contains active cluster(s) information for active-active domain\n 30: optional ActiveClusters activeClusters\n}\n\n// ClusterAttributeScope is a mapping of the cluster attribute to the scope's\n// current state and failover version, indicating how recently the change was made\nstruct ClusterAttributeScope {\n 10: optional map clusterAttributes;\n}\n\n// activeClustersByClusterAttribute is a map from whatever subdivision of the domain is chosen\n// to active cluster info for active-active domains. The key refers to the type of\n// cluster attribute and the value refers to its cluster mappings.\n// \n// For example, a request to update the domain for two locations\n// \n// UpdateDomainRequest{\n// ReplicationConfiguration: {\n// ActiveClusters: {\n// ActiveClustersByClusterAttribute: {\n// \"location\": ClusterAttributeScope{\n// \"Tokyo\": {ActiveClusterInfo: \"cluster0\", FailoverVersion: 123}, \n// \"Morocco\": {ActiveClusterInfo: \"cluster1\", FailoverVersion: 100}, \n// }\n// }\n// }\n// }\n// }\nstruct ActiveClusters {\n 10: optional map activeClustersByRegion // todo (david.porter) remove this as it's no longer used\n 11: optional map activeClustersByClusterAttribute\n}\n\n// ActiveClusterInfo contains the configuration of active-active domain's active\n// cluster & failover version for a specific region\nstruct ActiveClusterInfo {\n 10: optional string activeClusterName\n 20: optional i64 (js.type = \"Long\") failoverVersion\n}\n\nstruct RegisterDomainRequest {\n 10: optional string name\n 20: optional string description\n 30: optional string ownerEmail\n 40: optional i32 workflowExecutionRetentionPeriodInDays\n 50: optional bool emitMetric = true\n 60: optional list clusters\n 70: optional string activeClusterName\n // todo (david.porter) remove this field as it's not going to be used\n 75: optional map activeClustersByRegion\n // activeClusters is a map of cluster-attribute name to active cluster name for active-active domain\n 76: optional ActiveClusters activeClusters\n // A key-value map for any customized purpose\n 80: optional map data\n 90: optional string securityToken\n 120: optional bool isGlobalDomain\n 130: optional ArchivalStatus historyArchivalStatus\n 140: optional string historyArchivalURI\n 150: optional ArchivalStatus visibilityArchivalStatus\n 160: optional string visibilityArchivalURI\n}\n\nstruct ListDomainsRequest {\n 10: optional i32 pageSize\n 20: optional binary nextPageToken\n}\n\nstruct ListDomainsResponse {\n 10: optional list domains\n 20: optional binary nextPageToken\n}\n\nstruct DescribeDomainRequest {\n 10: optional string name\n 20: optional string uuid\n}\n\nstruct 
DescribeDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n 60: optional FailoverInfo failoverInfo\n}\n\nstruct UpdateDomainRequest {\n 10: optional string name\n 20: optional UpdateDomainInfo updatedInfo\n 30: optional DomainConfiguration configuration\n 40: optional DomainReplicationConfiguration replicationConfiguration\n 50: optional string securityToken\n 60: optional string deleteBadBinary\n 70: optional i32 failoverTimeoutInSeconds\n}\n\nstruct UpdateDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct FailoverDomainRequest {\n 10: optional string domainName\n 20: optional string domainActiveClusterName\n // only applicable to active-active domains where \n // specific cluster-attributes are being failed over\n 30: optional ActiveClusters activeClusters\n}\n\nstruct FailoverDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct DeprecateDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct DeleteDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n// 110: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Memo memo\n 141: optional SearchAttributes searchAttributes\n 150: optional Header header\n 160: optional i32 delayStartSeconds\n 170: optional i32 jitterStartSeconds\n 180: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 190: optional CronOverlapPolicy cronOverlapPolicy\n 200: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct StartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct StartWorkflowExecutionAsyncRequest {\n 10: optional StartWorkflowExecutionRequest request\n}\n\nstruct StartWorkflowExecutionAsyncResponse {\n}\n\nstruct RestartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct DiagnoseWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n}\n\nstruct DiagnoseWorkflowExecutionResponse {\n 10: optional string domain\n 20: optional WorkflowExecution diagnosticWorkflowExecution\n}\n\nstruct PollForDecisionTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional string binaryChecksum\n}\n\nstruct PollForDecisionTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution 
workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") previousStartedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n 51: optional i64 (js.type = 'Long') attempt\n 54: optional i64 (js.type = \"Long\") backlogCountHint\n 60: optional History history\n 70: optional binary nextPageToken\n 80: optional WorkflowQuery query\n 90: optional TaskList WorkflowExecutionTaskList\n 100: optional i64 (js.type = \"Long\") scheduledTimestamp\n 110: optional i64 (js.type = \"Long\") startedTimestamp\n 120: optional map queries\n 130: optional i64 (js.type = 'Long') nextEventId\n 140: optional i64 (js.type = 'Long') totalHistoryBytes\n 150: optional AutoConfigHint autoConfigHint\n}\n\nstruct StickyExecutionAttributes {\n 10: optional TaskList workerTaskList\n 20: optional i32 scheduleToStartTimeoutSeconds\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional list decisions\n 30: optional binary executionContext\n 40: optional string identity\n 50: optional StickyExecutionAttributes stickyAttributes\n 60: optional bool returnNewDecisionTask\n 70: optional bool forceCreateNewDecisionTask\n 80: optional string binaryChecksum\n 90: optional map queryResults\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional PollForDecisionTaskResponse decisionTask\n 20: optional map activitiesToDispatchLocally\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional DecisionTaskFailedCause cause\n 30: optional binary details\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct PollForActivityTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional TaskListMetadata taskListMetadata\n}\n\nstruct PollForActivityTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional string activityId\n 40: optional ActivityType activityType\n 50: optional binary input\n 70: optional i64 (js.type = \"Long\") scheduledTimestamp\n 80: optional i32 scheduleToCloseTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") startedTimestamp\n 100: optional i32 startToCloseTimeoutSeconds\n 110: optional i32 heartbeatTimeoutSeconds\n 120: optional i32 attempt\n 130: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 140: optional binary heartbeatDetails\n 150: optional WorkflowType workflowType\n 160: optional string workflowDomain\n 170: optional Header header\n 180: optional AutoConfigHint autoConfigHint\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatResponse {\n 10: optional bool cancelRequested\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional binary result\n 30: optional string identity\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional string reason\n 30: optional binary details\n 40: optional string identity\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct 
RespondActivityTaskCompletedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary result\n 60: optional string identity\n}\n\nstruct RespondActivityTaskFailedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional string reason\n 60: optional binary details\n 70: optional string identity\n}\n\nstruct RespondActivityTaskCanceledByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n 40: optional string requestId\n 50: optional string cause\n 60: optional string firstExecutionRunID\n}\n\nstruct GetWorkflowExecutionHistoryRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional i32 maximumPageSize\n 40: optional binary nextPageToken\n 50: optional bool waitForNewEvent\n 60: optional HistoryEventFilterType HistoryEventFilterType\n 70: optional bool skipArchival\n 80: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct GetWorkflowExecutionHistoryResponse {\n 10: optional History history\n 11: optional list rawHistory\n 20: optional binary nextPageToken\n 30: optional bool archived\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string signalName\n 40: optional binary input\n 50: optional string identity\n 60: optional string requestId\n 70: optional binary control\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional string signalName\n 120: optional binary signalInput\n 130: optional binary control\n 140: optional RetryPolicy retryPolicy\n 150: optional string cronSchedule\n 160: optional Memo memo\n 161: optional SearchAttributes searchAttributes\n 170: optional Header header\n 180: optional i32 delayStartSeconds\n 190: optional i32 jitterStartSeconds\n 200: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 210: optional CronOverlapPolicy cronOverlapPolicy\n 220: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct SignalWithStartWorkflowExecutionAsyncRequest {\n 10: optional SignalWithStartWorkflowExecutionRequest request\n}\n\nstruct SignalWithStartWorkflowExecutionAsyncResponse {\n}\n\nstruct RestartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional string identity\n}\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional binary details\n 50: optional string identity\n 60: optional string firstExecutionRunID\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional 
WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional i64 (js.type = \"Long\") decisionFinishEventId\n 50: optional string requestId\n 60: optional bool skipSignalReapply\n}\n\nstruct ResetWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct ListOpenWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n}\n\nstruct ListOpenWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListClosedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n 70: optional WorkflowExecutionCloseStatus statusFilter\n}\n\nstruct ListClosedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListArchivedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListArchivedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct CountWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional string query\n}\n\nstruct CountWorkflowExecutionsResponse {\n 10: optional i64 count\n}\n\nstruct GetSearchAttributesResponse {\n 10: optional map keys\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional WorkflowQuery query\n // QueryRejectCondition can be used to reject the query if workflow state does not satisfy the condition\n 40: optional QueryRejectCondition queryRejectCondition\n 50: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct QueryRejected {\n 10: optional WorkflowExecutionCloseStatus closeStatus\n}\n\nstruct QueryWorkflowResponse {\n 10: optional binary queryResult\n 20: optional QueryRejected queryRejected\n}\n\nstruct WorkflowQuery {\n 10: optional string queryType\n 20: optional binary queryArgs\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondQueryTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional QueryTaskCompletedType completedType\n 30: optional binary queryResult\n 40: optional string errorMessage\n 50: optional WorkerVersionInfo workerVersionInfo\n}\n\nstruct WorkflowQueryResult {\n 10: optional QueryResultType resultType\n 20: optional binary answer\n 30: optional string errorMessage\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct PendingActivityInfo {\n 10: optional string activityID\n 20: optional ActivityType 
activityType\n 30: optional PendingActivityState state\n 40: optional binary heartbeatDetails\n 50: optional i64 (js.type = \"Long\") lastHeartbeatTimestamp\n 60: optional i64 (js.type = \"Long\") lastStartedTimestamp\n 70: optional i32 attempt\n 80: optional i32 maximumAttempts\n 90: optional i64 (js.type = \"Long\") scheduledTimestamp\n 100: optional i64 (js.type = \"Long\") expirationTimestamp\n 110: optional string lastFailureReason\n 120: optional string lastWorkerIdentity\n 130: optional binary lastFailureDetails\n 140: optional string startedWorkerIdentity\n 150: optional i64 (js.type = \"Long\") scheduleID\n}\n\nstruct PendingDecisionInfo {\n 10: optional PendingDecisionState state\n 20: optional i64 (js.type = \"Long\") scheduledTimestamp\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 attempt\n 50: optional i64 (js.type = \"Long\") originalScheduledTimestamp\n 60: optional i64 (js.type = \"Long\") scheduleID\n}\n\nstruct PendingChildExecutionInfo {\n 1: optional string domain\n 10: optional string workflowID\n 20: optional string runID\n 30: optional string workflowTypName\n 40: optional i64 (js.type = \"Long\") initiatedID\n 50: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct DescribeWorkflowExecutionResponse {\n 10: optional WorkflowExecutionConfiguration executionConfiguration\n 20: optional WorkflowExecutionInfo workflowExecutionInfo\n 30: optional list pendingActivities\n 40: optional list pendingChildren\n 50: optional PendingDecisionInfo pendingDecision\n}\n\nstruct DescribeTaskListRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional TaskListType taskListType\n 40: optional bool includeTaskListStatus\n}\n\nstruct DescribeTaskListResponse {\n 10: optional list pollers\n 20: optional TaskListStatus taskListStatus\n // The TaskList being described\n 30: optional TaskList taskList\n}\n\nstruct GetTaskListsByDomainRequest {\n 10: optional string domainName\n}\n\nstruct GetTaskListsByDomainResponse {\n 10: optional map decisionTaskListMap\n 20: optional map activityTaskListMap\n}\n\nstruct ListTaskListPartitionsRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n}\n\nstruct TaskListPartitionMetadata {\n 10: optional string key\n 20: optional string ownerHostName\n}\n\nstruct ListTaskListPartitionsResponse {\n 10: optional list activityTaskListPartitions\n 20: optional list decisionTaskListPartitions\n}\n\nstruct IsolationGroupMetrics {\n 10: optional double newTasksPerSecond\n 20: optional i64 (js.type = \"Long\") pollerCount\n}\n\nstruct TaskListStatus {\n 10: optional i64 (js.type = \"Long\") backlogCountHint\n 20: optional i64 (js.type = \"Long\") readLevel\n 30: optional i64 (js.type = \"Long\") ackLevel\n 35: optional double ratePerSecond\n 40: optional TaskIDBlock taskIDBlock\n 50: optional map isolationGroupMetrics\n 60: optional double newTasksPerSecond\n 70: optional bool empty\n}\n\nstruct TaskIDBlock {\n 10: optional i64 (js.type = \"Long\") startID\n 20: optional i64 (js.type = \"Long\") endID\n}\n\n//At least one of the parameters needs to be provided\nstruct DescribeHistoryHostRequest {\n 10: optional string hostAddress //ip:port\n 20: optional i32 shardIdForHost\n 30: optional WorkflowExecution executionForHost\n}\n\nstruct RemoveTaskRequest {\n 10: optional i32 shardID\n 20: optional i32 type\n 30: optional i64 (js.type = \"Long\") taskID\n 40: optional i64 (js.type = \"Long\") visibilityTimestamp\n 50: optional string clusterName\n}\n\nstruct CloseShardRequest {\n 10: 
optional i32 shardID\n}\n\nstruct ResetQueueRequest {\n 10: optional i32 shardID\n 20: optional string clusterName\n 30: optional i32 type\n}\n\nstruct DescribeQueueRequest {\n 10: optional i32 shardID\n 20: optional string clusterName\n 30: optional i32 type\n}\n\nstruct DescribeQueueResponse {\n 10: optional list processingQueueStates\n}\n\nstruct DescribeShardDistributionRequest {\n 10: optional i32 pageSize\n 20: optional i32 pageID\n}\n\nstruct DescribeShardDistributionResponse {\n 10: optional i32 numberOfShards\n\n // ShardID to Address (ip:port) map\n 20: optional map shards\n}\n\nstruct DescribeHistoryHostResponse{\n 10: optional i32 numberOfShards\n 20: optional list shardIDs\n 30: optional DomainCacheInfo domainCache\n 40: optional string shardControllerStatus\n 50: optional string address\n}\n\nstruct DomainCacheInfo{\n 10: optional i64 numOfItemsInCacheByID\n 20: optional i64 numOfItemsInCacheByName\n}\n\nenum TaskListType {\n /*\n * Decision type of tasklist\n */\n Decision,\n /*\n * Activity type of tasklist\n */\n Activity,\n}\n\nstruct PollerInfo {\n // Unix Nano\n 10: optional i64 (js.type = \"Long\") lastAccessTime\n 20: optional string identity\n 30: optional double ratePerSecond\n}\n\nstruct RetryPolicy {\n // Interval of the first retry. If coefficient is 1.0 then it is used for all retries.\n 10: optional i32 initialIntervalInSeconds\n\n // Coefficient used to calculate the next retry interval.\n // The next retry interval is previous interval multiplied by the coefficient.\n // Must be 1 or larger.\n 20: optional double backoffCoefficient\n\n // Maximum interval between retries. Exponential backoff leads to interval increase.\n // This value is the cap of the increase. Default is 100x of initial interval.\n 30: optional i32 maximumIntervalInSeconds\n\n // Maximum number of attempts. When exceeded the retries stop even if not expired yet.\n // Must be 1 or bigger. Default is unlimited.\n 40: optional i32 maximumAttempts\n\n // Non-Retriable errors. 
Will stop retrying if error matches this list.\n 50: optional list nonRetriableErrorReasons\n\n // Expiration time for the whole retry process.\n 60: optional i32 expirationIntervalInSeconds\n}\n\n// HistoryBranchRange represents a piece of range for a branch.\nstruct HistoryBranchRange{\n // branchID of original branch forked from\n 10: optional string branchID\n // beginning node for the range, inclusive\n 20: optional i64 beginNodeID\n // ending node for the range, exclusive\n 30: optional i64 endNodeID\n}\n\n// For history persistence to serialize/deserialize branch details\nstruct HistoryBranch{\n 10: optional string treeID\n 20: optional string branchID\n 30: optional list ancestors\n}\n\n// VersionHistoryItem contains signal eventID and the corresponding version\nstruct VersionHistoryItem{\n 10: optional i64 (js.type = \"Long\") eventID\n 20: optional i64 (js.type = \"Long\") version\n}\n\n// VersionHistory contains the version history of a branch\nstruct VersionHistory{\n 10: optional binary branchToken\n 20: optional list items\n}\n\n// VersionHistories contains all version histories from all branches\nstruct VersionHistories{\n 10: optional i32 currentVersionHistoryIndex\n 20: optional list histories\n}\n\n// ReapplyEventsRequest is the request for reapply events API\nstruct ReapplyEventsRequest{\n 10: optional string domainName\n 20: optional WorkflowExecution workflowExecution\n 30: optional DataBlob events\n}\n\n// SupportedClientVersions contains the supported versions for the client library\nstruct SupportedClientVersions{\n 10: optional string goSdk\n 20: optional string javaSdk\n}\n\n// ClusterInfo contains information about cadence cluster\nstruct ClusterInfo{\n 10: optional SupportedClientVersions supportedClientVersions\n}\n\nstruct RefreshWorkflowTasksRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct FeatureFlags {\n\t10: optional bool WorkflowExecutionAlreadyCompletedErrorEnabled\n}\n\nenum CrossClusterTaskType {\n StartChildExecution\n CancelExecution\n SignalExecution\n RecordChildWorkflowExecutionComplete\n ApplyParentClosePolicy\n}\n\nenum CrossClusterTaskFailedCause {\n DOMAIN_NOT_ACTIVE\n DOMAIN_NOT_EXISTS\n WORKFLOW_ALREADY_RUNNING\n WORKFLOW_NOT_EXISTS\n WORKFLOW_ALREADY_COMPLETED\n UNCATEGORIZED\n}\n\nenum GetTaskFailedCause {\n SERVICE_BUSY\n TIMEOUT\n SHARD_OWNERSHIP_LOST\n UNCATEGORIZED\n}\n\nstruct CrossClusterTaskInfo {\n 10: optional string domainID\n 20: optional string workflowID\n 30: optional string runID\n 40: optional CrossClusterTaskType taskType\n 50: optional i16 taskState\n 60: optional i64 (js.type = \"Long\") taskID\n 70: optional i64 (js.type = \"Long\") visibilityTimestamp\n}\n\nstruct CrossClusterStartChildExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string requestID\n 30: optional i64 (js.type = \"Long\") initiatedEventID\n 40: optional StartChildWorkflowExecutionInitiatedEventAttributes initiatedEventAttributes\n // targetRunID is for scheduling first decision task\n // targetWorkflowID is available in initiatedEventAttributes\n 50: optional string targetRunID\n 60: optional map partitionConfig\n}\n\nstruct CrossClusterStartChildExecutionResponseAttributes {\n 10: optional string runID\n}\n\nstruct CrossClusterCancelExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional string requestID\n 50: optional i64 (js.type = \"Long\") initiatedEventID\n 60: optional bool 
childWorkflowOnly\n}\n\nstruct CrossClusterCancelExecutionResponseAttributes {\n}\n\nstruct CrossClusterSignalExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional string requestID\n 50: optional i64 (js.type = \"Long\") initiatedEventID\n 60: optional bool childWorkflowOnly\n 70: optional string signalName\n 80: optional binary signalInput\n 90: optional binary control\n}\n\nstruct CrossClusterSignalExecutionResponseAttributes {\n}\n\nstruct CrossClusterRecordChildWorkflowExecutionCompleteRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional i64 (js.type = \"Long\") initiatedEventID\n 50: optional HistoryEvent completionEvent\n}\n\nstruct CrossClusterRecordChildWorkflowExecutionCompleteResponseAttributes {\n}\n\nstruct ApplyParentClosePolicyAttributes {\n 10: optional string childDomainID\n 20: optional string childWorkflowID\n 30: optional string childRunID\n 40: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct ApplyParentClosePolicyStatus {\n 10: optional bool completed\n 20: optional CrossClusterTaskFailedCause failedCause\n}\n\nstruct ApplyParentClosePolicyRequest {\n 10: optional ApplyParentClosePolicyAttributes child\n 20: optional ApplyParentClosePolicyStatus status\n}\n\nstruct CrossClusterApplyParentClosePolicyRequestAttributes {\n 10: optional list children\n}\n\nstruct ApplyParentClosePolicyResult {\n 10: optional ApplyParentClosePolicyAttributes child\n 20: optional CrossClusterTaskFailedCause failedCause\n}\n\nstruct CrossClusterApplyParentClosePolicyResponseAttributes {\n 10: optional list childrenStatus\n}\n\nstruct CrossClusterTaskRequest {\n 10: optional CrossClusterTaskInfo taskInfo\n 20: optional CrossClusterStartChildExecutionRequestAttributes startChildExecutionAttributes\n 30: optional CrossClusterCancelExecutionRequestAttributes cancelExecutionAttributes\n 40: optional CrossClusterSignalExecutionRequestAttributes signalExecutionAttributes\n 50: optional CrossClusterRecordChildWorkflowExecutionCompleteRequestAttributes recordChildWorkflowExecutionCompleteAttributes\n 60: optional CrossClusterApplyParentClosePolicyRequestAttributes applyParentClosePolicyAttributes\n}\n\nstruct CrossClusterTaskResponse {\n 10: optional i64 (js.type = \"Long\") taskID\n 20: optional CrossClusterTaskType taskType\n 30: optional i16 taskState\n 40: optional CrossClusterTaskFailedCause failedCause\n 50: optional CrossClusterStartChildExecutionResponseAttributes startChildExecutionAttributes\n 60: optional CrossClusterCancelExecutionResponseAttributes cancelExecutionAttributes\n 70: optional CrossClusterSignalExecutionResponseAttributes signalExecutionAttributes\n 80: optional CrossClusterRecordChildWorkflowExecutionCompleteResponseAttributes recordChildWorkflowExecutionCompleteAttributes\n 90: optional CrossClusterApplyParentClosePolicyResponseAttributes applyParentClosePolicyAttributes\n}\n\nstruct GetCrossClusterTasksRequest {\n 10: optional list shardIDs\n 20: optional string targetCluster\n}\n\nstruct GetCrossClusterTasksResponse {\n 10: optional map> tasksByShard\n 20: optional map failedCauseByShard\n}\n\nstruct RespondCrossClusterTasksCompletedRequest {\n 10: optional i32 shardID\n 20: optional string targetCluster\n 30: optional list taskResponses\n 40: optional bool fetchNewTasks\n}\n\nstruct RespondCrossClusterTasksCompletedResponse {\n 10: optional list tasks\n}\n\nenum 
IsolationGroupState {\n INVALID,\n HEALTHY,\n DRAINED,\n}\n\nstruct IsolationGroupPartition {\n 10: optional string name\n 20: optional IsolationGroupState state\n}\n\nstruct IsolationGroupConfiguration {\n 10: optional list isolationGroups\n}\n\nstruct AsyncWorkflowConfiguration {\n 10: optional bool enabled\n // PredefinedQueueName is the name of the predefined queue in cadence server config's asyncWorkflowQueues\n 20: optional string predefinedQueueName\n // queueType is the type of the queue if predefined_queue_name is not used\n 30: optional string queueType\n // queueConfig is the configuration for the queue if predefined_queue_name is not used\n 40: optional DataBlob queueConfig\n}\n\n/**\n* Any is a logical duplicate of google.protobuf.Any.\n*\n* The intent of the type is the same, but it is not intended to be directly\n* compatible with google.protobuf.Any or any Thrift equivalent - this blob is\n* RPC-type agnostic by design (as the underlying data may be transported over\n* proto or thrift), and the data-bytes may be in any encoding.\n*\n* This is intentionally different from DataBlob, which supports only a handful\n* of known encodings so it can be interpreted everywhere. Any supports literally\n* any contents, and needs to be considered opaque until it is given to something\n* that is expecting it.\n*\n* See ValueType to interpret the contents.\n**/\nstruct Any {\n // Type-string describing value's contents, and intentionally avoiding the\n // name \"type\" as it is often a special term.\n // This should usually be a hard-coded string of some kind.\n 10: optional string ValueType\n // Arbitrarily-encoded bytes, to be deserialized by a runtime implementation.\n // The contents are described by ValueType.\n 20: optional binary Value\n}\n\nstruct AutoConfigHint {\n 10: optional bool enableAutoConfig\n 20: optional i64 pollerWaitTimeInMs\n}\n\nstruct QueueState {\n 10: optional map virtualQueueStates\n 20: optional TaskKey exclusiveMaxReadLevel\n}\n\nstruct VirtualQueueState {\n 10: optional list virtualSliceStates\n}\n\nstruct VirtualSliceState {\n 10: optional TaskRange taskRange\n 20: optional Predicate predicate\n}\n\nstruct TaskRange {\n 10: optional TaskKey inclusiveMin\n 20: optional TaskKey exclusiveMax\n}\n\nstruct TaskKey {\n 10: optional i64 scheduledTimeNano\n 20: optional i64 taskID\n}\n\n// ActiveClusterSelectionPolicy is for active-active domains, it serves as a means to select\n// the active cluster, by specifying the attribute by which to divide the workflows\n// in that domain.\nstruct ActiveClusterSelectionPolicy {\n 1: optional ClusterAttribute clusterAttribute\n\n 10: optional ActiveClusterSelectionStrategy strategy // todo (david.porter) remove these as they're not used anymore\n 20: optional string stickyRegion // todo (david.porter) remove these as they're not used anymore\n 30: optional string externalEntityType // todo (david.porter) remove these as they're not used anymore\n 40: optional string externalEntityKey // todo (david.porter) remove these as they're not used anymore\n}\n\n// ClusterAttribute is used for subdividing workflows in a domain into their active\n// and passive clusters. 
Examples of this might be 'region' and 'cluster1' as\n// respective region and scope fields.\n// \n// for example, a workflow may specify this in it's start request:\n// \n// StartWorkflowRequest{\n// ActiveClusterSelectionPolicy: {\n// ClusterAttribute: {\n// Scope: \"cityID\",\n// Name: \"Lisbon\" \n// }\n// }\n// }\n// \n// and this means that this workflow will be associate with the domain's cluster attribute 'Lisbon',\n// be active in the cluster that has Lisbon active and \n// failover when that cluster-attribute is set to failover.\nstruct ClusterAttribute {\n 1: optional string scope\n 2: optional string name\n}\n\n// todo (david.porter) Remove this, as it's no longer needed\n// with the active/active configuration we have\nenum ActiveClusterSelectionStrategy {\n REGION_STICKY,\n EXTERNAL_ENTITY,\n}\n\nenum PredicateType {\n Universal,\n Empty,\n DomainID,\n}\n\nstruct UniversalPredicateAttributes {}\n\nstruct EmptyPredicateAttributes {}\n\nstruct DomainIDPredicateAttributes {\n 10: optional list domainIDs\n 20: optional bool isExclusive\n}\n\nstruct Predicate {\n 10: optional PredicateType predicateType\n 20: optional UniversalPredicateAttributes universalPredicateAttributes\n 30: optional EmptyPredicateAttributes emptyPredicateAttributes\n 40: optional DomainIDPredicateAttributes domainIDPredicateAttributes\n}\n" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
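The ClusterAttribute comment above gives the cityID/Lisbon example at the IDL level. For illustration only, here is a minimal Go sketch of the same payload built with the generated shared types; it assumes the usual thriftrw mapping of optional fields to pointer fields, uses a local strPtr helper that is not part of the client API, and is not itself part of this patch.

// Sketch only: mirrors the "cityID"/"Lisbon" example from the ClusterAttribute
// comment. Field names assume thriftrw's standard optional-string -> *string
// mapping; strPtr is a local helper, not a client API.
package main

import (
	"fmt"

	"go.uber.org/cadence/.gen/go/shared"
)

func strPtr(s string) *string { return &s }

func main() {
	req := &shared.StartWorkflowExecutionRequest{
		Domain:     strPtr("sample-domain"),
		WorkflowId: strPtr("lisbon-order-123"),
		// Other required start fields (workflowType, taskList, timeouts)
		// are omitted for brevity.
		ActiveClusterSelectionPolicy: &shared.ActiveClusterSelectionPolicy{
			ClusterAttribute: &shared.ClusterAttribute{
				Scope: strPtr("cityID"),
				Name:  strPtr("Lisbon"),
			},
		},
	}
	// Per the comment above, this run stays active wherever the "Lisbon"
	// attribute is active and fails over together with that attribute.
	fmt.Println(*req.WorkflowId)
}

Workflows started without a policy are presumably governed by the domain's default active cluster; compare the ClusterFailover comment later in this IDL about the default ActiveCluster.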
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence\n\nexception BadRequestError {\n 1: required string message\n}\n\nexception InternalServiceError {\n 1: required string message\n}\n\nexception InternalDataInconsistencyError {\n 1: required string message\n}\n\nexception DomainAlreadyExistsError {\n 1: required string message\n}\n\nexception WorkflowExecutionAlreadyStartedError {\n 10: optional string message\n 20: optional string startRequestId\n 30: optional string runId\n}\n\nexception WorkflowExecutionAlreadyCompletedError {\n 1: required string message\n}\n\nexception EntityNotExistsError {\n 1: required string message\n 2: optional string currentCluster\n 3: optional string activeCluster\n 4: required list activeClusters // todo(david.porter) remove as its disused\n}\n\nexception ServiceBusyError {\n 1: required string message\n 2: optional string reason\n}\n\nexception CancellationAlreadyRequestedError {\n 1: required string message\n}\n\nexception QueryFailedError {\n 1: required string message\n}\n\nexception DomainNotActiveError {\n 1: required string message\n 2: required string domainName\n 3: required string currentCluster\n 4: required string activeCluster\n 5: required list activeClusters // todo (david.porter) remove this field as it's disused\n}\n\nexception LimitExceededError {\n 1: required string message\n}\n\nexception AccessDeniedError {\n 1: required string message\n}\n\nexception RetryTaskV2Error {\n 1: required string message\n 2: optional string domainId\n 3: optional string workflowId\n 4: optional string runId\n 5: optional i64 (js.type = \"Long\") startEventId\n 6: optional i64 (js.type = \"Long\") startEventVersion\n 7: optional i64 (js.type = \"Long\") endEventId\n 8: optional i64 (js.type = \"Long\") endEventVersion\n}\n\nexception ClientVersionNotSupportedError {\n 1: required string featureVersion\n 2: required string clientImpl\n 3: required string supportedVersions\n}\n\nexception FeatureNotEnabledError {\n 1: required string featureFlag\n}\n\nexception CurrentBranchChangedError {\n 10: required string message\n 20: required binary currentBranchToken\n}\n\nexception RemoteSyncMatchedError {\n 10: required string message\n}\n\nexception StickyWorkerUnavailableError {\n 1: required string message\n}\n\nexception TaskListNotOwnedByHostError {\n 1: required string ownedByIdentity\n 2: required string myIdentity\n 3: required string tasklistName\n}\n\nenum WorkflowIdReusePolicy {\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running, and the last execution close state is in\n * [terminated, cancelled, timeouted, failed].\n */\n AllowDuplicateFailedOnly,\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running.\n */\n AllowDuplicate,\n /*\n * do not allow start a workflow execution using the same workflow ID at all\n */\n RejectDuplicate,\n /*\n * if a workflow is running using the same workflow ID, terminate it and start a new one\n */\n TerminateIfRunning,\n}\n\nenum DomainStatus {\n REGISTERED,\n DEPRECATED,\n DELETED,\n}\n\nenum TimeoutType {\n START_TO_CLOSE,\n SCHEDULE_TO_START,\n SCHEDULE_TO_CLOSE,\n HEARTBEAT,\n}\n\nenum ParentClosePolicy {\n\tABANDON,\n\tREQUEST_CANCEL,\n\tTERMINATE,\n}\n\n\n// whenever this 
list of decision is changed\n// do change the mutableStateBuilder.go\n// function shouldBufferEvent\n// to make sure wo do the correct event ordering\nenum DecisionType {\n ScheduleActivityTask,\n RequestCancelActivityTask,\n StartTimer,\n CompleteWorkflowExecution,\n FailWorkflowExecution,\n CancelTimer,\n CancelWorkflowExecution,\n RequestCancelExternalWorkflowExecution,\n RecordMarker,\n ContinueAsNewWorkflowExecution,\n StartChildWorkflowExecution,\n SignalExternalWorkflowExecution,\n UpsertWorkflowSearchAttributes,\n}\n\nenum EventType {\n WorkflowExecutionStarted,\n WorkflowExecutionCompleted,\n WorkflowExecutionFailed,\n WorkflowExecutionTimedOut,\n DecisionTaskScheduled,\n DecisionTaskStarted,\n DecisionTaskCompleted,\n DecisionTaskTimedOut\n DecisionTaskFailed,\n ActivityTaskScheduled,\n ActivityTaskStarted,\n ActivityTaskCompleted,\n ActivityTaskFailed,\n ActivityTaskTimedOut,\n ActivityTaskCancelRequested,\n RequestCancelActivityTaskFailed,\n ActivityTaskCanceled,\n TimerStarted,\n TimerFired,\n CancelTimerFailed,\n TimerCanceled,\n WorkflowExecutionCancelRequested,\n WorkflowExecutionCanceled,\n RequestCancelExternalWorkflowExecutionInitiated,\n RequestCancelExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionCancelRequested,\n MarkerRecorded,\n WorkflowExecutionSignaled,\n WorkflowExecutionTerminated,\n WorkflowExecutionContinuedAsNew,\n StartChildWorkflowExecutionInitiated,\n StartChildWorkflowExecutionFailed,\n ChildWorkflowExecutionStarted,\n ChildWorkflowExecutionCompleted,\n ChildWorkflowExecutionFailed,\n ChildWorkflowExecutionCanceled,\n ChildWorkflowExecutionTimedOut,\n ChildWorkflowExecutionTerminated,\n SignalExternalWorkflowExecutionInitiated,\n SignalExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionSignaled,\n UpsertWorkflowSearchAttributes,\n}\n\nenum DecisionTaskFailedCause {\n UNHANDLED_DECISION,\n BAD_SCHEDULE_ACTIVITY_ATTRIBUTES,\n BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES,\n BAD_START_TIMER_ATTRIBUTES,\n BAD_CANCEL_TIMER_ATTRIBUTES,\n BAD_RECORD_MARKER_ATTRIBUTES,\n BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CONTINUE_AS_NEW_ATTRIBUTES,\n START_TIMER_DUPLICATE_ID,\n RESET_STICKY_TASKLIST,\n WORKFLOW_WORKER_UNHANDLED_FAILURE,\n BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_START_CHILD_EXECUTION_ATTRIBUTES,\n FORCE_CLOSE_DECISION,\n FAILOVER_CLOSE_DECISION,\n BAD_SIGNAL_INPUT_SIZE,\n RESET_WORKFLOW,\n BAD_BINARY,\n SCHEDULE_ACTIVITY_DUPLICATE_ID,\n BAD_SEARCH_ATTRIBUTES,\n}\n\nenum DecisionTaskTimedOutCause {\n TIMEOUT,\n RESET,\n}\n\nenum CancelExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n WORKFLOW_ALREADY_COMPLETED,\n}\n\nenum SignalExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n WORKFLOW_ALREADY_COMPLETED,\n}\n\nenum ChildWorkflowExecutionFailedCause {\n WORKFLOW_ALREADY_RUNNING,\n}\n\n// TODO: when migrating to gRPC, add a running / none status,\n// currently, customer is using null / nil as an indication\n// that workflow is still running\nenum WorkflowExecutionCloseStatus {\n COMPLETED,\n FAILED,\n CANCELED,\n TERMINATED,\n CONTINUED_AS_NEW,\n TIMED_OUT,\n}\n\nenum QueryTaskCompletedType {\n COMPLETED,\n FAILED,\n}\n\nenum QueryResultType {\n ANSWERED,\n FAILED,\n}\n\nenum PendingActivityState {\n SCHEDULED,\n STARTED,\n CANCEL_REQUESTED,\n}\n\nenum PendingDecisionState {\n SCHEDULED,\n STARTED,\n}\n\nenum 
HistoryEventFilterType {\n ALL_EVENT,\n CLOSE_EVENT,\n}\n\nenum TaskListKind {\n NORMAL,\n STICKY,\n EPHEMERAL,\n}\n\nenum ArchivalStatus {\n DISABLED,\n ENABLED,\n}\n\nenum CronOverlapPolicy {\n SKIPPED,\n BUFFERONE,\n}\n\nenum IndexedValueType {\n STRING,\n KEYWORD,\n INT,\n DOUBLE,\n BOOL,\n DATETIME,\n}\n\nstruct Header {\n 10: optional map fields\n}\n\nstruct WorkflowType {\n 10: optional string name\n}\n\nstruct ActivityType {\n 10: optional string name\n}\n\nstruct TaskList {\n 10: optional string name\n 20: optional TaskListKind kind\n}\n\nenum EncodingType {\n ThriftRW,\n JSON,\n}\n\nenum QueryRejectCondition {\n // NOT_OPEN indicates that query should be rejected if workflow is not open\n NOT_OPEN\n // NOT_COMPLETED_CLEANLY indicates that query should be rejected if workflow did not complete cleanly\n NOT_COMPLETED_CLEANLY\n}\n\nenum QueryConsistencyLevel {\n // EVENTUAL indicates that query should be eventually consistent\n EVENTUAL\n // STRONG indicates that any events that came before query should be reflected in workflow state before running query\n STRONG\n}\n\nstruct DataBlob {\n 10: optional EncodingType EncodingType\n 20: optional binary Data\n}\n\nstruct TaskListMetadata {\n 10: optional double maxTasksPerSecond\n}\n\nstruct WorkflowExecution {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct Memo {\n 10: optional map fields\n}\n\nstruct SearchAttributes {\n 10: optional map indexedFields\n}\n\nstruct WorkerVersionInfo {\n 10: optional string impl\n 20: optional string featureVersion\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional WorkflowExecution execution\n 20: optional WorkflowType type\n 30: optional i64 (js.type = \"Long\") startTime\n 40: optional i64 (js.type = \"Long\") closeTime\n 50: optional WorkflowExecutionCloseStatus closeStatus\n 60: optional i64 (js.type = \"Long\") historyLength\n 70: optional string parentDomainId\n 71: optional string parentDomainName\n 72: optional i64 parentInitatedId\n 80: optional WorkflowExecution parentExecution\n 90: optional i64 (js.type = \"Long\") executionTime\n 100: optional Memo memo\n 101: optional SearchAttributes searchAttributes\n 110: optional ResetPoints autoResetPoints\n 120: optional string taskList\n 121: optional TaskList taskListInfo\n 130: optional bool isCron\n 140: optional i64 (js.type = \"Long\") updateTime\n 150: optional map partitionConfig\n 160: optional CronOverlapPolicy cronOverlapPolicy\n 170: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct WorkflowExecutionConfiguration {\n 10: optional TaskList taskList\n 20: optional i32 executionStartToCloseTimeoutSeconds\n 30: optional i32 taskStartToCloseTimeoutSeconds\n// 40: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n}\n\nstruct TransientDecisionInfo {\n 10: optional HistoryEvent scheduledEvent\n 20: optional HistoryEvent startedEvent\n}\n\nstruct ScheduleActivityTaskDecisionAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional Header header\n 90: optional bool requestLocalDispatch\n}\n\nstruct ActivityLocalDispatchInfo{\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") 
scheduledTimestamp\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 50: optional binary taskToken\n}\n\nstruct RequestCancelActivityTaskDecisionAttributes {\n 10: optional string activityId\n}\n\nstruct StartTimerDecisionAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n}\n\nstruct CompleteWorkflowExecutionDecisionAttributes {\n 10: optional binary result\n}\n\nstruct FailWorkflowExecutionDecisionAttributes {\n 10: optional string reason\n 20: optional binary details\n}\n\nstruct CancelTimerDecisionAttributes {\n 10: optional string timerId\n}\n\nstruct CancelWorkflowExecutionDecisionAttributes {\n 10: optional binary details\n}\n\nstruct RequestCancelExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional string runId\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional string signalName\n 40: optional binary input\n 50: optional binary control\n 60: optional bool childWorkflowOnly\n}\n\nstruct UpsertWorkflowSearchAttributesDecisionAttributes {\n 10: optional SearchAttributes searchAttributes\n}\n\nstruct RecordMarkerDecisionAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional Header header\n}\n\nstruct ContinueAsNewWorkflowExecutionDecisionAttributes {\n 10: optional WorkflowType workflowType\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n 60: optional i32 backoffStartIntervalInSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional ContinueAsNewInitiator initiator\n 90: optional string failureReason\n 100: optional binary failureDetails\n 110: optional binary lastCompletionResult\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n 160: optional i32 jitterStartSeconds\n 170: optional CronOverlapPolicy cronOverlapPolicy\n 180: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct StartChildWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional RetryPolicy retryPolicy\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n 160: optional CronOverlapPolicy cronOverlapPolicy\n 170: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct Decision {\n 10: optional DecisionType decisionType\n 20: optional ScheduleActivityTaskDecisionAttributes scheduleActivityTaskDecisionAttributes\n 25: optional StartTimerDecisionAttributes startTimerDecisionAttributes\n 30: optional CompleteWorkflowExecutionDecisionAttributes completeWorkflowExecutionDecisionAttributes\n 
35: optional FailWorkflowExecutionDecisionAttributes failWorkflowExecutionDecisionAttributes\n 40: optional RequestCancelActivityTaskDecisionAttributes requestCancelActivityTaskDecisionAttributes\n 50: optional CancelTimerDecisionAttributes cancelTimerDecisionAttributes\n 60: optional CancelWorkflowExecutionDecisionAttributes cancelWorkflowExecutionDecisionAttributes\n 70: optional RequestCancelExternalWorkflowExecutionDecisionAttributes requestCancelExternalWorkflowExecutionDecisionAttributes\n 80: optional RecordMarkerDecisionAttributes recordMarkerDecisionAttributes\n 90: optional ContinueAsNewWorkflowExecutionDecisionAttributes continueAsNewWorkflowExecutionDecisionAttributes\n 100: optional StartChildWorkflowExecutionDecisionAttributes startChildWorkflowExecutionDecisionAttributes\n 110: optional SignalExternalWorkflowExecutionDecisionAttributes signalExternalWorkflowExecutionDecisionAttributes\n 120: optional UpsertWorkflowSearchAttributesDecisionAttributes upsertWorkflowSearchAttributesDecisionAttributes\n}\n\nstruct WorkflowExecutionStartedEventAttributes {\n 10: optional WorkflowType workflowType\n 12: optional string parentWorkflowDomain\n 14: optional WorkflowExecution parentWorkflowExecution\n 16: optional i64 (js.type = \"Long\") parentInitiatedEventId\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n// 52: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 54: optional string continuedExecutionRunId\n 55: optional ContinueAsNewInitiator initiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 59: optional string originalExecutionRunId // This is the runID when the WorkflowExecutionStarted event is written\n 60: optional string identity\n 61: optional string firstExecutionRunId // This is the very first runID along the chain of ContinueAsNew and Reset.\n 62: optional i64 (js.type = \"Long\") firstScheduledTimeNano\n 70: optional RetryPolicy retryPolicy\n 80: optional i32 attempt\n 90: optional i64 (js.type = \"Long\") expirationTimestamp\n 100: optional string cronSchedule\n 110: optional i32 firstDecisionTaskBackoffSeconds\n 120: optional Memo memo\n 121: optional SearchAttributes searchAttributes\n 130: optional ResetPoints prevAutoResetPoints\n 140: optional Header header\n 150: optional map partitionConfig\n 160: optional string requestId\n 170: optional CronOverlapPolicy cronOverlapPolicy\n 180: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct ResetPoints{\n 10: optional list points\n}\n\n struct ResetPointInfo{\n 10: optional string binaryChecksum\n 20: optional string runId\n 30: optional i64 firstDecisionCompletedId\n 40: optional i64 (js.type = \"Long\") createdTimeNano\n 50: optional i64 (js.type = \"Long\") expiringTimeNano //the time that the run is deleted due to retention\n 60: optional bool resettable // false if the resset point has pending childWFs/reqCancels/signalExternals.\n}\n\nstruct WorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType 
timeoutType\n}\n\nenum ContinueAsNewInitiator {\n Decider,\n RetryPolicy,\n CronSchedule,\n}\n\nstruct WorkflowExecutionContinuedAsNewEventAttributes {\n 10: optional string newExecutionRunId\n 20: optional WorkflowType workflowType\n 30: optional TaskList taskList\n 40: optional binary input\n 50: optional i32 executionStartToCloseTimeoutSeconds\n 60: optional i32 taskStartToCloseTimeoutSeconds\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 80: optional i32 backoffStartIntervalInSeconds\n 90: optional ContinueAsNewInitiator initiator\n 100: optional string failureReason\n 110: optional binary failureDetails\n 120: optional binary lastCompletionResult\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n 160: optional CronOverlapPolicy cronOverlapPolicy\n 170: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct DecisionTaskScheduledEventAttributes {\n 10: optional TaskList taskList\n 20: optional i32 startToCloseTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") attempt\n}\n\nstruct DecisionTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n}\n\nstruct DecisionTaskCompletedEventAttributes {\n 10: optional binary executionContext\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct DecisionTaskTimedOutEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n // for reset workflow\n 40: optional string baseRunId\n 50: optional string newRunId\n 60: optional i64 (js.type = \"Long\") forkEventVersion\n 70: optional string reason\n 80: optional DecisionTaskTimedOutCause cause\n 90: optional string requestId\n}\n\nstruct DecisionTaskFailedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional DecisionTaskFailedCause cause\n 35: optional binary details\n 40: optional string identity\n 50: optional string reason\n // for reset workflow\n 60: optional string baseRunId\n 70: optional string newRunId\n 80: optional i64 (js.type = \"Long\") forkEventVersion\n 90: optional string binaryChecksum\n 100: optional string requestId\n}\n\nstruct ActivityTaskScheduledEventAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional RetryPolicy retryPolicy\n 120: optional Header header\n}\n\nstruct ActivityTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n 40: optional i32 attempt\n 50: optional string lastFailureReason\n 60: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string 
identity\n}\n\nstruct ActivityTaskFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct ActivityTaskTimedOutEventAttributes {\n 05: optional binary details\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n // For retry activity, it may have a failure before timeout. It's important to keep those information for debug.\n // Client can also provide the info for making next decision\n 40: optional string lastFailureReason\n 50: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCancelRequestedEventAttributes {\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct RequestCancelActivityTaskFailedEventAttributes{\n 10: optional string activityId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ActivityTaskCanceledEventAttributes {\n 10: optional binary details\n 20: optional i64 (js.type = \"Long\") latestCancelRequestedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct TimerStartedEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct TimerFiredEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct TimerCanceledEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct CancelTimerFailedEventAttributes {\n 10: optional string timerId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct WorkflowExecutionCancelRequestedEventAttributes {\n 10: optional string cause\n 20: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 30: optional WorkflowExecution externalWorkflowExecution\n 40: optional string identity\n 50: optional string requestId\n}\n\nstruct WorkflowExecutionCanceledEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional binary details\n}\n\nstruct MarkerRecordedEventAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional Header header\n}\n\nstruct WorkflowExecutionSignaledEventAttributes {\n 10: optional string signalName\n 20: optional binary input\n 30: optional string identity\n 40: optional string requestId\n}\n\nstruct WorkflowExecutionTerminatedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RequestCancelExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct RequestCancelExternalWorkflowExecutionFailedEventAttributes {\n 10: optional 
CancelExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionCancelRequestedEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n}\n\nstruct SignalExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional string signalName\n 50: optional binary input\n 60: optional binary control\n 70: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionFailedEventAttributes {\n 10: optional SignalExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionSignaledEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n}\n\nstruct UpsertWorkflowSearchAttributesEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionInitiatedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Header header\n 150: optional Memo memo\n 160: optional SearchAttributes searchAttributes\n 170: optional i32 delayStartSeconds\n 180: optional i32 jitterStartSeconds\n 190: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 200: optional CronOverlapPolicy cronOverlapPolicy\n 210: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct StartChildWorkflowExecutionFailedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional ChildWorkflowExecutionFailedCause cause\n 50: optional binary control\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ChildWorkflowExecutionStartedEventAttributes {\n 10: optional string domain\n 20: optional i64 (js.type = \"Long\") initiatedEventId\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional Header header\n}\n\nstruct ChildWorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType 
workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional WorkflowType workflowType\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionCanceledEventAttributes {\n 10: optional binary details\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTerminatedEventAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") initiatedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct HistoryEvent {\n 10: optional i64 (js.type = \"Long\") eventId\n 20: optional i64 (js.type = \"Long\") timestamp\n 30: optional EventType eventType\n 35: optional i64 (js.type = \"Long\") version\n 36: optional i64 (js.type = \"Long\") taskId\n 40: optional WorkflowExecutionStartedEventAttributes workflowExecutionStartedEventAttributes\n 50: optional WorkflowExecutionCompletedEventAttributes workflowExecutionCompletedEventAttributes\n 60: optional WorkflowExecutionFailedEventAttributes workflowExecutionFailedEventAttributes\n 70: optional WorkflowExecutionTimedOutEventAttributes workflowExecutionTimedOutEventAttributes\n 80: optional DecisionTaskScheduledEventAttributes decisionTaskScheduledEventAttributes\n 90: optional DecisionTaskStartedEventAttributes decisionTaskStartedEventAttributes\n 100: optional DecisionTaskCompletedEventAttributes decisionTaskCompletedEventAttributes\n 110: optional DecisionTaskTimedOutEventAttributes decisionTaskTimedOutEventAttributes\n 120: optional DecisionTaskFailedEventAttributes decisionTaskFailedEventAttributes\n 130: optional ActivityTaskScheduledEventAttributes activityTaskScheduledEventAttributes\n 140: optional ActivityTaskStartedEventAttributes activityTaskStartedEventAttributes\n 150: optional ActivityTaskCompletedEventAttributes activityTaskCompletedEventAttributes\n 160: optional ActivityTaskFailedEventAttributes activityTaskFailedEventAttributes\n 170: optional ActivityTaskTimedOutEventAttributes activityTaskTimedOutEventAttributes\n 180: optional TimerStartedEventAttributes timerStartedEventAttributes\n 190: optional TimerFiredEventAttributes timerFiredEventAttributes\n 200: optional ActivityTaskCancelRequestedEventAttributes activityTaskCancelRequestedEventAttributes\n 210: optional RequestCancelActivityTaskFailedEventAttributes requestCancelActivityTaskFailedEventAttributes\n 220: optional ActivityTaskCanceledEventAttributes activityTaskCanceledEventAttributes\n 230: optional TimerCanceledEventAttributes timerCanceledEventAttributes\n 240: optional CancelTimerFailedEventAttributes cancelTimerFailedEventAttributes\n 250: optional 
MarkerRecordedEventAttributes markerRecordedEventAttributes\n 260: optional WorkflowExecutionSignaledEventAttributes workflowExecutionSignaledEventAttributes\n 270: optional WorkflowExecutionTerminatedEventAttributes workflowExecutionTerminatedEventAttributes\n 280: optional WorkflowExecutionCancelRequestedEventAttributes workflowExecutionCancelRequestedEventAttributes\n 290: optional WorkflowExecutionCanceledEventAttributes workflowExecutionCanceledEventAttributes\n 300: optional RequestCancelExternalWorkflowExecutionInitiatedEventAttributes requestCancelExternalWorkflowExecutionInitiatedEventAttributes\n 310: optional RequestCancelExternalWorkflowExecutionFailedEventAttributes requestCancelExternalWorkflowExecutionFailedEventAttributes\n 320: optional ExternalWorkflowExecutionCancelRequestedEventAttributes externalWorkflowExecutionCancelRequestedEventAttributes\n 330: optional WorkflowExecutionContinuedAsNewEventAttributes workflowExecutionContinuedAsNewEventAttributes\n 340: optional StartChildWorkflowExecutionInitiatedEventAttributes startChildWorkflowExecutionInitiatedEventAttributes\n 350: optional StartChildWorkflowExecutionFailedEventAttributes startChildWorkflowExecutionFailedEventAttributes\n 360: optional ChildWorkflowExecutionStartedEventAttributes childWorkflowExecutionStartedEventAttributes\n 370: optional ChildWorkflowExecutionCompletedEventAttributes childWorkflowExecutionCompletedEventAttributes\n 380: optional ChildWorkflowExecutionFailedEventAttributes childWorkflowExecutionFailedEventAttributes\n 390: optional ChildWorkflowExecutionCanceledEventAttributes childWorkflowExecutionCanceledEventAttributes\n 400: optional ChildWorkflowExecutionTimedOutEventAttributes childWorkflowExecutionTimedOutEventAttributes\n 410: optional ChildWorkflowExecutionTerminatedEventAttributes childWorkflowExecutionTerminatedEventAttributes\n 420: optional SignalExternalWorkflowExecutionInitiatedEventAttributes signalExternalWorkflowExecutionInitiatedEventAttributes\n 430: optional SignalExternalWorkflowExecutionFailedEventAttributes signalExternalWorkflowExecutionFailedEventAttributes\n 440: optional ExternalWorkflowExecutionSignaledEventAttributes externalWorkflowExecutionSignaledEventAttributes\n 450: optional UpsertWorkflowSearchAttributesEventAttributes upsertWorkflowSearchAttributesEventAttributes\n}\n\nstruct History {\n 10: optional list events\n}\n\nstruct WorkflowExecutionFilter {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct WorkflowTypeFilter {\n 10: optional string name\n}\n\nstruct StartTimeFilter {\n 10: optional i64 (js.type = \"Long\") earliestTime\n 20: optional i64 (js.type = \"Long\") latestTime\n}\n\nstruct DomainInfo {\n 10: optional string name\n 20: optional DomainStatus status\n 30: optional string description\n 40: optional string ownerEmail\n // A key-value map for any customized purpose\n 50: optional map data\n 60: optional string uuid\n}\n\nstruct DomainConfiguration {\n 10: optional i32 workflowExecutionRetentionPeriodInDays\n 20: optional bool emitMetric\n 60: optional IsolationGroupConfiguration isolationgroups\n 70: optional BadBinaries badBinaries\n 80: optional ArchivalStatus historyArchivalStatus\n 90: optional string historyArchivalURI\n 100: optional ArchivalStatus visibilityArchivalStatus\n 110: optional string visibilityArchivalURI\n 120: optional AsyncWorkflowConfiguration AsyncWorkflowConfiguration\n}\n\nstruct FailoverInfo {\n 10: optional i64 (js.type = \"Long\") failoverVersion\n 20: optional i64 (js.type = \"Long\") 
failoverStartTimestamp\n 30: optional i64 (js.type = \"Long\") failoverExpireTimestamp\n 40: optional i32 completedShardCount\n 50: optional list pendingShards\n}\n\nstruct BadBinaries{\n 10: optional map binaries\n}\n\nstruct BadBinaryInfo{\n 10: optional string reason\n 20: optional string operator\n 30: optional i64 (js.type = \"Long\") createdTimeNano\n}\n\nstruct UpdateDomainInfo {\n 10: optional string description\n 20: optional string ownerEmail\n // A key-value map for any customized purpose\n 30: optional map data\n}\n\nstruct ClusterReplicationConfiguration {\n 10: optional string clusterName\n}\n\nstruct DomainReplicationConfiguration {\n // activeClusterName is the name of the active cluster for active-passive domain\n 10: optional string activeClusterName\n\n // clusters is list of all active and passive clusters of domain\n 20: optional list clusters\n\n // activeClusters contains active cluster(s) information for active-active domain\n 30: optional ActiveClusters activeClusters\n}\n\n// ClusterAttributeScope is a mapping of the cluster atribute to the scope's\n// current stae and failover version, indicating how recently the change was made\nstruct ClusterAttributeScope {\n 10: optional map clusterAttributes;\n}\n\n// activeClustersByClusterAttribute is a map of whatever subdivision of the domain chosen\n// to active cluster info for active-active domains. The key refers to the type of\n// cluster attribute and the value refers to its cluster mappings.\n//\n// For example, a request to update the domain for two locations\n//\n// UpdateDomainRequest{\n// ReplicationConfiguration: {\n// ActiveClusters: {\n// ActiveClustersByClusterAttribute: {\n// \"location\": ClusterAttributeScope{\n// \"Tokyo\": {ActiveClusterInfo: \"cluster0, FailoverVersion: 123},\n// \"Morocco\": {ActiveClusterInfo: \"cluster1\", FailoverVersion: 100},\n// }\n// }\n// }\n// }\n// }\nstruct ActiveClusters {\n // deprecated 10: optional map activeClustersByRegion\n 11: optional map activeClustersByClusterAttribute\n}\n\n// ActiveClusterInfo contains the configuration of active-active domain's active\n// cluster & failover version for a specific region\nstruct ActiveClusterInfo {\n 10: optional string activeClusterName\n 20: optional i64 (js.type = \"Long\") failoverVersion\n}\n\nstruct RegisterDomainRequest {\n 10: optional string name\n 20: optional string description\n 30: optional string ownerEmail\n 40: optional i32 workflowExecutionRetentionPeriodInDays\n 50: optional bool emitMetric = true\n 60: optional list clusters\n 70: optional string activeClusterName\n // deprecated 75: optional map activeClustersByRegion\n // activeClusters is a map of cluster-attribute name to active cluster name for active-active domain\n 76: optional ActiveClusters activeClusters\n // A key-value map for any customized purpose\n 80: optional map data\n 90: optional string securityToken\n 120: optional bool isGlobalDomain\n 130: optional ArchivalStatus historyArchivalStatus\n 140: optional string historyArchivalURI\n 150: optional ArchivalStatus visibilityArchivalStatus\n 160: optional string visibilityArchivalURI\n}\n\nstruct ListDomainsRequest {\n 10: optional i32 pageSize\n 20: optional binary nextPageToken\n}\n\nstruct ListDomainsResponse {\n 10: optional list domains\n 20: optional binary nextPageToken\n}\n\nstruct DescribeDomainRequest {\n 10: optional string name\n 20: optional string uuid\n}\n\nstruct DescribeDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: 
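The ActiveClustersByClusterAttribute comment above sketches the Tokyo/Morocco update in IDL pseudo-syntax. As a hedged illustration, the same request expressed with the generated shared types might look like the snippet below; the map and pointer shapes assume standard thriftrw codegen, and strPtr/int64Ptr are local helpers rather than client APIs.

// Sketch only: the "location" -> {Tokyo, Morocco} example from the
// ActiveClusters comment, expressed with the generated shared types.
// Shapes assume standard thriftrw codegen; helpers are local to the sketch.
package sample

import "go.uber.org/cadence/.gen/go/shared"

func strPtr(s string) *string { return &s }
func int64Ptr(i int64) *int64 { return &i }

// locationFailoverUpdate builds an UpdateDomainRequest that marks cluster0
// active for Tokyo and cluster1 active for Morocco under the "location"
// cluster attribute of an active-active domain.
func locationFailoverUpdate(domain string) *shared.UpdateDomainRequest {
	return &shared.UpdateDomainRequest{
		Name: strPtr(domain),
		ReplicationConfiguration: &shared.DomainReplicationConfiguration{
			ActiveClusters: &shared.ActiveClusters{
				ActiveClustersByClusterAttribute: map[string]*shared.ClusterAttributeScope{
					"location": {
						ClusterAttributes: map[string]*shared.ActiveClusterInfo{
							"Tokyo":   {ActiveClusterName: strPtr("cluster0"), FailoverVersion: int64Ptr(123)},
							"Morocco": {ActiveClusterName: strPtr("cluster1"), FailoverVersion: int64Ptr(100)},
						},
					},
				},
			},
		},
	}
}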
optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n 60: optional FailoverInfo failoverInfo\n}\n\nstruct UpdateDomainRequest {\n 10: optional string name\n 20: optional UpdateDomainInfo updatedInfo\n 30: optional DomainConfiguration configuration\n 40: optional DomainReplicationConfiguration replicationConfiguration\n 50: optional string securityToken\n 60: optional string deleteBadBinary\n 70: optional i32 failoverTimeoutInSeconds\n}\n\nstruct UpdateDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct FailoverDomainRequest {\n 10: optional string domainName\n 20: optional string domainActiveClusterName\n // only applicable to active-active domains where\n // specific cluster-attributes are being failed over\n 30: optional ActiveClusters activeClusters\n}\n\nstruct FailoverDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct DeprecateDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct DeleteDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct ListFailoverHistoryRequest {\n // ListFailoverHistoryRequestFilters specifies the filters to apply to the request.\n // If not provided all failover events will be returned.\n 10: optional ListFailoverHistoryRequestFilters filters\n // PaginationOptions will be used to paginate the results.\n // If not provided the first 5 events will be returned.\n 20: optional PaginationOptions pagination\n}\n\n// ListFailoverHistoryRequestFilters is used to filter the failover history.\n// It will be extended with additional filters (e.g ClusterAttributes) as the active-active feature is developed.\nstruct ListFailoverHistoryRequestFilters {\n // domain_id is the id of the domain to list failover history for.\n 10: optional string domainID\n}\n\nstruct ListFailoverHistoryResponse {\n 10: optional list failoverEvents\n // next_page_token can be passed in a subsequent request to fetch the next set of events.\n 20: optional binary nextPageToken\n}\n\nstruct FailoverEvent {\n // id of the failover event\n // Can be passed with the created time to fetch a specific event.\n 10: optional string id\n // created_time is the time the failover event was created.\n // Can be passed with the ID to fetch a specific event.\n 20: optional i64 (js.type = \"Long\") createdTime\n 30: optional FailoverType failoverType\n 40: optional list clusterFailovers\n}\n\nstruct ClusterFailover {\n 10: optional ActiveClusterInfo fromCluster\n 20: optional ActiveClusterInfo toCluster\n // cluster_attribute is the scope and name for the attribute that was failed over.\n // If the cluster_attribute is not defined this failover can be assumed to be the default ActiveCluster.\n 30: optional ClusterAttribute clusterAttribute\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 
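ListFailoverHistory is the new API this change introduces, and the structs above describe its request, response, and event shapes. A hedged Go sketch of calling it through the generated service client follows; the method and field names assume the standard thriftrw/yarpc codegen pattern used for the other WorkflowService calls, and listDomainFailovers plus strPtr are illustrative helpers, not part of the patch.

// Sketch only: listing failover events for one domain via the new
// ListFailoverHistory call. Names assume the usual generated-client pattern;
// with no PaginationOptions the first 5 events are returned (per the comment
// above), and NextPageToken can be passed back to fetch more.
package sample

import (
	"context"
	"fmt"

	"go.uber.org/cadence/.gen/go/cadence/workflowserviceclient"
	"go.uber.org/cadence/.gen/go/shared"
)

func strPtr(s string) *string { return &s }

func listDomainFailovers(ctx context.Context, svc workflowserviceclient.Interface, domainID string) error {
	resp, err := svc.ListFailoverHistory(ctx, &shared.ListFailoverHistoryRequest{
		Filters: &shared.ListFailoverHistoryRequestFilters{DomainID: strPtr(domainID)},
	})
	if err != nil {
		return err
	}
	for _, ev := range resp.FailoverEvents {
		for _, cf := range ev.ClusterFailovers {
			// A ClusterFailover without a ClusterAttribute is a failover of
			// the domain's default ActiveCluster (per the comment above).
			fmt.Printf("failover at %d: %s -> %s (attribute: %v)\n",
				ev.GetCreatedTime(),
				cf.GetFromCluster().GetActiveClusterName(),
				cf.GetToCluster().GetActiveClusterName(),
				cf.ClusterAttribute)
		}
	}
	return nil
}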
executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n// 110: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Memo memo\n 141: optional SearchAttributes searchAttributes\n 150: optional Header header\n 160: optional i32 delayStartSeconds\n 170: optional i32 jitterStartSeconds\n 180: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 190: optional CronOverlapPolicy cronOverlapPolicy\n 200: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct StartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct StartWorkflowExecutionAsyncRequest {\n 10: optional StartWorkflowExecutionRequest request\n}\n\nstruct StartWorkflowExecutionAsyncResponse {\n}\n\nstruct RestartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct DiagnoseWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n}\n\nstruct DiagnoseWorkflowExecutionResponse {\n 10: optional string domain\n 20: optional WorkflowExecution diagnosticWorkflowExecution\n}\n\nstruct PollForDecisionTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional string binaryChecksum\n}\n\nstruct PollForDecisionTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") previousStartedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n 51: optional i64 (js.type = 'Long') attempt\n 54: optional i64 (js.type = \"Long\") backlogCountHint\n 60: optional History history\n 70: optional binary nextPageToken\n 80: optional WorkflowQuery query\n 90: optional TaskList WorkflowExecutionTaskList\n 100: optional i64 (js.type = \"Long\") scheduledTimestamp\n 110: optional i64 (js.type = \"Long\") startedTimestamp\n 120: optional map queries\n 130: optional i64 (js.type = 'Long') nextEventId\n 140: optional i64 (js.type = 'Long') totalHistoryBytes\n 150: optional AutoConfigHint autoConfigHint\n}\n\nstruct StickyExecutionAttributes {\n 10: optional TaskList workerTaskList\n 20: optional i32 scheduleToStartTimeoutSeconds\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional list decisions\n 30: optional binary executionContext\n 40: optional string identity\n 50: optional StickyExecutionAttributes stickyAttributes\n 60: optional bool returnNewDecisionTask\n 70: optional bool forceCreateNewDecisionTask\n 80: optional string binaryChecksum\n 90: optional map queryResults\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional PollForDecisionTaskResponse decisionTask\n 20: optional map activitiesToDispatchLocally\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional DecisionTaskFailedCause cause\n 30: optional binary details\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct PollForActivityTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional TaskListMetadata taskListMetadata\n}\n\nstruct PollForActivityTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution 
workflowExecution\n 30: optional string activityId\n 40: optional ActivityType activityType\n 50: optional binary input\n 70: optional i64 (js.type = \"Long\") scheduledTimestamp\n 80: optional i32 scheduleToCloseTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") startedTimestamp\n 100: optional i32 startToCloseTimeoutSeconds\n 110: optional i32 heartbeatTimeoutSeconds\n 120: optional i32 attempt\n 130: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 140: optional binary heartbeatDetails\n 150: optional WorkflowType workflowType\n 160: optional string workflowDomain\n 170: optional Header header\n 180: optional AutoConfigHint autoConfigHint\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatResponse {\n 10: optional bool cancelRequested\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional binary result\n 30: optional string identity\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional string reason\n 30: optional binary details\n 40: optional string identity\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RespondActivityTaskCompletedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary result\n 60: optional string identity\n}\n\nstruct RespondActivityTaskFailedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional string reason\n 60: optional binary details\n 70: optional string identity\n}\n\nstruct RespondActivityTaskCanceledByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n 40: optional string requestId\n 50: optional string cause\n 60: optional string firstExecutionRunID\n}\n\nstruct GetWorkflowExecutionHistoryRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional i32 maximumPageSize\n 40: optional binary nextPageToken\n 50: optional bool waitForNewEvent\n 60: optional HistoryEventFilterType HistoryEventFilterType\n 70: optional bool skipArchival\n 80: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct GetWorkflowExecutionHistoryResponse {\n 10: optional History history\n 11: optional list rawHistory\n 20: optional binary nextPageToken\n 30: optional bool archived\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string signalName\n 40: optional binary input\n 50: optional string identity\n 60: optional string requestId\n 70: optional binary control\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string 
workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional string signalName\n 120: optional binary signalInput\n 130: optional binary control\n 140: optional RetryPolicy retryPolicy\n 150: optional string cronSchedule\n 160: optional Memo memo\n 161: optional SearchAttributes searchAttributes\n 170: optional Header header\n 180: optional i32 delayStartSeconds\n 190: optional i32 jitterStartSeconds\n 200: optional i64 (js.type = \"Long\") firstRunAtTimestamp\n 210: optional CronOverlapPolicy cronOverlapPolicy\n 220: optional ActiveClusterSelectionPolicy activeClusterSelectionPolicy\n}\n\nstruct SignalWithStartWorkflowExecutionAsyncRequest {\n 10: optional SignalWithStartWorkflowExecutionRequest request\n}\n\nstruct SignalWithStartWorkflowExecutionAsyncResponse {\n}\n\nstruct RestartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional string identity\n}\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional binary details\n 50: optional string identity\n 60: optional string firstExecutionRunID\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional i64 (js.type = \"Long\") decisionFinishEventId\n 50: optional string requestId\n 60: optional bool skipSignalReapply\n}\n\nstruct ResetWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct ListOpenWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n}\n\nstruct ListOpenWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListClosedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n 70: optional WorkflowExecutionCloseStatus statusFilter\n}\n\nstruct ListClosedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListArchivedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListArchivedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct CountWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional string query\n}\n\nstruct CountWorkflowExecutionsResponse {\n 10: optional i64 count\n}\n\nstruct GetSearchAttributesResponse 
{\n 10: optional map keys\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional WorkflowQuery query\n // QueryRejectCondition can used to reject the query if workflow state does not satisify condition\n 40: optional QueryRejectCondition queryRejectCondition\n 50: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct QueryRejected {\n 10: optional WorkflowExecutionCloseStatus closeStatus\n}\n\nstruct QueryWorkflowResponse {\n 10: optional binary queryResult\n 20: optional QueryRejected queryRejected\n}\n\nstruct WorkflowQuery {\n 10: optional string queryType\n 20: optional binary queryArgs\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondQueryTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional QueryTaskCompletedType completedType\n 30: optional binary queryResult\n 40: optional string errorMessage\n 50: optional WorkerVersionInfo workerVersionInfo\n}\n\nstruct WorkflowQueryResult {\n 10: optional QueryResultType resultType\n 20: optional binary answer\n 30: optional string errorMessage\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct PendingActivityInfo {\n 10: optional string activityID\n 20: optional ActivityType activityType\n 30: optional PendingActivityState state\n 40: optional binary heartbeatDetails\n 50: optional i64 (js.type = \"Long\") lastHeartbeatTimestamp\n 60: optional i64 (js.type = \"Long\") lastStartedTimestamp\n 70: optional i32 attempt\n 80: optional i32 maximumAttempts\n 90: optional i64 (js.type = \"Long\") scheduledTimestamp\n 100: optional i64 (js.type = \"Long\") expirationTimestamp\n 110: optional string lastFailureReason\n 120: optional string lastWorkerIdentity\n 130: optional binary lastFailureDetails\n 140: optional string startedWorkerIdentity\n 150: optional i64 (js.type = \"Long\") scheduleID\n}\n\nstruct PendingDecisionInfo {\n 10: optional PendingDecisionState state\n 20: optional i64 (js.type = \"Long\") scheduledTimestamp\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 attempt\n 50: optional i64 (js.type = \"Long\") originalScheduledTimestamp\n 60: optional i64 (js.type = \"Long\") scheduleID\n}\n\nstruct PendingChildExecutionInfo {\n 1: optional string domain\n 10: optional string workflowID\n 20: optional string runID\n 30: optional string workflowTypName\n 40: optional i64 (js.type = \"Long\") initiatedID\n 50: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct DescribeWorkflowExecutionResponse {\n 10: optional WorkflowExecutionConfiguration executionConfiguration\n 20: optional WorkflowExecutionInfo workflowExecutionInfo\n 30: optional list pendingActivities\n 40: optional list pendingChildren\n 50: optional PendingDecisionInfo pendingDecision\n}\n\nstruct DescribeTaskListRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional TaskListType taskListType\n 40: optional bool includeTaskListStatus\n}\n\nstruct DescribeTaskListResponse {\n 10: optional list pollers\n 20: optional TaskListStatus taskListStatus\n // The TaskList being described\n 30: optional TaskList taskList\n}\n\nstruct GetTaskListsByDomainRequest {\n 10: optional 
string domainName\n}\n\nstruct GetTaskListsByDomainResponse {\n 10: optional map decisionTaskListMap\n 20: optional map activityTaskListMap\n}\n\nstruct ListTaskListPartitionsRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n}\n\nstruct TaskListPartitionMetadata {\n 10: optional string key\n 20: optional string ownerHostName\n}\n\nstruct ListTaskListPartitionsResponse {\n 10: optional list activityTaskListPartitions\n 20: optional list decisionTaskListPartitions\n}\n\nstruct IsolationGroupMetrics {\n 10: optional double newTasksPerSecond\n 20: optional i64 (js.type = \"Long\") pollerCount\n}\n\nstruct TaskListStatus {\n 10: optional i64 (js.type = \"Long\") backlogCountHint\n 20: optional i64 (js.type = \"Long\") readLevel\n 30: optional i64 (js.type = \"Long\") ackLevel\n 35: optional double ratePerSecond\n 40: optional TaskIDBlock taskIDBlock\n 50: optional map isolationGroupMetrics\n 60: optional double newTasksPerSecond\n 70: optional bool empty\n}\n\nstruct TaskIDBlock {\n 10: optional i64 (js.type = \"Long\") startID\n 20: optional i64 (js.type = \"Long\") endID\n}\n\n//At least one of the parameters needs to be provided\nstruct DescribeHistoryHostRequest {\n 10: optional string hostAddress //ip:port\n 20: optional i32 shardIdForHost\n 30: optional WorkflowExecution executionForHost\n}\n\nstruct RemoveTaskRequest {\n 10: optional i32 shardID\n 20: optional i32 type\n 30: optional i64 (js.type = \"Long\") taskID\n 40: optional i64 (js.type = \"Long\") visibilityTimestamp\n 50: optional string clusterName\n}\n\nstruct CloseShardRequest {\n 10: optional i32 shardID\n}\n\nstruct ResetQueueRequest {\n 10: optional i32 shardID\n 20: optional string clusterName\n 30: optional i32 type\n}\n\nstruct DescribeQueueRequest {\n 10: optional i32 shardID\n 20: optional string clusterName\n 30: optional i32 type\n}\n\nstruct DescribeQueueResponse {\n 10: optional list processingQueueStates\n}\n\nstruct DescribeShardDistributionRequest {\n 10: optional i32 pageSize\n 20: optional i32 pageID\n}\n\nstruct DescribeShardDistributionResponse {\n 10: optional i32 numberOfShards\n\n // ShardID to Address (ip:port) map\n 20: optional map shards\n}\n\nstruct DescribeHistoryHostResponse{\n 10: optional i32 numberOfShards\n 20: optional list shardIDs\n 30: optional DomainCacheInfo domainCache\n 40: optional string shardControllerStatus\n 50: optional string address\n}\n\nstruct DomainCacheInfo{\n 10: optional i64 numOfItemsInCacheByID\n 20: optional i64 numOfItemsInCacheByName\n}\n\nenum TaskListType {\n /*\n * Decision type of tasklist\n */\n Decision,\n /*\n * Activity type of tasklist\n */\n Activity,\n}\n\nstruct PollerInfo {\n // Unix Nano\n 10: optional i64 (js.type = \"Long\") lastAccessTime\n 20: optional string identity\n 30: optional double ratePerSecond\n}\n\nstruct RetryPolicy {\n // Interval of the first retry. If coefficient is 1.0 then it is used for all retries.\n 10: optional i32 initialIntervalInSeconds\n\n // Coefficient used to calculate the next retry interval.\n // The next retry interval is previous interval multiplied by the coefficient.\n // Must be 1 or larger.\n 20: optional double backoffCoefficient\n\n // Maximum interval between retries. Exponential backoff leads to interval increase.\n // This value is the cap of the increase. Default is 100x of initial interval.\n 30: optional i32 maximumIntervalInSeconds\n\n // Maximum number of attempts. When exceeded the retries stop even if not expired yet.\n // Must be 1 or bigger. 
Default is unlimited.\n 40: optional i32 maximumAttempts\n\n // Non-Retriable errors. Will stop retrying if error matches this list.\n 50: optional list nonRetriableErrorReasons\n\n // Expiration time for the whole retry process.\n 60: optional i32 expirationIntervalInSeconds\n}\n\n// HistoryBranchRange represents a piece of range for a branch.\nstruct HistoryBranchRange{\n // branchID of original branch forked from\n 10: optional string branchID\n // beinning node for the range, inclusive\n 20: optional i64 beginNodeID\n // ending node for the range, exclusive\n 30: optional i64 endNodeID\n}\n\n// For history persistence to serialize/deserialize branch details\nstruct HistoryBranch{\n 10: optional string treeID\n 20: optional string branchID\n 30: optional list ancestors\n}\n\n// VersionHistoryItem contains signal eventID and the corresponding version\nstruct VersionHistoryItem{\n 10: optional i64 (js.type = \"Long\") eventID\n 20: optional i64 (js.type = \"Long\") version\n}\n\n// VersionHistory contains the version history of a branch\nstruct VersionHistory{\n 10: optional binary branchToken\n 20: optional list items\n}\n\n// VersionHistories contains all version histories from all branches\nstruct VersionHistories{\n 10: optional i32 currentVersionHistoryIndex\n 20: optional list histories\n}\n\n// ReapplyEventsRequest is the request for reapply events API\nstruct ReapplyEventsRequest{\n 10: optional string domainName\n 20: optional WorkflowExecution workflowExecution\n 30: optional DataBlob events\n}\n\n// SupportedClientVersions contains the support versions for client library\nstruct SupportedClientVersions{\n 10: optional string goSdk\n 20: optional string javaSdk\n}\n\n// ClusterInfo contains information about cadence cluster\nstruct ClusterInfo{\n 10: optional SupportedClientVersions supportedClientVersions\n}\n\nstruct RefreshWorkflowTasksRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct FeatureFlags {\n\t10: optional bool WorkflowExecutionAlreadyCompletedErrorEnabled\n}\n\nenum CrossClusterTaskType {\n StartChildExecution\n CancelExecution\n SignalExecution\n RecordChildWorkflowExecutionComplete\n ApplyParentClosePolicy\n}\n\nenum CrossClusterTaskFailedCause {\n DOMAIN_NOT_ACTIVE\n DOMAIN_NOT_EXISTS\n WORKFLOW_ALREADY_RUNNING\n WORKFLOW_NOT_EXISTS\n WORKFLOW_ALREADY_COMPLETED\n UNCATEGORIZED\n}\n\nenum GetTaskFailedCause {\n SERVICE_BUSY\n TIMEOUT\n SHARD_OWNERSHIP_LOST\n UNCATEGORIZED\n}\n\nstruct CrossClusterTaskInfo {\n 10: optional string domainID\n 20: optional string workflowID\n 30: optional string runID\n 40: optional CrossClusterTaskType taskType\n 50: optional i16 taskState\n 60: optional i64 (js.type = \"Long\") taskID\n 70: optional i64 (js.type = \"Long\") visibilityTimestamp\n}\n\nstruct CrossClusterStartChildExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string requestID\n 30: optional i64 (js.type = \"Long\") initiatedEventID\n 40: optional StartChildWorkflowExecutionInitiatedEventAttributes initiatedEventAttributes\n // targetRunID is for scheduling first decision task\n // targetWorkflowID is available in initiatedEventAttributes\n 50: optional string targetRunID\n 60: optional map partitionConfig\n}\n\nstruct CrossClusterStartChildExecutionResponseAttributes {\n 10: optional string runID\n}\n\nstruct CrossClusterCancelExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional string 
requestID\n 50: optional i64 (js.type = \"Long\") initiatedEventID\n 60: optional bool childWorkflowOnly\n}\n\nstruct CrossClusterCancelExecutionResponseAttributes {\n}\n\nstruct CrossClusterSignalExecutionRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional string requestID\n 50: optional i64 (js.type = \"Long\") initiatedEventID\n 60: optional bool childWorkflowOnly\n 70: optional string signalName\n 80: optional binary signalInput\n 90: optional binary control\n}\n\nstruct CrossClusterSignalExecutionResponseAttributes {\n}\n\nstruct CrossClusterRecordChildWorkflowExecutionCompleteRequestAttributes {\n 10: optional string targetDomainID\n 20: optional string targetWorkflowID\n 30: optional string targetRunID\n 40: optional i64 (js.type = \"Long\") initiatedEventID\n 50: optional HistoryEvent completionEvent\n}\n\nstruct CrossClusterRecordChildWorkflowExecutionCompleteResponseAttributes {\n}\n\nstruct ApplyParentClosePolicyAttributes {\n 10: optional string childDomainID\n 20: optional string childWorkflowID\n 30: optional string childRunID\n 40: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct ApplyParentClosePolicyStatus {\n 10: optional bool completed\n 20: optional CrossClusterTaskFailedCause failedCause\n}\n\nstruct ApplyParentClosePolicyRequest {\n 10: optional ApplyParentClosePolicyAttributes child\n 20: optional ApplyParentClosePolicyStatus status\n}\n\nstruct CrossClusterApplyParentClosePolicyRequestAttributes {\n 10: optional list children\n}\n\nstruct ApplyParentClosePolicyResult {\n 10: optional ApplyParentClosePolicyAttributes child\n 20: optional CrossClusterTaskFailedCause failedCause\n}\n\nstruct CrossClusterApplyParentClosePolicyResponseAttributes {\n 10: optional list childrenStatus\n}\n\nstruct CrossClusterTaskRequest {\n 10: optional CrossClusterTaskInfo taskInfo\n 20: optional CrossClusterStartChildExecutionRequestAttributes startChildExecutionAttributes\n 30: optional CrossClusterCancelExecutionRequestAttributes cancelExecutionAttributes\n 40: optional CrossClusterSignalExecutionRequestAttributes signalExecutionAttributes\n 50: optional CrossClusterRecordChildWorkflowExecutionCompleteRequestAttributes recordChildWorkflowExecutionCompleteAttributes\n 60: optional CrossClusterApplyParentClosePolicyRequestAttributes applyParentClosePolicyAttributes\n}\n\nstruct CrossClusterTaskResponse {\n 10: optional i64 (js.type = \"Long\") taskID\n 20: optional CrossClusterTaskType taskType\n 30: optional i16 taskState\n 40: optional CrossClusterTaskFailedCause failedCause\n 50: optional CrossClusterStartChildExecutionResponseAttributes startChildExecutionAttributes\n 60: optional CrossClusterCancelExecutionResponseAttributes cancelExecutionAttributes\n 70: optional CrossClusterSignalExecutionResponseAttributes signalExecutionAttributes\n 80: optional CrossClusterRecordChildWorkflowExecutionCompleteResponseAttributes recordChildWorkflowExecutionCompleteAttributes\n 90: optional CrossClusterApplyParentClosePolicyResponseAttributes applyParentClosePolicyAttributes\n}\n\nstruct GetCrossClusterTasksRequest {\n 10: optional list shardIDs\n 20: optional string targetCluster\n}\n\nstruct GetCrossClusterTasksResponse {\n 10: optional map> tasksByShard\n 20: optional map failedCauseByShard\n}\n\nstruct RespondCrossClusterTasksCompletedRequest {\n 10: optional i32 shardID\n 20: optional string targetCluster\n 30: optional list taskResponses\n 40: optional bool fetchNewTasks\n}\n\nstruct 
RespondCrossClusterTasksCompletedResponse {\n 10: optional list tasks\n}\n\nenum IsolationGroupState {\n INVALID,\n HEALTHY,\n DRAINED,\n}\n\nstruct IsolationGroupPartition {\n 10: optional string name\n 20: optional IsolationGroupState state\n}\n\nstruct IsolationGroupConfiguration {\n 10: optional list isolationGroups\n}\n\nstruct AsyncWorkflowConfiguration {\n 10: optional bool enabled\n // PredefinedQueueName is the name of the predefined queue in cadence server config's asyncWorkflowQueues\n 20: optional string predefinedQueueName\n // queueType is the type of the queue if predefined_queue_name is not used\n 30: optional string queueType\n // queueConfig is the configuration for the queue if predefined_queue_name is not used\n 40: optional DataBlob queueConfig\n}\n\n/**\n* Any is a logical duplicate of google.protobuf.Any.\n*\n* The intent of the type is the same, but it is not intended to be directly\n* compatible with google.protobuf.Any or any Thrift equivalent - this blob is\n* RPC-type agnostic by design (as the underlying data may be transported over\n* proto or thrift), and the data-bytes may be in any encoding.\n*\n* This is intentionally different from DataBlob, which supports only a handful\n* of known encodings so it can be interpreted everywhere. Any supports literally\n* any contents, and needs to be considered opaque until it is given to something\n* that is expecting it.\n*\n* See ValueType to interpret the contents.\n**/\nstruct Any {\n // Type-string describing value's contents, and intentionally avoiding the\n // name \"type\" as it is often a special term.\n // This should usually be a hard-coded string of some kind.\n 10: optional string ValueType\n // Arbitrarily-encoded bytes, to be deserialized by a runtime implementation.\n // The contents are described by ValueType.\n 20: optional binary Value\n}\n\nstruct AutoConfigHint {\n 10: optional bool enableAutoConfig\n 20: optional i64 pollerWaitTimeInMs\n}\n\nstruct QueueState {\n 10: optional map virtualQueueStates\n 20: optional TaskKey exclusiveMaxReadLevel\n}\n\nstruct VirtualQueueState {\n 10: optional list virtualSliceStates\n}\n\nstruct VirtualSliceState {\n 10: optional TaskRange taskRange\n 20: optional Predicate predicate\n}\n\nstruct TaskRange {\n 10: optional TaskKey inclusiveMin\n 20: optional TaskKey exclusiveMax\n}\n\nstruct TaskKey {\n 10: optional i64 scheduledTimeNano\n 20: optional i64 taskID\n}\n\n// ActiveClusterSelectionPolicy is for active-active domains, it serves as a means to select\n// the active cluster, by specifying the attribute by which to divide the workflows\n// in that domain.\nstruct ActiveClusterSelectionPolicy {\n 1: optional ClusterAttribute clusterAttribute\n\n // deprecated 10: optional ActiveClusterSelectionStrategy strategy\n // deprecated 20: optional string stickyRegion\n // deprecated 30: optional string externalEntityType\n // deprecated 40: optional string externalEntityKey\n}\n\n// ClusterAttribute is used for subdividing workflows in a domain into their active\n// and passive clusters. 
Examples of this might be 'region' and 'cluster1' as\n// respective region and scope fields.\n//\n// for example, a workflow may specify this in it's start request:\n//\n// StartWorkflowRequest{\n// ActiveClusterSelectionPolicy: {\n// ClusterAttribute: {\n// Scope: \"cityID\",\n// Name: \"Lisbon\"\n// }\n// }\n// }\n//\n// and this means that this workflow will be associate with the domain's cluster attribute 'Lisbon',\n// be active in the cluster that has Lisbon active and\n// failover when that cluster-attribute is set to failover.\nstruct ClusterAttribute {\n 1: optional string scope\n 2: optional string name\n}\n\n// FailoverType describes how a failover operation will be performed.\nenum FailoverType {\n INVALID\n FORCE\n GRACEFUL\n}\n\n// PaginationOptions provides common options for paginated RPCs.\nstruct PaginationOptions {\n // page_size configures the number of results to be returned as part of each page\n 10: optional i32 pageSize\n // next_page_token should be provided from a previous response to fetch the next page.\n // if empty, the first page will be returned.\n 20: optional binary nextPageToken\n}\n\n// todo (david.porter) Remove this, as it's no longer needed\n// with the active/active configuration we have\nenum ActiveClusterSelectionStrategy {\n REGION_STICKY,\n EXTERNAL_ENTITY,\n}\n\nenum PredicateType {\n Universal,\n Empty,\n DomainID,\n}\n\nstruct UniversalPredicateAttributes {}\n\nstruct EmptyPredicateAttributes {}\n\nstruct DomainIDPredicateAttributes {\n 10: optional list domainIDs\n 20: optional bool isExclusive\n}\n\nstruct Predicate {\n 10: optional PredicateType predicateType\n 20: optional UniversalPredicateAttributes universalPredicateAttributes\n 30: optional EmptyPredicateAttributes emptyPredicateAttributes\n 40: optional DomainIDPredicateAttributes domainIDPredicateAttributes\n}\n" diff --git a/go.mod b/go.mod index 2abf825ca..2f437bfb7 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/robfig/cron v1.2.0 github.com/stretchr/testify v1.9.0 github.com/uber-go/tally v3.3.15+incompatible - github.com/uber/cadence-idl v0.0.0-20251002070144-16af6755557f + github.com/uber/cadence-idl v0.0.0-20251027163002-a2e93bbe969c github.com/uber/jaeger-client-go v2.22.1+incompatible github.com/uber/tchannel-go v1.32.1 go.uber.org/atomic v1.11.0 diff --git a/go.sum b/go.sum index 5ec028e0d..bc711f363 100644 --- a/go.sum +++ b/go.sum @@ -204,6 +204,8 @@ github.com/uber-go/tally v3.3.15+incompatible h1:9hLSgNBP28CjIaDmAuRTq9qV+UZY+9P github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= github.com/uber/cadence-idl v0.0.0-20251002070144-16af6755557f h1:fNoE3zYdIA4VzvjPQ201b9PQ+DijsnyBfnpVu07nFEI= github.com/uber/cadence-idl v0.0.0-20251002070144-16af6755557f/go.mod h1:oyUK7GCNCRHCCyWyzifSzXpVrRYVBbAMHAzF5dXiKws= +github.com/uber/cadence-idl v0.0.0-20251027163002-a2e93bbe969c h1:upx/g+hfrthvkoK45eKgw61XRhqxu7DpzwEk6hUd0Bw= +github.com/uber/cadence-idl v0.0.0-20251027163002-a2e93bbe969c/go.mod h1:oyUK7GCNCRHCCyWyzifSzXpVrRYVBbAMHAzF5dXiKws= github.com/uber/jaeger-client-go v2.22.1+incompatible h1:NHcubEkVbahf9t3p75TOCR83gdUHXjRJvjoBh1yACsM= github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= diff --git a/idls b/idls index 16af67555..0857e5075 160000 --- a/idls +++ b/idls @@ -1 +1 @@ -Subproject commit 16af6755557f3750296213473a03013dc475e8e2 +Subproject 
commit 0857e5075a96a114cc3a8ef94b5381387fd9232b diff --git a/internal/client.go b/internal/client.go index 775577e0b..90c09f2bf 100644 --- a/internal/client.go +++ b/internal/client.go @@ -598,6 +598,10 @@ type ( // Valid values are QueryConsistencyLevelEventual (served by the receiving cluster), and QueryConsistencyLevelStrong (redirects to the active cluster). QueryConsistencyLevel int + ClusterAttribute struct { + Scope string + Name string + } // ActiveClusterSelectionPolicy defines the policy for selecting the active cluster to start the workflow execution on for active-active domains. // Active-active domains can be configured to be active in multiple clusters (at most one in a given region). // Individual workflows can be configured to be active in one of the active clusters of the domain. @@ -609,17 +613,8 @@ type ( // Cadence server must be aware of the external entity type used. Custom types can be registered following the documentation in // https://github.com/cadence-workflow/cadence/blob/master/docs/design/active-active/active-active.md ActiveClusterSelectionPolicy struct { - Strategy ActiveClusterSelectionStrategy - ExternalEntityType string - ExternalEntityKey string + ClusterAttribute *ClusterAttribute } - - ActiveClusterSelectionStrategy int -) - -const ( - ActiveClusterSelectionStrategyRegionSticky ActiveClusterSelectionStrategy = iota - ActiveClusterSelectionStrategyExternalEntity ) const ( diff --git a/internal/common/auth/service_wrapper.go b/internal/common/auth/service_wrapper.go index 403e40944..18957f127 100644 --- a/internal/common/auth/service_wrapper.go +++ b/internal/common/auth/service_wrapper.go @@ -531,3 +531,13 @@ func (w *workflowServiceAuthWrapper) FailoverDomain(ctx context.Context, request result, err := w.service.FailoverDomain(ctx, request, opts...) return result, err } + +func (w *workflowServiceAuthWrapper) ListFailoverHistory(ctx context.Context, request *shared.ListFailoverHistoryRequest, opts ...yarpc.CallOption) (*shared.ListFailoverHistoryResponse, error) { + tokenHeader, err := w.getYarpcJWTHeader() + if err != nil { + return nil, err + } + opts = append(opts, *tokenHeader) + result, err := w.service.ListFailoverHistory(ctx, request, opts...) + return result, err +} diff --git a/internal/common/isolationgroup/service_wrapper.go b/internal/common/isolationgroup/service_wrapper.go index c1d2a7702..35882a626 100644 --- a/internal/common/isolationgroup/service_wrapper.go +++ b/internal/common/isolationgroup/service_wrapper.go @@ -322,3 +322,9 @@ func (w *workflowServiceIsolationGroupWrapper) FailoverDomain(ctx context.Contex result, err := w.service.FailoverDomain(ctx, request, opts...) return result, err } + +func (w *workflowServiceIsolationGroupWrapper) ListFailoverHistory(ctx context.Context, request *shared.ListFailoverHistoryRequest, opts ...yarpc.CallOption) (*shared.ListFailoverHistoryResponse, error) { + opts = append(opts, w.getIsolationGroupIdentifier()) + result, err := w.service.ListFailoverHistory(ctx, request, opts...) 
+ return result, err +} diff --git a/internal/common/metrics/service_wrapper.go b/internal/common/metrics/service_wrapper.go index bbcdfd390..d853de776 100644 --- a/internal/common/metrics/service_wrapper.go +++ b/internal/common/metrics/service_wrapper.go @@ -95,6 +95,7 @@ const ( scopeRestartWorkflowExecution = CadenceMetricsPrefix + "RestartWorkflowExecution" scopeDeleteDomain = CadenceMetricsPrefix + "DeleteDomain" scopeNameFailoverDomain = CadenceMetricsPrefix + "FailoverDomain" + scopeNameListFailoverHistory = CadenceMetricsPrefix + "ListFailoverHistory" ) // NewWorkflowServiceWrapper creates a new wrapper to WorkflowService that will emit metrics for each service call. @@ -460,3 +461,10 @@ func (w *workflowServiceMetricsWrapper) FailoverDomain(ctx context.Context, requ scope.handleError(err) return result, err } + +func (w *workflowServiceMetricsWrapper) ListFailoverHistory(ctx context.Context, request *shared.ListFailoverHistoryRequest, opts ...yarpc.CallOption) (*shared.ListFailoverHistoryResponse, error) { + scope := w.getOperationScope(scopeNameListFailoverHistory) + result, err := w.service.ListFailoverHistory(ctx, request, opts...) + scope.handleError(err) + return result, err +} diff --git a/internal/compatibility/adapter.go b/internal/compatibility/adapter.go index 853eb39f8..782340042 100644 --- a/internal/compatibility/adapter.go +++ b/internal/compatibility/adapter.go @@ -278,6 +278,11 @@ func (a thrift2protoAdapter) FailoverDomain(ctx context.Context, request *shared return thrift.FailoverDomainResponse(response), thrift.Error(err) } +func (a thrift2protoAdapter) ListFailoverHistory(ctx context.Context, request *shared.ListFailoverHistoryRequest, opts ...yarpc.CallOption) (*shared.ListFailoverHistoryResponse, error) { + response, err := a.domain.ListFailoverHistory(ctx, proto.ListFailoverHistoryRequest(request), opts...) 
+ return thrift.ListFailoverHistoryResponse(response), thrift.Error(err) +} + type domainAPIthriftAdapter struct { service workflowserviceclient.Interface } diff --git a/internal/compatibility/enum_test.go b/internal/compatibility/enum_test.go index f34aa6145..97f9db2d5 100644 --- a/internal/compatibility/enum_test.go +++ b/internal/compatibility/enum_test.go @@ -347,15 +347,3 @@ func TestCronOverlapPolicy(t *testing.T) { } assert.Equal(t, apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_INVALID, proto.CronOverlapPolicy(thrift.CronOverlapPolicy(999))) } - -func TestActiveClusterSelectionStrategy(t *testing.T) { - for _, v := range []apiv1.ActiveClusterSelectionStrategy{ - apiv1.ActiveClusterSelectionStrategy_ACTIVE_CLUSTER_SELECTION_STRATEGY_INVALID, - apiv1.ActiveClusterSelectionStrategy_ACTIVE_CLUSTER_SELECTION_STRATEGY_REGION_STICKY, - apiv1.ActiveClusterSelectionStrategy_ACTIVE_CLUSTER_SELECTION_STRATEGY_EXTERNAL_ENTITY, - } { - assert.Equal(t, v, proto.ActiveClusterSelectionStrategy(thrift.ActiveClusterSelectionStrategy(v))) - } - - assert.Equal(t, apiv1.ActiveClusterSelectionStrategy_ACTIVE_CLUSTER_SELECTION_STRATEGY_INVALID, proto.ActiveClusterSelectionStrategy(thrift.ActiveClusterSelectionStrategy(999))) -} diff --git a/internal/compatibility/proto/enum.go b/internal/compatibility/proto/enum.go index f711cb93e..96ec249bb 100644 --- a/internal/compatibility/proto/enum.go +++ b/internal/compatibility/proto/enum.go @@ -408,16 +408,3 @@ func CronOverlapPolicy(t *shared.CronOverlapPolicy) apiv1.CronOverlapPolicy { } return apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_INVALID } - -func ActiveClusterSelectionStrategy(t *shared.ActiveClusterSelectionStrategy) apiv1.ActiveClusterSelectionStrategy { - if t == nil { - return apiv1.ActiveClusterSelectionStrategy_ACTIVE_CLUSTER_SELECTION_STRATEGY_INVALID - } - switch *t { - case shared.ActiveClusterSelectionStrategyRegionSticky: - return apiv1.ActiveClusterSelectionStrategy_ACTIVE_CLUSTER_SELECTION_STRATEGY_REGION_STICKY - case shared.ActiveClusterSelectionStrategyExternalEntity: - return apiv1.ActiveClusterSelectionStrategy_ACTIVE_CLUSTER_SELECTION_STRATEGY_EXTERNAL_ENTITY - } - return apiv1.ActiveClusterSelectionStrategy_ACTIVE_CLUSTER_SELECTION_STRATEGY_INVALID -} diff --git a/internal/compatibility/proto/request.go b/internal/compatibility/proto/request.go index b72c9e4a3..819c38554 100644 --- a/internal/compatibility/proto/request.go +++ b/internal/compatibility/proto/request.go @@ -225,7 +225,7 @@ func RegisterDomainRequest(t *shared.RegisterDomainRequest) *apiv1.RegisterDomai WorkflowExecutionRetentionPeriod: daysToDuration(t.WorkflowExecutionRetentionPeriodInDays), Clusters: ClusterReplicationConfigurationArray(t.Clusters), ActiveClusterName: t.GetActiveClusterName(), - ActiveClustersByRegion: t.GetActiveClustersByRegion(), + ActiveClusters: ActiveClusters(t.ActiveClusters), Data: t.Data, SecurityToken: t.GetSecurityToken(), IsGlobalDomain: t.GetIsGlobalDomain(), @@ -704,35 +704,12 @@ func FailoverDomainRequest(t *shared.FailoverDomainRequest) *apiv1.FailoverDomai } } -func ActiveClusterSelectionPolicy(t *shared.ActiveClusterSelectionPolicy) *apiv1.ActiveClusterSelectionPolicy { +func ListFailoverHistoryRequest(t *shared.ListFailoverHistoryRequest) *apiv1.ListFailoverHistoryRequest { if t == nil { return nil } - plc := &apiv1.ActiveClusterSelectionPolicy{ - Strategy: ActiveClusterSelectionStrategy(t.Strategy), + return &apiv1.ListFailoverHistoryRequest{ + Filters: ListFailoverHistoryRequestFilters(t.Filters), + Pagination: 
PaginationOptions(t.Pagination), } - - if plc.Strategy == apiv1.ActiveClusterSelectionStrategy_ACTIVE_CLUSTER_SELECTION_STRATEGY_INVALID { - return nil - } - - switch *t.Strategy { - case shared.ActiveClusterSelectionStrategyRegionSticky: - plc.StrategyConfig = &apiv1.ActiveClusterSelectionPolicy_ActiveClusterStickyRegionConfig{ - ActiveClusterStickyRegionConfig: &apiv1.ActiveClusterStickyRegionConfig{ - StickyRegion: *t.StickyRegion, - }, - } - case shared.ActiveClusterSelectionStrategyExternalEntity: - plc.StrategyConfig = &apiv1.ActiveClusterSelectionPolicy_ActiveClusterExternalEntityConfig{ - ActiveClusterExternalEntityConfig: &apiv1.ActiveClusterExternalEntityConfig{ - ExternalEntityType: *t.ExternalEntityType, - ExternalEntityKey: *t.ExternalEntityKey, - }, - } - default: - return nil - } - - return plc } diff --git a/internal/compatibility/proto/types.go b/internal/compatibility/proto/types.go index b207bf846..9a8b21d3b 100644 --- a/internal/compatibility/proto/types.go +++ b/internal/compatibility/proto/types.go @@ -698,20 +698,84 @@ func ActivityLocalDispatchInfoMap(t map[string]*shared.ActivityLocalDispatchInfo return v } -func ActiveClusters(ac *shared.ActiveClusters) *apiv1.ActiveClusters { - if ac == nil { +func ActiveClusterSelectionPolicy(t *shared.ActiveClusterSelectionPolicy) *apiv1.ActiveClusterSelectionPolicy { + if t == nil { + return nil + } + return &apiv1.ActiveClusterSelectionPolicy{ + ClusterAttribute: ClusterAttribute(t.ClusterAttribute), + } +} + +func ClusterAttribute(t *shared.ClusterAttribute) *apiv1.ClusterAttribute { + if t == nil { return nil } + return &apiv1.ClusterAttribute{ + Scope: t.GetScope(), + Name: t.GetName(), + } +} - regToCl := make(map[string]*apiv1.ActiveClusterInfo) - for reg, clusterInfo := range ac.ActiveClustersByRegion { - regToCl[reg] = &apiv1.ActiveClusterInfo{ - ActiveClusterName: clusterInfo.GetActiveClusterName(), - FailoverVersion: clusterInfo.GetFailoverVersion(), +func ActiveClusters(t *shared.ActiveClusters) *apiv1.ActiveClusters { + if t == nil { + return nil + } + var activeClustersByClusterAttribute map[string]*apiv1.ClusterAttributeScope + if t.ActiveClustersByClusterAttribute != nil { + activeClustersByClusterAttribute = make(map[string]*apiv1.ClusterAttributeScope) + for scopeType, scope := range t.ActiveClustersByClusterAttribute { + activeClustersByClusterAttribute[scopeType] = ClusterAttributeScope(scope) } } return &apiv1.ActiveClusters{ - RegionToCluster: regToCl, + ActiveClustersByClusterAttribute: activeClustersByClusterAttribute, + } +} + +func ClusterAttributeScope(t *shared.ClusterAttributeScope) *apiv1.ClusterAttributeScope { + if t == nil { + return nil + } + var clusterAttributes map[string]*apiv1.ActiveClusterInfo + if len(t.ClusterAttributes) > 0 { + clusterAttributes = make(map[string]*apiv1.ActiveClusterInfo) + for name, clusterInfo := range t.ClusterAttributes { + clusterAttributes[name] = ActiveClusterInfo(clusterInfo) + } + } + + return &apiv1.ClusterAttributeScope{ + ClusterAttributes: clusterAttributes, + } +} + +func ActiveClusterInfo(t *shared.ActiveClusterInfo) *apiv1.ActiveClusterInfo { + if t == nil { + return nil + } + return &apiv1.ActiveClusterInfo{ + ActiveClusterName: t.GetActiveClusterName(), + FailoverVersion: t.GetFailoverVersion(), + } +} + +func PaginationOptions(t *shared.PaginationOptions) *apiv1.PaginationOptions { + if t == nil { + return nil + } + return &apiv1.PaginationOptions{ + PageSize: t.GetPageSize(), + NextPageToken: t.NextPageToken, + } +} + +func 
ListFailoverHistoryRequestFilters(t *shared.ListFailoverHistoryRequestFilters) *apiv1.ListFailoverHistoryRequestFilters { + if t == nil { + return nil + } + return &apiv1.ListFailoverHistoryRequestFilters{ + DomainId: t.GetDomainID(), } } diff --git a/internal/compatibility/testdata/common.go b/internal/compatibility/testdata/common.go index 4a3e4c0b0..c2be6e7c6 100644 --- a/internal/compatibility/testdata/common.go +++ b/internal/compatibility/testdata/common.go @@ -171,21 +171,17 @@ var ( }, } - ActiveClusterSelectionPolicyRegionSticky = &apiv1.ActiveClusterSelectionPolicy{ - Strategy: apiv1.ActiveClusterSelectionStrategy_ACTIVE_CLUSTER_SELECTION_STRATEGY_REGION_STICKY, - StrategyConfig: &apiv1.ActiveClusterSelectionPolicy_ActiveClusterStickyRegionConfig{ - ActiveClusterStickyRegionConfig: &apiv1.ActiveClusterStickyRegionConfig{ - StickyRegion: "us-east-1", - }, + ActiveClusterSelectionPolicy = &apiv1.ActiveClusterSelectionPolicy{ + ClusterAttribute: &apiv1.ClusterAttribute{ + Scope: "region", + Name: "us-east-1", }, } - ActiveClusterSelectionPolicyExternalEntity = &apiv1.ActiveClusterSelectionPolicy{ - Strategy: apiv1.ActiveClusterSelectionStrategy_ACTIVE_CLUSTER_SELECTION_STRATEGY_EXTERNAL_ENTITY, - StrategyConfig: &apiv1.ActiveClusterSelectionPolicy_ActiveClusterExternalEntityConfig{ - ActiveClusterExternalEntityConfig: &apiv1.ActiveClusterExternalEntityConfig{ - ExternalEntityType: "external-entity-type", - ExternalEntityKey: "external-entity-key", - }, + + ActiveClusterSelectionPolicy2 = &apiv1.ActiveClusterSelectionPolicy{ + ClusterAttribute: &apiv1.ClusterAttribute{ + Scope: "location", + Name: "London", }, } diff --git a/internal/compatibility/testdata/decision.go b/internal/compatibility/testdata/decision.go index 25a22598a..31741f59f 100644 --- a/internal/compatibility/testdata/decision.go +++ b/internal/compatibility/testdata/decision.go @@ -107,7 +107,7 @@ var ( SearchAttributes: &SearchAttributes, JitterStart: Duration4, CronOverlapPolicy: apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_SKIPPED, - ActiveClusterSelectionPolicy: ActiveClusterSelectionPolicyRegionSticky, + ActiveClusterSelectionPolicy: ActiveClusterSelectionPolicy, } FailWorkflowExecutionDecisionAttributes = apiv1.FailWorkflowExecutionDecisionAttributes{ Failure: &Failure, @@ -165,7 +165,7 @@ var ( Memo: &Memo, SearchAttributes: &SearchAttributes, CronOverlapPolicy: apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_SKIPPED, - ActiveClusterSelectionPolicy: ActiveClusterSelectionPolicyRegionSticky, + ActiveClusterSelectionPolicy: ActiveClusterSelectionPolicy, } StartTimerDecisionAttributes = apiv1.StartTimerDecisionAttributes{ TimerId: TimerID, diff --git a/internal/compatibility/testdata/domain.go b/internal/compatibility/testdata/domain.go index 20dbf42d1..ce42b2011 100644 --- a/internal/compatibility/testdata/domain.go +++ b/internal/compatibility/testdata/domain.go @@ -89,19 +89,23 @@ var ( ClusterReplicationConfigurationArray = []*apiv1.ClusterReplicationConfiguration{ &ClusterReplicationConfiguration, } - ActiveClustersByRegion = map[string]string{ - "Region1": ClusterName1, - "Region2": ClusterName2, - } ActiveClusters = &apiv1.ActiveClusters{ - RegionToCluster: map[string]*apiv1.ActiveClusterInfo{ - "Region1": &apiv1.ActiveClusterInfo{ - ActiveClusterName: ClusterName1, - FailoverVersion: 0, + ActiveClustersByClusterAttribute: map[string]*apiv1.ClusterAttributeScope{ + "region": &apiv1.ClusterAttributeScope{ + ClusterAttributes: map[string]*apiv1.ActiveClusterInfo{ + "us-east-1": &apiv1.ActiveClusterInfo{ + 
ActiveClusterName: ClusterName1, + FailoverVersion: 0, + }, + }, }, - "Region2": &apiv1.ActiveClusterInfo{ - ActiveClusterName: ClusterName2, - FailoverVersion: 0, + "city": &apiv1.ClusterAttributeScope{ + ClusterAttributes: map[string]*apiv1.ActiveClusterInfo{ + "london": &apiv1.ActiveClusterInfo{ + ActiveClusterName: ClusterName2, + FailoverVersion: 0, + }, + }, }, }, } diff --git a/internal/compatibility/testdata/service.go b/internal/compatibility/testdata/service.go index 0c667ec18..c0e1b1c39 100644 --- a/internal/compatibility/testdata/service.go +++ b/internal/compatibility/testdata/service.go @@ -34,7 +34,7 @@ var ( WorkflowExecutionRetentionPeriod: DomainRetention, Clusters: ClusterReplicationConfigurationArray, ActiveClusterName: ClusterName1, - ActiveClustersByRegion: ActiveClustersByRegion, + ActiveClusters: ActiveClusters, Data: DomainData, SecurityToken: SecurityToken, IsGlobalDomain: true, @@ -370,7 +370,7 @@ var ( SearchAttributes: &SearchAttributes, Header: &Header, CronOverlapPolicy: apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_SKIPPED, - ActiveClusterSelectionPolicy: ActiveClusterSelectionPolicyRegionSticky, + ActiveClusterSelectionPolicy: ActiveClusterSelectionPolicy, } StartWorkflowExecutionRequestWithCronAndActiveClusterSelectionPolicy2 = apiv1.StartWorkflowExecutionRequest{ Domain: DomainName, @@ -389,7 +389,7 @@ var ( SearchAttributes: &SearchAttributes, Header: &Header, CronOverlapPolicy: apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_BUFFER_ONE, - ActiveClusterSelectionPolicy: ActiveClusterSelectionPolicyExternalEntity, + ActiveClusterSelectionPolicy: ActiveClusterSelectionPolicy2, } StartWorkflowExecutionResponse = apiv1.StartWorkflowExecutionResponse{ RunId: RunID, diff --git a/internal/compatibility/thrift/enum.go b/internal/compatibility/thrift/enum.go index f7b1f5b66..97e4b9a85 100644 --- a/internal/compatibility/thrift/enum.go +++ b/internal/compatibility/thrift/enum.go @@ -397,15 +397,14 @@ func CronOverlapPolicy(t apiv1.CronOverlapPolicy) *shared.CronOverlapPolicy { return nil } -func ActiveClusterSelectionStrategy(t apiv1.ActiveClusterSelectionStrategy) *shared.ActiveClusterSelectionStrategy { +func FailoverType(t apiv1.FailoverType) *shared.FailoverType { switch t { - case apiv1.ActiveClusterSelectionStrategy_ACTIVE_CLUSTER_SELECTION_STRATEGY_INVALID: - return nil - case apiv1.ActiveClusterSelectionStrategy_ACTIVE_CLUSTER_SELECTION_STRATEGY_REGION_STICKY: - return shared.ActiveClusterSelectionStrategyRegionSticky.Ptr() - case apiv1.ActiveClusterSelectionStrategy_ACTIVE_CLUSTER_SELECTION_STRATEGY_EXTERNAL_ENTITY: - return shared.ActiveClusterSelectionStrategyExternalEntity.Ptr() + case apiv1.FailoverType_FAILOVER_TYPE_INVALID: + return shared.FailoverTypeInvalid.Ptr() + case apiv1.FailoverType_FAILOVER_TYPE_FORCE: + return shared.FailoverTypeForce.Ptr() + case apiv1.FailoverType_FAILOVER_TYPE_GRACEFUL: + return shared.FailoverTypeGraceful.Ptr() } - // we treat any unknown value as invalid - return nil + return shared.FailoverTypeInvalid.Ptr() } diff --git a/internal/compatibility/thrift/request.go b/internal/compatibility/thrift/request.go index efd07f563..7aeaff416 100644 --- a/internal/compatibility/thrift/request.go +++ b/internal/compatibility/thrift/request.go @@ -22,7 +22,6 @@ package thrift import ( "go.uber.org/cadence/.gen/go/shared" - "go.uber.org/cadence/internal/common" apiv1 "github.com/uber/cadence-idl/go/proto/api/v1" ) @@ -228,7 +227,7 @@ func RegisterDomainRequest(t *apiv1.RegisterDomainRequest) *shared.RegisterDomai 
WorkflowExecutionRetentionPeriodInDays: durationToDays(t.WorkflowExecutionRetentionPeriod), Clusters: ClusterReplicationConfigurationArray(t.Clusters), ActiveClusterName: &t.ActiveClusterName, - ActiveClustersByRegion: t.ActiveClustersByRegion, + ActiveClusters: ActiveClusters(t.ActiveClusters), Data: t.Data, SecurityToken: &t.SecurityToken, IsGlobalDomain: &t.IsGlobalDomain, @@ -609,21 +608,17 @@ func ActiveClusterSelectionPolicy(t *apiv1.ActiveClusterSelectionPolicy) *shared if t == nil { return nil } - plc := &shared.ActiveClusterSelectionPolicy{ - Strategy: ActiveClusterSelectionStrategy(t.Strategy), + return &shared.ActiveClusterSelectionPolicy{ + ClusterAttribute: ClusterAttribute(t.ClusterAttribute), } +} - if plc.Strategy == nil { +func ClusterAttribute(t *apiv1.ClusterAttribute) *shared.ClusterAttribute { + if t == nil { return nil } - - switch *plc.Strategy { - case shared.ActiveClusterSelectionStrategyRegionSticky: - plc.StickyRegion = common.StringPtr(t.GetActiveClusterStickyRegionConfig().GetStickyRegion()) - case shared.ActiveClusterSelectionStrategyExternalEntity: - plc.ExternalEntityType = common.StringPtr(t.GetActiveClusterExternalEntityConfig().GetExternalEntityType()) - plc.ExternalEntityKey = common.StringPtr(t.GetActiveClusterExternalEntityConfig().GetExternalEntityKey()) + return &shared.ClusterAttribute{ + Scope: &t.Scope, + Name: &t.Name, } - - return plc } diff --git a/internal/compatibility/thrift/response.go b/internal/compatibility/thrift/response.go index efbe4e7e3..622009f9d 100644 --- a/internal/compatibility/thrift/response.go +++ b/internal/compatibility/thrift/response.go @@ -403,3 +403,13 @@ func FailoverDomainResponse(t *apiv1.FailoverDomainResponse) *shared.FailoverDom IsGlobalDomain: &t.Domain.IsGlobalDomain, } } + +func ListFailoverHistoryResponse(t *apiv1.ListFailoverHistoryResponse) *shared.ListFailoverHistoryResponse { + if t == nil { + return nil + } + return &shared.ListFailoverHistoryResponse{ + FailoverEvents: FailoverEventArray(t.FailoverEvents), + NextPageToken: t.NextPageToken, + } +} diff --git a/internal/compatibility/thrift/types.go b/internal/compatibility/thrift/types.go index b5211bfe0..4e96b613f 100644 --- a/internal/compatibility/thrift/types.go +++ b/internal/compatibility/thrift/types.go @@ -710,20 +710,91 @@ func ActivityLocalDispatchInfoMap(t map[string]*apiv1.ActivityLocalDispatchInfo) return v } -func ActiveClusters(ac *apiv1.ActiveClusters) *shared.ActiveClusters { - if ac == nil { +func ActiveClusters(t *apiv1.ActiveClusters) *shared.ActiveClusters { + if t == nil { return nil } - - clByRegion := make(map[string]*shared.ActiveClusterInfo) - for region, clInfo := range ac.RegionToCluster { - clByRegion[region] = &shared.ActiveClusterInfo{ - ActiveClusterName: &clInfo.ActiveClusterName, - FailoverVersion: &clInfo.FailoverVersion, + var activeClustersByClusterAttribute map[string]*shared.ClusterAttributeScope + if t.ActiveClustersByClusterAttribute != nil { + activeClustersByClusterAttribute = make(map[string]*shared.ClusterAttributeScope) + for scopeType, scope := range t.ActiveClustersByClusterAttribute { + activeClustersByClusterAttribute[scopeType] = ClusterAttributeScope(scope) } } return &shared.ActiveClusters{ - ActiveClustersByRegion: clByRegion, + ActiveClustersByClusterAttribute: activeClustersByClusterAttribute, + } +} + +func ClusterAttributeScope(t *apiv1.ClusterAttributeScope) *shared.ClusterAttributeScope { + if t == nil { + return nil + } + var clusterAttributes map[string]*shared.ActiveClusterInfo + if 
len(t.ClusterAttributes) > 0 { + clusterAttributes = make(map[string]*shared.ActiveClusterInfo) + for name, clusterInfo := range t.ClusterAttributes { + clusterAttributes[name] = ActiveClusterInfo(clusterInfo) + } + } + + return &shared.ClusterAttributeScope{ + ClusterAttributes: clusterAttributes, + } +} + +func ActiveClusterInfo(t *apiv1.ActiveClusterInfo) *shared.ActiveClusterInfo { + if t == nil { + return nil + } + return &shared.ActiveClusterInfo{ + ActiveClusterName: &t.ActiveClusterName, + FailoverVersion: &t.FailoverVersion, + } +} + +func ClusterFailoverArray(t []*apiv1.ClusterFailover) []*shared.ClusterFailover { + if t == nil { + return nil + } + v := make([]*shared.ClusterFailover, len(t)) + for i := range t { + v[i] = ClusterFailover(t[i]) + } + return v +} + +func ClusterFailover(t *apiv1.ClusterFailover) *shared.ClusterFailover { + if t == nil { + return nil + } + return &shared.ClusterFailover{ + FromCluster: ActiveClusterInfo(t.FromCluster), + ToCluster: ActiveClusterInfo(t.ToCluster), + ClusterAttribute: ClusterAttribute(t.ClusterAttribute), + } +} + +func FailoverEventArray(t []*apiv1.FailoverEvent) []*shared.FailoverEvent { + if t == nil { + return nil + } + v := make([]*shared.FailoverEvent, len(t)) + for i := range t { + v[i] = FailoverEvent(t[i]) + } + return v +} + +func FailoverEvent(t *apiv1.FailoverEvent) *shared.FailoverEvent { + if t == nil { + return nil + } + return &shared.FailoverEvent{ + ID: &t.Id, + CreatedTime: timeToUnixNano(t.CreatedTime), + FailoverType: FailoverType(t.FailoverType), + ClusterFailovers: ClusterFailoverArray(t.ClusterFailovers), } } diff --git a/internal/convert.go b/internal/convert.go index 399ddf24a..e8139d817 100644 --- a/internal/convert.go +++ b/internal/convert.go @@ -21,8 +21,6 @@ package internal import ( - "fmt" - s "go.uber.org/cadence/.gen/go/shared" "go.uber.org/cadence/internal/common" "go.uber.org/cadence/internal/common/backoff" @@ -50,26 +48,18 @@ func convertActiveClusterSelectionPolicy(policy *ActiveClusterSelectionPolicy) ( if policy == nil { return nil, nil } + return &s.ActiveClusterSelectionPolicy{ + ClusterAttribute: convertClusterAttribute(policy.ClusterAttribute), + }, nil +} - switch policy.Strategy { - case ActiveClusterSelectionStrategyRegionSticky: - return &s.ActiveClusterSelectionPolicy{ - Strategy: s.ActiveClusterSelectionStrategyRegionSticky.Ptr(), - }, nil - case ActiveClusterSelectionStrategyExternalEntity: - if policy.ExternalEntityType == "" { - return nil, fmt.Errorf("external entity type is required for external entity strategy") - } - if policy.ExternalEntityKey == "" { - return nil, fmt.Errorf("external entity key is required for external entity strategy") - } - return &s.ActiveClusterSelectionPolicy{ - Strategy: s.ActiveClusterSelectionStrategyExternalEntity.Ptr(), - ExternalEntityType: common.StringPtr(policy.ExternalEntityType), - ExternalEntityKey: common.StringPtr(policy.ExternalEntityKey), - }, nil - default: - return nil, fmt.Errorf("invalid active cluster selection strategy: %d", policy.Strategy) +func convertClusterAttribute(attr *ClusterAttribute) *s.ClusterAttribute { + if attr == nil { + return nil + } + return &s.ClusterAttribute{ + Scope: &attr.Scope, + Name: &attr.Name, } } diff --git a/internal/convert_test.go b/internal/convert_test.go index 3be8872c9..10f518556 100644 --- a/internal/convert_test.go +++ b/internal/convert_test.go @@ -101,50 +101,20 @@ func TestConvertActiveClusterSelectionPolicy(t *testing.T) { thriftPolicy: nil, }, { - name: "region sticky policy", + name: 
"valid policy", policy: &ActiveClusterSelectionPolicy{ - Strategy: ActiveClusterSelectionStrategyRegionSticky, + ClusterAttribute: &ClusterAttribute{ + Scope: "region", + Name: "us-east-1", + }, }, thriftPolicy: &s.ActiveClusterSelectionPolicy{ - Strategy: s.ActiveClusterSelectionStrategyRegionSticky.Ptr(), + ClusterAttribute: &s.ClusterAttribute{ + Scope: common.StringPtr("region"), + Name: common.StringPtr("us-east-1"), + }, }, }, - { - name: "external entity policy - success", - policy: &ActiveClusterSelectionPolicy{ - Strategy: ActiveClusterSelectionStrategyExternalEntity, - ExternalEntityType: "test-type", - ExternalEntityKey: "test-key", - }, - thriftPolicy: &s.ActiveClusterSelectionPolicy{ - Strategy: s.ActiveClusterSelectionStrategyExternalEntity.Ptr(), - ExternalEntityType: common.StringPtr("test-type"), - ExternalEntityKey: common.StringPtr("test-key"), - }, - }, - { - name: "external entity policy - missing type", - policy: &ActiveClusterSelectionPolicy{ - Strategy: ActiveClusterSelectionStrategyExternalEntity, - ExternalEntityKey: "test-key", - }, - wantErr: true, - }, - { - name: "external entity policy - missing key", - policy: &ActiveClusterSelectionPolicy{ - Strategy: ActiveClusterSelectionStrategyExternalEntity, - ExternalEntityType: "test-type", - }, - wantErr: true, - }, - { - name: "invalid strategy", - policy: &ActiveClusterSelectionPolicy{ - Strategy: ActiveClusterSelectionStrategy(-1), - }, - wantErr: true, - }, } for _, test := range tests { diff --git a/internal/internal_workflow_client_test.go b/internal/internal_workflow_client_test.go index ccd951d36..b97bc5d75 100644 --- a/internal/internal_workflow_client_test.go +++ b/internal/internal_workflow_client_test.go @@ -2866,7 +2866,12 @@ func TestGetWorkflowStartRequest(t *testing.T) { DelayStart: 0 * time.Second, JitterStart: 0 * time.Second, CronOverlapPolicy: shared.CronOverlapPolicyBufferone, - ActiveClusterSelectionPolicy: &ActiveClusterSelectionPolicy{Strategy: ActiveClusterSelectionStrategyRegionSticky}, + ActiveClusterSelectionPolicy: &ActiveClusterSelectionPolicy{ + ClusterAttribute: &ClusterAttribute{ + Scope: "region", + Name: "us-east-1", + }, + }, }, workflowFunc: func(ctx Context) {}, wantRequest: &shared.StartWorkflowExecutionRequest{ @@ -2887,7 +2892,12 @@ func TestGetWorkflowStartRequest(t *testing.T) { Header: &shared.Header{Fields: map[string][]byte{}}, WorkflowIdReusePolicy: shared.WorkflowIdReusePolicyAllowDuplicateFailedOnly.Ptr(), CronOverlapPolicy: shared.CronOverlapPolicyBufferone.Ptr(), - ActiveClusterSelectionPolicy: &shared.ActiveClusterSelectionPolicy{Strategy: shared.ActiveClusterSelectionStrategyRegionSticky.Ptr()}, + ActiveClusterSelectionPolicy: &shared.ActiveClusterSelectionPolicy{ + ClusterAttribute: &shared.ClusterAttribute{ + Scope: common.StringPtr("region"), + Name: common.StringPtr("us-east-1"), + }, + }, }, }, { @@ -2982,23 +2992,6 @@ func TestGetWorkflowStartRequest(t *testing.T) { args: []interface{}{}, wantErr: "expected 2 args for function", }, - { - name: "missing external entity type in active cluster selection policy", - options: StartWorkflowOptions{ - ID: workflowID, - TaskList: tasklist, - ExecutionStartToCloseTimeout: 10 * time.Second, - DecisionTaskStartToCloseTimeout: 5 * time.Second, - DelayStart: 0 * time.Second, - JitterStart: 0 * time.Second, - CronOverlapPolicy: shared.CronOverlapPolicyBufferone, - ActiveClusterSelectionPolicy: &ActiveClusterSelectionPolicy{ - Strategy: ActiveClusterSelectionStrategyExternalEntity, - }, - }, - workflowFunc: func(ctx 
Context) {}, - wantErr: "external entity type is required for external entity strategy", - }, } for _, tc := range tests { From 325a719123948442683a467f63e7f38d2cc9bb6f Mon Sep 17 00:00:00 2001 From: Zijian Chen Date: Mon, 27 Oct 2025 13:46:49 -0700 Subject: [PATCH 2/7] go mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index bc711f363..9c17504b7 100644 --- a/go.sum +++ b/go.sum @@ -202,8 +202,6 @@ github.com/uber-go/mapdecode v1.0.0/go.mod h1:b5nP15FwXTgpjTjeA9A2uTHXV5UJCl4arw github.com/uber-go/tally v3.3.12+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= github.com/uber-go/tally v3.3.15+incompatible h1:9hLSgNBP28CjIaDmAuRTq9qV+UZY+9PcvAkXO4nNMwg= github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= -github.com/uber/cadence-idl v0.0.0-20251002070144-16af6755557f h1:fNoE3zYdIA4VzvjPQ201b9PQ+DijsnyBfnpVu07nFEI= -github.com/uber/cadence-idl v0.0.0-20251002070144-16af6755557f/go.mod h1:oyUK7GCNCRHCCyWyzifSzXpVrRYVBbAMHAzF5dXiKws= github.com/uber/cadence-idl v0.0.0-20251027163002-a2e93bbe969c h1:upx/g+hfrthvkoK45eKgw61XRhqxu7DpzwEk6hUd0Bw= github.com/uber/cadence-idl v0.0.0-20251027163002-a2e93bbe969c/go.mod h1:oyUK7GCNCRHCCyWyzifSzXpVrRYVBbAMHAzF5dXiKws= github.com/uber/jaeger-client-go v2.22.1+incompatible h1:NHcubEkVbahf9t3p75TOCR83gdUHXjRJvjoBh1yACsM= From d56f3434983e76dc9461a987fff7480989d8b160 Mon Sep 17 00:00:00 2001 From: Zijian Chen Date: Mon, 27 Oct 2025 14:10:25 -0700 Subject: [PATCH 3/7] codecov --- internal/common/auth/service_wrapper_test.go | 16 ++++++++++++++++ .../isolationgroup/service_wrapper_test.go | 9 +++++++++ internal/convert_test.go | 5 +++++ 3 files changed, 30 insertions(+) diff --git a/internal/common/auth/service_wrapper_test.go b/internal/common/auth/service_wrapper_test.go index c355f7061..95deee5ee 100644 --- a/internal/common/auth/service_wrapper_test.go +++ b/internal/common/auth/service_wrapper_test.go @@ -763,3 +763,19 @@ func (s *serviceWrapperSuite) TestFailoverDomainInvalidToken() { _, err := sw.FailoverDomain(ctx, &shared.FailoverDomainRequest{}) s.EqualError(err, "error") } + +func (s *serviceWrapperSuite) TestListFailoverHistoryValidToken() { + s.Service.EXPECT().ListFailoverHistory(gomock.Any(), gomock.Any(), gomock.Any()).Times(1) + sw := NewWorkflowServiceWrapper(s.Service, s.AuthProvider) + ctx, _ := thrift.NewContext(time.Minute) + _, err := sw.ListFailoverHistory(ctx, &shared.ListFailoverHistoryRequest{}) + s.NoError(err) +} + +func (s *serviceWrapperSuite) TestListFailoverHistoryInvalidToken() { + s.AuthProvider = newJWTAuthIncorrect() + sw := NewWorkflowServiceWrapper(s.Service, s.AuthProvider) + ctx, _ := thrift.NewContext(time.Minute) + _, err := sw.ListFailoverHistory(ctx, &shared.ListFailoverHistoryRequest{}) + s.EqualError(err, "error") +} diff --git a/internal/common/isolationgroup/service_wrapper_test.go b/internal/common/isolationgroup/service_wrapper_test.go index d96a7a729..4e75ef04e 100644 --- a/internal/common/isolationgroup/service_wrapper_test.go +++ b/internal/common/isolationgroup/service_wrapper_test.go @@ -415,6 +415,15 @@ func TestAPICalls(t *testing.T) { }, expectedResponse: &shared.FailoverDomainResponse{}, }, + "ListFailoverHistory": { + action: func(ctx context.Context, sw workflowserviceclient.Interface) (interface{}, error) { + return sw.ListFailoverHistory(ctx, &shared.ListFailoverHistoryRequest{}) + }, + affordance: func(m *workflowservicetest.MockClient) { + m.EXPECT().ListFailoverHistory(gomock.Any(), 
&shared.ListFailoverHistoryRequest{}, gomock.Any()).Times(1).Return(&shared.ListFailoverHistoryResponse{}, nil) + }, + expectedResponse: &shared.ListFailoverHistoryResponse{}, + }, } for name, td := range tests { diff --git a/internal/convert_test.go b/internal/convert_test.go index 10f518556..22df0312c 100644 --- a/internal/convert_test.go +++ b/internal/convert_test.go @@ -100,6 +100,11 @@ func TestConvertActiveClusterSelectionPolicy(t *testing.T) { policy: nil, thriftPolicy: nil, }, + { + name: "empty policy", + policy: &ActiveClusterSelectionPolicy{}, + thriftPolicy: &s.ActiveClusterSelectionPolicy{}, + }, { name: "valid policy", policy: &ActiveClusterSelectionPolicy{ From 36f66e8aa52e972450a7392fbbdcb1c1a267dd17 Mon Sep 17 00:00:00 2001 From: Zijian Chen Date: Mon, 27 Oct 2025 17:03:00 -0700 Subject: [PATCH 4/7] update comment --- internal/client.go | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/internal/client.go b/internal/client.go index 90c09f2bf..84f4e02e4 100644 --- a/internal/client.go +++ b/internal/client.go @@ -603,15 +603,11 @@ type ( Name string } // ActiveClusterSelectionPolicy defines the policy for selecting the active cluster to start the workflow execution on for active-active domains. - // Active-active domains can be configured to be active in multiple clusters (at most one in a given region). - // Individual workflows can be configured to be active in one of the active clusters of the domain. - // - // There are two supported strategies: - // - Region sticky: The workflow will be active in the active cluster of the region that start workflow request is sent to. - // - External entity: The workflow can be associated with an external entity which has a corresponding region. - // The workflow will be considered active in the active cluster of the region that the external entity is in. - // Cadence server must be aware of the external entity type used. Custom types can be registered following the documentation in - // https://github.com/cadence-workflow/cadence/blob/master/docs/design/active-active/active-active.md + // Active-active domains are domains that have cluster attributes. Each cluster attribute has its own active cluster, which can be different from the domain's active cluster. + // Workflows in active-active domains are divided into sub-groups by their cluster attributes. + // A workflow can specify its cluster attribute by setting the active cluster selection policy in its start workflow request. + // If no active cluster selection policy is provided, or the cluster attribute is not found in the domain's metadata, + // the workflow's active cluster is the domain's active cluster.
ActiveClusterSelectionPolicy struct { ClusterAttribute *ClusterAttribute } From 40fc72c2e5f175bd7e9816c3280a90944c43bdaf Mon Sep 17 00:00:00 2001 From: Zijian Chen Date: Mon, 27 Oct 2025 19:01:03 -0700 Subject: [PATCH 5/7] cov --- internal/compatibility/api_test.go | 16 +++------------- internal/compatibility/enum_test.go | 11 +++++++++++ internal/compatibility/proto/enum.go | 15 +++++++++++++++ 3 files changed, 29 insertions(+), 13 deletions(-) diff --git a/internal/compatibility/api_test.go b/internal/compatibility/api_test.go index 2745da72c..4cf923353 100644 --- a/internal/compatibility/api_test.go +++ b/internal/compatibility/api_test.go @@ -584,9 +584,6 @@ func TestDescribeDomainResponse(t *testing.T) { } }, }, - ExcludedFields: []string{ - "ActiveClusters", // [BUG,NEEDS INVESTIGATION] Appears to be a nil pointer dereference in mapper conversion - }, }, ) } @@ -2019,11 +2016,6 @@ func TestStartWorkflowExecutionRequest(t *testing.T) { c.Fuzz(&req.CronOverlapPolicy) }, }, - ExcludedFields: []string{ - // [BUG,NEEDS INVESTIGATION] ActiveClusterSelectionPolicy appears to be failing the round trip - // TODO: Investigate the mappers and determine where it is failing fuzz testing - "ActiveClusterSelectionPolicy", - }, }, ) } @@ -2264,10 +2256,9 @@ func TestUpdateDomainRequest(t *testing.T) { }, }, ExcludedFields: []string{ - "UpdateMask", // [NOT INVESTIGATED] Complex nested structure with protobuf metadata issues - mapper incorrectly populates UpdateMask paths - "BadBinaries", // [NOT INVESTIGATED] Appears to be a fuzzing issue, tested in TestBadBinaries - "Clusters", // [NOT INVESTIGATED] Appears to be a fuzzing issue - "ActiveClusters", // [NOT INVESTIGATED] Appears to be a fuzzing issue + "UpdateMask", // [NOT INVESTIGATED] Complex nested structure with protobuf metadata issues - mapper incorrectly populates UpdateMask paths + "BadBinaries", // [NOT INVESTIGATED] Appears to be a fuzzing issue, tested in TestBadBinaries + "Clusters", // [NOT INVESTIGATED] Appears to be a fuzzing issue }, }, ) @@ -2304,7 +2295,6 @@ func TestUpdateDomainResponse(t *testing.T) { }, ExcludedFields: []string{ // Exclude nested fields that have complex issues like in DescribeDomainResponse - "Domain.ActiveClusters", // [BUG,NEEDS INVESTIGATION] Nil pointer dereference in mapper conversion "Domain.Clusters", // [NOT INVESTIGATED] Protobuf metadata issues in nested ClusterReplicationConfiguration "Domain.FailoverInfo", // [NOT INVESTIGATED] Protobuf metadata issues in nested structures "Domain.IsolationGroups", // [NOT INVESTIGATED] Protobuf metadata issues in nested structures diff --git a/internal/compatibility/enum_test.go b/internal/compatibility/enum_test.go index 97f9db2d5..f842205d4 100644 --- a/internal/compatibility/enum_test.go +++ b/internal/compatibility/enum_test.go @@ -347,3 +347,14 @@ func TestCronOverlapPolicy(t *testing.T) { } assert.Equal(t, apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_INVALID, proto.CronOverlapPolicy(thrift.CronOverlapPolicy(999))) } + +func TestFailoverType(t *testing.T) { + for _, v := range []apiv1.FailoverType{ + apiv1.FailoverType_FAILOVER_TYPE_INVALID, + apiv1.FailoverType_FAILOVER_TYPE_FORCE, + apiv1.FailoverType_FAILOVER_TYPE_GRACEFUL, + } { + assert.Equal(t, v, proto.FailoverType(thrift.FailoverType(v))) + } + assert.Equal(t, apiv1.FailoverType_FAILOVER_TYPE_INVALID, proto.FailoverType(thrift.FailoverType(999))) +} diff --git a/internal/compatibility/proto/enum.go b/internal/compatibility/proto/enum.go index 96ec249bb..0d543b340 100644 --- 
a/internal/compatibility/proto/enum.go
+++ b/internal/compatibility/proto/enum.go
@@ -408,3 +408,18 @@ func CronOverlapPolicy(t *shared.CronOverlapPolicy) apiv1.CronOverlapPolicy {
 }
 return apiv1.CronOverlapPolicy_CRON_OVERLAP_POLICY_INVALID
 }
+
+func FailoverType(t *shared.FailoverType) apiv1.FailoverType {
+ if t == nil {
+ return apiv1.FailoverType_FAILOVER_TYPE_INVALID
+ }
+ switch *t {
+ case shared.FailoverTypeInvalid:
+ return apiv1.FailoverType_FAILOVER_TYPE_INVALID
+ case shared.FailoverTypeForce:
+ return apiv1.FailoverType_FAILOVER_TYPE_FORCE
+ case shared.FailoverTypeGraceful:
+ return apiv1.FailoverType_FAILOVER_TYPE_GRACEFUL
+ }
+ return apiv1.FailoverType_FAILOVER_TYPE_INVALID
+}
From 1437c972cfb2201dcd1eb30542ecaa89cf216406 Mon Sep 17 00:00:00 2001
From: Zijian Chen
Date: Mon, 27 Oct 2025 19:38:05 -0700
Subject: [PATCH 6/7] test list failover history req

---
 internal/compatibility/api_test.go | 11 +++++++
 internal/compatibility/testdata/common.go | 7 ++++
 internal/compatibility/testdata/service.go | 4 +++
 internal/compatibility/thrift/request.go | 17 +++-------
 internal/compatibility/thrift/types.go | 38 ++++++++++++++++++++++
 5 files changed, 64 insertions(+), 13 deletions(-)

diff --git a/internal/compatibility/api_test.go b/internal/compatibility/api_test.go
index 4cf923353..003ad6bfd 100644
--- a/internal/compatibility/api_test.go
+++ b/internal/compatibility/api_test.go
@@ -2211,6 +2211,17 @@ func TestTimerStartedEventAttributes(t *testing.T) {
 FuzzOptions{},
 )
 }
+func TestListFailoverHistoryRequest(t *testing.T) {
+ for _, item := range []*apiv1.ListFailoverHistoryRequest{nil, {}, &testdata.ListFailoverHistoryRequest} {
+ assert.Equal(t, item, proto.ListFailoverHistoryRequest(thrift.ListFailoverHistoryRequest(item)))
+ }
+
+ runFuzzTest(t,
+ thrift.ListFailoverHistoryRequest,
+ proto.ListFailoverHistoryRequest,
+ FuzzOptions{},
+ )
+}
 func TestUpdateDomainRequest(t *testing.T) {
 for _, item := range []*apiv1.UpdateDomainRequest{nil, {UpdateMask: &gogo.FieldMask{}}, &testdata.UpdateDomainRequest} {
 assert.Equal(t, item, proto.UpdateDomainRequest(thrift.UpdateDomainRequest(item)))
diff --git a/internal/compatibility/testdata/common.go b/internal/compatibility/testdata/common.go
index c2be6e7c6..4e333b697 100644
--- a/internal/compatibility/testdata/common.go
+++ b/internal/compatibility/testdata/common.go
@@ -363,4 +363,11 @@ var (
 Attempt: Attempt,
 OriginalScheduledTime: Timestamp3,
 }
+ PaginationOptions = apiv1.PaginationOptions{
+ PageSize: 5,
+ NextPageToken: []byte(`token`),
+ }
+ ListFailoverHistoryRequestFilters = apiv1.ListFailoverHistoryRequestFilters{
+ DomainId: "id",
+ }
 )
diff --git a/internal/compatibility/testdata/service.go b/internal/compatibility/testdata/service.go
index c0e1b1c39..e0f8a7d28 100644
--- a/internal/compatibility/testdata/service.go
+++ b/internal/compatibility/testdata/service.go
@@ -43,6 +43,10 @@ var (
 VisibilityArchivalStatus: ArchivalStatus,
 VisibilityArchivalUri: VisibilityArchivalURI,
 }
+ ListFailoverHistoryRequest = apiv1.ListFailoverHistoryRequest{
+ Filters: &ListFailoverHistoryRequestFilters,
+ Pagination: &PaginationOptions,
+ }
 DescribeDomainRequest_ID = apiv1.DescribeDomainRequest{
 DescribeBy: &apiv1.DescribeDomainRequest_Id{Id: DomainID},
 }
diff --git a/internal/compatibility/thrift/request.go b/internal/compatibility/thrift/request.go
index 7aeaff416..b0f9a6142 100644
--- a/internal/compatibility/thrift/request.go
+++ b/internal/compatibility/thrift/request.go
@@ -604,21 +604,12 @@ func ListOpenWorkflowExecutionsRequest(r
*apiv1.ListOpenWorkflowExecutionsReques } } -func ActiveClusterSelectionPolicy(t *apiv1.ActiveClusterSelectionPolicy) *shared.ActiveClusterSelectionPolicy { +func ListFailoverHistoryRequest(t *apiv1.ListFailoverHistoryRequest) *shared.ListFailoverHistoryRequest { if t == nil { return nil } - return &shared.ActiveClusterSelectionPolicy{ - ClusterAttribute: ClusterAttribute(t.ClusterAttribute), - } -} - -func ClusterAttribute(t *apiv1.ClusterAttribute) *shared.ClusterAttribute { - if t == nil { - return nil - } - return &shared.ClusterAttribute{ - Scope: &t.Scope, - Name: &t.Name, + return &shared.ListFailoverHistoryRequest{ + Filters: ListFailoverHistoryRequestFilters(t.Filters), + Pagination: PaginationOptions(t.Pagination), } } diff --git a/internal/compatibility/thrift/types.go b/internal/compatibility/thrift/types.go index 4e96b613f..dc574906b 100644 --- a/internal/compatibility/thrift/types.go +++ b/internal/compatibility/thrift/types.go @@ -710,6 +710,25 @@ func ActivityLocalDispatchInfoMap(t map[string]*apiv1.ActivityLocalDispatchInfo) return v } +func ActiveClusterSelectionPolicy(t *apiv1.ActiveClusterSelectionPolicy) *shared.ActiveClusterSelectionPolicy { + if t == nil { + return nil + } + return &shared.ActiveClusterSelectionPolicy{ + ClusterAttribute: ClusterAttribute(t.ClusterAttribute), + } +} + +func ClusterAttribute(t *apiv1.ClusterAttribute) *shared.ClusterAttribute { + if t == nil { + return nil + } + return &shared.ClusterAttribute{ + Scope: &t.Scope, + Name: &t.Name, + } +} + func ActiveClusters(t *apiv1.ActiveClusters) *shared.ActiveClusters { if t == nil { return nil @@ -798,3 +817,22 @@ func FailoverEvent(t *apiv1.FailoverEvent) *shared.FailoverEvent { ClusterFailovers: ClusterFailoverArray(t.ClusterFailovers), } } + +func PaginationOptions(t *apiv1.PaginationOptions) *shared.PaginationOptions { + if t == nil { + return nil + } + return &shared.PaginationOptions{ + PageSize: &t.PageSize, + NextPageToken: t.NextPageToken, + } +} + +func ListFailoverHistoryRequestFilters(t *apiv1.ListFailoverHistoryRequestFilters) *shared.ListFailoverHistoryRequestFilters { + if t == nil { + return nil + } + return &shared.ListFailoverHistoryRequestFilters{ + DomainID: &t.DomainId, + } +} From 6311f6783800bcc230c72ec3f7db45a499cf89de Mon Sep 17 00:00:00 2001 From: Zijian Chen Date: Mon, 27 Oct 2025 19:46:47 -0700 Subject: [PATCH 7/7] test list failover res --- internal/compatibility/api_test.go | 22 ++++++++++++ internal/compatibility/proto/response.go | 10 ++++++ internal/compatibility/proto/types.go | 45 ++++++++++++++++++++++++ 3 files changed, 77 insertions(+) diff --git a/internal/compatibility/api_test.go b/internal/compatibility/api_test.go index 003ad6bfd..476d387d5 100644 --- a/internal/compatibility/api_test.go +++ b/internal/compatibility/api_test.go @@ -2222,6 +2222,28 @@ func TestListFailoverHistoryRequest(t *testing.T) { FuzzOptions{}, ) } +func TestListFailoverHistoryResponse(t *testing.T) { + for _, item := range []*apiv1.ListFailoverHistoryResponse{nil, {}} { + assert.Equal(t, item, proto.ListFailoverHistoryResponse(thrift.ListFailoverHistoryResponse(item))) + } + + runFuzzTest(t, + thrift.ListFailoverHistoryResponse, + proto.ListFailoverHistoryResponse, + FuzzOptions{ + CustomFuncs: []interface{}{ + func(e *apiv1.FailoverType, c fuzz.Continue) { + validValues := []apiv1.FailoverType{ + apiv1.FailoverType_FAILOVER_TYPE_INVALID, + apiv1.FailoverType_FAILOVER_TYPE_FORCE, + apiv1.FailoverType_FAILOVER_TYPE_GRACEFUL, + } + *e = validValues[c.Intn(len(validValues))] + }, + }, 
+ }, + ) +} func TestUpdateDomainRequest(t *testing.T) { for _, item := range []*apiv1.UpdateDomainRequest{nil, {UpdateMask: &gogo.FieldMask{}}, &testdata.UpdateDomainRequest} { assert.Equal(t, item, proto.UpdateDomainRequest(thrift.UpdateDomainRequest(item))) diff --git a/internal/compatibility/proto/response.go b/internal/compatibility/proto/response.go index 9d1a16b0e..ed202230c 100644 --- a/internal/compatibility/proto/response.go +++ b/internal/compatibility/proto/response.go @@ -349,3 +349,13 @@ func UpdateDomainResponse(t *shared.UpdateDomainResponse) *apiv1.UpdateDomainRes Domain: domain, } } + +func ListFailoverHistoryResponse(t *shared.ListFailoverHistoryResponse) *apiv1.ListFailoverHistoryResponse { + if t == nil { + return nil + } + return &apiv1.ListFailoverHistoryResponse{ + FailoverEvents: FailoverEventArray(t.FailoverEvents), + NextPageToken: t.NextPageToken, + } +} diff --git a/internal/compatibility/proto/types.go b/internal/compatibility/proto/types.go index 9a8b21d3b..9fa71bd7a 100644 --- a/internal/compatibility/proto/types.go +++ b/internal/compatibility/proto/types.go @@ -761,6 +761,51 @@ func ActiveClusterInfo(t *shared.ActiveClusterInfo) *apiv1.ActiveClusterInfo { } } +func ClusterFailoverArray(t []*shared.ClusterFailover) []*apiv1.ClusterFailover { + if t == nil { + return nil + } + v := make([]*apiv1.ClusterFailover, len(t)) + for i := range t { + v[i] = ClusterFailover(t[i]) + } + return v +} + +func ClusterFailover(t *shared.ClusterFailover) *apiv1.ClusterFailover { + if t == nil { + return nil + } + return &apiv1.ClusterFailover{ + FromCluster: ActiveClusterInfo(t.FromCluster), + ToCluster: ActiveClusterInfo(t.ToCluster), + ClusterAttribute: ClusterAttribute(t.ClusterAttribute), + } +} + +func FailoverEventArray(t []*shared.FailoverEvent) []*apiv1.FailoverEvent { + if t == nil { + return nil + } + v := make([]*apiv1.FailoverEvent, len(t)) + for i := range t { + v[i] = FailoverEvent(t[i]) + } + return v +} + +func FailoverEvent(t *shared.FailoverEvent) *apiv1.FailoverEvent { + if t == nil { + return nil + } + return &apiv1.FailoverEvent{ + Id: t.GetID(), + CreatedTime: unixNanoToTime(t.CreatedTime), + FailoverType: FailoverType(t.FailoverType), + ClusterFailovers: ClusterFailoverArray(t.ClusterFailovers), + } +} + func PaginationOptions(t *shared.PaginationOptions) *apiv1.PaginationOptions { if t == nil { return nil