diff --git a/.gen/go/replicator/replicator.go b/.gen/go/replicator/replicator.go
index c6def29eaaa..063f5d098cf 100644
--- a/.gen/go/replicator/replicator.go
+++ b/.gen/go/replicator/replicator.go
@@ -2139,6 +2139,7 @@ type HistoryMetadataTaskAttributes struct {
 	RunId        *string `json:"runId,omitempty"`
 	FirstEventId *int64  `json:"firstEventId,omitempty"`
 	NextEventId  *int64  `json:"nextEventId,omitempty"`
+	Version      *int64  `json:"version,omitempty"`
 }
 
 type _List_String_ValueList []string
@@ -2184,7 +2185,7 @@ func (_List_String_ValueList) Close() {}
 // }
 func (v *HistoryMetadataTaskAttributes) ToWire() (wire.Value, error) {
 	var (
-		fields [6]wire.Field
+		fields [7]wire.Field
 		i      int = 0
 		w      wire.Value
 		err    error
@@ -2238,6 +2239,14 @@ func (v *HistoryMetadataTaskAttributes) ToWire() (wire.Value, error) {
 		fields[i] = wire.Field{ID: 50, Value: w}
 		i++
 	}
+	if v.Version != nil {
+		w, err = wire.NewValueI64(*(v.Version)), error(nil)
+		if err != nil {
+			return w, err
+		}
+		fields[i] = wire.Field{ID: 60, Value: w}
+		i++
+	}
 
 	return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil
 }
@@ -2339,6 +2348,16 @@ func (v *HistoryMetadataTaskAttributes) FromWire(w wire.Value) error {
 				return err
 			}
 
+			}
+		case 60:
+			if field.Value.Type() == wire.TI64 {
+				var x int64
+				x, err = field.Value.GetI64(), error(nil)
+				v.Version = &x
+				if err != nil {
+					return err
+				}
+			}
 		}
 	}
 
@@ -2353,7 +2372,7 @@ func (v *HistoryMetadataTaskAttributes) String() string {
 		return ""
 	}
 
-	var fields [6]string
+	var fields [7]string
 	i := 0
 	if v.TargetClusters != nil {
 		fields[i] = fmt.Sprintf("TargetClusters: %v", v.TargetClusters)
 		i++
 	}
@@ -2379,6 +2398,10 @@
 		fields[i] = fmt.Sprintf("NextEventId: %v", *(v.NextEventId))
 		i++
 	}
+	if v.Version != nil {
+		fields[i] = fmt.Sprintf("Version: %v", *(v.Version))
+		i++
+	}
 
 	return fmt.Sprintf("HistoryMetadataTaskAttributes{%v}", strings.Join(fields[:i], ", "))
 }
@@ -2426,6 +2449,9 @@ func (v *HistoryMetadataTaskAttributes) Equals(rhs *HistoryMetadataTaskAttribute
 	if !_I64_EqualsPtr(v.NextEventId, rhs.NextEventId) {
 		return false
 	}
+	if !_I64_EqualsPtr(v.Version, rhs.Version) {
+		return false
+	}
 
 	return true
 }
@@ -2465,6 +2491,9 @@ func (v *HistoryMetadataTaskAttributes) MarshalLogObject(enc zapcore.ObjectEncod
 	if v.NextEventId != nil {
 		enc.AddInt64("nextEventId", *v.NextEventId)
 	}
+	if v.Version != nil {
+		enc.AddInt64("version", *v.Version)
+	}
 
 	return err
 }
@@ -2558,6 +2587,21 @@ func (v *HistoryMetadataTaskAttributes) IsSetNextEventId() bool {
 	return v != nil && v.NextEventId != nil
 }
 
+// GetVersion returns the value of Version if it is set or its
+// zero value if it is unset.
+func (v *HistoryMetadataTaskAttributes) GetVersion() (o int64) {
+	if v != nil && v.Version != nil {
+		return *v.Version
+	}
+
+	return
+}
+
+// IsSetVersion returns true if Version is not nil.
+func (v *HistoryMetadataTaskAttributes) IsSetVersion() bool { + return v != nil && v.Version != nil +} + type HistoryTaskAttributes struct { TargetClusters []string `json:"targetClusters,omitempty"` DomainId *string `json:"domainId,omitempty"` @@ -8053,11 +8097,11 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "replicator", Package: "github.com/uber/cadence/.gen/go/replicator", FilePath: "replicator.thrift", - SHA1: "bbc99019851d06ddce17eeff72efa392eae738b1", + SHA1: "bcdcb0e87f13752b5940b4085de361ccc43210b0", Includes: []*thriftreflect.ThriftModule{ shared.ThriftModule, }, Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence.replicator\n\ninclude \"shared.thrift\"\n\nenum ReplicationTaskType {\n Domain\n History\n SyncShardStatus\n SyncActivity\n HistoryMetadata\n HistoryV2\n}\n\nenum DomainOperation {\n Create\n Update\n}\n\nstruct DomainTaskAttributes {\n 05: optional DomainOperation domainOperation\n 10: optional string id\n 20: optional shared.DomainInfo info\n 30: optional shared.DomainConfiguration config\n 40: optional shared.DomainReplicationConfiguration replicationConfig\n 50: optional i64 (js.type = \"Long\") configVersion\n 60: optional i64 (js.type = \"Long\") failoverVersion\n}\n\nstruct HistoryTaskAttributes {\n 05: optional list targetClusters\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n 70: optional map replicationInfo\n 80: optional shared.History history\n 90: optional shared.History newRunHistory\n 100: optional i32 eventStoreVersion\n 110: optional i32 newRunEventStoreVersion\n 120: optional bool resetWorkflow\n 130: optional bool newRunNDC\n}\n\nstruct HistoryMetadataTaskAttributes {\n 05: optional list targetClusters\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n}\n\nstruct SyncShardStatusTaskAttributes {\n 10: optional string sourceCluster\n 20: optional i64 (js.type = \"Long\") shardId\n 30: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct SyncActivityTaskAttributes {\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: 
optional i64 (js.type = \"Long\") version\n 50: optional i64 (js.type = \"Long\") scheduledId\n 60: optional i64 (js.type = \"Long\") scheduledTime\n 70: optional i64 (js.type = \"Long\") startedId\n 80: optional i64 (js.type = \"Long\") startedTime\n 90: optional i64 (js.type = \"Long\") lastHeartbeatTime\n 100: optional binary details\n 110: optional i32 attempt\n 120: optional string lastFailureReason\n 130: optional string lastWorkerIdentity\n 140: optional binary lastFailureDetails\n 150: optional shared.VersionHistory versionHistory\n}\n\nstruct HistoryTaskV2Attributes {\n 05: optional i64 (js.type = \"Long\") taskId\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional list versionHistoryItems\n 50: optional shared.DataBlob events\n // new run events does not need version history since there is no prior events\n 70: optional shared.DataBlob newRunEvents\n}\n\nstruct ReplicationTask {\n 10: optional ReplicationTaskType taskType\n 11: optional i64 (js.type = \"Long\") sourceTaskId\n 20: optional DomainTaskAttributes domainTaskAttributes\n 30: optional HistoryTaskAttributes historyTaskAttributes // TODO deprecate once NDC migration is done\n 40: optional SyncShardStatusTaskAttributes syncShardStatusTaskAttributes\n 50: optional SyncActivityTaskAttributes syncActivityTaskAttributes\n 60: optional HistoryMetadataTaskAttributes historyMetadataTaskAttributes // TODO deprecate once kafka deprecation is done\n 70: optional HistoryTaskV2Attributes historyTaskV2Attributes\n}\n\nstruct ReplicationToken {\n 10: optional i32 shardID\n // lastRetrivedMessageId is where the next fetch should begin with\n 20: optional i64 (js.type = \"Long\") lastRetrievedMessageId\n // lastProcessedMessageId is the last messageId that is processed on the passive side.\n // This can be different than lastRetrievedMessageId if passive side supports prefetching messages.\n 30: optional i64 (js.type = \"Long\") lastProcessedMessageId\n}\n\nstruct SyncShardStatus {\n 10: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct ReplicationMessages {\n 10: optional list replicationTasks\n // This can be different than the last taskId in the above list, because sender can decide to skip tasks (e.g. 
for completed workflows).\n 20: optional i64 (js.type = \"Long\") lastRetrievedMessageId\n 30: optional bool hasMore // Hint for flow control\n 40: optional SyncShardStatus syncShardStatus\n}\n\nstruct ReplicationTaskInfo {\n 10: optional string domainID\n 20: optional string workflowID\n 30: optional string runID\n 40: optional i16 taskType\n 50: optional i64 (js.type = \"Long\") taskID\n 60: optional i64 (js.type = \"Long\") version\n 70: optional i64 (js.type = \"Long\") firstEventID\n 80: optional i64 (js.type = \"Long\") nextEventID\n 90: optional i64 (js.type = \"Long\") scheduledID\n}\n\nstruct GetReplicationMessagesRequest {\n 10: optional list tokens\n 20: optional string clusterName\n}\n\nstruct GetReplicationMessagesResponse {\n 10: optional map messagesByShard\n}\n\nstruct GetDomainReplicationMessagesRequest {\n // lastRetrievedMessageId is where the next fetch should begin with\n 10: optional i64 (js.type = \"Long\") lastRetrievedMessageId\n // lastProcessedMessageId is the last messageId that is processed on the passive side.\n // This can be different than lastRetrievedMessageId if passive side supports prefetching messages.\n 20: optional i64 (js.type = \"Long\") lastProcessedMessageId\n // clusterName is the name of the pulling cluster\n 30: optional string clusterName\n}\n\nstruct GetDomainReplicationMessagesResponse {\n 10: optional ReplicationMessages messages\n}\n\nstruct GetDLQReplicationMessagesRequest {\n 10: optional list taskInfos\n}\n\nstruct GetDLQReplicationMessagesResponse {\n 10: optional list replicationTasks\n}\n\nenum DLQType {\n Replication,\n Domain,\n}\n\nstruct ReadDLQMessagesRequest{\n 10: optional DLQType type\n 20: optional i32 shardID\n 30: optional string sourceCluster\n 40: optional i64 (js.type = \"Long\") inclusiveEndMessageID\n 50: optional i32 maximumPageSize\n 60: optional binary nextPageToken\n}\n\nstruct ReadDLQMessagesResponse{\n 10: optional DLQType type\n 20: optional list replicationTasks\n 30: optional binary nextPageToken\n}\n\nstruct PurgeDLQMessagesRequest{\n 10: optional DLQType type\n 20: optional i32 shardID\n 30: optional string sourceCluster\n 40: optional i64 (js.type = \"Long\") inclusiveEndMessageID\n}\n\nstruct MergeDLQMessagesRequest{\n 10: optional DLQType type\n 20: optional i32 shardID\n 30: optional string sourceCluster\n 40: optional i64 (js.type = \"Long\") inclusiveEndMessageID\n 50: optional i32 maximumPageSize\n 60: optional binary nextPageToken\n}\n\nstruct MergeDLQMessagesResponse{\n 10: optional binary nextPageToken\n}\n" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence.replicator\n\ninclude \"shared.thrift\"\n\nenum ReplicationTaskType {\n Domain\n History\n SyncShardStatus\n SyncActivity\n HistoryMetadata\n HistoryV2\n}\n\nenum DomainOperation {\n Create\n Update\n}\n\nstruct DomainTaskAttributes {\n 05: optional DomainOperation domainOperation\n 10: optional string id\n 20: optional shared.DomainInfo info\n 30: optional shared.DomainConfiguration config\n 40: optional shared.DomainReplicationConfiguration replicationConfig\n 50: optional i64 (js.type = \"Long\") configVersion\n 60: optional i64 (js.type = \"Long\") failoverVersion\n}\n\nstruct HistoryTaskAttributes {\n 05: optional list targetClusters\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n 70: optional map replicationInfo\n 80: optional shared.History history\n 90: optional shared.History newRunHistory\n 100: optional i32 eventStoreVersion\n 110: optional i32 newRunEventStoreVersion\n 120: optional bool resetWorkflow\n 130: optional bool newRunNDC\n}\n\nstruct HistoryMetadataTaskAttributes {\n 05: optional list targetClusters\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n}\n\nstruct SyncShardStatusTaskAttributes {\n 10: optional string sourceCluster\n 20: optional i64 (js.type = \"Long\") shardId\n 30: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct SyncActivityTaskAttributes {\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") version\n 50: optional i64 (js.type = \"Long\") scheduledId\n 60: optional i64 (js.type = \"Long\") scheduledTime\n 70: optional i64 (js.type = \"Long\") startedId\n 80: optional i64 (js.type = \"Long\") startedTime\n 90: optional i64 (js.type = \"Long\") lastHeartbeatTime\n 100: optional binary details\n 110: optional i32 attempt\n 120: optional string lastFailureReason\n 130: optional string lastWorkerIdentity\n 140: optional binary lastFailureDetails\n 150: optional shared.VersionHistory versionHistory\n}\n\nstruct HistoryTaskV2Attributes {\n 05: optional i64 (js.type = \"Long\") taskId\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional list versionHistoryItems\n 50: optional shared.DataBlob events\n // new run events does not need version history since there is no prior events\n 70: optional shared.DataBlob newRunEvents\n}\n\nstruct ReplicationTask {\n 10: optional ReplicationTaskType taskType\n 11: optional i64 (js.type = \"Long\") sourceTaskId\n 20: optional DomainTaskAttributes domainTaskAttributes\n 30: optional HistoryTaskAttributes historyTaskAttributes // TODO deprecate once NDC migration is done\n 40: optional SyncShardStatusTaskAttributes syncShardStatusTaskAttributes\n 50: optional SyncActivityTaskAttributes syncActivityTaskAttributes\n 60: optional HistoryMetadataTaskAttributes historyMetadataTaskAttributes // TODO 
deprecate once kafka deprecation is done\n 70: optional HistoryTaskV2Attributes historyTaskV2Attributes\n}\n\nstruct ReplicationToken {\n 10: optional i32 shardID\n // lastRetrivedMessageId is where the next fetch should begin with\n 20: optional i64 (js.type = \"Long\") lastRetrievedMessageId\n // lastProcessedMessageId is the last messageId that is processed on the passive side.\n // This can be different than lastRetrievedMessageId if passive side supports prefetching messages.\n 30: optional i64 (js.type = \"Long\") lastProcessedMessageId\n}\n\nstruct SyncShardStatus {\n 10: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct ReplicationMessages {\n 10: optional list replicationTasks\n // This can be different than the last taskId in the above list, because sender can decide to skip tasks (e.g. for completed workflows).\n 20: optional i64 (js.type = \"Long\") lastRetrievedMessageId\n 30: optional bool hasMore // Hint for flow control\n 40: optional SyncShardStatus syncShardStatus\n}\n\nstruct ReplicationTaskInfo {\n 10: optional string domainID\n 20: optional string workflowID\n 30: optional string runID\n 40: optional i16 taskType\n 50: optional i64 (js.type = \"Long\") taskID\n 60: optional i64 (js.type = \"Long\") version\n 70: optional i64 (js.type = \"Long\") firstEventID\n 80: optional i64 (js.type = \"Long\") nextEventID\n 90: optional i64 (js.type = \"Long\") scheduledID\n}\n\nstruct GetReplicationMessagesRequest {\n 10: optional list tokens\n 20: optional string clusterName\n}\n\nstruct GetReplicationMessagesResponse {\n 10: optional map messagesByShard\n}\n\nstruct GetDomainReplicationMessagesRequest {\n // lastRetrievedMessageId is where the next fetch should begin with\n 10: optional i64 (js.type = \"Long\") lastRetrievedMessageId\n // lastProcessedMessageId is the last messageId that is processed on the passive side.\n // This can be different than lastRetrievedMessageId if passive side supports prefetching messages.\n 20: optional i64 (js.type = \"Long\") lastProcessedMessageId\n // clusterName is the name of the pulling cluster\n 30: optional string clusterName\n}\n\nstruct GetDomainReplicationMessagesResponse {\n 10: optional ReplicationMessages messages\n}\n\nstruct GetDLQReplicationMessagesRequest {\n 10: optional list taskInfos\n}\n\nstruct GetDLQReplicationMessagesResponse {\n 10: optional list replicationTasks\n}\n\nenum DLQType {\n Replication,\n Domain,\n}\n\nstruct ReadDLQMessagesRequest{\n 10: optional DLQType type\n 20: optional i32 shardID\n 30: optional string sourceCluster\n 40: optional i64 (js.type = \"Long\") inclusiveEndMessageID\n 50: optional i32 maximumPageSize\n 60: optional binary nextPageToken\n}\n\nstruct ReadDLQMessagesResponse{\n 10: optional DLQType type\n 20: optional list replicationTasks\n 30: optional binary nextPageToken\n}\n\nstruct PurgeDLQMessagesRequest{\n 10: optional DLQType type\n 20: optional i32 shardID\n 30: optional string sourceCluster\n 40: optional i64 (js.type = \"Long\") inclusiveEndMessageID\n}\n\nstruct MergeDLQMessagesRequest{\n 10: optional DLQType type\n 20: optional i32 shardID\n 30: optional string sourceCluster\n 40: optional i64 (js.type = \"Long\") inclusiveEndMessageID\n 50: optional i32 maximumPageSize\n 60: optional binary nextPageToken\n}\n\nstruct MergeDLQMessagesResponse{\n 10: optional binary nextPageToken\n}\n" diff --git a/.gen/go/shared/shared.go b/.gen/go/shared/shared.go index 0e09bf5f70e..7ab2715728f 100644 --- a/.gen/go/shared/shared.go +++ b/.gen/go/shared/shared.go @@ -58612,6 +58612,7 @@ 
 const (
 	WorkflowIdReusePolicyAllowDuplicateFailedOnly WorkflowIdReusePolicy = 0
 	WorkflowIdReusePolicyAllowDuplicate           WorkflowIdReusePolicy = 1
 	WorkflowIdReusePolicyRejectDuplicate          WorkflowIdReusePolicy = 2
+	WorkflowIdReusePolicyTerminateIfRunning       WorkflowIdReusePolicy = 3
 )
 
 // WorkflowIdReusePolicy_Values returns all recognized values of WorkflowIdReusePolicy.
@@ -58620,6 +58621,7 @@ func WorkflowIdReusePolicy_Values() []WorkflowIdReusePolicy {
 		WorkflowIdReusePolicyAllowDuplicateFailedOnly,
 		WorkflowIdReusePolicyAllowDuplicate,
 		WorkflowIdReusePolicyRejectDuplicate,
+		WorkflowIdReusePolicyTerminateIfRunning,
 	}
 }
 
@@ -58639,6 +58641,9 @@ func (v *WorkflowIdReusePolicy) UnmarshalText(value []byte) error {
 	case "RejectDuplicate":
 		*v = WorkflowIdReusePolicyRejectDuplicate
 		return nil
+	case "TerminateIfRunning":
+		*v = WorkflowIdReusePolicyTerminateIfRunning
+		return nil
 	default:
 		val, err := strconv.ParseInt(s, 10, 32)
 		if err != nil {
@@ -58663,6 +58668,8 @@ func (v WorkflowIdReusePolicy) MarshalText() ([]byte, error) {
 		return []byte("AllowDuplicate"), nil
 	case 2:
 		return []byte("RejectDuplicate"), nil
+	case 3:
+		return []byte("TerminateIfRunning"), nil
 	}
 	return []byte(strconv.FormatInt(int64(v), 10)), nil
 }
@@ -58680,6 +58687,8 @@ func (v WorkflowIdReusePolicy) MarshalLogObject(enc zapcore.ObjectEncoder) error
 		enc.AddString("name", "AllowDuplicate")
 	case 2:
 		enc.AddString("name", "RejectDuplicate")
+	case 3:
+		enc.AddString("name", "TerminateIfRunning")
 	}
 	return nil
 }
@@ -58726,6 +58735,8 @@ func (v WorkflowIdReusePolicy) String() string {
 		return "AllowDuplicate"
 	case 2:
 		return "RejectDuplicate"
+	case 3:
+		return "TerminateIfRunning"
 	}
 	return fmt.Sprintf("WorkflowIdReusePolicy(%d)", w)
 }
@@ -58750,6 +58761,8 @@ func (v WorkflowIdReusePolicy) MarshalJSON() ([]byte, error) {
 		return ([]byte)("\"AllowDuplicate\""), nil
 	case 2:
 		return ([]byte)("\"RejectDuplicate\""), nil
+	case 3:
+		return ([]byte)("\"TerminateIfRunning\""), nil
 	}
 	return ([]byte)(strconv.FormatInt(int64(v), 10)), nil
 }
@@ -59491,8 +59504,8 @@ var ThriftModule = &thriftreflect.ThriftModule{
 	Name:     "shared",
 	Package:  "github.com/uber/cadence/.gen/go/shared",
 	FilePath: "shared.thrift",
-	SHA1:     "7c8bf71c40c0e97e2db7d55e4ebb5fc84d5e6a7c",
+	SHA1:     "f9bb214476e32cb6d29e71e44a423bd0e27a3815",
 	Raw:      rawIDL,
 }
 
-const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence\n\nexception BadRequestError {\n 1: required string message\n}\n\nexception InternalServiceError {\n 1: required string message\n}\n\nexception InternalDataInconsistencyError {\n 1: required string message\n}\n\nexception DomainAlreadyExistsError {\n 1: required string message\n}\n\nexception WorkflowExecutionAlreadyStartedError {\n 10: optional string message\n 20: optional string startRequestId\n 30: optional string runId\n}\n\nexception EntityNotExistsError {\n 1: required string message\n 2: optional string currentCluster\n 3: optional string activeCluster\n}\n\nexception ServiceBusyError {\n 1: required string message\n}\n\nexception CancellationAlreadyRequestedError {\n 1: required string message\n}\n\nexception QueryFailedError {\n 1: required string message\n}\n\nexception DomainNotActiveError {\n 1: required string message\n 2: required string domainName\n 3: required string currentCluster\n 4: required string activeCluster\n}\n\nexception LimitExceededError {\n 1: required string message\n}\n\nexception AccessDeniedError {\n 1: required string message\n}\n\nexception RetryTaskError {\n 1: required string message\n 2: optional string domainId\n 3: optional string workflowId\n 4: optional string runId\n 5: optional i64 (js.type = \"Long\") nextEventId\n}\n\nexception RetryTaskV2Error {\n 1: required string message\n 2: optional string domainId\n 3: optional string workflowId\n 4: optional string runId\n 5: optional i64 (js.type = \"Long\") startEventId\n 6: optional i64 (js.type = \"Long\") startEventVersion\n 7: optional i64 (js.type = \"Long\") endEventId\n 8: optional i64 (js.type = \"Long\") endEventVersion\n}\n\nexception ClientVersionNotSupportedError {\n 1: required string featureVersion\n 2: required string clientImpl\n 3: required string supportedVersions\n}\n\nexception CurrentBranchChangedError {\n 10: required string message\n 20: required binary currentBranchToken\n}\n\nenum WorkflowIdReusePolicy {\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running, and the last execution close state is in\n * [terminated, cancelled, timeouted, failed].\n */\n AllowDuplicateFailedOnly,\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running.\n */\n AllowDuplicate,\n /*\n * do not allow start a workflow execution using the same workflow ID at all\n */\n RejectDuplicate,\n}\n\nenum DomainStatus {\n REGISTERED,\n DEPRECATED,\n DELETED,\n}\n\nenum TimeoutType {\n START_TO_CLOSE,\n SCHEDULE_TO_START,\n SCHEDULE_TO_CLOSE,\n HEARTBEAT,\n}\n\nenum ParentClosePolicy {\n\tABANDON,\n\tREQUEST_CANCEL,\n\tTERMINATE,\n}\n\n\n// whenever this list of decision is changed\n// do change the mutableStateBuilder.go\n// function shouldBufferEvent\n// to make sure wo do the correct event ordering\nenum DecisionType {\n ScheduleActivityTask,\n RequestCancelActivityTask,\n StartTimer,\n CompleteWorkflowExecution,\n FailWorkflowExecution,\n CancelTimer,\n CancelWorkflowExecution,\n RequestCancelExternalWorkflowExecution,\n RecordMarker,\n ContinueAsNewWorkflowExecution,\n StartChildWorkflowExecution,\n SignalExternalWorkflowExecution,\n UpsertWorkflowSearchAttributes,\n}\n\nenum EventType {\n 
WorkflowExecutionStarted,\n WorkflowExecutionCompleted,\n WorkflowExecutionFailed,\n WorkflowExecutionTimedOut,\n DecisionTaskScheduled,\n DecisionTaskStarted,\n DecisionTaskCompleted,\n DecisionTaskTimedOut\n DecisionTaskFailed,\n ActivityTaskScheduled,\n ActivityTaskStarted,\n ActivityTaskCompleted,\n ActivityTaskFailed,\n ActivityTaskTimedOut,\n ActivityTaskCancelRequested,\n RequestCancelActivityTaskFailed,\n ActivityTaskCanceled,\n TimerStarted,\n TimerFired,\n CancelTimerFailed,\n TimerCanceled,\n WorkflowExecutionCancelRequested,\n WorkflowExecutionCanceled,\n RequestCancelExternalWorkflowExecutionInitiated,\n RequestCancelExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionCancelRequested,\n MarkerRecorded,\n WorkflowExecutionSignaled,\n WorkflowExecutionTerminated,\n WorkflowExecutionContinuedAsNew,\n StartChildWorkflowExecutionInitiated,\n StartChildWorkflowExecutionFailed,\n ChildWorkflowExecutionStarted,\n ChildWorkflowExecutionCompleted,\n ChildWorkflowExecutionFailed,\n ChildWorkflowExecutionCanceled,\n ChildWorkflowExecutionTimedOut,\n ChildWorkflowExecutionTerminated,\n SignalExternalWorkflowExecutionInitiated,\n SignalExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionSignaled,\n UpsertWorkflowSearchAttributes,\n}\n\nenum DecisionTaskFailedCause {\n UNHANDLED_DECISION,\n BAD_SCHEDULE_ACTIVITY_ATTRIBUTES,\n BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES,\n BAD_START_TIMER_ATTRIBUTES,\n BAD_CANCEL_TIMER_ATTRIBUTES,\n BAD_RECORD_MARKER_ATTRIBUTES,\n BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CONTINUE_AS_NEW_ATTRIBUTES,\n START_TIMER_DUPLICATE_ID,\n RESET_STICKY_TASKLIST,\n WORKFLOW_WORKER_UNHANDLED_FAILURE,\n BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_START_CHILD_EXECUTION_ATTRIBUTES,\n FORCE_CLOSE_DECISION,\n FAILOVER_CLOSE_DECISION,\n BAD_SIGNAL_INPUT_SIZE,\n RESET_WORKFLOW,\n BAD_BINARY,\n SCHEDULE_ACTIVITY_DUPLICATE_ID,\n BAD_SEARCH_ATTRIBUTES,\n}\n\nenum CancelExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n}\n\nenum SignalExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n}\n\nenum ChildWorkflowExecutionFailedCause {\n WORKFLOW_ALREADY_RUNNING,\n}\n\n// TODO: when migrating to gRPC, add a running / none status,\n// currently, customer is using null / nil as an indication\n// that workflow is still running\nenum WorkflowExecutionCloseStatus {\n COMPLETED,\n FAILED,\n CANCELED,\n TERMINATED,\n CONTINUED_AS_NEW,\n TIMED_OUT,\n}\n\nenum QueryTaskCompletedType {\n COMPLETED,\n FAILED,\n}\n\nenum QueryResultType {\n ANSWERED,\n FAILED,\n}\n\nenum PendingActivityState {\n SCHEDULED,\n STARTED,\n CANCEL_REQUESTED,\n}\n\nenum HistoryEventFilterType {\n ALL_EVENT,\n CLOSE_EVENT,\n}\n\nenum TaskListKind {\n NORMAL,\n STICKY,\n}\n\nenum ArchivalStatus {\n DISABLED,\n ENABLED,\n}\n\nenum IndexedValueType {\n STRING,\n KEYWORD,\n INT,\n DOUBLE,\n BOOL,\n DATETIME,\n}\n\nstruct Header {\n 10: optional map fields\n}\n\nstruct WorkflowType {\n 10: optional string name\n}\n\nstruct ActivityType {\n 10: optional string name\n}\n\nstruct TaskList {\n 10: optional string name\n 20: optional TaskListKind kind\n}\n\nenum EncodingType {\n ThriftRW,\n JSON,\n}\n\nenum QueryRejectCondition {\n // NOT_OPEN indicates that query should be rejected if workflow is not open\n NOT_OPEN\n // NOT_COMPLETED_CLEANLY indicates that query should be rejected if workflow did not complete 
cleanly\n NOT_COMPLETED_CLEANLY\n}\n\nenum QueryConsistencyLevel {\n // EVENTUAL indicates that query should be eventually consistent\n EVENTUAL\n // STRONG indicates that any events that came before query should be reflected in workflow state before running query\n STRONG\n}\n\nstruct DataBlob {\n 10: optional EncodingType EncodingType\n 20: optional binary Data\n}\n\nstruct ReplicationInfo {\n 10: optional i64 (js.type = \"Long\") version\n 20: optional i64 (js.type = \"Long\") lastEventId\n}\n\nstruct TaskListMetadata {\n 10: optional double maxTasksPerSecond\n}\n\nstruct WorkflowExecution {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct Memo {\n 10: optional map fields\n}\n\nstruct SearchAttributes {\n 10: optional map indexedFields\n}\n\nstruct WorkerVersionInfo {\n 10: optional string impl\n 20: optional string featureVersion\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional WorkflowExecution execution\n 20: optional WorkflowType type\n 30: optional i64 (js.type = \"Long\") startTime\n 40: optional i64 (js.type = \"Long\") closeTime\n 50: optional WorkflowExecutionCloseStatus closeStatus\n 60: optional i64 (js.type = \"Long\") historyLength\n 70: optional string parentDomainId\n 80: optional WorkflowExecution parentExecution\n 90: optional i64 (js.type = \"Long\") executionTime\n 100: optional Memo memo\n 101: optional SearchAttributes searchAttributes\n 110: optional ResetPoints autoResetPoints\n 120: optional string taskList\n}\n\nstruct WorkflowExecutionConfiguration {\n 10: optional TaskList taskList\n 20: optional i32 executionStartToCloseTimeoutSeconds\n 30: optional i32 taskStartToCloseTimeoutSeconds\n// 40: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n}\n\nstruct TransientDecisionInfo {\n 10: optional HistoryEvent scheduledEvent\n 20: optional HistoryEvent startedEvent\n}\n\nstruct ScheduleActivityTaskDecisionAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional Header header\n}\n\nstruct RequestCancelActivityTaskDecisionAttributes {\n 10: optional string activityId\n}\n\nstruct StartTimerDecisionAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n}\n\nstruct CompleteWorkflowExecutionDecisionAttributes {\n 10: optional binary result\n}\n\nstruct FailWorkflowExecutionDecisionAttributes {\n 10: optional string reason\n 20: optional binary details\n}\n\nstruct CancelTimerDecisionAttributes {\n 10: optional string timerId\n}\n\nstruct CancelWorkflowExecutionDecisionAttributes {\n 10: optional binary details\n}\n\nstruct RequestCancelExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional string runId\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional string signalName\n 40: optional binary input\n 50: optional binary control\n 60: optional bool childWorkflowOnly\n}\n\nstruct UpsertWorkflowSearchAttributesDecisionAttributes {\n 10: optional SearchAttributes 
searchAttributes\n}\n\nstruct RecordMarkerDecisionAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional Header header\n}\n\nstruct ContinueAsNewWorkflowExecutionDecisionAttributes {\n 10: optional WorkflowType workflowType\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n 60: optional i32 backoffStartIntervalInSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional ContinueAsNewInitiator initiator\n 90: optional string failureReason\n 100: optional binary failureDetails\n 110: optional binary lastCompletionResult\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional RetryPolicy retryPolicy\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct Decision {\n 10: optional DecisionType decisionType\n 20: optional ScheduleActivityTaskDecisionAttributes scheduleActivityTaskDecisionAttributes\n 25: optional StartTimerDecisionAttributes startTimerDecisionAttributes\n 30: optional CompleteWorkflowExecutionDecisionAttributes completeWorkflowExecutionDecisionAttributes\n 35: optional FailWorkflowExecutionDecisionAttributes failWorkflowExecutionDecisionAttributes\n 40: optional RequestCancelActivityTaskDecisionAttributes requestCancelActivityTaskDecisionAttributes\n 50: optional CancelTimerDecisionAttributes cancelTimerDecisionAttributes\n 60: optional CancelWorkflowExecutionDecisionAttributes cancelWorkflowExecutionDecisionAttributes\n 70: optional RequestCancelExternalWorkflowExecutionDecisionAttributes requestCancelExternalWorkflowExecutionDecisionAttributes\n 80: optional RecordMarkerDecisionAttributes recordMarkerDecisionAttributes\n 90: optional ContinueAsNewWorkflowExecutionDecisionAttributes continueAsNewWorkflowExecutionDecisionAttributes\n 100: optional StartChildWorkflowExecutionDecisionAttributes startChildWorkflowExecutionDecisionAttributes\n 110: optional SignalExternalWorkflowExecutionDecisionAttributes signalExternalWorkflowExecutionDecisionAttributes\n 120: optional UpsertWorkflowSearchAttributesDecisionAttributes upsertWorkflowSearchAttributesDecisionAttributes\n}\n\nstruct WorkflowExecutionStartedEventAttributes {\n 10: optional WorkflowType workflowType\n 12: optional string parentWorkflowDomain\n 14: optional WorkflowExecution parentWorkflowExecution\n 16: optional i64 (js.type = \"Long\") parentInitiatedEventId\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n// 52: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 54: optional string continuedExecutionRunId\n 55: optional ContinueAsNewInitiator initiator\n 56: optional string 
continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 59: optional string originalExecutionRunId // This is the runID when the WorkflowExecutionStarted event is written\n 60: optional string identity\n 61: optional string firstExecutionRunId // This is the very first runID along the chain of ContinueAsNew and Reset.\n 70: optional RetryPolicy retryPolicy\n 80: optional i32 attempt\n 90: optional i64 (js.type = \"Long\") expirationTimestamp\n 100: optional string cronSchedule\n 110: optional i32 firstDecisionTaskBackoffSeconds\n 120: optional Memo memo\n 121: optional SearchAttributes searchAttributes\n 130: optional ResetPoints prevAutoResetPoints\n 140: optional Header header\n}\n\nstruct ResetPoints{\n 10: optional list points\n}\n\n struct ResetPointInfo{\n 10: optional string binaryChecksum\n 20: optional string runId\n 30: optional i64 firstDecisionCompletedId\n 40: optional i64 (js.type = \"Long\") createdTimeNano\n 50: optional i64 (js.type = \"Long\") expiringTimeNano //the time that the run is deleted due to retention\n 60: optional bool resettable // false if the resset point has pending childWFs/reqCancels/signalExternals.\n}\n\nstruct WorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n}\n\nenum ContinueAsNewInitiator {\n Decider,\n RetryPolicy,\n CronSchedule,\n}\n\nstruct WorkflowExecutionContinuedAsNewEventAttributes {\n 10: optional string newExecutionRunId\n 20: optional WorkflowType workflowType\n 30: optional TaskList taskList\n 40: optional binary input\n 50: optional i32 executionStartToCloseTimeoutSeconds\n 60: optional i32 taskStartToCloseTimeoutSeconds\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 80: optional i32 backoffStartIntervalInSeconds\n 90: optional ContinueAsNewInitiator initiator\n 100: optional string failureReason\n 110: optional binary failureDetails\n 120: optional binary lastCompletionResult\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct DecisionTaskScheduledEventAttributes {\n 10: optional TaskList taskList\n 20: optional i32 startToCloseTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") attempt\n}\n\nstruct DecisionTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n}\n\nstruct DecisionTaskCompletedEventAttributes {\n 10: optional binary executionContext\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct DecisionTaskTimedOutEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n}\n\nstruct DecisionTaskFailedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional DecisionTaskFailedCause cause\n 35: optional binary details\n 40: optional string identity\n 50: optional string reason\n // for reset 
workflow\n 60: optional string baseRunId\n 70: optional string newRunId\n 80: optional i64 (js.type = \"Long\") forkEventVersion\n 90: optional string binaryChecksum\n}\n\nstruct ActivityTaskScheduledEventAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional RetryPolicy retryPolicy\n 120: optional Header header\n}\n\nstruct ActivityTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n 40: optional i32 attempt\n 50: optional string lastFailureReason\n 60: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n}\n\nstruct ActivityTaskFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct ActivityTaskTimedOutEventAttributes {\n 05: optional binary details\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n // For retry activity, it may have a failure before timeout. It's important to keep those information for debug.\n // Client can also provide the info for making next decision\n 40: optional string lastFailureReason\n 50: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCancelRequestedEventAttributes {\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct RequestCancelActivityTaskFailedEventAttributes{\n 10: optional string activityId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ActivityTaskCanceledEventAttributes {\n 10: optional binary details\n 20: optional i64 (js.type = \"Long\") latestCancelRequestedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct TimerStartedEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct TimerFiredEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct TimerCanceledEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct CancelTimerFailedEventAttributes {\n 10: optional string timerId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct WorkflowExecutionCancelRequestedEventAttributes {\n 10: optional string cause\n 20: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 30: optional WorkflowExecution 
externalWorkflowExecution\n 40: optional string identity\n}\n\nstruct WorkflowExecutionCanceledEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional binary details\n}\n\nstruct MarkerRecordedEventAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional Header header\n}\n\nstruct WorkflowExecutionSignaledEventAttributes {\n 10: optional string signalName\n 20: optional binary input\n 30: optional string identity\n}\n\nstruct WorkflowExecutionTerminatedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RequestCancelExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct RequestCancelExternalWorkflowExecutionFailedEventAttributes {\n 10: optional CancelExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionCancelRequestedEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n}\n\nstruct SignalExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional string signalName\n 50: optional binary input\n 60: optional binary control\n 70: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionFailedEventAttributes {\n 10: optional SignalExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionSignaledEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n}\n\nstruct UpsertWorkflowSearchAttributesEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionInitiatedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Header header\n 150: optional Memo memo\n 160: optional SearchAttributes searchAttributes\n}\n\nstruct 
StartChildWorkflowExecutionFailedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional ChildWorkflowExecutionFailedCause cause\n 50: optional binary control\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ChildWorkflowExecutionStartedEventAttributes {\n 10: optional string domain\n 20: optional i64 (js.type = \"Long\") initiatedEventId\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional Header header\n}\n\nstruct ChildWorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional WorkflowType workflowType\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionCanceledEventAttributes {\n 10: optional binary details\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTerminatedEventAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") initiatedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct HistoryEvent {\n 10: optional i64 (js.type = \"Long\") eventId\n 20: optional i64 (js.type = \"Long\") timestamp\n 30: optional EventType eventType\n 35: optional i64 (js.type = \"Long\") version\n 36: optional i64 (js.type = \"Long\") taskId\n 40: optional WorkflowExecutionStartedEventAttributes workflowExecutionStartedEventAttributes\n 50: optional WorkflowExecutionCompletedEventAttributes workflowExecutionCompletedEventAttributes\n 60: optional WorkflowExecutionFailedEventAttributes workflowExecutionFailedEventAttributes\n 70: optional WorkflowExecutionTimedOutEventAttributes workflowExecutionTimedOutEventAttributes\n 80: optional DecisionTaskScheduledEventAttributes decisionTaskScheduledEventAttributes\n 90: optional DecisionTaskStartedEventAttributes decisionTaskStartedEventAttributes\n 100: optional DecisionTaskCompletedEventAttributes decisionTaskCompletedEventAttributes\n 110: optional DecisionTaskTimedOutEventAttributes decisionTaskTimedOutEventAttributes\n 120: optional DecisionTaskFailedEventAttributes decisionTaskFailedEventAttributes\n 130: optional ActivityTaskScheduledEventAttributes activityTaskScheduledEventAttributes\n 140: optional ActivityTaskStartedEventAttributes activityTaskStartedEventAttributes\n 150: optional 
ActivityTaskCompletedEventAttributes activityTaskCompletedEventAttributes\n 160: optional ActivityTaskFailedEventAttributes activityTaskFailedEventAttributes\n 170: optional ActivityTaskTimedOutEventAttributes activityTaskTimedOutEventAttributes\n 180: optional TimerStartedEventAttributes timerStartedEventAttributes\n 190: optional TimerFiredEventAttributes timerFiredEventAttributes\n 200: optional ActivityTaskCancelRequestedEventAttributes activityTaskCancelRequestedEventAttributes\n 210: optional RequestCancelActivityTaskFailedEventAttributes requestCancelActivityTaskFailedEventAttributes\n 220: optional ActivityTaskCanceledEventAttributes activityTaskCanceledEventAttributes\n 230: optional TimerCanceledEventAttributes timerCanceledEventAttributes\n 240: optional CancelTimerFailedEventAttributes cancelTimerFailedEventAttributes\n 250: optional MarkerRecordedEventAttributes markerRecordedEventAttributes\n 260: optional WorkflowExecutionSignaledEventAttributes workflowExecutionSignaledEventAttributes\n 270: optional WorkflowExecutionTerminatedEventAttributes workflowExecutionTerminatedEventAttributes\n 280: optional WorkflowExecutionCancelRequestedEventAttributes workflowExecutionCancelRequestedEventAttributes\n 290: optional WorkflowExecutionCanceledEventAttributes workflowExecutionCanceledEventAttributes\n 300: optional RequestCancelExternalWorkflowExecutionInitiatedEventAttributes requestCancelExternalWorkflowExecutionInitiatedEventAttributes\n 310: optional RequestCancelExternalWorkflowExecutionFailedEventAttributes requestCancelExternalWorkflowExecutionFailedEventAttributes\n 320: optional ExternalWorkflowExecutionCancelRequestedEventAttributes externalWorkflowExecutionCancelRequestedEventAttributes\n 330: optional WorkflowExecutionContinuedAsNewEventAttributes workflowExecutionContinuedAsNewEventAttributes\n 340: optional StartChildWorkflowExecutionInitiatedEventAttributes startChildWorkflowExecutionInitiatedEventAttributes\n 350: optional StartChildWorkflowExecutionFailedEventAttributes startChildWorkflowExecutionFailedEventAttributes\n 360: optional ChildWorkflowExecutionStartedEventAttributes childWorkflowExecutionStartedEventAttributes\n 370: optional ChildWorkflowExecutionCompletedEventAttributes childWorkflowExecutionCompletedEventAttributes\n 380: optional ChildWorkflowExecutionFailedEventAttributes childWorkflowExecutionFailedEventAttributes\n 390: optional ChildWorkflowExecutionCanceledEventAttributes childWorkflowExecutionCanceledEventAttributes\n 400: optional ChildWorkflowExecutionTimedOutEventAttributes childWorkflowExecutionTimedOutEventAttributes\n 410: optional ChildWorkflowExecutionTerminatedEventAttributes childWorkflowExecutionTerminatedEventAttributes\n 420: optional SignalExternalWorkflowExecutionInitiatedEventAttributes signalExternalWorkflowExecutionInitiatedEventAttributes\n 430: optional SignalExternalWorkflowExecutionFailedEventAttributes signalExternalWorkflowExecutionFailedEventAttributes\n 440: optional ExternalWorkflowExecutionSignaledEventAttributes externalWorkflowExecutionSignaledEventAttributes\n 450: optional UpsertWorkflowSearchAttributesEventAttributes upsertWorkflowSearchAttributesEventAttributes\n}\n\nstruct History {\n 10: optional list events\n}\n\nstruct WorkflowExecutionFilter {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct WorkflowTypeFilter {\n 10: optional string name\n}\n\nstruct StartTimeFilter {\n 10: optional i64 (js.type = \"Long\") earliestTime\n 20: optional i64 (js.type = \"Long\") 
latestTime\n}\n\nstruct DomainInfo {\n 10: optional string name\n 20: optional DomainStatus status\n 30: optional string description\n 40: optional string ownerEmail\n // A key-value map for any customized purpose\n 50: optional map data\n 60: optional string uuid\n}\n\nstruct DomainConfiguration {\n 10: optional i32 workflowExecutionRetentionPeriodInDays\n 20: optional bool emitMetric\n 70: optional BadBinaries badBinaries\n 80: optional ArchivalStatus historyArchivalStatus\n 90: optional string historyArchivalURI\n 100: optional ArchivalStatus visibilityArchivalStatus\n 110: optional string visibilityArchivalURI\n}\n\nstruct BadBinaries{\n 10: optional map binaries\n}\n\nstruct BadBinaryInfo{\n 10: optional string reason\n 20: optional string operator\n 30: optional i64 (js.type = \"Long\") createdTimeNano\n}\n\nstruct UpdateDomainInfo {\n 10: optional string description\n 20: optional string ownerEmail\n // A key-value map for any customized purpose\n 30: optional map data\n}\n\nstruct ClusterReplicationConfiguration {\n 10: optional string clusterName\n}\n\nstruct DomainReplicationConfiguration {\n 10: optional string activeClusterName\n 20: optional list clusters\n}\n\nstruct RegisterDomainRequest {\n 10: optional string name\n 20: optional string description\n 30: optional string ownerEmail\n 40: optional i32 workflowExecutionRetentionPeriodInDays\n 50: optional bool emitMetric = true\n 60: optional list clusters\n 70: optional string activeClusterName\n // A key-value map for any customized purpose\n 80: optional map data\n 90: optional string securityToken\n 120: optional bool isGlobalDomain\n 130: optional ArchivalStatus historyArchivalStatus\n 140: optional string historyArchivalURI\n 150: optional ArchivalStatus visibilityArchivalStatus\n 160: optional string visibilityArchivalURI\n}\n\nstruct ListDomainsRequest {\n 10: optional i32 pageSize\n 20: optional binary nextPageToken\n}\n\nstruct ListDomainsResponse {\n 10: optional list domains\n 20: optional binary nextPageToken\n}\n\nstruct DescribeDomainRequest {\n 10: optional string name\n 20: optional string uuid\n}\n\nstruct DescribeDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct UpdateDomainRequest {\n 10: optional string name\n 20: optional UpdateDomainInfo updatedInfo\n 30: optional DomainConfiguration configuration\n 40: optional DomainReplicationConfiguration replicationConfiguration\n 50: optional string securityToken\n 60: optional string deleteBadBinary\n 70: optional i32 failoverTimeoutInSeconds\n}\n\nstruct UpdateDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct DeprecateDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy 
workflowIdReusePolicy\n// 110: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Memo memo\n 141: optional SearchAttributes searchAttributes\n 150: optional Header header\n}\n\nstruct StartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct PollForDecisionTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional string binaryChecksum\n}\n\nstruct PollForDecisionTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") previousStartedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n 51: optional i64 (js.type = 'Long') attempt\n 54: optional i64 (js.type = \"Long\") backlogCountHint\n 60: optional History history\n 70: optional binary nextPageToken\n 80: optional WorkflowQuery query\n 90: optional TaskList WorkflowExecutionTaskList\n 100: optional i64 (js.type = \"Long\") scheduledTimestamp\n 110: optional i64 (js.type = \"Long\") startedTimestamp\n 120: optional map queries\n}\n\nstruct StickyExecutionAttributes {\n 10: optional TaskList workerTaskList\n 20: optional i32 scheduleToStartTimeoutSeconds\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional list decisions\n 30: optional binary executionContext\n 40: optional string identity\n 50: optional StickyExecutionAttributes stickyAttributes\n 60: optional bool returnNewDecisionTask\n 70: optional bool forceCreateNewDecisionTask\n 80: optional string binaryChecksum\n 90: optional map queryResults\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional PollForDecisionTaskResponse decisionTask\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional DecisionTaskFailedCause cause\n 30: optional binary details\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct PollForActivityTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional TaskListMetadata taskListMetadata\n}\n\nstruct PollForActivityTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional string activityId\n 40: optional ActivityType activityType\n 50: optional binary input\n 70: optional i64 (js.type = \"Long\") scheduledTimestamp\n 80: optional i32 scheduleToCloseTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") startedTimestamp\n 100: optional i32 startToCloseTimeoutSeconds\n 110: optional i32 heartbeatTimeoutSeconds\n 120: optional i32 attempt\n 130: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 140: optional binary heartbeatDetails\n 150: optional WorkflowType workflowType\n 160: optional string workflowDomain\n 170: optional Header header\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatResponse {\n 10: optional bool cancelRequested\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional binary taskToken\n 
20: optional binary result\n 30: optional string identity\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional string reason\n 30: optional binary details\n 40: optional string identity\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RespondActivityTaskCompletedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary result\n 60: optional string identity\n}\n\nstruct RespondActivityTaskFailedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional string reason\n 60: optional binary details\n 70: optional string identity\n}\n\nstruct RespondActivityTaskCanceledByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n 40: optional string requestId\n}\n\nstruct GetWorkflowExecutionHistoryRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional i32 maximumPageSize\n 40: optional binary nextPageToken\n 50: optional bool waitForNewEvent\n 60: optional HistoryEventFilterType HistoryEventFilterType\n 70: optional bool skipArchival\n}\n\nstruct GetWorkflowExecutionHistoryResponse {\n 10: optional History history\n 11: optional list rawHistory\n 20: optional binary nextPageToken\n 30: optional bool archived\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string signalName\n 40: optional binary input\n 50: optional string identity\n 60: optional string requestId\n 70: optional binary control\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional string signalName\n 120: optional binary signalInput\n 130: optional binary control\n 140: optional RetryPolicy retryPolicy\n 150: optional string cronSchedule\n 160: optional Memo memo\n 161: optional SearchAttributes searchAttributes\n 170: optional Header header\n}\n\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional binary details\n 50: optional string identity\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional i64 (js.type = \"Long\") decisionFinishEventId\n 50: optional string requestId\n}\n\nstruct ResetWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct ListOpenWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter 
StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n}\n\nstruct ListOpenWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListClosedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n 70: optional WorkflowExecutionCloseStatus statusFilter\n}\n\nstruct ListClosedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListArchivedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListArchivedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct CountWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional string query\n}\n\nstruct CountWorkflowExecutionsResponse {\n 10: optional i64 count\n}\n\nstruct GetSearchAttributesResponse {\n 10: optional map keys\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional WorkflowQuery query\n // QueryRejectCondition can used to reject the query if workflow state does not satisify condition\n 40: optional QueryRejectCondition queryRejectCondition\n 50: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct QueryRejected {\n 10: optional WorkflowExecutionCloseStatus closeStatus\n}\n\nstruct QueryWorkflowResponse {\n 10: optional binary queryResult\n 20: optional QueryRejected queryRejected\n}\n\nstruct WorkflowQuery {\n 10: optional string queryType\n 20: optional binary queryArgs\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondQueryTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional QueryTaskCompletedType completedType\n 30: optional binary queryResult\n 40: optional string errorMessage\n 50: optional WorkerVersionInfo workerVersionInfo\n}\n\nstruct WorkflowQueryResult {\n 10: optional QueryResultType resultType\n 20: optional binary answer\n 30: optional string errorMessage\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct PendingActivityInfo {\n 10: optional string activityID\n 20: optional ActivityType activityType\n 30: optional PendingActivityState state\n 40: optional binary heartbeatDetails\n 50: optional i64 (js.type = \"Long\") lastHeartbeatTimestamp\n 60: optional i64 (js.type = \"Long\") lastStartedTimestamp\n 70: optional i32 attempt\n 80: optional i32 maximumAttempts\n 90: optional i64 (js.type = \"Long\") scheduledTimestamp\n 100: optional i64 (js.type = \"Long\") expirationTimestamp\n 110: optional string lastFailureReason\n 120: optional string lastWorkerIdentity\n 130: optional binary 
lastFailureDetails\n}\n\nstruct PendingChildExecutionInfo {\n 10: optional string workflowID\n 20: optional string runID\n 30: optional string workflowTypName\n 40: optional i64 (js.type = \"Long\") initiatedID\n 50: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct DescribeWorkflowExecutionResponse {\n 10: optional WorkflowExecutionConfiguration executionConfiguration\n 20: optional WorkflowExecutionInfo workflowExecutionInfo\n 30: optional list pendingActivities\n 40: optional list pendingChildren\n}\n\nstruct DescribeTaskListRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional TaskListType taskListType\n 40: optional bool includeTaskListStatus\n}\n\nstruct DescribeTaskListResponse {\n 10: optional list pollers\n 20: optional TaskListStatus taskListStatus\n}\n\nstruct ListTaskListPartitionsRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n}\n\nstruct TaskListPartitionMetadata {\n 10: optional string key\n 20: optional string ownerHostName\n}\n\nstruct ListTaskListPartitionsResponse {\n 10: optional list activityTaskListPartitions\n 20: optional list decisionTaskListPartitions\n}\n\nstruct TaskListStatus {\n 10: optional i64 (js.type = \"Long\") backlogCountHint\n 20: optional i64 (js.type = \"Long\") readLevel\n 30: optional i64 (js.type = \"Long\") ackLevel\n 35: optional double ratePerSecond\n 40: optional TaskIDBlock taskIDBlock\n}\n\nstruct TaskIDBlock {\n 10: optional i64 (js.type = \"Long\") startID\n 20: optional i64 (js.type = \"Long\") endID\n}\n\n//At least one of the parameters needs to be provided\nstruct DescribeHistoryHostRequest {\n 10: optional string hostAddress //ip:port\n 20: optional i32 shardIdForHost\n 30: optional WorkflowExecution executionForHost\n}\n\nstruct RemoveTaskRequest {\n 10: optional i32 shardID\n 20: optional i32 type\n 30: optional i64 (js.type = \"Long\") taskID\n 40: optional i64 (js.type = \"Long\") visibilityTimestamp\n}\n\nstruct CloseShardRequest {\n 10: optional i32 shardID\n}\n\nstruct DescribeHistoryHostResponse{\n 10: optional i32 numberOfShards\n 20: optional list shardIDs\n 30: optional DomainCacheInfo domainCache\n 40: optional string shardControllerStatus\n 50: optional string address\n}\n\nstruct DomainCacheInfo{\n 10: optional i64 numOfItemsInCacheByID\n 20: optional i64 numOfItemsInCacheByName\n}\n\nenum TaskListType {\n /*\n * Decision type of tasklist\n */\n Decision,\n /*\n * Activity type of tasklist\n */\n Activity,\n}\n\nstruct PollerInfo {\n // Unix Nano\n 10: optional i64 (js.type = \"Long\") lastAccessTime\n 20: optional string identity\n 30: optional double ratePerSecond\n}\n\nstruct RetryPolicy {\n // Interval of the first retry. If coefficient is 1.0 then it is used for all retries.\n 10: optional i32 initialIntervalInSeconds\n\n // Coefficient used to calculate the next retry interval.\n // The next retry interval is previous interval multiplied by the coefficient.\n // Must be 1 or larger.\n 20: optional double backoffCoefficient\n\n // Maximum interval between retries. Exponential backoff leads to interval increase.\n // This value is the cap of the increase. Default is 100x of initial interval.\n 30: optional i32 maximumIntervalInSeconds\n\n // Maximum number of attempts. When exceeded the retries stop even if not expired yet.\n // Must be 1 or bigger. Default is unlimited.\n 40: optional i32 maximumAttempts\n\n // Non-Retriable errors. 
Will stop retrying if error matches this list.\n 50: optional list nonRetriableErrorReasons\n\n // Expiration time for the whole retry process.\n 60: optional i32 expirationIntervalInSeconds\n}\n\n// HistoryBranchRange represents a piece of range for a branch.\nstruct HistoryBranchRange{\n // branchID of original branch forked from\n 10: optional string branchID\n // beinning node for the range, inclusive\n 20: optional i64 beginNodeID\n // ending node for the range, exclusive\n 30: optional i64 endNodeID\n}\n\n// For history persistence to serialize/deserialize branch details\nstruct HistoryBranch{\n 10: optional string treeID\n 20: optional string branchID\n 30: optional list ancestors\n}\n\n// VersionHistoryItem contains signal eventID and the corresponding version\nstruct VersionHistoryItem{\n 10: optional i64 (js.type = \"Long\") eventID\n 20: optional i64 (js.type = \"Long\") version\n}\n\n// VersionHistory contains the version history of a branch\nstruct VersionHistory{\n 10: optional binary branchToken\n 20: optional list items\n}\n\n// VersionHistories contains all version histories from all branches\nstruct VersionHistories{\n 10: optional i32 currentVersionHistoryIndex\n 20: optional list histories\n}\n\n// ReapplyEventsRequest is the request for reapply events API\nstruct ReapplyEventsRequest{\n 10: optional string domainName\n 20: optional WorkflowExecution workflowExecution\n 30: optional DataBlob events\n}\n\n// SupportedClientVersions contains the support versions for client library\nstruct SupportedClientVersions{\n 10: optional string goSdk\n 20: optional string javaSdk\n}\n\n// ClusterInfo contains information about cadence cluster\nstruct ClusterInfo{\n 10: optional SupportedClientVersions supportedClientVersions\n}\n\nstruct RefreshWorkflowTasksRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence\n\nexception BadRequestError {\n 1: required string message\n}\n\nexception InternalServiceError {\n 1: required string message\n}\n\nexception InternalDataInconsistencyError {\n 1: required string message\n}\n\nexception DomainAlreadyExistsError {\n 1: required string message\n}\n\nexception WorkflowExecutionAlreadyStartedError {\n 10: optional string message\n 20: optional string startRequestId\n 30: optional string runId\n}\n\nexception EntityNotExistsError {\n 1: required string message\n 2: optional string currentCluster\n 3: optional string activeCluster\n}\n\nexception ServiceBusyError {\n 1: required string message\n}\n\nexception CancellationAlreadyRequestedError {\n 1: required string message\n}\n\nexception QueryFailedError {\n 1: required string message\n}\n\nexception DomainNotActiveError {\n 1: required string message\n 2: required string domainName\n 3: required string currentCluster\n 4: required string activeCluster\n}\n\nexception LimitExceededError {\n 1: required string message\n}\n\nexception AccessDeniedError {\n 1: required string message\n}\n\nexception RetryTaskError {\n 1: required string message\n 2: optional string domainId\n 3: optional string workflowId\n 4: optional string runId\n 5: optional i64 (js.type = \"Long\") nextEventId\n}\n\nexception RetryTaskV2Error {\n 1: required string message\n 2: optional string domainId\n 3: optional string workflowId\n 4: optional string runId\n 5: optional i64 (js.type = \"Long\") startEventId\n 6: optional i64 (js.type = \"Long\") startEventVersion\n 7: optional i64 (js.type = \"Long\") endEventId\n 8: optional i64 (js.type = \"Long\") endEventVersion\n}\n\nexception ClientVersionNotSupportedError {\n 1: required string featureVersion\n 2: required string clientImpl\n 3: required string supportedVersions\n}\n\nexception CurrentBranchChangedError {\n 10: required string message\n 20: required binary currentBranchToken\n}\n\nenum WorkflowIdReusePolicy {\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running, and the last execution close state is in\n * [terminated, cancelled, timeouted, failed].\n */\n AllowDuplicateFailedOnly,\n /*\n * allow start a workflow execution using the same workflow ID,\n * when workflow not running.\n */\n AllowDuplicate,\n /*\n * do not allow start a workflow execution using the same workflow ID at all\n */\n RejectDuplicate,\n /*\n * if a workflow is running using the same workflow ID, terminate it and start a new one\n */\n TerminateIfRunning,\n}\n\nenum DomainStatus {\n REGISTERED,\n DEPRECATED,\n DELETED,\n}\n\nenum TimeoutType {\n START_TO_CLOSE,\n SCHEDULE_TO_START,\n SCHEDULE_TO_CLOSE,\n HEARTBEAT,\n}\n\nenum ParentClosePolicy {\n\tABANDON,\n\tREQUEST_CANCEL,\n\tTERMINATE,\n}\n\n\n// whenever this list of decision is changed\n// do change the mutableStateBuilder.go\n// function shouldBufferEvent\n// to make sure wo do the correct event ordering\nenum DecisionType {\n ScheduleActivityTask,\n RequestCancelActivityTask,\n StartTimer,\n CompleteWorkflowExecution,\n FailWorkflowExecution,\n CancelTimer,\n CancelWorkflowExecution,\n RequestCancelExternalWorkflowExecution,\n RecordMarker,\n ContinueAsNewWorkflowExecution,\n 
StartChildWorkflowExecution,\n SignalExternalWorkflowExecution,\n UpsertWorkflowSearchAttributes,\n}\n\nenum EventType {\n WorkflowExecutionStarted,\n WorkflowExecutionCompleted,\n WorkflowExecutionFailed,\n WorkflowExecutionTimedOut,\n DecisionTaskScheduled,\n DecisionTaskStarted,\n DecisionTaskCompleted,\n DecisionTaskTimedOut\n DecisionTaskFailed,\n ActivityTaskScheduled,\n ActivityTaskStarted,\n ActivityTaskCompleted,\n ActivityTaskFailed,\n ActivityTaskTimedOut,\n ActivityTaskCancelRequested,\n RequestCancelActivityTaskFailed,\n ActivityTaskCanceled,\n TimerStarted,\n TimerFired,\n CancelTimerFailed,\n TimerCanceled,\n WorkflowExecutionCancelRequested,\n WorkflowExecutionCanceled,\n RequestCancelExternalWorkflowExecutionInitiated,\n RequestCancelExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionCancelRequested,\n MarkerRecorded,\n WorkflowExecutionSignaled,\n WorkflowExecutionTerminated,\n WorkflowExecutionContinuedAsNew,\n StartChildWorkflowExecutionInitiated,\n StartChildWorkflowExecutionFailed,\n ChildWorkflowExecutionStarted,\n ChildWorkflowExecutionCompleted,\n ChildWorkflowExecutionFailed,\n ChildWorkflowExecutionCanceled,\n ChildWorkflowExecutionTimedOut,\n ChildWorkflowExecutionTerminated,\n SignalExternalWorkflowExecutionInitiated,\n SignalExternalWorkflowExecutionFailed,\n ExternalWorkflowExecutionSignaled,\n UpsertWorkflowSearchAttributes,\n}\n\nenum DecisionTaskFailedCause {\n UNHANDLED_DECISION,\n BAD_SCHEDULE_ACTIVITY_ATTRIBUTES,\n BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES,\n BAD_START_TIMER_ATTRIBUTES,\n BAD_CANCEL_TIMER_ATTRIBUTES,\n BAD_RECORD_MARKER_ATTRIBUTES,\n BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_CONTINUE_AS_NEW_ATTRIBUTES,\n START_TIMER_DUPLICATE_ID,\n RESET_STICKY_TASKLIST,\n WORKFLOW_WORKER_UNHANDLED_FAILURE,\n BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES,\n BAD_START_CHILD_EXECUTION_ATTRIBUTES,\n FORCE_CLOSE_DECISION,\n FAILOVER_CLOSE_DECISION,\n BAD_SIGNAL_INPUT_SIZE,\n RESET_WORKFLOW,\n BAD_BINARY,\n SCHEDULE_ACTIVITY_DUPLICATE_ID,\n BAD_SEARCH_ATTRIBUTES,\n}\n\nenum CancelExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n}\n\nenum SignalExternalWorkflowExecutionFailedCause {\n UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,\n}\n\nenum ChildWorkflowExecutionFailedCause {\n WORKFLOW_ALREADY_RUNNING,\n}\n\n// TODO: when migrating to gRPC, add a running / none status,\n// currently, customer is using null / nil as an indication\n// that workflow is still running\nenum WorkflowExecutionCloseStatus {\n COMPLETED,\n FAILED,\n CANCELED,\n TERMINATED,\n CONTINUED_AS_NEW,\n TIMED_OUT,\n}\n\nenum QueryTaskCompletedType {\n COMPLETED,\n FAILED,\n}\n\nenum QueryResultType {\n ANSWERED,\n FAILED,\n}\n\nenum PendingActivityState {\n SCHEDULED,\n STARTED,\n CANCEL_REQUESTED,\n}\n\nenum HistoryEventFilterType {\n ALL_EVENT,\n CLOSE_EVENT,\n}\n\nenum TaskListKind {\n NORMAL,\n STICKY,\n}\n\nenum ArchivalStatus {\n DISABLED,\n ENABLED,\n}\n\nenum IndexedValueType {\n STRING,\n KEYWORD,\n INT,\n DOUBLE,\n BOOL,\n DATETIME,\n}\n\nstruct Header {\n 10: optional map fields\n}\n\nstruct WorkflowType {\n 10: optional string name\n}\n\nstruct ActivityType {\n 10: optional string name\n}\n\nstruct TaskList {\n 10: optional string name\n 20: optional TaskListKind kind\n}\n\nenum EncodingType {\n ThriftRW,\n JSON,\n}\n\nenum QueryRejectCondition {\n // NOT_OPEN indicates that query should be rejected if 
workflow is not open\n NOT_OPEN\n // NOT_COMPLETED_CLEANLY indicates that query should be rejected if workflow did not complete cleanly\n NOT_COMPLETED_CLEANLY\n}\n\nenum QueryConsistencyLevel {\n // EVENTUAL indicates that query should be eventually consistent\n EVENTUAL\n // STRONG indicates that any events that came before query should be reflected in workflow state before running query\n STRONG\n}\n\nstruct DataBlob {\n 10: optional EncodingType EncodingType\n 20: optional binary Data\n}\n\nstruct ReplicationInfo {\n 10: optional i64 (js.type = \"Long\") version\n 20: optional i64 (js.type = \"Long\") lastEventId\n}\n\nstruct TaskListMetadata {\n 10: optional double maxTasksPerSecond\n}\n\nstruct WorkflowExecution {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct Memo {\n 10: optional map fields\n}\n\nstruct SearchAttributes {\n 10: optional map indexedFields\n}\n\nstruct WorkerVersionInfo {\n 10: optional string impl\n 20: optional string featureVersion\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional WorkflowExecution execution\n 20: optional WorkflowType type\n 30: optional i64 (js.type = \"Long\") startTime\n 40: optional i64 (js.type = \"Long\") closeTime\n 50: optional WorkflowExecutionCloseStatus closeStatus\n 60: optional i64 (js.type = \"Long\") historyLength\n 70: optional string parentDomainId\n 80: optional WorkflowExecution parentExecution\n 90: optional i64 (js.type = \"Long\") executionTime\n 100: optional Memo memo\n 101: optional SearchAttributes searchAttributes\n 110: optional ResetPoints autoResetPoints\n 120: optional string taskList\n}\n\nstruct WorkflowExecutionConfiguration {\n 10: optional TaskList taskList\n 20: optional i32 executionStartToCloseTimeoutSeconds\n 30: optional i32 taskStartToCloseTimeoutSeconds\n// 40: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n}\n\nstruct TransientDecisionInfo {\n 10: optional HistoryEvent scheduledEvent\n 20: optional HistoryEvent startedEvent\n}\n\nstruct ScheduleActivityTaskDecisionAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional Header header\n}\n\nstruct RequestCancelActivityTaskDecisionAttributes {\n 10: optional string activityId\n}\n\nstruct StartTimerDecisionAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n}\n\nstruct CompleteWorkflowExecutionDecisionAttributes {\n 10: optional binary result\n}\n\nstruct FailWorkflowExecutionDecisionAttributes {\n 10: optional string reason\n 20: optional binary details\n}\n\nstruct CancelTimerDecisionAttributes {\n 10: optional string timerId\n}\n\nstruct CancelWorkflowExecutionDecisionAttributes {\n 10: optional binary details\n}\n\nstruct RequestCancelExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional string runId\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional string signalName\n 40: optional binary input\n 50: optional binary control\n 60: optional bool 
childWorkflowOnly\n}\n\nstruct UpsertWorkflowSearchAttributesDecisionAttributes {\n 10: optional SearchAttributes searchAttributes\n}\n\nstruct RecordMarkerDecisionAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional Header header\n}\n\nstruct ContinueAsNewWorkflowExecutionDecisionAttributes {\n 10: optional WorkflowType workflowType\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n 60: optional i32 backoffStartIntervalInSeconds\n 70: optional RetryPolicy retryPolicy\n 80: optional ContinueAsNewInitiator initiator\n 90: optional string failureReason\n 100: optional binary failureDetails\n 110: optional binary lastCompletionResult\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionDecisionAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional RetryPolicy retryPolicy\n 120: optional string cronSchedule\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct Decision {\n 10: optional DecisionType decisionType\n 20: optional ScheduleActivityTaskDecisionAttributes scheduleActivityTaskDecisionAttributes\n 25: optional StartTimerDecisionAttributes startTimerDecisionAttributes\n 30: optional CompleteWorkflowExecutionDecisionAttributes completeWorkflowExecutionDecisionAttributes\n 35: optional FailWorkflowExecutionDecisionAttributes failWorkflowExecutionDecisionAttributes\n 40: optional RequestCancelActivityTaskDecisionAttributes requestCancelActivityTaskDecisionAttributes\n 50: optional CancelTimerDecisionAttributes cancelTimerDecisionAttributes\n 60: optional CancelWorkflowExecutionDecisionAttributes cancelWorkflowExecutionDecisionAttributes\n 70: optional RequestCancelExternalWorkflowExecutionDecisionAttributes requestCancelExternalWorkflowExecutionDecisionAttributes\n 80: optional RecordMarkerDecisionAttributes recordMarkerDecisionAttributes\n 90: optional ContinueAsNewWorkflowExecutionDecisionAttributes continueAsNewWorkflowExecutionDecisionAttributes\n 100: optional StartChildWorkflowExecutionDecisionAttributes startChildWorkflowExecutionDecisionAttributes\n 110: optional SignalExternalWorkflowExecutionDecisionAttributes signalExternalWorkflowExecutionDecisionAttributes\n 120: optional UpsertWorkflowSearchAttributesDecisionAttributes upsertWorkflowSearchAttributesDecisionAttributes\n}\n\nstruct WorkflowExecutionStartedEventAttributes {\n 10: optional WorkflowType workflowType\n 12: optional string parentWorkflowDomain\n 14: optional WorkflowExecution parentWorkflowExecution\n 16: optional i64 (js.type = \"Long\") parentInitiatedEventId\n 20: optional TaskList taskList\n 30: optional binary input\n 40: optional i32 executionStartToCloseTimeoutSeconds\n 50: optional i32 taskStartToCloseTimeoutSeconds\n// 52: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 54: 
optional string continuedExecutionRunId\n 55: optional ContinueAsNewInitiator initiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 59: optional string originalExecutionRunId // This is the runID when the WorkflowExecutionStarted event is written\n 60: optional string identity\n 61: optional string firstExecutionRunId // This is the very first runID along the chain of ContinueAsNew and Reset.\n 70: optional RetryPolicy retryPolicy\n 80: optional i32 attempt\n 90: optional i64 (js.type = \"Long\") expirationTimestamp\n 100: optional string cronSchedule\n 110: optional i32 firstDecisionTaskBackoffSeconds\n 120: optional Memo memo\n 121: optional SearchAttributes searchAttributes\n 130: optional ResetPoints prevAutoResetPoints\n 140: optional Header header\n}\n\nstruct ResetPoints{\n 10: optional list points\n}\n\n struct ResetPointInfo{\n 10: optional string binaryChecksum\n 20: optional string runId\n 30: optional i64 firstDecisionCompletedId\n 40: optional i64 (js.type = \"Long\") createdTimeNano\n 50: optional i64 (js.type = \"Long\") expiringTimeNano //the time that the run is deleted due to retention\n 60: optional bool resettable // false if the resset point has pending childWFs/reqCancels/signalExternals.\n}\n\nstruct WorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct WorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n}\n\nenum ContinueAsNewInitiator {\n Decider,\n RetryPolicy,\n CronSchedule,\n}\n\nstruct WorkflowExecutionContinuedAsNewEventAttributes {\n 10: optional string newExecutionRunId\n 20: optional WorkflowType workflowType\n 30: optional TaskList taskList\n 40: optional binary input\n 50: optional i32 executionStartToCloseTimeoutSeconds\n 60: optional i32 taskStartToCloseTimeoutSeconds\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 80: optional i32 backoffStartIntervalInSeconds\n 90: optional ContinueAsNewInitiator initiator\n 100: optional string failureReason\n 110: optional binary failureDetails\n 120: optional binary lastCompletionResult\n 130: optional Header header\n 140: optional Memo memo\n 150: optional SearchAttributes searchAttributes\n}\n\nstruct DecisionTaskScheduledEventAttributes {\n 10: optional TaskList taskList\n 20: optional i32 startToCloseTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") attempt\n}\n\nstruct DecisionTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n}\n\nstruct DecisionTaskCompletedEventAttributes {\n 10: optional binary executionContext\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct DecisionTaskTimedOutEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n}\n\nstruct DecisionTaskFailedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional DecisionTaskFailedCause 
cause\n 35: optional binary details\n 40: optional string identity\n 50: optional string reason\n // for reset workflow\n 60: optional string baseRunId\n 70: optional string newRunId\n 80: optional i64 (js.type = \"Long\") forkEventVersion\n 90: optional string binaryChecksum\n}\n\nstruct ActivityTaskScheduledEventAttributes {\n 10: optional string activityId\n 20: optional ActivityType activityType\n 25: optional string domain\n 30: optional TaskList taskList\n 40: optional binary input\n 45: optional i32 scheduleToCloseTimeoutSeconds\n 50: optional i32 scheduleToStartTimeoutSeconds\n 55: optional i32 startToCloseTimeoutSeconds\n 60: optional i32 heartbeatTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional RetryPolicy retryPolicy\n 120: optional Header header\n}\n\nstruct ActivityTaskStartedEventAttributes {\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional string identity\n 30: optional string requestId\n 40: optional i32 attempt\n 50: optional string lastFailureReason\n 60: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCompletedEventAttributes {\n 10: optional binary result\n 20: optional i64 (js.type = \"Long\") scheduledEventId\n 30: optional i64 (js.type = \"Long\") startedEventId\n 40: optional string identity\n}\n\nstruct ActivityTaskFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct ActivityTaskTimedOutEventAttributes {\n 05: optional binary details\n 10: optional i64 (js.type = \"Long\") scheduledEventId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional TimeoutType timeoutType\n // For retry activity, it may have a failure before timeout. 
It's important to keep those information for debug.\n // Client can also provide the info for making next decision\n 40: optional string lastFailureReason\n 50: optional binary lastFailureDetails\n}\n\nstruct ActivityTaskCancelRequestedEventAttributes {\n 10: optional string activityId\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct RequestCancelActivityTaskFailedEventAttributes{\n 10: optional string activityId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ActivityTaskCanceledEventAttributes {\n 10: optional binary details\n 20: optional i64 (js.type = \"Long\") latestCancelRequestedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional string identity\n}\n\nstruct TimerStartedEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startToFireTimeoutSeconds\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct TimerFiredEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct TimerCanceledEventAttributes {\n 10: optional string timerId\n 20: optional i64 (js.type = \"Long\") startedEventId\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct CancelTimerFailedEventAttributes {\n 10: optional string timerId\n 20: optional string cause\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional string identity\n}\n\nstruct WorkflowExecutionCancelRequestedEventAttributes {\n 10: optional string cause\n 20: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 30: optional WorkflowExecution externalWorkflowExecution\n 40: optional string identity\n}\n\nstruct WorkflowExecutionCanceledEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional binary details\n}\n\nstruct MarkerRecordedEventAttributes {\n 10: optional string markerName\n 20: optional binary details\n 30: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 40: optional Header header\n}\n\nstruct WorkflowExecutionSignaledEventAttributes {\n 10: optional string signalName\n 20: optional binary input\n 30: optional string identity\n}\n\nstruct WorkflowExecutionTerminatedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RequestCancelExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n 50: optional bool childWorkflowOnly\n}\n\nstruct RequestCancelExternalWorkflowExecutionFailedEventAttributes {\n 10: optional CancelExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionCancelRequestedEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n}\n\nstruct SignalExternalWorkflowExecutionInitiatedEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional string 
domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional string signalName\n 50: optional binary input\n 60: optional binary control\n 70: optional bool childWorkflowOnly\n}\n\nstruct SignalExternalWorkflowExecutionFailedEventAttributes {\n 10: optional SignalExternalWorkflowExecutionFailedCause cause\n 20: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional binary control\n}\n\nstruct ExternalWorkflowExecutionSignaledEventAttributes {\n 10: optional i64 (js.type = \"Long\") initiatedEventId\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional binary control\n}\n\nstruct UpsertWorkflowSearchAttributesEventAttributes {\n 10: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 20: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionInitiatedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n// 80: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 81: optional ParentClosePolicy parentClosePolicy\n 90: optional binary control\n 100: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n 110: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Header header\n 150: optional Memo memo\n 160: optional SearchAttributes searchAttributes\n}\n\nstruct StartChildWorkflowExecutionFailedEventAttributes {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional ChildWorkflowExecutionFailedCause cause\n 50: optional binary control\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") decisionTaskCompletedEventId\n}\n\nstruct ChildWorkflowExecutionStartedEventAttributes {\n 10: optional string domain\n 20: optional i64 (js.type = \"Long\") initiatedEventId\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional Header header\n}\n\nstruct ChildWorkflowExecutionCompletedEventAttributes {\n 10: optional binary result\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionFailedEventAttributes {\n 10: optional string reason\n 20: optional binary details\n 30: optional string domain\n 40: optional WorkflowExecution workflowExecution\n 50: optional WorkflowType workflowType\n 60: optional i64 (js.type = \"Long\") initiatedEventId\n 70: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionCanceledEventAttributes {\n 10: optional binary details\n 20: optional string domain\n 30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTimedOutEventAttributes {\n 10: optional TimeoutType timeoutType\n 20: optional string domain\n 
30: optional WorkflowExecution workflowExecution\n 40: optional WorkflowType workflowType\n 50: optional i64 (js.type = \"Long\") initiatedEventId\n 60: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct ChildWorkflowExecutionTerminatedEventAttributes {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") initiatedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n}\n\nstruct HistoryEvent {\n 10: optional i64 (js.type = \"Long\") eventId\n 20: optional i64 (js.type = \"Long\") timestamp\n 30: optional EventType eventType\n 35: optional i64 (js.type = \"Long\") version\n 36: optional i64 (js.type = \"Long\") taskId\n 40: optional WorkflowExecutionStartedEventAttributes workflowExecutionStartedEventAttributes\n 50: optional WorkflowExecutionCompletedEventAttributes workflowExecutionCompletedEventAttributes\n 60: optional WorkflowExecutionFailedEventAttributes workflowExecutionFailedEventAttributes\n 70: optional WorkflowExecutionTimedOutEventAttributes workflowExecutionTimedOutEventAttributes\n 80: optional DecisionTaskScheduledEventAttributes decisionTaskScheduledEventAttributes\n 90: optional DecisionTaskStartedEventAttributes decisionTaskStartedEventAttributes\n 100: optional DecisionTaskCompletedEventAttributes decisionTaskCompletedEventAttributes\n 110: optional DecisionTaskTimedOutEventAttributes decisionTaskTimedOutEventAttributes\n 120: optional DecisionTaskFailedEventAttributes decisionTaskFailedEventAttributes\n 130: optional ActivityTaskScheduledEventAttributes activityTaskScheduledEventAttributes\n 140: optional ActivityTaskStartedEventAttributes activityTaskStartedEventAttributes\n 150: optional ActivityTaskCompletedEventAttributes activityTaskCompletedEventAttributes\n 160: optional ActivityTaskFailedEventAttributes activityTaskFailedEventAttributes\n 170: optional ActivityTaskTimedOutEventAttributes activityTaskTimedOutEventAttributes\n 180: optional TimerStartedEventAttributes timerStartedEventAttributes\n 190: optional TimerFiredEventAttributes timerFiredEventAttributes\n 200: optional ActivityTaskCancelRequestedEventAttributes activityTaskCancelRequestedEventAttributes\n 210: optional RequestCancelActivityTaskFailedEventAttributes requestCancelActivityTaskFailedEventAttributes\n 220: optional ActivityTaskCanceledEventAttributes activityTaskCanceledEventAttributes\n 230: optional TimerCanceledEventAttributes timerCanceledEventAttributes\n 240: optional CancelTimerFailedEventAttributes cancelTimerFailedEventAttributes\n 250: optional MarkerRecordedEventAttributes markerRecordedEventAttributes\n 260: optional WorkflowExecutionSignaledEventAttributes workflowExecutionSignaledEventAttributes\n 270: optional WorkflowExecutionTerminatedEventAttributes workflowExecutionTerminatedEventAttributes\n 280: optional WorkflowExecutionCancelRequestedEventAttributes workflowExecutionCancelRequestedEventAttributes\n 290: optional WorkflowExecutionCanceledEventAttributes workflowExecutionCanceledEventAttributes\n 300: optional RequestCancelExternalWorkflowExecutionInitiatedEventAttributes requestCancelExternalWorkflowExecutionInitiatedEventAttributes\n 310: optional RequestCancelExternalWorkflowExecutionFailedEventAttributes requestCancelExternalWorkflowExecutionFailedEventAttributes\n 320: optional ExternalWorkflowExecutionCancelRequestedEventAttributes externalWorkflowExecutionCancelRequestedEventAttributes\n 330: optional 
WorkflowExecutionContinuedAsNewEventAttributes workflowExecutionContinuedAsNewEventAttributes\n 340: optional StartChildWorkflowExecutionInitiatedEventAttributes startChildWorkflowExecutionInitiatedEventAttributes\n 350: optional StartChildWorkflowExecutionFailedEventAttributes startChildWorkflowExecutionFailedEventAttributes\n 360: optional ChildWorkflowExecutionStartedEventAttributes childWorkflowExecutionStartedEventAttributes\n 370: optional ChildWorkflowExecutionCompletedEventAttributes childWorkflowExecutionCompletedEventAttributes\n 380: optional ChildWorkflowExecutionFailedEventAttributes childWorkflowExecutionFailedEventAttributes\n 390: optional ChildWorkflowExecutionCanceledEventAttributes childWorkflowExecutionCanceledEventAttributes\n 400: optional ChildWorkflowExecutionTimedOutEventAttributes childWorkflowExecutionTimedOutEventAttributes\n 410: optional ChildWorkflowExecutionTerminatedEventAttributes childWorkflowExecutionTerminatedEventAttributes\n 420: optional SignalExternalWorkflowExecutionInitiatedEventAttributes signalExternalWorkflowExecutionInitiatedEventAttributes\n 430: optional SignalExternalWorkflowExecutionFailedEventAttributes signalExternalWorkflowExecutionFailedEventAttributes\n 440: optional ExternalWorkflowExecutionSignaledEventAttributes externalWorkflowExecutionSignaledEventAttributes\n 450: optional UpsertWorkflowSearchAttributesEventAttributes upsertWorkflowSearchAttributesEventAttributes\n}\n\nstruct History {\n 10: optional list events\n}\n\nstruct WorkflowExecutionFilter {\n 10: optional string workflowId\n 20: optional string runId\n}\n\nstruct WorkflowTypeFilter {\n 10: optional string name\n}\n\nstruct StartTimeFilter {\n 10: optional i64 (js.type = \"Long\") earliestTime\n 20: optional i64 (js.type = \"Long\") latestTime\n}\n\nstruct DomainInfo {\n 10: optional string name\n 20: optional DomainStatus status\n 30: optional string description\n 40: optional string ownerEmail\n // A key-value map for any customized purpose\n 50: optional map data\n 60: optional string uuid\n}\n\nstruct DomainConfiguration {\n 10: optional i32 workflowExecutionRetentionPeriodInDays\n 20: optional bool emitMetric\n 70: optional BadBinaries badBinaries\n 80: optional ArchivalStatus historyArchivalStatus\n 90: optional string historyArchivalURI\n 100: optional ArchivalStatus visibilityArchivalStatus\n 110: optional string visibilityArchivalURI\n}\n\nstruct BadBinaries{\n 10: optional map binaries\n}\n\nstruct BadBinaryInfo{\n 10: optional string reason\n 20: optional string operator\n 30: optional i64 (js.type = \"Long\") createdTimeNano\n}\n\nstruct UpdateDomainInfo {\n 10: optional string description\n 20: optional string ownerEmail\n // A key-value map for any customized purpose\n 30: optional map data\n}\n\nstruct ClusterReplicationConfiguration {\n 10: optional string clusterName\n}\n\nstruct DomainReplicationConfiguration {\n 10: optional string activeClusterName\n 20: optional list clusters\n}\n\nstruct RegisterDomainRequest {\n 10: optional string name\n 20: optional string description\n 30: optional string ownerEmail\n 40: optional i32 workflowExecutionRetentionPeriodInDays\n 50: optional bool emitMetric = true\n 60: optional list clusters\n 70: optional string activeClusterName\n // A key-value map for any customized purpose\n 80: optional map data\n 90: optional string securityToken\n 120: optional bool isGlobalDomain\n 130: optional ArchivalStatus historyArchivalStatus\n 140: optional string historyArchivalURI\n 150: optional ArchivalStatus 
visibilityArchivalStatus\n 160: optional string visibilityArchivalURI\n}\n\nstruct ListDomainsRequest {\n 10: optional i32 pageSize\n 20: optional binary nextPageToken\n}\n\nstruct ListDomainsResponse {\n 10: optional list domains\n 20: optional binary nextPageToken\n}\n\nstruct DescribeDomainRequest {\n 10: optional string name\n 20: optional string uuid\n}\n\nstruct DescribeDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct UpdateDomainRequest {\n 10: optional string name\n 20: optional UpdateDomainInfo updatedInfo\n 30: optional DomainConfiguration configuration\n 40: optional DomainReplicationConfiguration replicationConfiguration\n 50: optional string securityToken\n 60: optional string deleteBadBinary\n 70: optional i32 failoverTimeoutInSeconds\n}\n\nstruct UpdateDomainResponse {\n 10: optional DomainInfo domainInfo\n 20: optional DomainConfiguration configuration\n 30: optional DomainReplicationConfiguration replicationConfiguration\n 40: optional i64 (js.type = \"Long\") failoverVersion\n 50: optional bool isGlobalDomain\n}\n\nstruct DeprecateDomainRequest {\n 10: optional string name\n 20: optional string securityToken\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n// 110: optional ChildPolicy childPolicy -- Removed but reserve the IDL order number\n 120: optional RetryPolicy retryPolicy\n 130: optional string cronSchedule\n 140: optional Memo memo\n 141: optional SearchAttributes searchAttributes\n 150: optional Header header\n}\n\nstruct StartWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct PollForDecisionTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional string binaryChecksum\n}\n\nstruct PollForDecisionTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional WorkflowType workflowType\n 40: optional i64 (js.type = \"Long\") previousStartedEventId\n 50: optional i64 (js.type = \"Long\") startedEventId\n 51: optional i64 (js.type = 'Long') attempt\n 54: optional i64 (js.type = \"Long\") backlogCountHint\n 60: optional History history\n 70: optional binary nextPageToken\n 80: optional WorkflowQuery query\n 90: optional TaskList WorkflowExecutionTaskList\n 100: optional i64 (js.type = \"Long\") scheduledTimestamp\n 110: optional i64 (js.type = \"Long\") startedTimestamp\n 120: optional map queries\n}\n\nstruct StickyExecutionAttributes {\n 10: optional TaskList workerTaskList\n 20: optional i32 scheduleToStartTimeoutSeconds\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional list decisions\n 30: optional binary executionContext\n 40: optional string identity\n 50: optional StickyExecutionAttributes stickyAttributes\n 60: optional bool returnNewDecisionTask\n 70: optional bool forceCreateNewDecisionTask\n 80: optional string binaryChecksum\n 90: optional map queryResults\n}\n\nstruct 
RespondDecisionTaskCompletedResponse {\n 10: optional PollForDecisionTaskResponse decisionTask\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional DecisionTaskFailedCause cause\n 30: optional binary details\n 40: optional string identity\n 50: optional string binaryChecksum\n}\n\nstruct PollForActivityTaskRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional string identity\n 40: optional TaskListMetadata taskListMetadata\n}\n\nstruct PollForActivityTaskResponse {\n 10: optional binary taskToken\n 20: optional WorkflowExecution workflowExecution\n 30: optional string activityId\n 40: optional ActivityType activityType\n 50: optional binary input\n 70: optional i64 (js.type = \"Long\") scheduledTimestamp\n 80: optional i32 scheduleToCloseTimeoutSeconds\n 90: optional i64 (js.type = \"Long\") startedTimestamp\n 100: optional i32 startToCloseTimeoutSeconds\n 110: optional i32 heartbeatTimeoutSeconds\n 120: optional i32 attempt\n 130: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 140: optional binary heartbeatDetails\n 150: optional WorkflowType workflowType\n 160: optional string workflowDomain\n 170: optional Header header\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RecordActivityTaskHeartbeatResponse {\n 10: optional bool cancelRequested\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional binary result\n 30: optional string identity\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional binary taskToken\n 20: optional string reason\n 30: optional binary details\n 40: optional string identity\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional binary taskToken\n 20: optional binary details\n 30: optional string identity\n}\n\nstruct RespondActivityTaskCompletedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary result\n 60: optional string identity\n}\n\nstruct RespondActivityTaskFailedByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional string reason\n 60: optional binary details\n 70: optional string identity\n}\n\nstruct RespondActivityTaskCanceledByIDRequest {\n 10: optional string domain\n 20: optional string workflowID\n 30: optional string runID\n 40: optional string activityID\n 50: optional binary details\n 60: optional string identity\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string identity\n 40: optional string requestId\n}\n\nstruct GetWorkflowExecutionHistoryRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional i32 maximumPageSize\n 40: optional binary nextPageToken\n 50: optional bool waitForNewEvent\n 60: optional HistoryEventFilterType HistoryEventFilterType\n 70: optional bool skipArchival\n}\n\nstruct GetWorkflowExecutionHistoryResponse {\n 10: optional History history\n 11: optional list rawHistory\n 20: optional 
binary nextPageToken\n 30: optional bool archived\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string signalName\n 40: optional binary input\n 50: optional string identity\n 60: optional string requestId\n 70: optional binary control\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional string workflowId\n 30: optional WorkflowType workflowType\n 40: optional TaskList taskList\n 50: optional binary input\n 60: optional i32 executionStartToCloseTimeoutSeconds\n 70: optional i32 taskStartToCloseTimeoutSeconds\n 80: optional string identity\n 90: optional string requestId\n 100: optional WorkflowIdReusePolicy workflowIdReusePolicy\n 110: optional string signalName\n 120: optional binary signalInput\n 130: optional binary control\n 140: optional RetryPolicy retryPolicy\n 150: optional string cronSchedule\n 160: optional Memo memo\n 161: optional SearchAttributes searchAttributes\n 170: optional Header header\n}\n\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional binary details\n 50: optional string identity\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution workflowExecution\n 30: optional string reason\n 40: optional i64 (js.type = \"Long\") decisionFinishEventId\n 50: optional string requestId\n}\n\nstruct ResetWorkflowExecutionResponse {\n 10: optional string runId\n}\n\nstruct ListOpenWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n}\n\nstruct ListOpenWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListClosedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 maximumPageSize\n 30: optional binary nextPageToken\n 40: optional StartTimeFilter StartTimeFilter\n 50: optional WorkflowExecutionFilter executionFilter\n 60: optional WorkflowTypeFilter typeFilter\n 70: optional WorkflowExecutionCloseStatus statusFilter\n}\n\nstruct ListClosedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct ListArchivedWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional i32 pageSize\n 30: optional binary nextPageToken\n 40: optional string query\n}\n\nstruct ListArchivedWorkflowExecutionsResponse {\n 10: optional list executions\n 20: optional binary nextPageToken\n}\n\nstruct CountWorkflowExecutionsRequest {\n 10: optional string domain\n 20: optional string query\n}\n\nstruct CountWorkflowExecutionsResponse {\n 10: optional i64 count\n}\n\nstruct GetSearchAttributesResponse {\n 10: optional map keys\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n 30: optional WorkflowQuery query\n // QueryRejectCondition can used to reject the query if workflow state does not satisify condition\n 40: 
optional QueryRejectCondition queryRejectCondition\n 50: optional QueryConsistencyLevel queryConsistencyLevel\n}\n\nstruct QueryRejected {\n 10: optional WorkflowExecutionCloseStatus closeStatus\n}\n\nstruct QueryWorkflowResponse {\n 10: optional binary queryResult\n 20: optional QueryRejected queryRejected\n}\n\nstruct WorkflowQuery {\n 10: optional string queryType\n 20: optional binary queryArgs\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondQueryTaskCompletedRequest {\n 10: optional binary taskToken\n 20: optional QueryTaskCompletedType completedType\n 30: optional binary queryResult\n 40: optional string errorMessage\n 50: optional WorkerVersionInfo workerVersionInfo\n}\n\nstruct WorkflowQueryResult {\n 10: optional QueryResultType resultType\n 20: optional binary answer\n 30: optional string errorMessage\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n\nstruct PendingActivityInfo {\n 10: optional string activityID\n 20: optional ActivityType activityType\n 30: optional PendingActivityState state\n 40: optional binary heartbeatDetails\n 50: optional i64 (js.type = \"Long\") lastHeartbeatTimestamp\n 60: optional i64 (js.type = \"Long\") lastStartedTimestamp\n 70: optional i32 attempt\n 80: optional i32 maximumAttempts\n 90: optional i64 (js.type = \"Long\") scheduledTimestamp\n 100: optional i64 (js.type = \"Long\") expirationTimestamp\n 110: optional string lastFailureReason\n 120: optional string lastWorkerIdentity\n 130: optional binary lastFailureDetails\n}\n\nstruct PendingChildExecutionInfo {\n 10: optional string workflowID\n 20: optional string runID\n 30: optional string workflowTypName\n 40: optional i64 (js.type = \"Long\") initiatedID\n 50: optional ParentClosePolicy parentClosePolicy\n}\n\nstruct DescribeWorkflowExecutionResponse {\n 10: optional WorkflowExecutionConfiguration executionConfiguration\n 20: optional WorkflowExecutionInfo workflowExecutionInfo\n 30: optional list pendingActivities\n 40: optional list pendingChildren\n}\n\nstruct DescribeTaskListRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n 30: optional TaskListType taskListType\n 40: optional bool includeTaskListStatus\n}\n\nstruct DescribeTaskListResponse {\n 10: optional list pollers\n 20: optional TaskListStatus taskListStatus\n}\n\nstruct ListTaskListPartitionsRequest {\n 10: optional string domain\n 20: optional TaskList taskList\n}\n\nstruct TaskListPartitionMetadata {\n 10: optional string key\n 20: optional string ownerHostName\n}\n\nstruct ListTaskListPartitionsResponse {\n 10: optional list activityTaskListPartitions\n 20: optional list decisionTaskListPartitions\n}\n\nstruct TaskListStatus {\n 10: optional i64 (js.type = \"Long\") backlogCountHint\n 20: optional i64 (js.type = \"Long\") readLevel\n 30: optional i64 (js.type = \"Long\") ackLevel\n 35: optional double ratePerSecond\n 40: optional TaskIDBlock taskIDBlock\n}\n\nstruct TaskIDBlock {\n 10: optional i64 (js.type = \"Long\") startID\n 20: optional i64 (js.type = \"Long\") endID\n}\n\n//At least one of the parameters needs to be provided\nstruct DescribeHistoryHostRequest {\n 10: optional string hostAddress //ip:port\n 20: optional i32 shardIdForHost\n 30: optional WorkflowExecution executionForHost\n}\n\nstruct 
RemoveTaskRequest {\n 10: optional i32 shardID\n 20: optional i32 type\n 30: optional i64 (js.type = \"Long\") taskID\n 40: optional i64 (js.type = \"Long\") visibilityTimestamp\n}\n\nstruct CloseShardRequest {\n 10: optional i32 shardID\n}\n\nstruct DescribeHistoryHostResponse{\n 10: optional i32 numberOfShards\n 20: optional list shardIDs\n 30: optional DomainCacheInfo domainCache\n 40: optional string shardControllerStatus\n 50: optional string address\n}\n\nstruct DomainCacheInfo{\n 10: optional i64 numOfItemsInCacheByID\n 20: optional i64 numOfItemsInCacheByName\n}\n\nenum TaskListType {\n /*\n * Decision type of tasklist\n */\n Decision,\n /*\n * Activity type of tasklist\n */\n Activity,\n}\n\nstruct PollerInfo {\n // Unix Nano\n 10: optional i64 (js.type = \"Long\") lastAccessTime\n 20: optional string identity\n 30: optional double ratePerSecond\n}\n\nstruct RetryPolicy {\n // Interval of the first retry. If coefficient is 1.0 then it is used for all retries.\n 10: optional i32 initialIntervalInSeconds\n\n // Coefficient used to calculate the next retry interval.\n // The next retry interval is previous interval multiplied by the coefficient.\n // Must be 1 or larger.\n 20: optional double backoffCoefficient\n\n // Maximum interval between retries. Exponential backoff leads to interval increase.\n // This value is the cap of the increase. Default is 100x of initial interval.\n 30: optional i32 maximumIntervalInSeconds\n\n // Maximum number of attempts. When exceeded the retries stop even if not expired yet.\n // Must be 1 or bigger. Default is unlimited.\n 40: optional i32 maximumAttempts\n\n // Non-Retriable errors. Will stop retrying if error matches this list.\n 50: optional list nonRetriableErrorReasons\n\n // Expiration time for the whole retry process.\n 60: optional i32 expirationIntervalInSeconds\n}\n\n// HistoryBranchRange represents a piece of range for a branch.\nstruct HistoryBranchRange{\n // branchID of original branch forked from\n 10: optional string branchID\n // beinning node for the range, inclusive\n 20: optional i64 beginNodeID\n // ending node for the range, exclusive\n 30: optional i64 endNodeID\n}\n\n// For history persistence to serialize/deserialize branch details\nstruct HistoryBranch{\n 10: optional string treeID\n 20: optional string branchID\n 30: optional list ancestors\n}\n\n// VersionHistoryItem contains signal eventID and the corresponding version\nstruct VersionHistoryItem{\n 10: optional i64 (js.type = \"Long\") eventID\n 20: optional i64 (js.type = \"Long\") version\n}\n\n// VersionHistory contains the version history of a branch\nstruct VersionHistory{\n 10: optional binary branchToken\n 20: optional list items\n}\n\n// VersionHistories contains all version histories from all branches\nstruct VersionHistories{\n 10: optional i32 currentVersionHistoryIndex\n 20: optional list histories\n}\n\n// ReapplyEventsRequest is the request for reapply events API\nstruct ReapplyEventsRequest{\n 10: optional string domainName\n 20: optional WorkflowExecution workflowExecution\n 30: optional DataBlob events\n}\n\n// SupportedClientVersions contains the support versions for client library\nstruct SupportedClientVersions{\n 10: optional string goSdk\n 20: optional string javaSdk\n}\n\n// ClusterInfo contains information about cadence cluster\nstruct ClusterInfo{\n 10: optional SupportedClientVersions supportedClientVersions\n}\n\nstruct RefreshWorkflowTasksRequest {\n 10: optional string domain\n 20: optional WorkflowExecution execution\n}\n" diff --git 
a/common/archiver/filestore/historyArchiver.go b/common/archiver/filestore/historyArchiver.go index 98b5b84c8e2..b79a5661ce0 100644 --- a/common/archiver/filestore/historyArchiver.go +++ b/common/archiver/filestore/historyArchiver.go @@ -47,6 +47,7 @@ import ( "github.com/uber/cadence/common/backoff" "github.com/uber/cadence/common/log/tag" "github.com/uber/cadence/common/service/config" + "github.com/uber/cadence/common/util" ) const ( @@ -168,13 +169,13 @@ func (h *historyArchiver) Archive( } dirPath := URI.Path() - if err = common.MkdirAll(dirPath, h.dirMode); err != nil { + if err = util.MkdirAll(dirPath, h.dirMode); err != nil { logger.Error(archiver.ArchiveNonRetriableErrorMsg, tag.ArchivalArchiveFailReason(errMakeDirectory), tag.Error(err)) return err } filename := constructHistoryFilename(request.DomainID, request.WorkflowID, request.RunID, request.CloseFailoverVersion) - if err := common.WriteFile(path.Join(dirPath, filename), encodedHistoryBatches, h.fileMode); err != nil { + if err := util.WriteFile(path.Join(dirPath, filename), encodedHistoryBatches, h.fileMode); err != nil { logger.Error(archiver.ArchiveNonRetriableErrorMsg, tag.ArchivalArchiveFailReason(errWriteFile), tag.Error(err)) return err } @@ -196,7 +197,7 @@ func (h *historyArchiver) Get( } dirPath := URI.Path() - exists, err := common.DirectoryExists(dirPath) + exists, err := util.DirectoryExists(dirPath) if err != nil { return nil, &shared.InternalServiceError{Message: err.Error()} } @@ -228,7 +229,7 @@ func (h *historyArchiver) Get( filename := constructHistoryFilename(request.DomainID, request.WorkflowID, request.RunID, token.CloseFailoverVersion) filepath := path.Join(dirPath, filename) - exists, err = common.FileExists(filepath) + exists, err = util.FileExists(filepath) if err != nil { return nil, &shared.InternalServiceError{Message: err.Error()} } @@ -236,7 +237,7 @@ func (h *historyArchiver) Get( return nil, &shared.EntityNotExistsError{Message: archiver.ErrHistoryNotExist.Error()} } - encodedHistoryBatches, err := common.ReadFile(filepath) + encodedHistoryBatches, err := util.ReadFile(filepath) if err != nil { return nil, &shared.InternalServiceError{Message: err.Error()} } @@ -298,7 +299,7 @@ func getNextHistoryBlob(ctx context.Context, historyIterator archiver.HistoryIte } func getHighestVersion(dirPath string, request *archiver.GetHistoryRequest) (*int64, error) { - filenames, err := common.ListFilesByPrefix(dirPath, constructHistoryFilenamePrefix(request.DomainID, request.WorkflowID, request.RunID)) + filenames, err := util.ListFilesByPrefix(dirPath, constructHistoryFilenamePrefix(request.DomainID, request.WorkflowID, request.RunID)) if err != nil { return nil, err } diff --git a/common/archiver/filestore/historyArchiver_test.go b/common/archiver/filestore/historyArchiver_test.go index eb14858e3a7..a67ed068c61 100644 --- a/common/archiver/filestore/historyArchiver_test.go +++ b/common/archiver/filestore/historyArchiver_test.go @@ -39,6 +39,7 @@ import ( "github.com/uber/cadence/common/archiver" "github.com/uber/cadence/common/log/loggerimpl" "github.com/uber/cadence/common/service/config" + "github.com/uber/cadence/common/util" ) const ( @@ -578,12 +579,12 @@ func (s *historyArchiverSuite) writeHistoryBatchesForGetTest(historyBatches []*s data, err := encode(historyBatches) s.Require().NoError(err) filename := constructHistoryFilename(testDomainID, testWorkflowID, testRunID, version) - err = common.WriteFile(path.Join(s.testGetDirectory, filename), data, testFileMode) + err = 
util.WriteFile(path.Join(s.testGetDirectory, filename), data, testFileMode) s.Require().NoError(err) } func (s *historyArchiverSuite) assertFileExists(filepath string) { - exists, err := common.FileExists(filepath) + exists, err := util.FileExists(filepath) s.NoError(err) s.True(exists) } diff --git a/common/archiver/filestore/util.go b/common/archiver/filestore/util.go index 0b3f2996157..028a199e23c 100644 --- a/common/archiver/filestore/util.go +++ b/common/archiver/filestore/util.go @@ -32,8 +32,8 @@ import ( "github.com/dgryski/go-farm" "github.com/uber/cadence/.gen/go/shared" - "github.com/uber/cadence/common" "github.com/uber/cadence/common/archiver" + "github.com/uber/cadence/common/util" ) var ( @@ -116,7 +116,7 @@ func validateDirPath(dirPath string) error { return err } if !info.IsDir() { - return common.ErrDirectoryExpected + return util.ErrDirectoryExpected } return nil } diff --git a/common/archiver/filestore/util_test.go b/common/archiver/filestore/util_test.go index 5dbb61fa5ad..48a764d3e27 100644 --- a/common/archiver/filestore/util_test.go +++ b/common/archiver/filestore/util_test.go @@ -33,6 +33,7 @@ import ( "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/common" "github.com/uber/cadence/common/archiver" + "github.com/uber/cadence/common/util" ) const ( @@ -120,7 +121,7 @@ func (s *UtilSuite) TestValidateDirPath() { }, { dirPath: fpath, - expectedErr: common.ErrDirectoryExpected, + expectedErr: util.ErrDirectoryExpected, }, } @@ -314,7 +315,7 @@ func (s *UtilSuite) createFile(dir string, filename string) { } func (s *UtilSuite) assertDirectoryExists(path string) { - exists, err := common.DirectoryExists(path) + exists, err := util.DirectoryExists(path) s.NoError(err) s.True(exists) } diff --git a/common/archiver/filestore/visibilityArchiver.go b/common/archiver/filestore/visibilityArchiver.go index dac73596b39..37ef2828441 100644 --- a/common/archiver/filestore/visibilityArchiver.go +++ b/common/archiver/filestore/visibilityArchiver.go @@ -34,6 +34,7 @@ import ( "github.com/uber/cadence/common/archiver" "github.com/uber/cadence/common/log/tag" "github.com/uber/cadence/common/service/config" + "github.com/uber/cadence/common/util" ) const ( @@ -110,7 +111,7 @@ func (v *visibilityArchiver) Archive( } dirPath := path.Join(URI.Path(), request.DomainID) - if err = common.MkdirAll(dirPath, v.dirMode); err != nil { + if err = util.MkdirAll(dirPath, v.dirMode); err != nil { logger.Error(archiver.ArchiveNonRetriableErrorMsg, tag.ArchivalArchiveFailReason(errMakeDirectory), tag.Error(err)) return err } @@ -124,7 +125,7 @@ func (v *visibilityArchiver) Archive( // The filename has the format: closeTimestamp_hash(runID).visibility // This format allows the archiver to sort all records without reading the file contents filename := constructVisibilityFilename(request.CloseTimestamp, request.RunID) - if err := common.WriteFile(path.Join(dirPath, filename), encodedVisibilityRecord, v.fileMode); err != nil { + if err := util.WriteFile(path.Join(dirPath, filename), encodedVisibilityRecord, v.fileMode); err != nil { logger.Error(archiver.ArchiveNonRetriableErrorMsg, tag.ArchivalArchiveFailReason(errWriteFile), tag.Error(err)) return err } @@ -177,7 +178,7 @@ func (v *visibilityArchiver) query( } dirPath := path.Join(URI.Path(), request.domainID) - exists, err := common.DirectoryExists(dirPath) + exists, err := util.DirectoryExists(dirPath) if err != nil { return nil, &shared.InternalServiceError{Message: err.Error()} } @@ -185,7 +186,7 @@ func (v *visibilityArchiver) 
query( return &archiver.QueryVisibilityResponse{}, nil } - files, err := common.ListFiles(dirPath) + files, err := util.ListFiles(dirPath) if err != nil { return nil, &shared.InternalServiceError{Message: err.Error()} } @@ -200,7 +201,7 @@ func (v *visibilityArchiver) query( response := &archiver.QueryVisibilityResponse{} for idx, file := range files { - encodedRecord, err := common.ReadFile(path.Join(dirPath, file)) + encodedRecord, err := util.ReadFile(path.Join(dirPath, file)) if err != nil { return nil, &shared.InternalServiceError{Message: err.Error()} } diff --git a/common/archiver/filestore/visibilityArchiver_test.go b/common/archiver/filestore/visibilityArchiver_test.go index a726f97a06e..5ba5323b126 100644 --- a/common/archiver/filestore/visibilityArchiver_test.go +++ b/common/archiver/filestore/visibilityArchiver_test.go @@ -40,6 +40,7 @@ import ( "github.com/uber/cadence/common/archiver" "github.com/uber/cadence/common/log/loggerimpl" "github.com/uber/cadence/common/service/config" + "github.com/uber/cadence/common/util" ) const ( @@ -189,7 +190,7 @@ func (s *visibilityArchiverSuite) TestArchive_Success() { filepath := path.Join(dir, testDomainID, expectedFilename) s.assertFileExists(filepath) - data, err := common.ReadFile(filepath) + data, err := util.ReadFile(filepath) s.NoError(err) archivedRecord := &archiver.ArchiveVisibilityRequest{} @@ -573,12 +574,12 @@ func (s *visibilityArchiverSuite) writeVisibilityRecordForQueryTest(record *visi s.Require().NoError(err) filename := constructVisibilityFilename(record.CloseTimestamp, record.RunID) s.Require().NoError(os.MkdirAll(path.Join(s.testQueryDirectory, record.DomainID), testDirMode)) - err = common.WriteFile(path.Join(s.testQueryDirectory, record.DomainID, filename), data, testFileMode) + err = util.WriteFile(path.Join(s.testQueryDirectory, record.DomainID, filename), data, testFileMode) s.Require().NoError(err) } func (s *visibilityArchiverSuite) assertFileExists(filepath string) { - exists, err := common.FileExists(filepath) + exists, err := util.FileExists(filepath) s.NoError(err) s.True(exists) } diff --git a/common/blobstore/filestore/client.go b/common/blobstore/filestore/client.go index 028e378722e..8367dc7f7ea 100644 --- a/common/blobstore/filestore/client.go +++ b/common/blobstore/filestore/client.go @@ -29,9 +29,9 @@ import ( "fmt" "os" - "github.com/uber/cadence/common" "github.com/uber/cadence/common/blobstore" "github.com/uber/cadence/common/service/config" + "github.com/uber/cadence/common/util" ) type ( @@ -49,12 +49,12 @@ func NewFilestoreClient(cfg *config.FileBlobstore) (blobstore.Client, error) { return nil, errors.New("output directory not given for file blobstore") } outputDirectory := cfg.OutputDirectory - exists, err := common.DirectoryExists(outputDirectory) + exists, err := util.DirectoryExists(outputDirectory) if err != nil { return nil, err } if !exists { - if err := common.MkdirAll(outputDirectory, os.FileMode(0766)); err != nil { + if err := util.MkdirAll(outputDirectory, os.FileMode(0766)); err != nil { return nil, err } } @@ -69,7 +69,7 @@ func (c *client) Put(_ context.Context, request *blobstore.PutRequest) (*blobsto if err != nil { return nil, err } - if err := common.WriteFile(c.filepath(request.Key), data, os.FileMode(0666)); err != nil { + if err := util.WriteFile(c.filepath(request.Key), data, os.FileMode(0666)); err != nil { return nil, err } return &blobstore.PutResponse{}, nil @@ -77,7 +77,7 @@ func (c *client) Put(_ context.Context, request *blobstore.PutRequest) (*blobsto // Get 
fetches a blob func (c *client) Get(_ context.Context, request *blobstore.GetRequest) (*blobstore.GetResponse, error) { - data, err := common.ReadFile(c.filepath(request.Key)) + data, err := util.ReadFile(c.filepath(request.Key)) if err != nil { return nil, err } @@ -92,7 +92,7 @@ func (c *client) Get(_ context.Context, request *blobstore.GetRequest) (*blobsto // Exists determines if a blob exists func (c *client) Exists(_ context.Context, request *blobstore.ExistsRequest) (*blobstore.ExistsResponse, error) { - exists, err := common.FileExists(c.filepath(request.Key)) + exists, err := util.FileExists(c.filepath(request.Key)) if err != nil { return nil, err } diff --git a/common/blobstore/filestore/client_test.go b/common/blobstore/filestore/client_test.go index 7a10b518c36..23a410d6fcd 100644 --- a/common/blobstore/filestore/client_test.go +++ b/common/blobstore/filestore/client_test.go @@ -31,9 +31,9 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/uber/cadence/common" "github.com/uber/cadence/common/blobstore" "github.com/uber/cadence/common/service/config" + "github.com/uber/cadence/common/util" ) type ClientSuite struct { @@ -67,13 +67,13 @@ func (s *ClientSuite) TestNewFilestoreClient_DirectoryAlreadyExists() { func (s *ClientSuite) TestNewFilestoreClient_DirectoryNotAlreadyExists() { name := s.createTempDir("TestNewFilestoreClient_DirectoryNotAlreadyExists") os.RemoveAll(name) - exists, err := common.DirectoryExists(name) + exists, err := util.DirectoryExists(name) s.NoError(err) s.False(exists) c, err := NewFilestoreClient(&config.FileBlobstore{OutputDirectory: name}) s.NoError(err) s.Equal(name, c.(*client).outputDirectory) - exists, err = common.DirectoryExists(name) + exists, err = util.DirectoryExists(name) s.NoError(err) s.True(exists) os.RemoveAll(name) diff --git a/common/cache/domainCache.go b/common/cache/domainCache.go index e459eb5304d..c3bfe0eed63 100644 --- a/common/cache/domainCache.go +++ b/common/cache/domainCache.go @@ -131,6 +131,7 @@ type ( failoverVersion int64 isGlobalDomain bool failoverNotificationVersion int64 + failoverEndTime *int64 notificationVersion int64 initialized bool } @@ -547,6 +548,7 @@ func (c *domainCache) updateIDToDomainCache( entry.failoverVersion = record.failoverVersion entry.isGlobalDomain = record.isGlobalDomain entry.failoverNotificationVersion = record.failoverNotificationVersion + entry.failoverEndTime = record.failoverEndTime entry.notificationVersion = record.notificationVersion entry.initialized = record.initialized @@ -675,6 +677,7 @@ func (c *domainCache) buildEntryFromRecord( newEntry.failoverVersion = record.FailoverVersion newEntry.isGlobalDomain = record.IsGlobalDomain newEntry.failoverNotificationVersion = record.FailoverNotificationVersion + newEntry.failoverEndTime = record.FailoverEndTime newEntry.notificationVersion = record.NotificationVersion newEntry.initialized = true return newEntry @@ -723,6 +726,7 @@ func (entry *DomainCacheEntry) duplicate() *DomainCacheEntry { result.failoverVersion = entry.failoverVersion result.isGlobalDomain = entry.isGlobalDomain result.failoverNotificationVersion = entry.failoverNotificationVersion + result.failoverEndTime = entry.failoverEndTime result.notificationVersion = entry.notificationVersion result.initialized = entry.initialized return result @@ -774,7 +778,16 @@ func (entry *DomainCacheEntry) IsDomainActive() bool { // domain is not a global domain, meaning domain is always "active" within each cluster return true } - return 
entry.clusterMetadata.GetCurrentClusterName() == entry.replicationConfig.ActiveClusterName + return entry.clusterMetadata.GetCurrentClusterName() == entry.replicationConfig.ActiveClusterName && !entry.IsDomainPendingActive() +} + +// IsDomainPendingActive returns whether the domain is in pending active state +func (entry *DomainCacheEntry) IsDomainPendingActive() bool { + if !entry.isGlobalDomain { + // domain is not a global domain, meaning domain is always "active" within each cluster + return true + } + return entry.failoverEndTime != nil } // GetReplicationPolicy return the derived workflow replication policy @@ -793,6 +806,12 @@ func (entry *DomainCacheEntry) GetDomainNotActiveErr() error { // domain is consider active return nil } + if entry.IsDomainPendingActive() { + return errors.NewDomainPendingActiveError( + entry.info.Name, + entry.clusterMetadata.GetCurrentClusterName(), + ) + } return errors.NewDomainNotActiveError( entry.info.Name, entry.clusterMetadata.GetCurrentClusterName(), diff --git a/common/errors/domainNotActiveError.go b/common/errors/domainNotActiveError.go index d573331aad4..839e2e6fece 100644 --- a/common/errors/domainNotActiveError.go +++ b/common/errors/domainNotActiveError.go @@ -40,3 +40,16 @@ func NewDomainNotActiveError(domainName string, currentCluster string, activeClu ActiveCluster: activeCluster, } } + +// NewDomainPendingActiveError return a domain not active error +func NewDomainPendingActiveError(domainName string, currentCluster string) *workflow.DomainNotActiveError { + return &workflow.DomainNotActiveError{ + Message: fmt.Sprintf( + "Domain: %s is pending active in cluster: %s.", + domainName, + currentCluster, + ), + DomainName: domainName, + CurrentCluster: currentCluster, + } +} diff --git a/common/service/dynamicconfig/constants.go b/common/service/dynamicconfig/constants.go index 55482c6bb73..49e7e3617e4 100644 --- a/common/service/dynamicconfig/constants.go +++ b/common/service/dynamicconfig/constants.go @@ -99,6 +99,7 @@ var keys = map[Key]string{ EnableClientVersionCheck: "frontend.enableClientVersionCheck", ValidSearchAttributes: "frontend.validSearchAttributes", SendRawWorkflowHistory: "frontend.sendRawWorkflowHistory", + FrontendEnableRPCReplication: "frontend.enableRPCReplication", SearchAttributesNumberOfKeysLimit: "frontend.searchAttributesNumberOfKeysLimit", SearchAttributesSizeOfValueLimit: "frontend.searchAttributesSizeOfValueLimit", SearchAttributesTotalSizeLimit: "frontend.searchAttributesTotalSizeLimit", @@ -232,6 +233,8 @@ var keys = map[Key]string{ ReplicationTaskProcessorNoTaskInitialWait: "history.ReplicationTaskProcessorNoTaskInitialWait", ReplicationTaskProcessorCleanupInterval: "history.ReplicationTaskProcessorCleanupInterval", ReplicationTaskProcessorCleanupJitterCoefficient: "history.ReplicationTaskProcessorCleanupJitterCoefficient", + HistoryEnableRPCReplication: "history.EnableRPCReplication", + HistoryEnableKafkaReplication: "history.EnableKafkaReplication", EnableConsistentQuery: "history.EnableConsistentQuery", EnableConsistentQueryByDomain: "history.EnableConsistentQueryByDomain", MaxBufferedQueryCount: "history.MaxBufferedQueryCount", @@ -251,6 +254,7 @@ var keys = map[Key]string{ WorkerReplicationTaskMaxRetryDuration: "worker.replicationTaskMaxRetryDuration", WorkerReplicationTaskContextDuration: "worker.replicationTaskContextDuration", WorkerReReplicationContextTimeout: "worker.workerReReplicationContextTimeout", + WorkerEnableRPCReplication: "worker.enableWorkerRPCReplication", WorkerIndexerConcurrency: 
"worker.indexerConcurrency", WorkerESProcessorNumOfWorkers: "worker.ESProcessorNumOfWorkers", WorkerESProcessorBulkActions: "worker.ESProcessorBulkActions", @@ -382,6 +386,8 @@ const ( ValidSearchAttributes // SendRawWorkflowHistory is whether to enable raw history retrieving SendRawWorkflowHistory + // FrontendEnableRPCReplication is a feature flag for rpc replication + FrontendEnableRPCReplication // SearchAttributesNumberOfKeysLimit is the limit of number of keys SearchAttributesNumberOfKeysLimit // SearchAttributesSizeOfValueLimit is the size limit of each value @@ -657,6 +663,8 @@ const ( WorkerReplicationTaskContextDuration // WorkerReReplicationContextTimeout is the context timeout for end to end re-replication process WorkerReReplicationContextTimeout + // WorkerEnableRPCReplication is the feature flag for RPC replication + WorkerEnableRPCReplication // WorkerIndexerConcurrency is the max concurrent messages to be processed at any given time WorkerIndexerConcurrency // WorkerESProcessorNumOfWorkers is num of workers for esProcessor @@ -718,6 +726,10 @@ const ( ReplicationTaskProcessorCleanupInterval // ReplicationTaskProcessorCleanupJitterCoefficient is the jitter for cleanup timer ReplicationTaskProcessorCleanupJitterCoefficient + // HistoryEnableRPCReplication is the feature flag for RPC replication + HistoryEnableRPCReplication + // HistoryEnableKafkaReplication is the migration flag for Kafka replication + HistoryEnableKafkaReplication // EnableConsistentQuery indicates if consistent query is enabled for the cluster EnableConsistentQuery // EnableConsistentQueryByDomain indicates if consistent query is enabled for a domain diff --git a/common/util/bufferedWriter.go b/common/util/bufferedWriter.go new file mode 100644 index 00000000000..ad041189071 --- /dev/null +++ b/common/util/bufferedWriter.go @@ -0,0 +1,145 @@ +// The MIT License (MIT) +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package util + +import ( + "bytes" + "sync" +) + +type ( + // BufferedWriter is used to buffer entities, construct byte blobs and invoke handle function on byte blobs. + // BufferedWriter is thread safe and makes defensive copies in and out. + // BufferedWriter's state is unchanged whenever any method returns an error. + BufferedWriter interface { + Add(interface{}) error + Flush() error + LastFlushedPage() int + } + + // HandleFn is invoked whenever a byte blob needs to be flushed. 
+ // Takes in deep copy of constructed byte blob and the current page number. + // Returns an error on failure to handle or nil otherwise. + HandleFn func([]byte, int) error + + // SerializeFn is used to serialize entities. + SerializeFn func(interface{}) ([]byte, error) + + bufferedWriter struct { + sync.Mutex + + buffer *bytes.Buffer + page int + + flushThreshold int + separatorToken []byte + handleFn HandleFn + serializeFn SerializeFn + } +) + +// NewBufferedWriter constructs a new BufferedWriter +func NewBufferedWriter( + handleFn HandleFn, + serializeFn SerializeFn, + flushThreshold int, + separatorToken []byte, +) BufferedWriter { + separatorTokenCopy := make([]byte, len(separatorToken), len(separatorToken)) + copy(separatorTokenCopy, separatorToken) + return &bufferedWriter{ + buffer: &bytes.Buffer{}, + page: 0, + + flushThreshold: flushThreshold, + separatorToken: separatorTokenCopy, + handleFn: handleFn, + serializeFn: serializeFn, + } +} + +// Add adds element to buffer. Triggers flush if exceeds flushThreshold. +func (bw *bufferedWriter) Add(e interface{}) error { + bw.Lock() + defer bw.Unlock() + + if err := bw.writeToBuffer(e); err != nil { + return err + } + if bw.shouldFlush() { + if err := bw.flush(); err != nil { + return err + } + } + return nil +} + +// Flush invokes PutFn and advances state of bufferedWriter to next page. +func (bw *bufferedWriter) Flush() error { + bw.Lock() + defer bw.Unlock() + + return bw.flush() +} + +// LastFlushedPage returns the page number of the last page that was flushed. +// Returns -1 if no pages have been flushed. +func (bw *bufferedWriter) LastFlushedPage() int { + bw.Lock() + defer bw.Unlock() + + return bw.page - 1 +} + +func (bw *bufferedWriter) flush() error { + src := bw.buffer.Bytes() + dest := make([]byte, len(src), len(src)) + copy(dest, src) + err := bw.handleFn(dest, bw.page) + if err != nil { + return err + } + bw.startNewPage() + return nil +} + +func (bw *bufferedWriter) writeToBuffer(e interface{}) error { + data, err := bw.serializeFn(e) + if err != nil { + return err + } + + // write will never return an error, so it can be safely ignored + bw.buffer.Write(data) + bw.buffer.Write(bw.separatorToken) + return nil +} + +func (bw *bufferedWriter) shouldFlush() bool { + return bw.buffer.Len() >= bw.flushThreshold +} + +func (bw *bufferedWriter) startNewPage() { + bw.buffer = &bytes.Buffer{} + bw.page = bw.page + 1 +} diff --git a/common/util/bufferedWriterIterator_test.go b/common/util/bufferedWriterIterator_test.go new file mode 100644 index 00000000000..2bd813486a9 --- /dev/null +++ b/common/util/bufferedWriterIterator_test.go @@ -0,0 +1,60 @@ +// The MIT License (MIT) +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package util + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBufferedWriterWithIterator(t *testing.T) { + blobMap := make(map[string][]byte) + handleFn := func(data []byte, page int) error { + key := fmt.Sprintf("key_%v", page) + blobMap[key] = data + return nil + } + bw := NewBufferedWriter(handleFn, json.Marshal, 100, []byte("\r\n")) + for i := 0; i < 1000; i++ { + assert.NoError(t, bw.Add(i)) + } + assert.NoError(t, bw.Flush()) + lastFlushedPage := bw.LastFlushedPage() + getFn := func(page int) ([]byte, error) { + key := fmt.Sprintf("key_%v", page) + return blobMap[key], nil + } + itr := NewIterator(0, lastFlushedPage, getFn, []byte("\r\n")) + i := 0 + for itr.HasNext() { + val, err := itr.Next() + assert.NoError(t, err) + expectedVal, err := json.Marshal(i) + assert.NoError(t, err) + assert.Equal(t, expectedVal, val) + i++ + } +} diff --git a/common/util/bufferedWriter_test.go b/common/util/bufferedWriter_test.go new file mode 100644 index 00000000000..fab36b4ff5b --- /dev/null +++ b/common/util/bufferedWriter_test.go @@ -0,0 +1,115 @@ +// The MIT License (MIT) +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package util + +import ( + "bytes" + "encoding/json" + "errors" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type BufferedWriterSuite struct { + *require.Assertions + suite.Suite +} + +func TestBufferedWriterSuite(t *testing.T) { + suite.Run(t, new(BufferedWriterSuite)) +} + +func (s *BufferedWriterSuite) SetupTest() { + s.Assertions = require.New(s.T()) +} + +func (s *BufferedWriterSuite) TestShouldFlush() { + bw := &bufferedWriter{ + buffer: bytes.NewBuffer([]byte{1, 2, 3}), + flushThreshold: 10, + } + s.False(bw.shouldFlush()) + bw.flushThreshold = 3 + s.True(bw.shouldFlush()) +} + +func (s *BufferedWriterSuite) TestWriteToBuffer() { + bw := &bufferedWriter{ + buffer: &bytes.Buffer{}, + separatorToken: []byte("\r\n"), + serializeFn: json.Marshal, + } + s.Error(bw.writeToBuffer(make(chan struct{}))) + s.Equal("", bw.buffer.String()) + + s.NoError(bw.writeToBuffer("first")) + s.Error(bw.writeToBuffer(make(chan struct{}))) + s.Equal("\"first\"\r\n", bw.buffer.String()) +} + +func (s *BufferedWriterSuite) TestFlush_HandleReturnsError() { + handleFn := func(data []byte, page int) error { + return errors.New("put function returns error") + } + bw := &bufferedWriter{ + buffer: bytes.NewBuffer([]byte{1, 2, 3}), + page: 1, + handleFn: handleFn, + } + s.Error(bw.flush()) + s.Equal(1, bw.page) + s.Equal([]byte{1, 2, 3}, bw.buffer.Bytes()) +} + +func (s *BufferedWriterSuite) TestFlush_Success() { + handleFn := func(data []byte, page int) error { + return nil + } + bw := &bufferedWriter{ + buffer: bytes.NewBuffer([]byte{1, 2, 3}), + page: 1, + handleFn: handleFn, + } + s.NoError(bw.flush()) + s.Len(bw.buffer.Bytes(), 0) + s.Equal(1, bw.LastFlushedPage()) +} + +func (s *BufferedWriterSuite) TestAddAndFlush() { + handleFn := func(data []byte, page int) error { + return nil + } + bw := NewBufferedWriter(handleFn, json.Marshal, 10, []byte("\r\n")).(*bufferedWriter) + expectedLastFlushedPage := -1 + for i := 1; i <= 100; i++ { + s.NoError(bw.Add(0)) + if i%4 == 0 { + expectedLastFlushedPage++ + } + s.Equal(expectedLastFlushedPage, bw.LastFlushedPage()) + } + s.Equal(24, bw.LastFlushedPage()) + s.Len(bw.buffer.Bytes(), 0) +} diff --git a/common/file_util.go b/common/util/file_util.go similarity index 99% rename from common/file_util.go rename to common/util/file_util.go index 78fc42c6f32..5279925cadb 100644 --- a/common/file_util.go +++ b/common/util/file_util.go @@ -20,7 +20,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -package common +package util import ( "errors" diff --git a/common/file_util_test.go b/common/util/file_util_test.go similarity index 99% rename from common/file_util_test.go rename to common/util/file_util_test.go index 7da38eee837..5a27f2426d4 100644 --- a/common/file_util_test.go +++ b/common/util/file_util_test.go @@ -20,7 +20,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -package common +package util import ( "io/ioutil" diff --git a/common/util/iterator.go b/common/util/iterator.go new file mode 100644 index 00000000000..df10281cf96 --- /dev/null +++ b/common/util/iterator.go @@ -0,0 +1,165 @@ +// The MIT License (MIT) +// +// Copyright (c) 2020 Uber Technologies, Inc. 
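// Illustrative sketch (not part of this change): with file_util.go moved from
// package common into the new package util, call sites are assumed to switch to
// the github.com/uber/cadence/common/util import path. A minimal example of the
// adjusted pattern, mirroring the filestore blobstore client above; the
// directory argument is a placeholder:
//
//	import (
//		"os"
//
//		"github.com/uber/cadence/common/util"
//	)
//
//	func ensureOutputDirectory(dir string) error {
//		exists, err := util.DirectoryExists(dir)
//		if err != nil {
//			return err
//		}
//		if exists {
//			return nil
//		}
//		return util.MkdirAll(dir, os.FileMode(0766))
//	}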
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package util + +import ( + "bytes" + "errors" + "sync" +) + +var ( + // ErrIteratorFinished indicates that Next was called on an iterator + // which has already reached the end of its input. + ErrIteratorFinished = errors.New("iterator has reached end") +) + +type ( + // Iterator is used to iterate over entities. + // Each entity is represented as a byte slice. + // Iterator will fetch pages using the provided GetFn. + // Pages will be fetched starting from provided minPage and continuing until provided maxPage. + // Iterator will skip over any pages with empty input and will skip over any empty elements within a page. + // Iterator is thread safe and makes deep copies of all in and out data. + // If Next returns an error all subsequent calls to Next will return the same error. + Iterator interface { + Next() ([]byte, error) + HasNext() bool + } + + // GetFn fetches bytes for given page number. Returns error on failure. + GetFn func(int) ([]byte, error) + + iterator struct { + sync.Mutex + + currentPage int + page [][]byte + pageIndex int + nextResult []byte + nextError error + + separatorToken []byte + getFn GetFn + minPage int + maxPage int + } +) + +// NewIterator constructs a new iterator. +func NewIterator( + minPage int, + maxPage int, + getFn GetFn, + separatorToken []byte, +) Iterator { + separatorTokenCopy := make([]byte, len(separatorToken), len(separatorToken)) + copy(separatorTokenCopy, separatorToken) + itr := &iterator{ + currentPage: -1, + + separatorToken: separatorTokenCopy, + getFn: getFn, + minPage: minPage, + maxPage: maxPage, + } + itr.advance(true) + return itr +} + +// Next returns the next element in the iterator. +// Returns an error if no elements are left or if a non-recoverable error occurred. +func (i *iterator) Next() ([]byte, error) { + i.Lock() + defer i.Unlock() + + result := i.nextResult + error := i.nextError + + i.advance(false) + + copyResult := make([]byte, len(result), len(result)) + copy(copyResult, result) + return copyResult, error +} + +// HasNext returns true if next invocation of Next will return on-empty byte blob and nil error, false otherwise. 
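// Illustrative usage sketch (not part of this change): draining an Iterator over
// pages previously produced by a BufferedWriter, mirroring the round trip
// exercised in bufferedWriterIterator_test.go. The in-memory pages map and the
// lastFlushedPage argument are assumptions made purely for demonstration.
func exampleDrainIterator(pages map[int][]byte, lastFlushedPage int) ([][]byte, error) {
	getFn := func(page int) ([]byte, error) {
		return pages[page], nil // fetch the raw byte blob for one page
	}
	itr := NewIterator(0, lastFlushedPage, getFn, []byte("\r\n"))
	var elements [][]byte
	for itr.HasNext() {
		element, err := itr.Next() // one serialized entity per call
		if err != nil {
			return nil, err
		}
		elements = append(elements, element)
	}
	return elements, nil
}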
+func (i *iterator) HasNext() bool { + i.Lock() + defer i.Unlock() + + return i.hasNext() +} + +func (i *iterator) advance(initialization bool) { + if !i.hasNext() && !initialization { + return + } + i.advanceOnce() + for len(i.nextResult) == 0 && i.nextError == nil { + i.advanceOnce() + } +} + +func (i *iterator) advanceOnce() { + if i.pageIndex < len(i.page) { + i.consumeFromCurrentPage() + return + } + if i.currentPage >= i.maxPage { + i.setIteratorToTerminalState(ErrIteratorFinished) + return + } + i.page = nil + i.currentPage++ + i.pageIndex = 0 + data, err := i.getFn(i.currentPage) + if err != nil { + i.setIteratorToTerminalState(err) + return + } + if len(data) == 0 { + i.nextResult = nil + i.nextError = nil + } else { + copyData := make([]byte, len(data), len(data)) + copy(copyData, data) + i.page = bytes.Split(copyData, i.separatorToken) + i.consumeFromCurrentPage() + } +} + +func (i *iterator) consumeFromCurrentPage() { + i.nextResult = i.page[i.pageIndex] + i.nextError = nil + i.pageIndex = i.pageIndex + 1 +} + +func (i *iterator) setIteratorToTerminalState(err error) { + i.nextResult = nil + i.nextError = err +} + +func (i *iterator) hasNext() bool { + return len(i.nextResult) > 0 && i.nextError == nil +} diff --git a/common/util/iterator_test.go b/common/util/iterator_test.go new file mode 100644 index 00000000000..0e44e9c3f34 --- /dev/null +++ b/common/util/iterator_test.go @@ -0,0 +1,104 @@ +// The MIT License (MIT) +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package util + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +var getMap = map[int][]byte{ + 0: nil, + 1: {}, + 2: []byte("\r\n\r\n\r\n"), + 3: []byte("\"one\"\r\n\"two\"\r\n"), + 4: []byte("\"three\"\r\n\"four\"\r\n\r\n\"five\"\r\n"), + 5: []byte("\r\n\"six\"\r\n\"seven\"\r\n\"eight\"\r\n"), +} + +type IteratorSuite struct { + *require.Assertions + suite.Suite +} + +func TestIteratorSuite(t *testing.T) { + suite.Run(t, new(IteratorSuite)) +} + +func (s *IteratorSuite) SetupTest() { + s.Assertions = require.New(s.T()) +} + +func (s *IteratorSuite) TestInitializedToEmpty() { + getFn := func(page int) ([]byte, error) { + return getMap[page], nil + } + itr := NewIterator(0, 2, getFn, []byte("\r\n")) + s.False(itr.HasNext()) + _, err := itr.Next() + s.Error(err) +} + +func (s *IteratorSuite) TestNonEmptyNoErrors() { + getFn := func(page int) ([]byte, error) { + return getMap[page], nil + } + itr := NewIterator(0, 5, getFn, []byte("\r\n")) + expectedResults := []string{"\"one\"", "\"two\"", "\"three\"", "\"four\"", "\"five\"", "\"six\"", "\"seven\"", "\"eight\""} + i := 0 + for itr.HasNext() { + curr, err := itr.Next() + s.NoError(err) + expectedCurr := []byte(expectedResults[i]) + s.Equal(expectedCurr, curr) + i++ + } + s.False(itr.HasNext()) + _, err := itr.Next() + s.Error(err) +} + +func (s *IteratorSuite) TestNonEmptyWithErrors() { + getFn := func(page int) ([]byte, error) { + if page > 4 { + return nil, errors.New("error getting next page") + } + return getMap[page], nil + } + itr := NewIterator(0, 5, getFn, []byte("\r\n")) + expectedResults := []string{"\"one\"", "\"two\"", "\"three\"", "\"four\"", "\"five\""} + i := 0 + for itr.HasNext() { + curr, err := itr.Next() + s.NoError(err) + expectedCurr := []byte(expectedResults[i]) + s.Equal(expectedCurr, curr) + i++ + } + s.False(itr.HasNext()) + _, err := itr.Next() + s.Error(err) +} diff --git a/host/dynamicconfig.go b/host/dynamicconfig.go index 2e0a771222c..6a989387fad 100644 --- a/host/dynamicconfig.go +++ b/host/dynamicconfig.go @@ -44,6 +44,10 @@ var ( dynamicconfig.ReplicationTaskFetcherErrorRetryWait: 50 * time.Millisecond, dynamicconfig.ReplicationTaskProcessorErrorRetryWait: time.Millisecond, dynamicconfig.EnableConsistentQueryByDomain: true, + dynamicconfig.FrontendEnableRPCReplication: true, + dynamicconfig.HistoryEnableRPCReplication: true, + dynamicconfig.HistoryEnableKafkaReplication: false, + dynamicconfig.WorkerEnableRPCReplication: true, } ) diff --git a/host/integration_test.go b/host/integration_test.go index 7083cdf31d0..5ecfadb23dc 100644 --- a/host/integration_test.go +++ b/host/integration_test.go @@ -41,6 +41,8 @@ import ( workflow "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/common" "github.com/uber/cadence/common/log/tag" + cadencehistory "github.com/uber/cadence/service/history" + "github.com/uber/cadence/service/history/execution" "github.com/uber/cadence/service/matching" ) @@ -120,6 +122,166 @@ func (s *integrationSuite) TestStartWorkflowExecution() { s.Nil(we2) } +func (s *integrationSuite) TestStartWorkflowExecution_IDReusePolicy() { + id := "integration-start-workflow-id-reuse-test" + wt := "integration-start-workflow-id-reuse-type" + tl := "integration-start-workflow-id-reuse-tasklist" + identity := "worker1" + + workflowType := &workflow.WorkflowType{} + workflowType.Name = common.StringPtr(wt) + + taskList := &workflow.TaskList{} + taskList.Name = common.StringPtr(tl) + + createStartRequest := func(policy 
workflow.WorkflowIdReusePolicy) *workflow.StartWorkflowExecutionRequest { + return &workflow.StartWorkflowExecutionRequest{ + RequestId: common.StringPtr(uuid.New()), + Domain: common.StringPtr(s.domainName), + WorkflowId: common.StringPtr(id), + WorkflowType: workflowType, + TaskList: taskList, + Input: nil, + ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(100), + TaskStartToCloseTimeoutSeconds: common.Int32Ptr(1), + Identity: common.StringPtr(identity), + WorkflowIdReusePolicy: &policy, + } + } + + request := createStartRequest(workflow.WorkflowIdReusePolicyAllowDuplicateFailedOnly) + we, err := s.engine.StartWorkflowExecution(createContext(), request) + s.Nil(err) + + // Test policies when workflow is running + policies := []workflow.WorkflowIdReusePolicy{ + workflow.WorkflowIdReusePolicyAllowDuplicateFailedOnly, + workflow.WorkflowIdReusePolicyAllowDuplicate, + workflow.WorkflowIdReusePolicyRejectDuplicate, + } + for _, policy := range policies { + newRequest := createStartRequest(policy) + we1, err1 := s.engine.StartWorkflowExecution(createContext(), newRequest) + s.Error(err1) + s.IsType(&workflow.WorkflowExecutionAlreadyStartedError{}, err1) + s.Nil(we1) + } + + // Test TerminateIfRunning policy when workflow is running + policy := workflow.WorkflowIdReusePolicyTerminateIfRunning + newRequest := createStartRequest(policy) + we1, err1 := s.engine.StartWorkflowExecution(createContext(), newRequest) + s.NoError(err1) + s.NotEqual(we.GetRunId(), we1.GetRunId()) + // verify terminate status + executionTerminated := false +GetHistoryLoop: + for i := 0; i < 10; i++ { + historyResponse, err := s.engine.GetWorkflowExecutionHistory(createContext(), &workflow.GetWorkflowExecutionHistoryRequest{ + Domain: common.StringPtr(s.domainName), + Execution: &workflow.WorkflowExecution{ + WorkflowId: common.StringPtr(id), + RunId: we.RunId, + }, + }) + s.Nil(err) + history := historyResponse.History + + lastEvent := history.Events[len(history.Events)-1] + if lastEvent.GetEventType() != workflow.EventTypeWorkflowExecutionTerminated { + s.Logger.Warn("Execution not terminated yet.") + time.Sleep(100 * time.Millisecond) + continue GetHistoryLoop + } + + terminateEventAttributes := lastEvent.WorkflowExecutionTerminatedEventAttributes + s.Equal(cadencehistory.TerminateIfRunningReason, terminateEventAttributes.GetReason()) + s.Equal(fmt.Sprintf(cadencehistory.TerminateIfRunningDetailsTemplate, we1.GetRunId()), string(terminateEventAttributes.Details)) + s.Equal(execution.IdentityHistoryService, terminateEventAttributes.GetIdentity()) + executionTerminated = true + break GetHistoryLoop + } + s.True(executionTerminated) + + // Terminate current workflow execution + err = s.engine.TerminateWorkflowExecution(createContext(), &workflow.TerminateWorkflowExecutionRequest{ + Domain: common.StringPtr(s.domainName), + WorkflowExecution: &workflow.WorkflowExecution{ + WorkflowId: common.StringPtr(id), + RunId: we1.RunId, + }, + Reason: common.StringPtr("kill workflow"), + Identity: common.StringPtr(identity), + }) + s.Nil(err) + + // test policy AllowDuplicateFailedOnly + policy = workflow.WorkflowIdReusePolicyAllowDuplicateFailedOnly + newRequest = createStartRequest(policy) + we2, err2 := s.engine.StartWorkflowExecution(createContext(), newRequest) + s.NoError(err2) + s.NotEqual(we1.GetRunId(), we2.GetRunId()) + // complete workflow instead of terminate + dtHandler := func(execution *workflow.WorkflowExecution, wt *workflow.WorkflowType, + previousStartedEventID, startedEventID int64, history *workflow.History) 
([]byte, []*workflow.Decision, error) { + return []byte(strconv.Itoa(0)), []*workflow.Decision{{ + DecisionType: common.DecisionTypePtr(workflow.DecisionTypeCompleteWorkflowExecution), + CompleteWorkflowExecutionDecisionAttributes: &workflow.CompleteWorkflowExecutionDecisionAttributes{ + Result: []byte("Done."), + }, + }}, nil + } + poller := &TaskPoller{ + Engine: s.engine, + Domain: s.domainName, + TaskList: taskList, + Identity: identity, + DecisionHandler: dtHandler, + Logger: s.Logger, + T: s.T(), + } + _, err = poller.PollAndProcessDecisionTask(true, false) + s.Logger.Info("PollAndProcessDecisionTask", tag.Error(err)) + s.Nil(err) + // duplicate requests + we3, err3 := s.engine.StartWorkflowExecution(createContext(), newRequest) + s.NoError(err3) + s.Equal(we2.GetRunId(), we3.GetRunId()) + // new request, same policy + newRequest = createStartRequest(policy) + we3, err3 = s.engine.StartWorkflowExecution(createContext(), newRequest) + s.Error(err3) + s.IsType(&workflow.WorkflowExecutionAlreadyStartedError{}, err3) + s.Nil(we3) + + // test policy RejectDuplicate + policy = workflow.WorkflowIdReusePolicyRejectDuplicate + newRequest = createStartRequest(policy) + we3, err3 = s.engine.StartWorkflowExecution(createContext(), newRequest) + s.Error(err3) + s.IsType(&workflow.WorkflowExecutionAlreadyStartedError{}, err3) + s.Nil(we3) + + // test policy AllowDuplicate + policy = workflow.WorkflowIdReusePolicyAllowDuplicate + newRequest = createStartRequest(policy) + we4, err4 := s.engine.StartWorkflowExecution(createContext(), newRequest) + s.NoError(err4) + s.NotEqual(we3.GetRunId(), we4.GetRunId()) + + // complete workflow + _, err = poller.PollAndProcessDecisionTask(true, false) + s.Logger.Info("PollAndProcessDecisionTask", tag.Error(err)) + s.Nil(err) + + // test policy TerminateIfRunning + policy = workflow.WorkflowIdReusePolicyTerminateIfRunning + newRequest = createStartRequest(policy) + we5, err5 := s.engine.StartWorkflowExecution(createContext(), newRequest) + s.NoError(err5) + s.NotEqual(we4.GetRunId(), we5.GetRunId()) +} + func (s *integrationSuite) TestTerminateWorkflow() { id := "integration-terminate-workflow-test" wt := "integration-terminate-workflow-test-type" diff --git a/host/signalworkflow_test.go b/host/signalworkflow_test.go index f0d37cb0d08..6a321c7cdb7 100644 --- a/host/signalworkflow_test.go +++ b/host/signalworkflow_test.go @@ -24,6 +24,7 @@ import ( "bytes" "context" "encoding/binary" + "fmt" "strconv" "strings" "time" @@ -33,6 +34,8 @@ import ( workflow "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/common" "github.com/uber/cadence/common/log/tag" + cadencehistory "github.com/uber/cadence/service/history" + "github.com/uber/cadence/service/history/execution" ) func (s *integrationSuite) TestSignalWorkflow() { @@ -1537,20 +1540,67 @@ func (s *integrationSuite) TestSignalWithStartWorkflow_IDReusePolicy() { s.NotEmpty(resp.GetRunId()) // Terminate workflow execution - err = s.engine.TerminateWorkflowExecution(createContext(), &workflow.TerminateWorkflowExecutionRequest{ - Domain: common.StringPtr(s.domainName), - WorkflowExecution: &workflow.WorkflowExecution{ - WorkflowId: common.StringPtr(id), - }, - Reason: common.StringPtr("test WorkflowIdReusePolicyAllowDuplicateFailedOnly"), - Details: nil, - Identity: common.StringPtr(identity), - }) - s.Nil(err) + terminateWorkflow := func() { + err = s.engine.TerminateWorkflowExecution(createContext(), &workflow.TerminateWorkflowExecutionRequest{ + Domain: common.StringPtr(s.domainName), + 
WorkflowExecution: &workflow.WorkflowExecution{ + WorkflowId: common.StringPtr(id), + }, + Reason: common.StringPtr("test WorkflowIdReusePolicyAllowDuplicateFailedOnly"), + Details: nil, + Identity: common.StringPtr(identity), + }) + s.Nil(err) + } + terminateWorkflow() // test policy WorkflowIdReusePolicyAllowDuplicateFailedOnly success start wfIDReusePolicy = workflow.WorkflowIdReusePolicyAllowDuplicateFailedOnly resp, err = s.engine.SignalWithStartWorkflowExecution(createContext(), sRequest) s.Nil(err) s.NotEmpty(resp.GetRunId()) + + // test policy WorkflowIdReusePolicyTerminateIfRunning + wfIDReusePolicy = workflow.WorkflowIdReusePolicyTerminateIfRunning + sRequest.RequestId = common.StringPtr(uuid.New()) + resp1, err1 := s.engine.SignalWithStartWorkflowExecution(createContext(), sRequest) + s.Nil(err1) + s.NotEmpty(resp1) + // verify terminate status + executionTerminated := false +GetHistoryLoop: + for i := 0; i < 10; i++ { + historyResponse, err := s.engine.GetWorkflowExecutionHistory(createContext(), &workflow.GetWorkflowExecutionHistoryRequest{ + Domain: common.StringPtr(s.domainName), + Execution: &workflow.WorkflowExecution{ + WorkflowId: common.StringPtr(id), + RunId: resp.RunId, + }, + }) + s.Nil(err) + history := historyResponse.History + + lastEvent := history.Events[len(history.Events)-1] + if lastEvent.GetEventType() != workflow.EventTypeWorkflowExecutionTerminated { + s.Logger.Warn("Execution not terminated yet.") + time.Sleep(100 * time.Millisecond) + continue GetHistoryLoop + } + + terminateEventAttributes := lastEvent.WorkflowExecutionTerminatedEventAttributes + s.Equal(cadencehistory.TerminateIfRunningReason, terminateEventAttributes.GetReason()) + s.Equal(fmt.Sprintf(cadencehistory.TerminateIfRunningDetailsTemplate, resp1.GetRunId()), string(terminateEventAttributes.Details)) + s.Equal(execution.IdentityHistoryService, terminateEventAttributes.GetIdentity()) + executionTerminated = true + break GetHistoryLoop + } + s.True(executionTerminated) + // terminate current run + terminateWorkflow() + // test clean start with WorkflowIdReusePolicyTerminateIfRunning + sRequest.RequestId = common.StringPtr(uuid.New()) + resp2, err2 := s.engine.SignalWithStartWorkflowExecution(createContext(), sRequest) + s.Nil(err2) + s.NotEmpty(resp2) + s.NotEqual(resp1.GetRunId(), resp2.GetRunId()) } diff --git a/idls b/idls index fb394b0ea9f..00375785cce 160000 --- a/idls +++ b/idls @@ -1 +1 @@ -Subproject commit fb394b0ea9f1d67ca3f4851a4c25cbd473686440 +Subproject commit 00375785cce4592c306e2ddd4bed1e14eb3e9606 diff --git a/service/frontend/service.go b/service/frontend/service.go index 1303eb1ef18..41f6176010d 100644 --- a/service/frontend/service.go +++ b/service/frontend/service.go @@ -93,6 +93,8 @@ type Config struct { VisibilityArchivalQueryMaxPageSize dynamicconfig.IntPropertyFn SendRawWorkflowHistory dynamicconfig.BoolPropertyFnWithDomainFilter + + EnableRPCReplication dynamicconfig.BoolPropertyFn } // NewConfig returns new service config with default values @@ -132,6 +134,7 @@ func NewConfig(dc *dynamicconfig.Collection, numHistoryShards int, enableReadFro VisibilityArchivalQueryMaxPageSize: dc.GetIntProperty(dynamicconfig.VisibilityArchivalQueryMaxPageSize, 10000), DisallowQuery: dc.GetBoolPropertyFnWithDomainFilter(dynamicconfig.DisallowQuery, false), SendRawWorkflowHistory: dc.GetBoolPropertyFnWithDomainFilter(dynamicconfig.SendRawWorkflowHistory, false), + EnableRPCReplication: dc.GetBoolProperty(dynamicconfig.FrontendEnableRPCReplication, false), } } @@ -222,7 +225,9 @@ func (s 
*Service) Start() { clusterMetadata := s.GetClusterMetadata() if clusterMetadata.IsGlobalDomainEnabled() { consumerConfig := clusterMetadata.GetReplicationConsumerConfig() - if consumerConfig != nil && consumerConfig.Type == config.ReplicationConsumerTypeRPC { + if consumerConfig != nil && + consumerConfig.Type == config.ReplicationConsumerTypeRPC && + s.config.EnableRPCReplication() { replicationMessageSink = s.GetDomainReplicationQueue() } else { var err error diff --git a/service/history/config/config.go b/service/history/config/config.go index 74807a5c77e..3571e34e707 100644 --- a/service/history/config/config.go +++ b/service/history/config/config.go @@ -203,6 +203,9 @@ type Config struct { ReplicationTaskProcessorNoTaskRetryWait dynamicconfig.DurationPropertyFn ReplicationTaskProcessorCleanupInterval dynamicconfig.DurationPropertyFn ReplicationTaskProcessorCleanupJitterCoefficient dynamicconfig.FloatPropertyFn + // TODO: these two flags exist only for the migration; consider removing them once the migration is complete + EnableRPCReplication dynamicconfig.BoolPropertyFn + EnableKafkaReplication dynamicconfig.BoolPropertyFn // The following are used by consistent query EnableConsistentQuery dynamicconfig.BoolPropertyFn @@ -366,6 +369,8 @@ func New(dc *dynamicconfig.Collection, numberOfShards int, storeType string, isA ReplicationTaskProcessorNoTaskRetryWait: dc.GetDurationProperty(dynamicconfig.ReplicationTaskProcessorNoTaskInitialWait, 2*time.Second), ReplicationTaskProcessorCleanupInterval: dc.GetDurationProperty(dynamicconfig.ReplicationTaskProcessorCleanupInterval, 1*time.Minute), ReplicationTaskProcessorCleanupJitterCoefficient: dc.GetFloat64Property(dynamicconfig.ReplicationTaskProcessorCleanupJitterCoefficient, 0.15), + EnableRPCReplication: dc.GetBoolProperty(dynamicconfig.HistoryEnableRPCReplication, false), + EnableKafkaReplication: dc.GetBoolProperty(dynamicconfig.HistoryEnableKafkaReplication, true), EnableConsistentQuery: dc.GetBoolProperty(dynamicconfig.EnableConsistentQuery, true), EnableConsistentQueryByDomain: dc.GetBoolPropertyFnWithDomainFilter(dynamicconfig.EnableConsistentQueryByDomain, false), diff --git a/service/history/historyEngine.go b/service/history/historyEngine.go index 52a3bfa1103..1f57be13dc9 100644 --- a/service/history/historyEngine.go +++ b/service/history/historyEngine.go @@ -72,6 +72,11 @@ const ( defaultQueryFirstDecisionTaskWaitTime = time.Second queryFirstDecisionTaskCheckInterval = 200 * time.Millisecond replicationTimeout = 30 * time.Second + + // TerminateIfRunningReason is the termination reason recorded by the TerminateIfRunning policy + TerminateIfRunningReason = "TerminateIfRunning Policy" + // TerminateIfRunningDetailsTemplate is the termination details template used by the TerminateIfRunning policy + TerminateIfRunningDetailsTemplate = "New runID: %s" ) type ( @@ -309,7 +314,9 @@ func (e *historyEngineImpl) Start() { e.timerProcessor.Start() clusterMetadata := e.shard.GetClusterMetadata() - if e.replicatorProcessor != nil && clusterMetadata.GetReplicationConsumerConfig().Type != sconfig.ReplicationConsumerTypeRPC { + if e.replicatorProcessor != nil && + clusterMetadata.GetReplicationConsumerConfig().Type != sconfig.ReplicationConsumerTypeRPC && + e.config.EnableKafkaReplication() { e.replicatorProcessor.Start() } @@ -422,7 +429,6 @@ func (e *historyEngineImpl) registerDomainFailoverCallback() { } func (e *historyEngineImpl) createMutableState( - clusterMetadata cluster.Metadata, domainEntry *cache.DomainCacheEntry, runID string, ) (execution.MutableState, error) { @@ -501,11 +507,24 @@ func (e *historyEngineImpl)
StartWorkflowExecution( nil) } +// for startWorkflowHelper be reused by signalWithStart type signalWithStartArg struct { - signalWithStartRequest *workflow.SignalWithStartWorkflowExecutionRequest + signalWithStartRequest *h.SignalWithStartWorkflowExecutionRequest prevMutableState execution.MutableState } +func (e *historyEngineImpl) newDomainNotActiveError( + domainName string, + failoverVersion int64, +) error { + clusterMetadata := e.shard.GetService().GetClusterMetadata() + return ce.NewDomainNotActiveError( + domainName, + clusterMetadata.GetCurrentClusterName(), + clusterMetadata.ClusterNameForFailoverVersion(failoverVersion), + ) +} + func (e *historyEngineImpl) startWorkflowHelper( ctx context.Context, startRequest *h.StartWorkflowExecutionRequest, @@ -538,18 +557,18 @@ func (e *historyEngineImpl) startWorkflowHelper( WorkflowId: common.StringPtr(workflowID), RunId: common.StringPtr(uuid.New()), } - clusterMetadata := e.shard.GetService().GetClusterMetadata() - curMutableState, err := e.createMutableState(clusterMetadata, domainEntry, workflowExecution.GetRunId()) + curMutableState, err := e.createMutableState(domainEntry, workflowExecution.GetRunId()) if err != nil { return nil, err } - // for signalWithStart, WorkflowIDReusePolicy is default to WorkflowIDReusePolicyAllowDuplicate - // while for startWorkflow it is default to WorkflowIdReusePolicyAllowDuplicateFailedOnly. - isSignalWithStart := signalWithStartArg != nil + // preprocess for signalWithStart var prevMutableState execution.MutableState - if isSignalWithStart && signalWithStartArg.prevMutableState != nil { + var signalWithStartRequest *h.SignalWithStartWorkflowExecutionRequest + isSignalWithStart := signalWithStartArg != nil + if isSignalWithStart { prevMutableState = signalWithStartArg.prevMutableState + signalWithStartRequest = signalWithStartArg.signalWithStartRequest } if prevMutableState != nil { prevLastWriteVersion, err := prevMutableState.GetLastWriteVersion() @@ -557,51 +576,28 @@ func (e *historyEngineImpl) startWorkflowHelper( return nil, err } if prevLastWriteVersion > curMutableState.GetCurrentVersion() { - return nil, ce.NewDomainNotActiveError( + return nil, e.newDomainNotActiveError( domainEntry.GetInfo().Name, - clusterMetadata.GetCurrentClusterName(), - clusterMetadata.ClusterNameForFailoverVersion(prevLastWriteVersion), + prevLastWriteVersion, ) } - policy := workflow.WorkflowIdReusePolicyAllowDuplicate - if request.WorkflowIdReusePolicy != nil { - policy = request.GetWorkflowIdReusePolicy() - } - - err = e.applyWorkflowIDReusePolicyForSigWithStart(prevMutableState.GetExecutionInfo(), workflowExecution, policy) + err = e.applyWorkflowIDReusePolicyForSigWithStart( + prevMutableState.GetExecutionInfo(), + workflowExecution, + request.GetWorkflowIdReusePolicy(), + ) if err != nil { return nil, err } } - // Add WF start event - startEvent, err := curMutableState.AddWorkflowExecutionStartedEvent( + err = e.addStartEventsAndTasks( + curMutableState, workflowExecution, startRequest, + signalWithStartRequest, ) if err != nil { - return nil, &workflow.InternalServiceError{ - Message: "Failed to add workflow execution started event.", - } - } - - if isSignalWithStart { - // Add signal event - sRequest := signalWithStartArg.signalWithStartRequest - if _, err := curMutableState.AddWorkflowExecutionSignaled( - sRequest.GetSignalName(), - sRequest.GetSignalInput(), - sRequest.GetIdentity()); err != nil { - return nil, &workflow.InternalServiceError{Message: "Failed to add workflow execution signaled event."} - } - 
} - - // Generate first decision task event if not child WF and no first decision task backoff - if err := e.generateFirstDecisionTask( - curMutableState, - startRequest.ParentExecutionInfo, - startEvent, - ); err != nil { return nil, err } @@ -624,7 +620,7 @@ func (e *historyEngineImpl) startWorkflowHelper( createMode := persistence.CreateWorkflowModeBrandNew prevRunID := "" prevLastWriteVersion := int64(0) - + // overwrite in case of signalWithStart if prevMutableState != nil { createMode = persistence.CreateWorkflowModeWorkflowIDReuse prevRunID = prevMutableState.GetExecutionInfo().RunID @@ -655,17 +651,29 @@ func (e *historyEngineImpl) startWorkflowHelper( } if curMutableState.GetCurrentVersion() < t.LastWriteVersion { - return nil, ce.NewDomainNotActiveError( - request.GetDomain(), - clusterMetadata.GetCurrentClusterName(), - clusterMetadata.ClusterNameForFailoverVersion(t.LastWriteVersion), + return nil, e.newDomainNotActiveError( + domainEntry.GetInfo().Name, + t.LastWriteVersion, ) } - // create as ID reuse - createMode = persistence.CreateWorkflowModeWorkflowIDReuse prevRunID = t.RunID - prevLastWriteVersion = t.LastWriteVersion + if shouldTerminateAndStart(startRequest, t.State) { + runningWFCtx, err := e.loadWorkflowOnce(ctx, domainID, workflowID, prevRunID) + if err != nil { + return nil, err + } + defer func() { runningWFCtx.getReleaseFn()(retError) }() + + return e.terminateAndStartWorkflow( + runningWFCtx, + workflowExecution, + domainEntry, + domainID, + startRequest, + nil, + ) + } if err = e.applyWorkflowIDReusePolicyHelper( t.StartRequestID, prevRunID, @@ -676,13 +684,15 @@ func (e *historyEngineImpl) startWorkflowHelper( ); err != nil { return nil, err } + // create as ID reuse + createMode = persistence.CreateWorkflowModeWorkflowIDReuse err = wfContext.CreateWorkflowExecution( newWorkflow, historySize, now, createMode, prevRunID, - prevLastWriteVersion, + t.LastWriteVersion, ) } if err != nil { @@ -694,6 +704,138 @@ func (e *historyEngineImpl) startWorkflowHelper( }, nil } +func shouldTerminateAndStart( + startRequest *h.StartWorkflowExecutionRequest, + state int, +) bool { + return startRequest.StartRequest.GetWorkflowIdReusePolicy() == workflow.WorkflowIdReusePolicyTerminateIfRunning && + (state == persistence.WorkflowStateRunning || state == persistence.WorkflowStateCreated) +} + +// terminate running workflow then start a new run in one transaction +func (e *historyEngineImpl) terminateAndStartWorkflow( + runningWFCtx workflowContext, + workflowExecution workflow.WorkflowExecution, + domainEntry *cache.DomainCacheEntry, + domainID string, + startRequest *h.StartWorkflowExecutionRequest, + signalWithStartRequest *h.SignalWithStartWorkflowExecutionRequest, +) (*workflow.StartWorkflowExecutionResponse, error) { + runningMutableState := runningWFCtx.getMutableState() +UpdateWorkflowLoop: + for attempt := 0; attempt < conditionalRetryCount; attempt++ { + if !runningMutableState.IsWorkflowExecutionRunning() { + return nil, ErrWorkflowCompleted + } + + if err := execution.TerminateWorkflow( + runningMutableState, + runningMutableState.GetNextEventID(), + TerminateIfRunningReason, + getTerminateIfRunningDetails(workflowExecution.GetRunId()), + execution.IdentityHistoryService, + ); err != nil { + if err == ErrStaleState { + // Handler detected that cached workflow mutable could potentially be stale + // Reload workflow execution history + runningWFCtx.getContext().Clear() + if attempt != conditionalRetryCount-1 { + _, err = runningWFCtx.reloadMutableState() + if err != nil 
{ + return nil, err + } + } + continue UpdateWorkflowLoop + } + return nil, err + } + + // new mutable state + newMutableState, err := e.createMutableState(domainEntry, workflowExecution.GetRunId()) + if err != nil { + return nil, err + } + + if signalWithStartRequest != nil { + startRequest = getStartRequest(domainID, signalWithStartRequest.SignalWithStartRequest) + } + + err = e.addStartEventsAndTasks( + newMutableState, + workflowExecution, + startRequest, + signalWithStartRequest, + ) + if err != nil { + return nil, err + } + + updateErr := runningWFCtx.getContext().UpdateWorkflowExecutionWithNewAsActive( + e.timeSource.Now(), + execution.NewContext( + domainID, + workflowExecution, + e.shard, + e.shard.GetExecutionManager(), + e.logger, + ), + newMutableState, + ) + if updateErr != nil { + if updateErr == execution.ErrConflict { + e.metricsClient.IncCounter(metrics.HistoryStartWorkflowExecutionScope, metrics.ConcurrencyUpdateFailureCounter) + continue UpdateWorkflowLoop + } + return nil, updateErr + } + break UpdateWorkflowLoop + } + return &workflow.StartWorkflowExecutionResponse{ + RunId: workflowExecution.RunId, + }, nil +} + +func (e *historyEngineImpl) addStartEventsAndTasks( + mutableState execution.MutableState, + workflowExecution workflow.WorkflowExecution, + startRequest *h.StartWorkflowExecutionRequest, + signalWithStartRequest *h.SignalWithStartWorkflowExecutionRequest, +) error { + // Add WF start event + startEvent, err := mutableState.AddWorkflowExecutionStartedEvent( + workflowExecution, + startRequest, + ) + if err != nil { + return &workflow.InternalServiceError{ + Message: "Failed to add workflow execution started event.", + } + } + + if signalWithStartRequest != nil { + // Add signal event + sRequest := signalWithStartRequest.SignalWithStartRequest + _, err := mutableState.AddWorkflowExecutionSignaled( + sRequest.GetSignalName(), + sRequest.GetSignalInput(), + sRequest.GetIdentity()) + if err != nil { + return &workflow.InternalServiceError{Message: "Failed to add workflow execution signaled event."} + } + } + + // Generate first decision task event if not child WF and no first decision task backoff + return e.generateFirstDecisionTask( + mutableState, + startRequest.ParentExecutionInfo, + startEvent, + ) +} + +func getTerminateIfRunningDetails(newRunID string) []byte { + return []byte(fmt.Sprintf(TerminateIfRunningDetailsTemplate, newRunID)) +} + // GetMutableState retrieves the mutable state of the workflow execution func (e *historyEngineImpl) GetMutableState( ctx context.Context, @@ -1991,6 +2133,19 @@ func (e *historyEngineImpl) SignalWithStartWorkflowExecution( prevMutableState = mutableState break } + // workflow is running, if policy is TerminateIfRunning, terminate current run then signalWithStart + if sRequest.GetWorkflowIdReusePolicy() == workflow.WorkflowIdReusePolicyTerminateIfRunning { + workflowExecution.RunId = common.StringPtr(uuid.New()) + runningWFCtx := newWorkflowContext(wfContext, release, mutableState) + return e.terminateAndStartWorkflow( + runningWFCtx, + workflowExecution, + domainEntry, + domainID, + nil, + signalWithStartRequest, + ) + } executionInfo := mutableState.GetExecutionInfo() maxAllowedSignals := e.config.MaximumSignalsPerExecution(domainEntry.GetInfo().Name) @@ -2040,7 +2195,7 @@ func (e *historyEngineImpl) SignalWithStartWorkflowExecution( // Start workflow and signal startRequest := getStartRequest(domainID, sRequest) sigWithStartArg := &signalWithStartArg{ - signalWithStartRequest: sRequest, + signalWithStartRequest: 
signalWithStartRequest, prevMutableState: prevMutableState, } return e.startWorkflowHelper( @@ -2048,7 +2203,8 @@ func (e *historyEngineImpl) SignalWithStartWorkflowExecution( startRequest, domainEntry, metrics.HistorySignalWithStartWorkflowExecutionScope, - sigWithStartArg) + sigWithStartArg, + ) } // RemoveSignalMutableState remove the signal request id in signal_requested for deduplicate @@ -2532,7 +2688,7 @@ func (e *historyEngineImpl) NotifyNewReplicationTasks( tasks []persistence.Task, ) { - if len(tasks) > 0 { + if len(tasks) > 0 && e.replicatorProcessor != nil { e.replicatorProcessor.notifyNewTask() } } @@ -2687,7 +2843,6 @@ func getStartRequest( return startRequest } -// for startWorkflowExecution & signalWithStart to handle workflow reuse policy func (e *historyEngineImpl) applyWorkflowIDReusePolicyForSigWithStart( prevExecutionInfo *persistence.WorkflowExecutionInfo, execution workflow.WorkflowExecution, @@ -2707,7 +2862,6 @@ func (e *historyEngineImpl) applyWorkflowIDReusePolicyForSigWithStart( execution, wfIDReusePolicy, ) - } func (e *historyEngineImpl) applyWorkflowIDReusePolicyHelper( @@ -2739,7 +2893,8 @@ func (e *historyEngineImpl) applyWorkflowIDReusePolicyHelper( msg := "Workflow execution already finished successfully. WorkflowId: %v, RunId: %v. Workflow ID reuse policy: allow duplicate workflow ID if last run failed." return getWorkflowAlreadyStartedError(msg, prevStartRequestID, execution.GetWorkflowId(), prevRunID) } - case workflow.WorkflowIdReusePolicyAllowDuplicate: + case workflow.WorkflowIdReusePolicyAllowDuplicate, + workflow.WorkflowIdReusePolicyTerminateIfRunning: // no check need here case workflow.WorkflowIdReusePolicyRejectDuplicate: msg := "Workflow execution already finished. WorkflowId: %v, RunId: %v. Workflow ID reuse policy: reject duplicate workflow ID." 
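A minimal caller-side sketch of the new TerminateIfRunning behavior, assuming the integration-test fixtures shown earlier (s.engine, createContext, and the id, workflowType, taskList, and identity values); this is an illustrative sketch, not part of the change itself:

// Hypothetical usage sketch: starting with TerminateIfRunning terminates an
// already-running execution with the same workflow ID (reason
// TerminateIfRunningReason, details "New runID: <new run ID>") and starts a
// fresh run, instead of failing with WorkflowExecutionAlreadyStartedError.
policy := workflow.WorkflowIdReusePolicyTerminateIfRunning
request := &workflow.StartWorkflowExecutionRequest{
	RequestId:                           common.StringPtr(uuid.New()),
	Domain:                              common.StringPtr(s.domainName),
	WorkflowId:                          common.StringPtr(id),
	WorkflowType:                        workflowType,
	TaskList:                            taskList,
	ExecutionStartToCloseTimeoutSeconds: common.Int32Ptr(100),
	TaskStartToCloseTimeoutSeconds:      common.Int32Ptr(1),
	Identity:                            common.StringPtr(identity),
	WorkflowIdReusePolicy:               &policy,
}
we, err := s.engine.StartWorkflowExecution(createContext(), request)
// On success, err is nil and we.GetRunId() identifies the new run; the previous
// run's history ends with a WorkflowExecutionTerminated event.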
diff --git a/service/history/historyEngine2_test.go b/service/history/historyEngine2_test.go index 43118a3f1c6..c40f68d3226 100644 --- a/service/history/historyEngine2_test.go +++ b/service/history/historyEngine2_test.go @@ -1283,6 +1283,7 @@ func (s *engine2Suite) TestSignalWithStartWorkflowExecution_WorkflowNotRunning() signalName := "my signal name" input := []byte("test input") requestID := uuid.New() + policy := workflow.WorkflowIdReusePolicyAllowDuplicate sRequest = &h.SignalWithStartWorkflowExecutionRequest{ DomainUUID: common.StringPtr(domainID), SignalWithStartRequest: &workflow.SignalWithStartWorkflowExecutionRequest{ @@ -1296,6 +1297,7 @@ func (s *engine2Suite) TestSignalWithStartWorkflowExecution_WorkflowNotRunning() SignalName: common.StringPtr(signalName), Input: input, RequestId: common.StringPtr(requestID), + WorkflowIdReusePolicy: &policy, }, } @@ -1327,6 +1329,7 @@ func (s *engine2Suite) TestSignalWithStartWorkflowExecution_Start_DuplicateReque signalName := "my signal name" input := []byte("test input") requestID := "testRequestID" + policy := workflow.WorkflowIdReusePolicyAllowDuplicate sRequest := &h.SignalWithStartWorkflowExecutionRequest{ DomainUUID: common.StringPtr(domainID), SignalWithStartRequest: &workflow.SignalWithStartWorkflowExecutionRequest{ @@ -1340,6 +1343,7 @@ func (s *engine2Suite) TestSignalWithStartWorkflowExecution_Start_DuplicateReque SignalName: common.StringPtr(signalName), Input: input, RequestId: common.StringPtr(requestID), + WorkflowIdReusePolicy: &policy, }, } @@ -1379,6 +1383,7 @@ func (s *engine2Suite) TestSignalWithStartWorkflowExecution_Start_WorkflowAlread signalName := "my signal name" input := []byte("test input") requestID := "testRequestID" + policy := workflow.WorkflowIdReusePolicyAllowDuplicate sRequest := &h.SignalWithStartWorkflowExecutionRequest{ DomainUUID: common.StringPtr(domainID), SignalWithStartRequest: &workflow.SignalWithStartWorkflowExecutionRequest{ @@ -1392,6 +1397,7 @@ func (s *engine2Suite) TestSignalWithStartWorkflowExecution_Start_WorkflowAlread SignalName: common.StringPtr(signalName), Input: input, RequestId: common.StringPtr(requestID), + WorkflowIdReusePolicy: &policy, }, } diff --git a/service/history/queue/domain_filter.go b/service/history/queue/domain_filter.go index c66b18e2822..62547c66330 100644 --- a/service/history/queue/domain_filter.go +++ b/service/history/queue/domain_filter.go @@ -25,6 +25,10 @@ func NewDomainFilter( domainIDs map[string]struct{}, reverseMatch bool, ) DomainFilter { + if domainIDs == nil { + domainIDs = make(map[string]struct{}) + } + return DomainFilter{ DomainIDs: domainIDs, ReverseMatch: reverseMatch, @@ -32,7 +36,7 @@ func NewDomainFilter( } // Filter returns true if domainID is in the domainID set specified by the filter -func (f *DomainFilter) Filter(domainID string) bool { +func (f DomainFilter) Filter(domainID string) bool { _, ok := f.DomainIDs[domainID] if f.ReverseMatch { ok = !ok @@ -41,7 +45,7 @@ func (f *DomainFilter) Filter(domainID string) bool { } // Include adds more domainIDs to the domainID set specified by the filter -func (f *DomainFilter) Include(domainIDs map[string]struct{}) DomainFilter { +func (f DomainFilter) Include(domainIDs map[string]struct{}) DomainFilter { filter := f.copy() for domainID := range domainIDs { if !filter.ReverseMatch { @@ -54,7 +58,7 @@ func (f *DomainFilter) Include(domainIDs map[string]struct{}) DomainFilter { } // Exclude removes domainIDs from the domainID set specified by the filter -func (f *DomainFilter) Exclude(domainIDs 
map[string]struct{}) DomainFilter { +func (f DomainFilter) Exclude(domainIDs map[string]struct{}) DomainFilter { filter := f.copy() for domainID := range domainIDs { if !filter.ReverseMatch { @@ -67,7 +71,7 @@ func (f *DomainFilter) Exclude(domainIDs map[string]struct{}) DomainFilter { } // Merge merges the domainID sets specified by two domain filters -func (f *DomainFilter) Merge(f2 DomainFilter) DomainFilter { +func (f DomainFilter) Merge(f2 DomainFilter) DomainFilter { // case 1: ReverseMatch field is false for both filters if !f.ReverseMatch && !f2.ReverseMatch { // union the domainIDs field @@ -113,7 +117,7 @@ func (f *DomainFilter) Merge(f2 DomainFilter) DomainFilter { return filter } -func (f *DomainFilter) copy() DomainFilter { +func (f DomainFilter) copy() DomainFilter { domainIDs := make(map[string]struct{}) for domainID := range f.DomainIDs { domainIDs[domainID] = struct{}{} diff --git a/service/history/queue/interface.go b/service/history/queue/interface.go index a88f6047347..770c6a77ec3 100644 --- a/service/history/queue/interface.go +++ b/service/history/queue/interface.go @@ -40,11 +40,10 @@ type ( // ProcessingQueueState indicates the scope of a task processing queue and its current progress ProcessingQueueState interface { - CollectionID() int - MinLevel() task.Key - MaxLevel() task.Key + Level() int AckLevel() task.Key ReadLevel() task.Key + MaxLevel() task.Key DomainFilter() DomainFilter } diff --git a/service/history/queue/interface_mock.go b/service/history/queue/interface_mock.go index 65fd0156d71..d34aa77197b 100644 --- a/service/history/queue/interface_mock.go +++ b/service/history/queue/interface_mock.go @@ -59,46 +59,18 @@ func (m *MockProcessingQueueState) EXPECT() *MockProcessingQueueStateMockRecorde return m.recorder } -// CollectionID mocks base method -func (m *MockProcessingQueueState) CollectionID() int { +// Level mocks base method +func (m *MockProcessingQueueState) Level() int { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CollectionID") + ret := m.ctrl.Call(m, "Level") ret0, _ := ret[0].(int) return ret0 } -// CollectionID indicates an expected call of CollectionID -func (mr *MockProcessingQueueStateMockRecorder) CollectionID() *gomock.Call { +// Level indicates an expected call of Level +func (mr *MockProcessingQueueStateMockRecorder) Level() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CollectionID", reflect.TypeOf((*MockProcessingQueueState)(nil).CollectionID)) -} - -// MinLevel mocks base method -func (m *MockProcessingQueueState) MinLevel() task.Key { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MinLevel") - ret0, _ := ret[0].(task.Key) - return ret0 -} - -// MinLevel indicates an expected call of MinLevel -func (mr *MockProcessingQueueStateMockRecorder) MinLevel() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinLevel", reflect.TypeOf((*MockProcessingQueueState)(nil).MinLevel)) -} - -// MaxLevel mocks base method -func (m *MockProcessingQueueState) MaxLevel() task.Key { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MaxLevel") - ret0, _ := ret[0].(task.Key) - return ret0 -} - -// MaxLevel indicates an expected call of MaxLevel -func (mr *MockProcessingQueueStateMockRecorder) MaxLevel() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxLevel", reflect.TypeOf((*MockProcessingQueueState)(nil).MaxLevel)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Level", 
reflect.TypeOf((*MockProcessingQueueState)(nil).Level)) } // AckLevel mocks base method @@ -129,6 +101,20 @@ func (mr *MockProcessingQueueStateMockRecorder) ReadLevel() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadLevel", reflect.TypeOf((*MockProcessingQueueState)(nil).ReadLevel)) } +// MaxLevel mocks base method +func (m *MockProcessingQueueState) MaxLevel() task.Key { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MaxLevel") + ret0, _ := ret[0].(task.Key) + return ret0 +} + +// MaxLevel indicates an expected call of MaxLevel +func (mr *MockProcessingQueueStateMockRecorder) MaxLevel() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxLevel", reflect.TypeOf((*MockProcessingQueueState)(nil).MaxLevel)) +} + // DomainFilter mocks base method func (m *MockProcessingQueueState) DomainFilter() DomainFilter { m.ctrl.T.Helper() diff --git a/service/history/queue/processing_queue.go b/service/history/queue/processing_queue.go new file mode 100644 index 00000000000..70c31e1581d --- /dev/null +++ b/service/history/queue/processing_queue.go @@ -0,0 +1,366 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package queue + +import ( + "fmt" + "sort" + + "github.com/uber/cadence/common/log" + "github.com/uber/cadence/common/log/tag" + "github.com/uber/cadence/common/metrics" + t "github.com/uber/cadence/common/task" + "github.com/uber/cadence/service/history/task" +) + +type ( + processingQueueStateImpl struct { + level int + ackLevel task.Key + readLevel task.Key + maxLevel task.Key + domainFilter DomainFilter + } + + processingQueueImpl struct { + state *processingQueueStateImpl + outstandingTasks map[task.Key]task.Task + + logger log.Logger + metricsClient metrics.Client // TODO: emit metrics + } +) + +// NewProcessingQueueState creates a new state instance for processing queue +// readLevel will be set to the same value as ackLevel +func NewProcessingQueueState( + level int, + ackLevel task.Key, + maxLevel task.Key, + domainFilter DomainFilter, +) ProcessingQueueState { + return newProcessingQueueState( + level, + ackLevel, + ackLevel, + maxLevel, + domainFilter, + ) +} + +func newProcessingQueueState( + level int, + ackLevel task.Key, + readLevel task.Key, + maxLevel task.Key, + domainFilter DomainFilter, +) *processingQueueStateImpl { + return &processingQueueStateImpl{ + level: level, + ackLevel: ackLevel, + readLevel: readLevel, + maxLevel: maxLevel, + domainFilter: domainFilter, + } +} + +// NewProcessingQueue creates a new processing queue based on its state +func NewProcessingQueue( + state ProcessingQueueState, + logger log.Logger, + metricsClient metrics.Client, +) ProcessingQueue { + return newProcessingQueue( + state, + nil, + logger, + metricsClient, + ) +} + +func newProcessingQueue( + state ProcessingQueueState, + outstandingTasks map[task.Key]task.Task, + logger log.Logger, + metricsClient metrics.Client, +) *processingQueueImpl { + if outstandingTasks == nil { + outstandingTasks = make(map[task.Key]task.Task) + } + + queue := &processingQueueImpl{ + outstandingTasks: outstandingTasks, + logger: logger, + metricsClient: metricsClient, + } + + // convert state to *processingQueueStateImpl type so that + // queue implementation can change the state value + if stateImpl, ok := state.(*processingQueueStateImpl); ok { + queue.state = stateImpl + } else { + queue.state = newProcessingQueueState( + state.Level(), + state.AckLevel(), + state.ReadLevel(), + state.MaxLevel(), + state.DomainFilter(), + ) + } + + return queue +} + +func (s *processingQueueStateImpl) Level() int { + return s.level +} + +func (s *processingQueueStateImpl) MaxLevel() task.Key { + return s.maxLevel +} + +func (s *processingQueueStateImpl) AckLevel() task.Key { + return s.ackLevel +} + +func (s *processingQueueStateImpl) ReadLevel() task.Key { + return s.readLevel +} + +func (s *processingQueueStateImpl) DomainFilter() DomainFilter { + return s.domainFilter +} + +func (s *processingQueueStateImpl) String() string { + return fmt.Sprintf("&{level: %+v, ackLevel: %+v, readLevel: %+v, maxLevel: %+v, domainFilter: %+v}", + s.level, s.ackLevel, s.readLevel, s.maxLevel, s.domainFilter, + ) +} + +func (q *processingQueueImpl) State() ProcessingQueueState { + return q.state +} + +func (q *processingQueueImpl) Split( + policy ProcessingQueueSplitPolicy, +) []ProcessingQueue { + newQueueStates := policy.Evaluate(q) + if len(newQueueStates) == 0 { + // no need to split, return self + return []ProcessingQueue{q} + } + + return splitProcessingQueue([]*processingQueueImpl{q}, newQueueStates, q.logger, q.metricsClient) +} + +func (q *processingQueueImpl) Merge( + queue ProcessingQueue, +) []ProcessingQueue { + q1, q2 := 
q, queue.(*processingQueueImpl) + + if q1.State().Level() != q2.State().Level() { + errMsg := "Processing queue encountered a queue from different level during merge" + q.logger.Error(errMsg, tag.Error( + fmt.Errorf("current queue level: %v, incoming queue level: %v", q1.state.level, q2.state.level), + )) + panic(errMsg) + } + + if !q1.state.ackLevel.Less(q2.state.maxLevel) || + !q2.state.ackLevel.Less(q1.state.maxLevel) { + // one queue's ackLevel is larger or equal than the other one's maxLevel + // this means there's no overlap between two queues + return []ProcessingQueue{q1, q2} + } + + // generate new queue states for merged queues + newQueueStates := []ProcessingQueueState{} + if !taskKeyEquals(q1.state.ackLevel, q2.state.ackLevel) { + if q2.state.ackLevel.Less(q1.state.ackLevel) { + q1, q2 = q2, q1 + } + + newQueueStates = append(newQueueStates, newProcessingQueueState( + q1.state.level, + q1.state.ackLevel, + minTaskKey(q1.state.readLevel, q2.state.ackLevel), + q2.state.ackLevel, + q1.state.domainFilter.copy(), + )) + } + + if !taskKeyEquals(q1.state.maxLevel, q2.state.maxLevel) { + if q1.state.maxLevel.Less(q2.state.maxLevel) { + q1, q2 = q2, q1 + } + + newQueueStates = append(newQueueStates, newProcessingQueueState( + q1.state.level, + q2.state.maxLevel, + maxTaskKey(q1.state.readLevel, q2.state.maxLevel), + q1.state.maxLevel, + q1.state.domainFilter.copy(), + )) + } + + newQueueStates = append(newQueueStates, newProcessingQueueState( + q1.state.level, + maxTaskKey(q1.state.ackLevel, q2.state.ackLevel), + minTaskKey(q1.state.readLevel, q2.state.readLevel), + minTaskKey(q1.state.maxLevel, q2.state.maxLevel), + q1.state.domainFilter.Merge(q2.state.domainFilter), + )) + + return splitProcessingQueue([]*processingQueueImpl{q1, q2}, newQueueStates, q.logger, q.metricsClient) +} + +func (q *processingQueueImpl) AddTasks( + tasks map[task.Key]task.Task, +) { + for key, task := range tasks { + if _, loaded := q.outstandingTasks[key]; loaded { + q.logger.Debug(fmt.Sprintf("Skipping task: %+v. 
DomainID: %v, WorkflowID: %v, RunID: %v, Type: %v", + key, task.GetDomainID(), task.GetWorkflowID(), task.GetRunID(), task.GetTaskType())) + continue + } + + if !taskBelongsToProcessQueue(q.state, key, task) { + errMsg := "Processing queue encountered a task that doesn't belong to its scope" + q.logger.Error(errMsg, tag.Error( + fmt.Errorf("Processing queue state: %+v, task: %+v", q.state, key), + )) + panic(errMsg) + } + + q.outstandingTasks[key] = task + if q.state.readLevel.Less(key) { + q.state.readLevel = key + } + } +} + +func (q *processingQueueImpl) UpdateAckLevel() { + keys := make([]task.Key, 0, len(q.outstandingTasks)) + for key := range q.outstandingTasks { + keys = append(keys, key) + } + + sort.Slice(keys, func(i, j int) bool { + return keys[i].Less(keys[j]) + }) + + for _, key := range keys { + if q.outstandingTasks[key].State() != t.TaskStateAcked { + break + } + + q.state.ackLevel = key + delete(q.outstandingTasks, key) + } +} + +func splitProcessingQueue( + queues []*processingQueueImpl, + newQueueStates []ProcessingQueueState, + logger log.Logger, + metricsClient metrics.Client, +) []ProcessingQueue { + newQueueTasks := make([]map[task.Key]task.Task, 0, len(newQueueStates)) + for i := 0; i != len(newQueueStates); i++ { + newQueueTasks = append(newQueueTasks, make(map[task.Key]task.Task)) + } + + for _, queue := range queues { + SplitTaskLoop: + for key, task := range queue.outstandingTasks { + for i, state := range newQueueStates { + if taskBelongsToProcessQueue(state, key, task) { + newQueueTasks[i][key] = task + continue SplitTaskLoop + } + } + + // if code reaches here, the task doesn't belong to any of the new queues; + // there must be a bug in the code that generated the newQueueStates. + // Log an error, skip the split, and return the current queues as the result. + currentQueues := make([]ProcessingQueue, 0, len(newQueueStates)) + currentQueueStates := make([]ProcessingQueueState, 0, len(newQueueStates)) + for _, q := range queues { + currentQueues = append(currentQueues, q) + currentQueueStates = append(currentQueueStates, q.State()) + } + logger.Error("Processing queue encountered an error during split or merge.", tag.Error( + fmt.Errorf("current queue state: %+v, new queue state: %+v", currentQueueStates, newQueueStates), + )) + return currentQueues + } + } + + newQueues := make([]ProcessingQueue, 0, len(newQueueStates)) + for i, state := range newQueueStates { + queue := newProcessingQueue( + state, + newQueueTasks[i], + logger, + metricsClient, + ) + newQueues = append(newQueues, queue) + } + + return newQueues +} + +func taskBelongsToProcessQueue( + state ProcessingQueueState, + key task.Key, + task task.Task, +) bool { + return state.DomainFilter().Filter(task.GetDomainID()) && + state.AckLevel().Less(key) && + !state.MaxLevel().Less(key) +} + +func taskKeyEquals( + key1 task.Key, + key2 task.Key, +) bool { + return !key1.Less(key2) && !key2.Less(key1) +} + +func minTaskKey( + key1 task.Key, + key2 task.Key, +) task.Key { + if key1.Less(key2) { + return key1 + } + return key2 +} + +func maxTaskKey( + key1 task.Key, + key2 task.Key, +) task.Key { + if key1.Less(key2) { + return key2 + } + return key1 +} diff --git a/service/history/queue/processing_queue_test.go b/service/history/queue/processing_queue_test.go new file mode 100644 index 00000000000..e026f4edb3f --- /dev/null +++ b/service/history/queue/processing_queue_test.go @@ -0,0 +1,906 @@ +// Copyright (c) 2020 Uber Technologies, Inc.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package queue + +import ( + "sort" + "testing" + + gomock "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/uber-go/tally" + + "github.com/uber/cadence/common/log" + "github.com/uber/cadence/common/log/loggerimpl" + "github.com/uber/cadence/common/metrics" + t "github.com/uber/cadence/common/task" + "github.com/uber/cadence/service/history/task" +) + +type ( + processingQueueSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller + + logger log.Logger + metricsClient metrics.Client + } + + testKey struct { + ID int + } +) + +func TestProcessingQueueSuite(t *testing.T) { + s := new(processingQueueSuite) + suite.Run(t, s) +} + +func (s *processingQueueSuite) SetupTest() { + s.Assertions = require.New(s.T()) + + s.controller = gomock.NewController(s.T()) + + s.logger = loggerimpl.NewDevelopmentForTest(s.Suite) + s.metricsClient = metrics.NewClient(tally.NoopScope, metrics.History) +} + +func (s *processingQueueSuite) TearDownTest() { + s.controller.Finish() +} + +func (s *processingQueueSuite) TestAddTasks() { + ackLevel := &testKey{ID: 1} + maxLevel := &testKey{ID: 10} + + taskKeys := []task.Key{ + &testKey{ID: 2}, + &testKey{ID: 3}, + &testKey{ID: 5}, + &testKey{ID: 10}, + } + tasks := make(map[task.Key]task.Task) + for _, key := range taskKeys { + mockTask := task.NewMockTask(s.controller) + mockTask.EXPECT().GetDomainID().Return("some random domainID").AnyTimes() + mockTask.EXPECT().GetWorkflowID().Return("some random workflowID").AnyTimes() + mockTask.EXPECT().GetRunID().Return("some random runID").AnyTimes() + mockTask.EXPECT().GetTaskType().Return(0).AnyTimes() + tasks[key] = mockTask + } + + queue := s.newTestProcessingQueue( + 0, + ackLevel, + ackLevel, + maxLevel, + NewDomainFilter(nil, true), + make(map[task.Key]task.Task), + ) + + queue.AddTasks(tasks) + s.Len(queue.outstandingTasks, len(taskKeys)) + s.Equal(&testKey{ID: 10}, queue.state.readLevel) + + // add the same set of tasks again, should have no effect + queue.AddTasks(tasks) + s.Len(queue.outstandingTasks, len(taskKeys)) + s.Equal(&testKey{ID: 10}, queue.state.readLevel) +} + +func (s *processingQueueSuite) TestUpdateAckLevel() { + ackLevel := &testKey{ID: 1} + maxLevel := &testKey{ID: 10} + + taskKeys := []task.Key{ + &testKey{ID: 2}, + &testKey{ID: 3}, + &testKey{ID: 5}, + &testKey{ID: 8}, + &testKey{ID: 10}, + } + taskStates := []t.State{ + 
t.TaskStateAcked, + t.TaskStateAcked, + t.TaskStateNacked, + t.TaskStateAcked, + t.TaskStatePending, + } + tasks := make(map[task.Key]task.Task) + for i, key := range taskKeys { + task := task.NewMockTask(s.controller) + task.EXPECT().State().Return(taskStates[i]).MaxTimes(1) + tasks[key] = task + } + + queue := s.newTestProcessingQueue( + 0, + ackLevel, + ackLevel, + maxLevel, + NewDomainFilter(nil, true), + tasks, + ) + + queue.UpdateAckLevel() + s.Equal(&testKey{ID: 3}, queue.state.ackLevel) +} + +func (s *processingQueueSuite) TestSplit() { + testCases := []struct { + queue *processingQueueImpl + policyResult []ProcessingQueueState + expectedNewQueues []*processingQueueImpl + }{ + { + // test 1: no split needed + queue: s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 3}, + &testKey{ID: 5}, + NewDomainFilter(nil, true), + s.newMockTasksForDomain( + []task.Key{&testKey{ID: 1}, &testKey{ID: 2}, &testKey{ID: 3}}, + []string{"testDomain1", "testDomain1", "testDomain2"}, + ), + ), + policyResult: nil, + expectedNewQueues: []*processingQueueImpl{ + s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 3}, + &testKey{ID: 5}, + NewDomainFilter(nil, true), + map[task.Key]task.Task{ + &testKey{ID: 1}: task.NewMockTask(s.controller), + &testKey{ID: 2}: task.NewMockTask(s.controller), + &testKey{ID: 3}: task.NewMockTask(s.controller), + }, + ), + }, + }, + { + // test 2: split two domains to another level, doesn't change range + queue: s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}, "testDomain2": {}, "testDomain3": {}}, + ReverseMatch: false, + }, + s.newMockTasksForDomain( + []task.Key{&testKey{ID: 1}, &testKey{ID: 2}, &testKey{ID: 3}, &testKey{ID: 5}}, + []string{"testDomain1", "testDomain1", "testDomain2", "testDomain3"}, + ), + ), + policyResult: []ProcessingQueueState{ + newProcessingQueueState( + 1, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}, "testDomain3": {}}, + ReverseMatch: false, + }, + ), + newProcessingQueueState( + 0, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}}, + ReverseMatch: false, + }, + ), + }, + expectedNewQueues: []*processingQueueImpl{ + s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{ + &testKey{ID: 1}: task.NewMockTask(s.controller), + &testKey{ID: 2}: task.NewMockTask(s.controller), + }, + ), + s.newTestProcessingQueue( + 1, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}, "testDomain3": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{ + &testKey{ID: 3}: task.NewMockTask(s.controller), + &testKey{ID: 5}: task.NewMockTask(s.controller), + }, + ), + }, + }, + { + // test 3: split into multiple new levels, while keeping the existing range + queue: s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: make(map[string]struct{}), + ReverseMatch: true, + }, + s.newMockTasksForDomain( + []task.Key{&testKey{ID: 1}, &testKey{ID: 2}, &testKey{ID: 3}, &testKey{ID: 5}}, + []string{"testDomain1", "testDomain1", "testDomain2", "testDomain3"}, + ), + ), + policyResult: 
[]ProcessingQueueState{ + newProcessingQueueState( + 1, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}}, + ReverseMatch: false, + }, + ), + newProcessingQueueState( + 2, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain3": {}}, + ReverseMatch: false, + }, + ), + newProcessingQueueState( + 0, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}, "testDomain3": {}}, + ReverseMatch: true, + }, + ), + }, + expectedNewQueues: []*processingQueueImpl{ + s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}, "testDomain3": {}}, + ReverseMatch: true, + }, + map[task.Key]task.Task{ + &testKey{ID: 1}: task.NewMockTask(s.controller), + &testKey{ID: 2}: task.NewMockTask(s.controller), + }, + ), + s.newTestProcessingQueue( + 1, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{ + &testKey{ID: 3}: task.NewMockTask(s.controller), + }, + ), + s.newTestProcessingQueue( + 2, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain3": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{ + &testKey{ID: 5}: task.NewMockTask(s.controller), + }, + ), + }, + }, + { + // test 4: change the queue range + queue: s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 7}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: make(map[string]struct{}), + ReverseMatch: true, + }, + s.newMockTasksForDomain( + []task.Key{&testKey{ID: 1}, &testKey{ID: 2}, &testKey{ID: 3}, &testKey{ID: 5}, &testKey{ID: 6}, &testKey{ID: 7}}, + []string{"testDomain1", "testDomain1", "testDomain2", "testDomain3", "testDomain1", "testDomain3"}, + ), + ), + policyResult: []ProcessingQueueState{ + newProcessingQueueState( + 0, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 5}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}, "testDomain3": {}}, + ReverseMatch: true, + }, + ), + newProcessingQueueState( + 0, + &testKey{ID: 5}, + &testKey{ID: 7}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: make(map[string]struct{}), + ReverseMatch: true, + }, + ), + newProcessingQueueState( + 1, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 5}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}, "testDomain3": {}}, + ReverseMatch: false, + }, + ), + }, + expectedNewQueues: []*processingQueueImpl{ + s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 5}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}, "testDomain3": {}}, + ReverseMatch: true, + }, + map[task.Key]task.Task{ + &testKey{ID: 1}: task.NewMockTask(s.controller), + &testKey{ID: 2}: task.NewMockTask(s.controller), + }, + ), + s.newTestProcessingQueue( + 0, + &testKey{ID: 5}, + &testKey{ID: 7}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: make(map[string]struct{}), + ReverseMatch: true, + }, + map[task.Key]task.Task{ + &testKey{ID: 6}: task.NewMockTask(s.controller), + &testKey{ID: 7}: task.NewMockTask(s.controller), + }, + ), + s.newTestProcessingQueue( + 1, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 5}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}, 
"testDomain3": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{ + &testKey{ID: 3}: task.NewMockTask(s.controller), + &testKey{ID: 5}: task.NewMockTask(s.controller), + }, + ), + }, + }, + } + + for _, tc := range testCases { + mockPolicy := NewMockProcessingQueueSplitPolicy(s.controller) + mockPolicy.EXPECT().Evaluate(ProcessingQueue(tc.queue)).Return(tc.policyResult).Times(1) + + newQueues := tc.queue.Split(mockPolicy) + + s.assertQueuesEqual(tc.expectedNewQueues, newQueues) + } +} + +func (s *processingQueueSuite) TestMerge() { + testCases := []struct { + queue1 *processingQueueImpl + queue2 *processingQueueImpl + expectedNewQueues []*processingQueueImpl + }{ + { + // test 1: no overlap in range + queue1: s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 1}, + &testKey{ID: 10}, + NewDomainFilter(nil, true), + s.newMockTasksForDomain( + []task.Key{&testKey{ID: 1}}, + []string{"testDomain1"}, + ), + ), + queue2: s.newTestProcessingQueue( + 0, + &testKey{ID: 10}, + &testKey{ID: 50}, + &testKey{ID: 100}, + NewDomainFilter(nil, true), + s.newMockTasksForDomain( + []task.Key{&testKey{ID: 50}}, + []string{"testDomain2"}, + ), + ), + expectedNewQueues: []*processingQueueImpl{ + s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 1}, + &testKey{ID: 10}, + NewDomainFilter(nil, true), + map[task.Key]task.Task{ + &testKey{ID: 1}: task.NewMockTask(s.controller), + }, + ), + s.newTestProcessingQueue( + 0, + &testKey{ID: 10}, + &testKey{ID: 50}, + &testKey{ID: 100}, + NewDomainFilter(nil, true), + map[task.Key]task.Task{ + &testKey{ID: 50}: task.NewMockTask(s.controller), + }, + ), + }, + }, + { + // test 2: same ack level + queue1: s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 7}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}}, + ReverseMatch: false, + }, + s.newMockTasksForDomain( + []task.Key{&testKey{ID: 1}, &testKey{ID: 4}, &testKey{ID: 7}}, + []string{"testDomain1", "testDomain1", "testDomain1"}, + ), + ), + queue2: s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 3}, + &testKey{ID: 5}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}}, + ReverseMatch: false, + }, + s.newMockTasksForDomain( + []task.Key{&testKey{ID: 2}, &testKey{ID: 3}}, + []string{"testDomain2", "testDomain2"}, + ), + ), + expectedNewQueues: []*processingQueueImpl{ + s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 3}, + &testKey{ID: 5}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}, "testDomain2": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{ + &testKey{ID: 1}: task.NewMockTask(s.controller), + &testKey{ID: 2}: task.NewMockTask(s.controller), + &testKey{ID: 3}: task.NewMockTask(s.controller), + &testKey{ID: 4}: task.NewMockTask(s.controller), + }, + ), + s.newTestProcessingQueue( + 0, + &testKey{ID: 5}, + &testKey{ID: 7}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{ + &testKey{ID: 7}: task.NewMockTask(s.controller), + }, + ), + }, + }, + { + // test 3: same max level + queue1: s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 7}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}}, + ReverseMatch: false, + }, + s.newMockTasksForDomain( + []task.Key{&testKey{ID: 1}, &testKey{ID: 4}, &testKey{ID: 7}}, + []string{"testDomain1", "testDomain1", "testDomain1"}, + ), + ), + queue2: s.newTestProcessingQueue( + 
0, + &testKey{ID: 5}, + &testKey{ID: 9}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}}, + ReverseMatch: false, + }, + s.newMockTasksForDomain( + []task.Key{&testKey{ID: 6}, &testKey{ID: 8}, &testKey{ID: 9}}, + []string{"testDomain2", "testDomain2", "testDomain2"}, + ), + ), + expectedNewQueues: []*processingQueueImpl{ + s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 5}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{ + &testKey{ID: 1}: task.NewMockTask(s.controller), + &testKey{ID: 4}: task.NewMockTask(s.controller), + }, + ), + s.newTestProcessingQueue( + 0, + &testKey{ID: 5}, + &testKey{ID: 7}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}, "testDomain2": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{ + &testKey{ID: 6}: task.NewMockTask(s.controller), + &testKey{ID: 7}: task.NewMockTask(s.controller), + &testKey{ID: 8}: task.NewMockTask(s.controller), + &testKey{ID: 9}: task.NewMockTask(s.controller), + }, + ), + }, + }, + { + // test 4: one queue contain another + queue1: s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 7}, + &testKey{ID: 20}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}}, + ReverseMatch: false, + }, + s.newMockTasksForDomain( + []task.Key{&testKey{ID: 1}, &testKey{ID: 4}, &testKey{ID: 7}}, + []string{"testDomain1", "testDomain1", "testDomain1"}, + ), + ), + queue2: s.newTestProcessingQueue( + 0, + &testKey{ID: 5}, + &testKey{ID: 9}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}}, + ReverseMatch: false, + }, + s.newMockTasksForDomain( + []task.Key{&testKey{ID: 6}, &testKey{ID: 8}, &testKey{ID: 9}}, + []string{"testDomain2", "testDomain2", "testDomain2"}, + ), + ), + expectedNewQueues: []*processingQueueImpl{ + s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 5}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{ + &testKey{ID: 1}: task.NewMockTask(s.controller), + &testKey{ID: 4}: task.NewMockTask(s.controller), + }, + ), + s.newTestProcessingQueue( + 0, + &testKey{ID: 5}, + &testKey{ID: 7}, + &testKey{ID: 10}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}, "testDomain2": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{ + &testKey{ID: 6}: task.NewMockTask(s.controller), + &testKey{ID: 7}: task.NewMockTask(s.controller), + &testKey{ID: 8}: task.NewMockTask(s.controller), + &testKey{ID: 9}: task.NewMockTask(s.controller), + }, + ), + s.newTestProcessingQueue( + 0, + &testKey{ID: 10}, + &testKey{ID: 10}, + &testKey{ID: 20}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{}, + ), + }, + }, + { + // test 5: general case + queue1: s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 10}, + &testKey{ID: 15}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}}, + ReverseMatch: false, + }, + s.newMockTasksForDomain( + []task.Key{&testKey{ID: 1}, &testKey{ID: 4}, &testKey{ID: 7}, &testKey{ID: 10}}, + []string{"testDomain1", "testDomain1", "testDomain1", "testDomain1"}, + ), + ), + queue2: s.newTestProcessingQueue( + 0, + &testKey{ID: 5}, + &testKey{ID: 17}, + &testKey{ID: 20}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}}, + ReverseMatch: 
false, + }, + s.newMockTasksForDomain( + []task.Key{&testKey{ID: 6}, &testKey{ID: 8}, &testKey{ID: 9}, &testKey{ID: 17}}, + []string{"testDomain2", "testDomain2", "testDomain2", "testDomain2"}, + ), + ), + expectedNewQueues: []*processingQueueImpl{ + s.newTestProcessingQueue( + 0, + &testKey{ID: 0}, + &testKey{ID: 5}, + &testKey{ID: 5}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{ + &testKey{ID: 1}: task.NewMockTask(s.controller), + &testKey{ID: 4}: task.NewMockTask(s.controller), + }, + ), + s.newTestProcessingQueue( + 0, + &testKey{ID: 5}, + &testKey{ID: 10}, + &testKey{ID: 15}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain1": {}, "testDomain2": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{ + &testKey{ID: 6}: task.NewMockTask(s.controller), + &testKey{ID: 7}: task.NewMockTask(s.controller), + &testKey{ID: 8}: task.NewMockTask(s.controller), + &testKey{ID: 9}: task.NewMockTask(s.controller), + &testKey{ID: 10}: task.NewMockTask(s.controller), + }, + ), + s.newTestProcessingQueue( + 0, + &testKey{ID: 15}, + &testKey{ID: 17}, + &testKey{ID: 20}, + DomainFilter{ + DomainIDs: map[string]struct{}{"testDomain2": {}}, + ReverseMatch: false, + }, + map[task.Key]task.Task{ + &testKey{ID: 17}: task.NewMockTask(s.controller), + }, + ), + }, + }, + } + + for _, tc := range testCases { + queue1 := s.copyProcessingQueue(tc.queue1) + queue2 := s.copyProcessingQueue(tc.queue2) + s.assertQueuesEqual(tc.expectedNewQueues, queue1.Merge(queue2)) + + queue1 = s.copyProcessingQueue(tc.queue1) + queue2 = s.copyProcessingQueue(tc.queue2) + s.assertQueuesEqual(tc.expectedNewQueues, queue2.Merge(queue1)) + } +} + +func (s *processingQueueSuite) assertQueuesEqual( + expectedQueues []*processingQueueImpl, + actual []ProcessingQueue, +) { + s.Equal(len(expectedQueues), len(actual)) + + actualQueues := make([]*processingQueueImpl, 0, len(actual)) + for _, queue := range actual { + actualQueues = append(actualQueues, queue.(*processingQueueImpl)) + } + + compFn := func(q1, q2 *processingQueueImpl) bool { + if taskKeyEquals(q1.state.ackLevel, q2.state.ackLevel) { + return q1.state.level < q2.state.level + } + + return q1.state.ackLevel.Less(q2.state.ackLevel) + } + + sort.Slice(expectedQueues, func(i, j int) bool { + return compFn(expectedQueues[i], expectedQueues[j]) + }) + sort.Slice(actualQueues, func(i, j int) bool { + return compFn(actualQueues[i], actualQueues[j]) + }) + + for i := 0; i != len(expectedQueues); i++ { + s.assertQueueEqual(expectedQueues[i], actualQueues[i]) + } +} + +func (s *processingQueueSuite) assertQueueEqual( + expected *processingQueueImpl, + actual *processingQueueImpl, +) { + s.Equal(expected.state, actual.state) + s.Equal(len(expected.outstandingTasks), len(actual.outstandingTasks)) + expectedKeys := make([]task.Key, 0, len(expected.outstandingTasks)) + for key := range expected.outstandingTasks { + expectedKeys = append(expectedKeys, key) + } + actualKeys := make([]task.Key, 0, len(actual.outstandingTasks)) + for key := range actual.outstandingTasks { + actualKeys = append(actualKeys, key) + } + sort.Slice(expectedKeys, func(i, j int) bool { + return expectedKeys[i].Less(expectedKeys[j]) + }) + sort.Slice(actualKeys, func(i, j int) bool { + return actualKeys[i].Less(actualKeys[j]) + }) + for i := 0; i != len(expectedKeys); i++ { + s.True(taskKeyEquals(expectedKeys[i], actualKeys[i])) + } +} + +func (s *processingQueueSuite) copyProcessingQueue( + queue *processingQueueImpl, +) 
*processingQueueImpl { + tasks := make(map[task.Key]task.Task) + for key, task := range queue.outstandingTasks { + tasks[key] = task + } + + return s.newTestProcessingQueue( + queue.state.level, + queue.state.ackLevel, + queue.state.readLevel, + queue.state.maxLevel, + queue.state.domainFilter.copy(), + tasks, + ) +} + +func (s *processingQueueSuite) newTestProcessingQueue( + level int, + ackLevel task.Key, + readLevel task.Key, + maxLevel task.Key, + domainFilter DomainFilter, + outstandingTasks map[task.Key]task.Task, +) *processingQueueImpl { + return newProcessingQueue( + &processingQueueStateImpl{ + level: level, + ackLevel: ackLevel, + readLevel: readLevel, + maxLevel: maxLevel, + domainFilter: domainFilter, + }, + outstandingTasks, + s.logger, + s.metricsClient, + ) +} + +func (s *processingQueueSuite) newMockTasksForDomain( + keys []task.Key, + domainID []string, +) map[task.Key]task.Task { + tasks := make(map[task.Key]task.Task) + s.Equal(len(keys), len(domainID)) + + for i := 0; i != len(keys); i++ { + mockTask := task.NewMockTask(s.controller) + mockTask.EXPECT().GetDomainID().Return(domainID[i]).AnyTimes() + tasks[keys[i]] = mockTask + } + + return tasks +} + +func (k *testKey) Less(key task.Key) bool { + return k.ID < key.(*testKey).ID +} diff --git a/service/history/replication/task_fetcher.go b/service/history/replication/task_fetcher.go index ae2f11c7505..a18d2b6d15c 100644 --- a/service/history/replication/task_fetcher.go +++ b/service/history/replication/task_fetcher.go @@ -93,7 +93,7 @@ func NewTaskFetchers( ) TaskFetchers { var fetchers []TaskFetcher - if consumerConfig.Type == serviceConfig.ReplicationConsumerTypeRPC { + if consumerConfig.Type == serviceConfig.ReplicationConsumerTypeRPC && config.EnableRPCReplication() { for clusterName, info := range clusterMetadata.GetAllClusterInfo() { if !info.Enabled { continue diff --git a/service/history/replicatorQueueProcessor.go b/service/history/replicatorQueueProcessor.go index 5b46c550658..ffb7141765f 100644 --- a/service/history/replicatorQueueProcessor.go +++ b/service/history/replicatorQueueProcessor.go @@ -212,27 +212,51 @@ func (p *replicatorQueueProcessorImpl) processHistoryReplicationTask( } err = p.replicator.Publish(replicationTask) - if err == messaging.ErrMessageSizeLimit && replicationTask.HistoryTaskAttributes != nil { + if err == messaging.ErrMessageSizeLimit { // message size exceeds the server messaging size limit // for this specific case, just send out a metadata message and // let receiver fetch from source (for the concrete history events) - err = p.replicator.Publish(p.generateHistoryMetadataTask(replicationTask.HistoryTaskAttributes.TargetClusters, task)) + if metadataTask := p.generateHistoryMetadataTask( + task, + replicationTask, + ); metadataTask != nil { + err = p.replicator.Publish(metadataTask) + } } return err } -func (p *replicatorQueueProcessorImpl) generateHistoryMetadataTask(targetClusters []string, task *persistence.ReplicationTaskInfo) *replicator.ReplicationTask { - return &replicator.ReplicationTask{ - TaskType: replicator.ReplicationTaskTypeHistoryMetadata.Ptr(), - HistoryMetadataTaskAttributes: &replicator.HistoryMetadataTaskAttributes{ - TargetClusters: targetClusters, - DomainId: common.StringPtr(task.DomainID), - WorkflowId: common.StringPtr(task.WorkflowID), - RunId: common.StringPtr(task.RunID), - FirstEventId: common.Int64Ptr(task.FirstEventID), - NextEventId: common.Int64Ptr(task.NextEventID), - }, +func (p *replicatorQueueProcessorImpl) generateHistoryMetadataTask( + task 
*persistence.ReplicationTaskInfo, + replicationTask *replicator.ReplicationTask, +) *replicator.ReplicationTask { + + if replicationTask.HistoryTaskAttributes != nil { + return &replicator.ReplicationTask{ + TaskType: replicator.ReplicationTaskTypeHistoryMetadata.Ptr(), + HistoryMetadataTaskAttributes: &replicator.HistoryMetadataTaskAttributes{ + TargetClusters: replicationTask.HistoryTaskAttributes.TargetClusters, + DomainId: common.StringPtr(task.DomainID), + WorkflowId: common.StringPtr(task.WorkflowID), + RunId: common.StringPtr(task.RunID), + FirstEventId: common.Int64Ptr(task.FirstEventID), + NextEventId: common.Int64Ptr(task.NextEventID), + }, + } + } else if replicationTask.HistoryTaskV2Attributes != nil { + return &replicator.ReplicationTask{ + TaskType: replicator.ReplicationTaskTypeHistoryMetadata.Ptr(), + HistoryMetadataTaskAttributes: &replicator.HistoryMetadataTaskAttributes{ + DomainId: common.StringPtr(task.DomainID), + WorkflowId: common.StringPtr(task.WorkflowID), + RunId: common.StringPtr(task.RunID), + FirstEventId: common.Int64Ptr(task.FirstEventID), + NextEventId: common.Int64Ptr(task.NextEventID), + Version: common.Int64Ptr(task.Version), + }, + } } + return nil } // GenerateReplicationTask generate replication task diff --git a/service/worker/indexer/indexer.go b/service/worker/indexer/indexer.go index 32f68db514c..d0bcbd220bc 100644 --- a/service/worker/indexer/indexer.go +++ b/service/worker/indexer/indexer.go @@ -75,7 +75,7 @@ func NewIndexer(config *Config, client messaging.Client, esClient es.Client, esC } // Start indexer -func (x Indexer) Start() error { +func (x *Indexer) Start() error { visibilityApp := common.VisibilityAppName visConsumerName := getConsumerName(x.visibilityIndexName) x.visibilityProcessor = newIndexProcessor(visibilityApp, visConsumerName, x.kafkaClient, x.esClient, @@ -84,7 +84,7 @@ func (x Indexer) Start() error { } // Stop indexer -func (x Indexer) Stop() { +func (x *Indexer) Stop() { x.visibilityProcessor.Stop() } diff --git a/service/worker/replicator/processor.go b/service/worker/replicator/processor.go index f0b37aceba7..45f80f4ac1d 100644 --- a/service/worker/replicator/processor.go +++ b/service/worker/replicator/processor.go @@ -409,6 +409,7 @@ func (p *replicationTaskProcessor) handleHistoryMetadataReplicationTask( p.historyClient, p.metricsClient, p.historyRereplicator, + p.nDCHistoryResender, ) return p.sequentialTaskProcessor.Submit(historyMetadataReplicationTask) } diff --git a/service/worker/replicator/replicationTask.go b/service/worker/replicator/replicationTask.go index ecda154791a..b8cfca59a74 100644 --- a/service/worker/replicator/replicationTask.go +++ b/service/worker/replicator/replicationTask.go @@ -74,7 +74,9 @@ type ( sourceCluster string firstEventID int64 nextEventID int64 + version *int64 historyRereplicator xdc.HistoryRereplicator + nDCHistoryResender xdc.NDCHistoryResender } historyReplicationV2Task struct { @@ -218,6 +220,7 @@ func newHistoryMetadataReplicationTask( historyClient history.Client, metricsClient metrics.Client, historyRereplicator xdc.HistoryRereplicator, + nDCHistoryResender xdc.NDCHistoryResender, ) *historyMetadataReplicationTask { attr := replicationTask.HistoryMetadataTaskAttributes @@ -226,6 +229,11 @@ func newHistoryMetadataReplicationTask( tag.WorkflowRunID(attr.GetRunId()), tag.WorkflowFirstEventID(attr.GetFirstEventId()), tag.WorkflowNextEventID(attr.GetNextEventId())) + var version *int64 + if attr.IsSetVersion() { + version = attr.Version + } + return 
&historyMetadataReplicationTask{ workflowReplicationTask: workflowReplicationTask{ metricsScope: metrics.HistoryMetadataReplicationTaskScope, @@ -246,7 +254,9 @@ func newHistoryMetadataReplicationTask( sourceCluster: sourceCluster, firstEventID: attr.GetFirstEventId(), nextEventID: attr.GetNextEventId(), + version: version, historyRereplicator: historyRereplicator, + nDCHistoryResender: nDCHistoryResender, } } @@ -410,6 +420,16 @@ func (t *historyMetadataReplicationTask) Execute() error { stopwatch := t.metricsClient.StartTimer(metrics.HistoryRereplicationByHistoryMetadataReplicationScope, metrics.CadenceClientLatency) defer stopwatch.Stop() + if t.version != nil { + return t.nDCHistoryResender.SendSingleWorkflowHistory( + t.queueID.DomainID, + t.queueID.WorkflowID, + t.queueID.RunID, + common.Int64Ptr(t.firstEventID-1), //NDC resend API is exclusive-exclusive. + t.version, + common.Int64Ptr(t.nextEventID), + t.version) + } return t.historyRereplicator.SendMultiWorkflowHistory( t.queueID.DomainID, t.queueID.WorkflowID, t.queueID.RunID, t.firstEventID, diff --git a/service/worker/replicator/replicationTask_test.go b/service/worker/replicator/replicationTask_test.go index 3e5ee6c276b..b588bb00b1e 100644 --- a/service/worker/replicator/replicationTask_test.go +++ b/service/worker/replicator/replicationTask_test.go @@ -90,6 +90,7 @@ type ( mockMsg *messageMocks.Message mockHistoryClient *historyservicetest.MockClient mockRereplicator *xdc.MockHistoryRereplicator + mockNDCResender *xdc.MockNDCHistoryResender controller *gomock.Controller } @@ -201,6 +202,7 @@ func (s *historyMetadataReplicationTaskSuite) SetupTest() { s.controller = gomock.NewController(s.T()) s.mockHistoryClient = historyservicetest.NewMockClient(s.controller) s.mockRereplicator = &xdc.MockHistoryRereplicator{} + s.mockNDCResender = &xdc.MockNDCHistoryResender{} } func (s *historyMetadataReplicationTaskSuite) TearDownTest() { @@ -635,6 +637,7 @@ func (s *historyMetadataReplicationTaskSuite) TestNewHistoryMetadataReplicationT s.mockHistoryClient, s.metricsClient, s.mockRereplicator, + s.mockNDCResender, ) // overwrite the logger for easy comparison metadataTask.logger = s.logger @@ -663,6 +666,7 @@ func (s *historyMetadataReplicationTaskSuite) TestNewHistoryMetadataReplicationT firstEventID: replicationAttr.GetFirstEventId(), nextEventID: replicationAttr.GetNextEventId(), historyRereplicator: s.mockRereplicator, + nDCHistoryResender: s.mockNDCResender, }, metadataTask, ) @@ -670,7 +674,7 @@ func (s *historyMetadataReplicationTaskSuite) TestNewHistoryMetadataReplicationT func (s *historyMetadataReplicationTaskSuite) TestExecute() { task := newHistoryMetadataReplicationTask(s.getHistoryMetadataReplicationTask(), s.mockMsg, s.sourceCluster, s.logger, - s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator) + s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator, s.mockNDCResender) randomErr := errors.New("some random error") s.mockRereplicator.On("SendMultiWorkflowHistory", @@ -685,7 +689,7 @@ func (s *historyMetadataReplicationTaskSuite) TestExecute() { func (s *historyMetadataReplicationTaskSuite) TestHandleErr_NotRetryErr() { task := newHistoryMetadataReplicationTask(s.getHistoryMetadataReplicationTask(), s.mockMsg, s.sourceCluster, s.logger, - s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator) + s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator, s.mockNDCResender) randomErr := errors.New("some 
random error") err := task.HandleErr(randomErr) @@ -694,7 +698,7 @@ func (s *historyMetadataReplicationTaskSuite) TestHandleErr_NotRetryErr() { func (s *historyMetadataReplicationTaskSuite) TestHandleErr_RetryErr() { task := newHistoryMetadataReplicationTask(s.getHistoryMetadataReplicationTask(), s.mockMsg, s.sourceCluster, s.logger, - s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator) + s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator, s.mockNDCResender) retryErr := &shared.RetryTaskError{ DomainId: common.StringPtr(task.queueID.DomainID), WorkflowId: common.StringPtr(task.queueID.WorkflowID), @@ -727,14 +731,14 @@ func (s *historyMetadataReplicationTaskSuite) TestHandleErr_RetryErr() { func (s *historyMetadataReplicationTaskSuite) TestRetryErr_NonRetryable() { err := &shared.BadRequestError{} task := newHistoryMetadataReplicationTask(s.getHistoryMetadataReplicationTask(), s.mockMsg, s.sourceCluster, s.logger, - s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator) + s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator, s.mockNDCResender) s.False(task.RetryErr(err)) } func (s *historyMetadataReplicationTaskSuite) TestRetryErr_Retryable() { err := &shared.InternalServiceError{} task := newHistoryMetadataReplicationTask(s.getHistoryMetadataReplicationTask(), s.mockMsg, s.sourceCluster, s.logger, - s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator) + s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator, s.mockNDCResender) task.attempt = 0 s.True(task.RetryErr(err)) } @@ -742,7 +746,7 @@ func (s *historyMetadataReplicationTaskSuite) TestRetryErr_Retryable() { func (s *historyMetadataReplicationTaskSuite) TestRetryErr_Retryable_ExceedAttempt() { err := &shared.InternalServiceError{} task := newHistoryMetadataReplicationTask(s.getHistoryMetadataReplicationTask(), s.mockMsg, s.sourceCluster, s.logger, - s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator) + s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator, s.mockNDCResender) task.attempt = s.config.ReplicationTaskMaxRetryCount() + 100 s.False(task.RetryErr(err)) } @@ -750,14 +754,14 @@ func (s *historyMetadataReplicationTaskSuite) TestRetryErr_Retryable_ExceedAttem func (s *historyMetadataReplicationTaskSuite) TestRetryErr_Retryable_ExceedDuration() { err := &shared.InternalServiceError{} task := newHistoryMetadataReplicationTask(s.getHistoryMetadataReplicationTask(), s.mockMsg, s.sourceCluster, s.logger, - s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator) + s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator, s.mockNDCResender) task.startTime = s.mockTimeSource.Now().Add(-2 * s.config.ReplicationTaskMaxRetryDuration()) s.False(task.RetryErr(err)) } func (s *historyMetadataReplicationTaskSuite) TestAck() { task := newHistoryMetadataReplicationTask(s.getHistoryMetadataReplicationTask(), s.mockMsg, s.sourceCluster, s.logger, - s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator) + s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator, s.mockNDCResender) s.mockMsg.On("Ack").Return(nil).Once() task.Ack() @@ -765,7 +769,7 @@ func (s *historyMetadataReplicationTaskSuite) TestAck() { func (s *historyMetadataReplicationTaskSuite) TestNack() { task := 
newHistoryMetadataReplicationTask(s.getHistoryMetadataReplicationTask(), s.mockMsg, s.sourceCluster, s.logger, - s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator) + s.config, s.mockTimeSource, s.mockHistoryClient, s.metricsClient, s.mockRereplicator, s.mockNDCResender) s.mockMsg.On("Nack").Return(nil).Once() task.Nack() diff --git a/service/worker/replicator/replicator.go b/service/worker/replicator/replicator.go index ccdc9633b3b..e71f42509aa 100644 --- a/service/worker/replicator/replicator.go +++ b/service/worker/replicator/replicator.go @@ -76,6 +76,7 @@ type ( ReplicationTaskMaxRetryDuration dynamicconfig.DurationPropertyFn ReplicationTaskContextTimeout dynamicconfig.DurationPropertyFn ReReplicationContextTimeout dynamicconfig.DurationPropertyFnWithDomainIDFilter + EnableRPCReplication dynamicconfig.BoolPropertyFn } ) @@ -123,7 +124,7 @@ func (r *Replicator) Start() error { } if clusterName != currentClusterName { - if replicationConsumerConfig.Type == config.ReplicationConsumerTypeRPC { + if replicationConsumerConfig.Type == config.ReplicationConsumerTypeRPC && r.config.EnableRPCReplication() { processor := newDomainReplicationMessageProcessor( clusterName, r.logger.WithTags(tag.ComponentReplicationTaskProcessor, tag.SourceCluster(clusterName)), diff --git a/service/worker/scanner/executions/common/interfaces.go b/service/worker/scanner/executions/common/interfaces.go new file mode 100644 index 00000000000..32fe78e921b --- /dev/null +++ b/service/worker/scanner/executions/common/interfaces.go @@ -0,0 +1,70 @@ +// The MIT License (MIT) +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package common + +type ( + // InvariantManager represents a manager of several invariants. + // It can be used to run a group of invariant checks or fixes. + // It is responsible for running invariants in their dependency order. + InvariantManager interface { + RunChecks(Execution) CheckResult + RunFixes(Execution) FixResult + InvariantTypes() []InvariantType + } + + // Invariant represents an invariant of a single execution. + // It can be used to check that the execution satisfies the invariant. + // It can also be used to fix the invariant for an execution. + Invariant interface { + Check(Execution, *InvariantResourceBag) CheckResult + Fix(Execution, *InvariantResourceBag) FixResult + InvariantType() InvariantType + } + + // ExecutionIterator gets Executions from underlying store. 
+ ExecutionIterator interface { + // Next returns the next execution found. Any error reading from underlying store + // or converting store entry to Execution will result in an error after which iterator cannot be used. + Next() (*Execution, error) + // HasNext indicates if the iterator has a next element. If HasNext is true + // it is guaranteed that Next will return a nil error and a non-nil Execution. + HasNext() bool + } + + // Scanner is used to scan over all executions in a shard. It is responsible for three things: + // 1. Checking invariants for each execution. + // 2. Recording corruption and failures to durable store. + // 3. Producing a ShardScanReport + Scanner interface { + Scan() ShardScanReport + } + + // Fixer is used to fix all executions in a shard. It is responsible for four things: + // 1. Confirming that each execution it scans is corrupted. + // 2. Attempting to fix any confirmed corrupted executions. + // 3. Recording skipped executions, failed to fix executions and successfully fixed executions to durable store. + // 4. Producing a ShardFixReport + Fixer interface { + Fix() ShardFixReport + } +) diff --git a/service/worker/scanner/executions/common/types.go b/service/worker/scanner/executions/common/types.go new file mode 100644 index 00000000000..a4e371c5886 --- /dev/null +++ b/service/worker/scanner/executions/common/types.go @@ -0,0 +1,187 @@ +// The MIT License (MIT) +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package common + +import ( + "github.com/uber/cadence/common/persistence" +) + +type ( + // CheckResultType is the result type of running an invariant check + CheckResultType string + // FixResultType is the result type of running an invariant fix + FixResultType string + // InvariantType is the type of an invariant + InvariantType string +) + +const ( + // CheckResultTypeFailed indicates a failure occurred while attempting to run check + CheckResultTypeFailed CheckResultType = "failed" + // CheckResultTypeCorrupted indicates check successfully ran and detected a corruption + CheckResultTypeCorrupted = "corrupted" + // CheckResultTypeHealthy indicates check successfully ran and detected no corruption + CheckResultTypeHealthy = "healthy" + + // FixResultTypeSkipped indicates that fix skipped an execution + FixResultTypeSkipped FixResultType = "skipped" + // FixResultTypeFixed indicates that fix successfully fixed an execution + FixResultTypeFixed = "fixed" + // FixResultTypeFailed indicates that fix attempted to fix an execution but failed to do so + FixResultTypeFailed = "failed" + + // HistoryExistsInvariantType asserts that history must exist if concrete execution exists + HistoryExistsInvariantType InvariantType = "history_exists" + // ValidFirstEventInvariantType asserts that the first event in a history must be of a specific form + ValidFirstEventInvariantType = "valid_first_event" + // PairedWithCurrentInvariantType asserts that an open concrete execution must have a valid current execution + PairedWithCurrentInvariantType = "paired_with_current" +) + +// The following are types related to Invariant. +type ( + // Execution is an execution which should be checked or fixed. + Execution struct { + ShardID int + DomainID string + WorkflowID string + RunID string + BranchToken []byte + State int + } + + // InvariantResourceBag is a union of resources used to pass results from one Invariant to another Invariant. + InvariantResourceBag struct { + History *persistence.InternalReadHistoryBranchResponse + } + + // CheckResult is the result of running Check. + CheckResult struct { + CheckResultType CheckResultType + Info string + InfoDetails string + } + + // FixResult is the result of running Fix. + FixResult struct { + FixResultType FixResultType + Info string + InfoDetails string + } +) + +// The following are serializable types that represent the reports returned by Scan and Fix. +type ( + // ShardScanReport is the report of running Scan on a single shard. + ShardScanReport struct { + ShardID int + Stats ShardScanStats + Result ShardScanResult + } + + // ShardScanStats indicates the stats of executions which were handled by shard Scan. + ShardScanStats struct { + ExecutionsCount int64 + CorruptedCount int64 + CheckFailedCount int64 + CorruptionByType map[InvariantType]int64 + CorruptedOpenExecutionCount int64 + } + + // ShardScanResult indicates the result of running scan on a shard. + // Exactly one of ControlFlowFailure or ShardScanKeys will be non-nil + ShardScanResult struct { + ShardScanKeys *ShardScanKeys + ControlFlowFailure *ControlFlowFailure + } + + // ShardScanKeys are the keys to the blobs that were uploaded during scan. + ShardScanKeys struct { + Corrupt Keys + Failed Keys + } + + // ShardFixReport is the report of running Fix on a single shard + ShardFixReport struct { + ShardID int + Handled ShardFixHandled + Result ShardFixResult + } + + // ShardFixHandled indicates the executions which were handled by fix. 
+ ShardFixHandled struct { + ExecutionCount int64 + FixedCount int64 + SkippedCount int64 + FailedCount int64 + } + + // ShardFixResult indicates the result of running fix on a shard. + // Exactly one of ControlFlowFailure or ShardFixKeys will be non-nil. + ShardFixResult struct { + ShardFixKeys *ShardFixKeys + ControlFlowFailure *ControlFlowFailure + } + + // ShardFixKeys are the keys to the blobs that were uploaded during fix. + ShardFixKeys struct { + Skipped Keys + Failed Keys + Fixed Keys + } + + // ControlFlowFailure indicates an error occurred which makes it impossible to + // even attempt to check or fix one or more execution(s). Note that it is not a ControlFlowFailure + // if a check or fix fails, it is only a ControlFlowFailure if + // an error is encountered which makes even attempting to check or fix impossible. + ControlFlowFailure struct { + Info string + InfoDetails string + } + + // Keys indicate the keys which were uploaded during a scan or fix. + // Keys are constructed as uuid_page.extension. MinPage and MaxPage are + // both inclusive and pages are sequential, meaning from this struct all pages can be deterministically constructed. + Keys struct { + UUID string + MinPage int + MaxPage int + Extension string + } +) + +// The following are serializable types which get output by Scan and Fix to durable sinks. +type ( + // ScanOutputEntity represents a single execution that should be durably recorded by Scan. + ScanOutputEntity struct { + Execution Execution + Result CheckResult + } + + // FixOutputEntity represents a single execution that should be durably recorded by fix. + // It contains the ScanOutputEntity that was given as input to fix. + FixOutputEntity struct { + ScanOutputEntity ScanOutputEntity + Result FixResult + } +) diff --git a/service/worker/service.go b/service/worker/service.go index 878f917533b..e6b99028cd4 100644 --- a/service/worker/service.go +++ b/service/worker/service.go @@ -119,6 +119,7 @@ func NewConfig(params *service.BootstrapParams) *Config { ReplicationTaskMaxRetryDuration: dc.GetDurationProperty(dynamicconfig.WorkerReplicationTaskMaxRetryDuration, 15*time.Minute), ReplicationTaskContextTimeout: dc.GetDurationProperty(dynamicconfig.WorkerReplicationTaskContextDuration, 30*time.Second), ReReplicationContextTimeout: dc.GetDurationPropertyFilteredByDomainID(dynamicconfig.WorkerReReplicationContextTimeout, 0*time.Second), + EnableRPCReplication: dc.GetBoolProperty(dynamicconfig.WorkerEnableRPCReplication, false), }, ArchiverConfig: &archiver.Config{ ArchiverConcurrency: dc.GetIntProperty(dynamicconfig.WorkerArchiverConcurrency, 50), diff --git a/tools/cli/adminKafkaCommands.go b/tools/cli/adminKafkaCommands.go index 6b0808cd6b5..63e604acce1 100644 --- a/tools/cli/adminKafkaCommands.go +++ b/tools/cli/adminKafkaCommands.go @@ -60,10 +60,18 @@ import ( "github.com/uber/cadence/service/history" ) -type filterFn func(*replicator.ReplicationTask) bool -type filterFnForVisibility func(*indexer.Message) bool +type ( + filterFn func(*replicator.ReplicationTask) bool + filterFnForVisibility func(*indexer.Message) bool -type kafkaMessageType int + kafkaMessageType int + + historyV2Task struct { + Task *replicator.ReplicationTask + Events []*shared.HistoryEvent + NewRunEvents []*shared.HistoryEvent + } +) const ( kafkaMessageTypeReplicationTask kafkaMessageType = iota @@ -121,13 +129,14 @@ func AdminKafkaParse(c *cli.Context) { readerCh := make(chan []byte, chanBufferSize) writerCh := newWriterChannel(kafkaMessageType(c.Int(FlagMessageType))) doneCh := 
make(chan struct{}) + serializer := persistence.NewPayloadSerializer() var skippedCount int32 skipErrMode := c.Bool(FlagSkipErrorMode) go startReader(inputFile, readerCh) go startParser(readerCh, writerCh, skipErrMode, &skippedCount) - go startWriter(outputFile, writerCh, doneCh, &skippedCount, c) + go startWriter(outputFile, writerCh, doneCh, &skippedCount, serializer, c) <-doneCh @@ -242,6 +251,7 @@ func startWriter( writerCh *writerChannel, doneCh chan struct{}, skippedCount *int32, + serializer persistence.PayloadSerializer, c *cli.Context, ) { @@ -252,7 +262,7 @@ func startWriter( switch writerCh.Type { case kafkaMessageTypeReplicationTask: - writeReplicationTask(outputFile, writerCh, skippedCount, skipErrMode, headerMode, c) + writeReplicationTask(outputFile, writerCh, skippedCount, skipErrMode, headerMode, serializer, c) case kafkaMessageTypeVisibilityMsg: writeVisibilityMessage(outputFile, writerCh, skippedCount, skipErrMode, headerMode, c) } @@ -264,6 +274,7 @@ func writeReplicationTask( skippedCount *int32, skipErrMode bool, headerMode bool, + serializer persistence.PayloadSerializer, c *cli.Context, ) { filter := buildFilterFn(c.String(FlagWorkflowID), c.String(FlagRunID)) @@ -275,7 +286,7 @@ Loop: break Loop } if filter(task) { - jsonStr, err := json.Marshal(task) + jsonStr, err := decodeReplicationTask(task, serializer) if err != nil { if !skipErrMode { ErrorAndExit(malformedMessage, fmt.Errorf("failed to encode into json, err: %v", err)) @@ -899,3 +910,39 @@ func loadBrokerConfig(hostFile string, cluster string) ([]string, *tls.Config, e } return nil, nil, fmt.Errorf("failed to load broker for cluster %v", cluster) } + +func decodeReplicationTask( + task *replicator.ReplicationTask, + serializer persistence.PayloadSerializer, +) ([]byte, error) { + + switch task.GetTaskType() { + case replicator.ReplicationTaskTypeHistoryV2: + historyV2 := task.GetHistoryTaskV2Attributes() + events, err := serializer.DeserializeBatchEvents( + persistence.NewDataBlobFromThrift(historyV2.Events), + ) + if err != nil { + return nil, err + } + var newRunEvents []*shared.HistoryEvent + if historyV2.IsSetNewRunEvents() { + newRunEvents, err = serializer.DeserializeBatchEvents( + persistence.NewDataBlobFromThrift(historyV2.NewRunEvents), + ) + if err != nil { + return nil, err + } + } + historyV2.Events = nil + historyV2.NewRunEvents = nil + historyV2Attributes := &historyV2Task{ + Task: task, + Events: events, + NewRunEvents: newRunEvents, + } + return json.Marshal(historyV2Attributes) + default: + return json.Marshal(task) + } +} diff --git a/tools/cli/app.go b/tools/cli/app.go index ce8166b1034..12bbedc1da7 100644 --- a/tools/cli/app.go +++ b/tools/cli/app.go @@ -27,7 +27,7 @@ import ( const ( // Version is the controlled version string. It should be updated every time // before we release a new version. 
- Version = "0.9.1" + Version = "0.12.0" ) // SetFactory is used to set the ClientFactory global diff --git a/tools/cli/defs.go b/tools/cli/defs.go index 96c02d5e85d..f340408fe83 100644 --- a/tools/cli/defs.go +++ b/tools/cli/defs.go @@ -65,6 +65,8 @@ const ( showErrorStackEnv = `CADENCE_CLI_SHOW_STACKS` searchAttrInputSeparator = "|" + + defaultGracefulFailoverTimeoutInSeconds = 60 ) var envKeysForUserName = []string{ diff --git a/tools/cli/domainCommands.go b/tools/cli/domainCommands.go index 4ab1ce97aa2..9144d47bd62 100644 --- a/tools/cli/domainCommands.go +++ b/tools/cli/domainCommands.go @@ -39,6 +39,10 @@ import ( "github.com/uber/cadence/common/domain" ) +var ( + gracefulFailoverType = "grace" +) + type ( domainCLIImpl struct { // used when making RPC call to frontend service @@ -243,13 +247,19 @@ func (d *domainCLIImpl) UpdateDomain(c *cli.Context) { badBinaryToDelete = common.StringPtr(c.String(FlagRemoveBadBinary)) } + var failoverTimeout *int32 + if c.String(FlagFailoverType) == gracefulFailoverType { + timeout := int32(c.Int(FlagFailoverTimeout)) + failoverTimeout = &timeout + } + updateInfo := &shared.UpdateDomainInfo{ Description: common.StringPtr(description), OwnerEmail: common.StringPtr(ownerEmail), Data: domainData, } updateConfig := &shared.DomainConfiguration{ - WorkflowExecutionRetentionPeriodInDays: common.Int32Ptr(int32(retentionDays)), + WorkflowExecutionRetentionPeriodInDays: common.Int32Ptr(retentionDays), EmitMetric: common.BoolPtr(emitMetric), HistoryArchivalStatus: archivalStatus(c, FlagHistoryArchivalStatus), HistoryArchivalURI: common.StringPtr(c.String(FlagHistoryArchivalURI)), @@ -266,6 +276,7 @@ func (d *domainCLIImpl) UpdateDomain(c *cli.Context) { Configuration: updateConfig, ReplicationConfiguration: replicationConfig, DeleteBadBinary: badBinaryToDelete, + FailoverTimeoutInSeconds: failoverTimeout, } } diff --git a/tools/cli/domainUtils.go b/tools/cli/domainUtils.go index 409ec307a89..03fb4de2dde 100644 --- a/tools/cli/domainUtils.go +++ b/tools/cli/domainUtils.go @@ -162,6 +162,15 @@ var ( Name: FlagReason, Usage: "Reason for the operation", }, + cli.StringFlag{ + Name: FlagFailoverTypeWithAlias, + Usage: "Domain failover type. Default value: force. Options: [force,grace]", + }, + cli.IntFlag{ + Name: FlagFailoverTimeoutWithAlias, + Value: defaultGracefulFailoverTimeoutInSeconds, + Usage: "[Optional] Domain failover timeout in seconds.", + }, } describeDomainFlags = []cli.Flag{ diff --git a/tools/cli/flags.go b/tools/cli/flags.go index 7026730d029..545907c6837 100644 --- a/tools/cli/flags.go +++ b/tools/cli/flags.go @@ -221,6 +221,10 @@ const ( FlagUpperShardBound = "upper_shard_bound" FlagInputDirectory = "input_directory" FlagSkipHistoryChecks = "skip_history_checks" + FlagFailoverType = "failover_type" + FlagFailoverTypeWithAlias = FlagFailoverType + ", ft" + FlagFailoverTimeout = "failover_timeout_seconds" + FlagFailoverTimeoutWithAlias = FlagFailoverTimeout + ", fts" ) var flagsForExecution = []cli.Flag{