diff --git a/api/historyservice/v1/request_response.pb.go b/api/historyservice/v1/request_response.pb.go index 1f573bad57..93cc3a5c56 100644 --- a/api/historyservice/v1/request_response.pb.go +++ b/api/historyservice/v1/request_response.pb.go @@ -1662,8 +1662,12 @@ type RecordActivityTaskStartedRequest struct { // Revision number that was sent by matching when the task was dispatched. Used to resolve eventual consistency issues // that may arise due to stale routing configs in task queue partitions. TaskDispatchRevisionNumber int64 `protobuf:"varint,13,opt,name=task_dispatch_revision_number,json=taskDispatchRevisionNumber,proto3" json:"task_dispatch_revision_number,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Reference to the Chasm component for activity execution (if applicable). For standalone activities, all necessary + // start information is carried within this component, obviating the need to use the fields that apply to embedded + // activities with the exception of version_directive. 
+ ComponentRef []byte `protobuf:"bytes,14,opt,name=component_ref,json=componentRef,proto3" json:"component_ref,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RecordActivityTaskStartedRequest) Reset() { @@ -1773,6 +1777,13 @@ func (x *RecordActivityTaskStartedRequest) GetTaskDispatchRevisionNumber() int64 return 0 } +func (x *RecordActivityTaskStartedRequest) GetComponentRef() []byte { + if x != nil { + return x.ComponentRef + } + return nil +} + type RecordActivityTaskStartedResponse struct { state protoimpl.MessageState `protogen:"open.v1"` ScheduledEvent *v115.HistoryEvent `protobuf:"bytes,1,opt,name=scheduled_event,json=scheduledEvent,proto3" json:"scheduled_event,omitempty"` @@ -10265,7 +10276,7 @@ const file_temporal_server_api_historyservice_v1_request_response_proto_rawDesc "\fQueriesEntry\x12\x10\n" + "\x03key\x18\x01 \x01(\tR\x03key\x12:\n" + "\x05value\x18\x02 \x01(\v2$.temporal.api.query.v1.WorkflowQueryR\x05value:\x028\x01J\x04\b\n" + - "\x10\v\"\xc6\x06\n" + + "\x10\v\"\xcd\x06\n" + " RecordActivityTaskStartedRequest\x12!\n" + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12X\n" + "\x12workflow_execution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x11workflowExecution\x12,\n" + @@ -10279,7 +10290,8 @@ const file_temporal_server_api_historyservice_v1_request_response_proto_rawDesc "\x14scheduled_deployment\x18\n" + " \x01(\v2&.temporal.api.deployment.v1.DeploymentR\x13scheduledDeployment\x12c\n" + "\x11version_directive\x18\f \x01(\v26.temporal.server.api.taskqueue.v1.TaskVersionDirectiveR\x10versionDirective\x12A\n" + - "\x1dtask_dispatch_revision_number\x18\r \x01(\x03R\x1ataskDispatchRevisionNumber:$\x92\xc4\x03 *\x1eworkflow_execution.workflow_idJ\x04\b\x04\x10\x05J\x04\b\v\x10\f\"\xfc\x05\n" + + "\x1dtask_dispatch_revision_number\x18\r \x01(\x03R\x1ataskDispatchRevisionNumber\x12#\n" + + "\rcomponent_ref\x18\x0e 
\x01(\fR\fcomponentRef:\x06\x92\xc4\x03\x02\b\x01J\x04\b\x04\x10\x05J\x04\b\v\x10\f\"\xfc\x05\n" + "!RecordActivityTaskStartedResponse\x12N\n" + "\x0fscheduled_event\x18\x01 \x01(\v2%.temporal.api.history.v1.HistoryEventR\x0escheduledEvent\x12=\n" + "\fstarted_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12\x18\n" + diff --git a/api/matchingservice/v1/request_response.pb.go b/api/matchingservice/v1/request_response.pb.go index c200c0f291..4786dd544c 100644 --- a/api/matchingservice/v1/request_response.pb.go +++ b/api/matchingservice/v1/request_response.pb.go @@ -753,8 +753,12 @@ type AddActivityTaskRequest struct { ForwardInfo *v18.TaskForwardInfo `protobuf:"bytes,11,opt,name=forward_info,json=forwardInfo,proto3" json:"forward_info,omitempty"` Stamp int32 `protobuf:"varint,12,opt,name=stamp,proto3" json:"stamp,omitempty"` Priority *v11.Priority `protobuf:"bytes,13,opt,name=priority,proto3" json:"priority,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Reference to the Chasm component for activity execution (if applicable). For standalone activities, all + // necessary start information is carried within this component, obviating the need to use the fields that apply to + // embedded activities. 
+ ComponentRef []byte `protobuf:"bytes,14,opt,name=component_ref,json=componentRef,proto3" json:"component_ref,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *AddActivityTaskRequest) Reset() { @@ -857,6 +861,13 @@ func (x *AddActivityTaskRequest) GetPriority() *v11.Priority { return nil } +func (x *AddActivityTaskRequest) GetComponentRef() []byte { + if x != nil { + return x.ComponentRef + } + return nil +} + type AddActivityTaskResponse struct { state protoimpl.MessageState `protogen:"open.v1"` // When present, it means that the task is spooled to a versioned queue of this build ID @@ -5176,7 +5187,7 @@ const file_temporal_server_api_matchingservice_v1_request_response_proto_rawDesc "\bpriority\x18\f \x01(\v2 .temporal.api.common.v1.PriorityR\bpriority\x12\x14\n" + "\x05stamp\x18\r \x01(\x05R\x05stamp\"E\n" + "\x17AddWorkflowTaskResponse\x12*\n" + - "\x11assigned_build_id\x18\x01 \x01(\tR\x0fassignedBuildId\"\xa3\x05\n" + + "\x11assigned_build_id\x18\x01 \x01(\tR\x0fassignedBuildId\"\xc8\x05\n" + "\x16AddActivityTaskRequest\x12!\n" + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12C\n" + @@ -5189,7 +5200,8 @@ const file_temporal_server_api_matchingservice_v1_request_response_proto_rawDesc " \x01(\v26.temporal.server.api.taskqueue.v1.TaskVersionDirectiveR\x10versionDirective\x12T\n" + "\fforward_info\x18\v \x01(\v21.temporal.server.api.taskqueue.v1.TaskForwardInfoR\vforwardInfo\x12\x14\n" + "\x05stamp\x18\f \x01(\x05R\x05stamp\x12<\n" + - "\bpriority\x18\r \x01(\v2 .temporal.api.common.v1.PriorityR\bpriorityJ\x04\b\x03\x10\x04\"E\n" + + "\bpriority\x18\r \x01(\v2 .temporal.api.common.v1.PriorityR\bpriority\x12#\n" + + "\rcomponent_ref\x18\x0e \x01(\fR\fcomponentRefJ\x04\b\x03\x10\x04\"E\n" + "\x17AddActivityTaskResponse\x12*\n" + "\x11assigned_build_id\x18\x01 \x01(\tR\x0fassignedBuildId\"\xd3\x03\n" + 
"\x14QueryWorkflowRequest\x12!\n" + diff --git a/api/persistence/v1/tasks.pb.go b/api/persistence/v1/tasks.pb.go index 31ff526c69..593f1940bc 100644 --- a/api/persistence/v1/tasks.pb.go +++ b/api/persistence/v1/tasks.pb.go @@ -101,8 +101,10 @@ type TaskInfo struct { // TaskVersionDirective, which is unversioned.) VersionDirective *v11.TaskVersionDirective `protobuf:"bytes,8,opt,name=version_directive,json=versionDirective,proto3" json:"version_directive,omitempty"` // Stamp field allows to differentiate between different instances of the same task - Stamp int32 `protobuf:"varint,9,opt,name=stamp,proto3" json:"stamp,omitempty"` - Priority *v12.Priority `protobuf:"bytes,10,opt,name=priority,proto3" json:"priority,omitempty"` + Stamp int32 `protobuf:"varint,9,opt,name=stamp,proto3" json:"stamp,omitempty"` + Priority *v12.Priority `protobuf:"bytes,10,opt,name=priority,proto3" json:"priority,omitempty"` + // Reference to any chasm component associated with this task + ComponentRef []byte `protobuf:"bytes,11,opt,name=component_ref,json=componentRef,proto3" json:"component_ref,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -207,6 +209,13 @@ func (x *TaskInfo) GetPriority() *v12.Priority { return nil } +func (x *TaskInfo) GetComponentRef() []byte { + if x != nil { + return x.ComponentRef + } + return nil +} + // task_queue column type TaskQueueInfo struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -533,7 +542,7 @@ const file_temporal_server_api_persistence_v1_tasks_proto_rawDesc = "" + "\x11AllocatedTaskInfo\x12@\n" + "\x04data\x18\x01 \x01(\v2,.temporal.server.api.persistence.v1.TaskInfoR\x04data\x12\x1b\n" + "\ttask_pass\x18\x03 \x01(\x03R\btaskPass\x12\x17\n" + - "\atask_id\x18\x02 \x01(\x03R\x06taskId\"\x87\x04\n" + + "\atask_id\x18\x02 \x01(\x03R\x06taskId\"\xac\x04\n" + "\bTaskInfo\x12!\n" + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + "\vworkflow_id\x18\x02 \x01(\tR\n" + @@ -548,7 +557,8 @@ const 
file_temporal_server_api_persistence_v1_tasks_proto_rawDesc = "" + "\x11version_directive\x18\b \x01(\v26.temporal.server.api.taskqueue.v1.TaskVersionDirectiveR\x10versionDirective\x12\x14\n" + "\x05stamp\x18\t \x01(\x05R\x05stamp\x12<\n" + "\bpriority\x18\n" + - " \x01(\v2 .temporal.api.common.v1.PriorityR\bpriority\"\x97\x04\n" + + " \x01(\v2 .temporal.api.common.v1.PriorityR\bpriority\x12#\n" + + "\rcomponent_ref\x18\v \x01(\fR\fcomponentRef\"\x97\x04\n" + "\rTaskQueueInfo\x12!\n" + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x12\n" + "\x04name\x18\x02 \x01(\tR\x04name\x12A\n" + diff --git a/api/token/v1/message.pb.go b/api/token/v1/message.pb.go index b28738276e..4751af251b 100644 --- a/api/token/v1/message.pb.go +++ b/api/token/v1/message.pb.go @@ -257,8 +257,10 @@ type Task struct { Version int64 `protobuf:"varint,11,opt,name=version,proto3" json:"version,omitempty"` StartedTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` StartVersion int64 `protobuf:"varint,13,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Reference to the associated Chasm component, if provided. 
+ ComponentRef []byte `protobuf:"bytes,14,opt,name=component_ref,json=componentRef,proto3" json:"component_ref,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Task) Reset() { @@ -382,6 +384,13 @@ func (x *Task) GetStartVersion() int64 { return 0 } +func (x *Task) GetComponentRef() []byte { + if x != nil { + return x.ComponentRef + } + return nil +} + type QueryTask struct { state protoimpl.MessageState `protogen:"open.v1"` NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` @@ -679,7 +688,7 @@ const file_temporal_server_api_token_v1_message_proto_rawDesc = "" + "endEventId\x12*\n" + "\x11end_event_version\x18\a \x01(\x03R\x0fendEventVersion\x12+\n" + "\x11persistence_token\x18\b \x01(\fR\x10persistenceToken\x12]\n" + - "\x11version_histories\x18\t \x01(\v20.temporal.server.api.history.v1.VersionHistoriesR\x10versionHistoriesJ\x04\b\x01\x10\x02\"\xfd\x03\n" + + "\x11version_histories\x18\t \x01(\v20.temporal.server.api.history.v1.VersionHistoriesR\x10versionHistoriesJ\x04\b\x01\x10\x02\"\xa2\x04\n" + "\x04Task\x12!\n" + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + "\vworkflow_id\x18\x02 \x01(\tR\n" + @@ -696,7 +705,8 @@ const file_temporal_server_api_token_v1_message_proto_rawDesc = "" + " \x01(\x03R\x0estartedEventId\x12\x18\n" + "\aversion\x18\v \x01(\x03R\aversion\x12=\n" + "\fstarted_time\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12#\n" + - "\rstart_version\x18\r \x01(\x03R\fstartVersion\"f\n" + + "\rstart_version\x18\r \x01(\x03R\fstartVersion\x12#\n" + + "\rcomponent_ref\x18\x0e \x01(\fR\fcomponentRef\"f\n" + "\tQueryTask\x12!\n" + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + "\n" + diff --git a/chasm/context.go b/chasm/context.go index 6f13f6affd..14f6e08e7d 100644 --- a/chasm/context.go +++ b/chasm/context.go @@ -21,6 +21,7 @@ type Context interface { // Intent() OperationIntent // 
ComponentOptions(Component) []ComponentOption + structuredRef(Component) (ComponentRef, error) getContext() context.Context } @@ -94,6 +95,10 @@ func (c *immutableCtx) Ref(component Component) ([]byte, error) { return c.root.Ref(component) } +func (c *immutableCtx) structuredRef(component Component) (ComponentRef, error) { + return c.root.structuredRef(component) +} + func (c *immutableCtx) Now(component Component) time.Time { return c.root.Now(component) } diff --git a/chasm/context_mock.go b/chasm/context_mock.go index 8cf27a3bc7..de223411a0 100644 --- a/chasm/context_mock.go +++ b/chasm/context_mock.go @@ -31,6 +31,10 @@ func (c *MockContext) Ref(cmp Component) ([]byte, error) { return nil, nil } +func (c *MockContext) structuredRef(cmp Component) (ComponentRef, error) { + return ComponentRef{}, nil +} + func (c *MockContext) ExecutionKey() ExecutionKey { if c.HandleExecutionKey != nil { return c.HandleExecutionKey() diff --git a/chasm/engine.go b/chasm/engine.go index 7711967c53..f7043dea68 100644 --- a/chasm/engine.go +++ b/chasm/engine.go @@ -42,10 +42,12 @@ type Engine interface { PollComponent( context.Context, ComponentRef, - func(Context, Component) (any, bool, error), - func(MutableContext, Component, any) error, + func(Context, Component) (bool, error), ...TransitionOption, ) ([]byte, error) + + // NotifyExecution notifies any PollComponent callers waiting on the execution. + NotifyExecution(ExecutionKey) } type BusinessIDReusePolicy int @@ -178,6 +180,9 @@ func UpdateWithNewExecution[C Component, I any, O1 any, O2 any]( // - consider remove ComponentRef from the return value and allow components to get // the ref in the transition function. There are some caveats there, check the // comment of the NewRef method in MutableContext. +// +// UpdateComponent applies updateFn to the component identified by the supplied component reference. +// It returns the result, along with the new component reference. opts are currently ignored. 
func UpdateComponent[C any, R []byte | ComponentRef, I any, O any]( ctx context.Context, r R, @@ -209,6 +214,8 @@ func UpdateComponent[C any, R []byte | ComponentRef, I any, O any]( return output, newSerializedRef, err } +// ReadComponent returns the result of evaluating readFn against the component identified by the +// component reference. opts are currently ignored. func ReadComponent[C any, R []byte | ComponentRef, I any, O any]( ctx context.Context, r R, @@ -236,11 +243,18 @@ func ReadComponent[C any, R []byte | ComponentRef, I any, O any]( return output, err } -func PollComponent[C any, R []byte | ComponentRef, I any, O any, T any]( +// PollComponent waits until the predicate is true when evaluated against the component identified +// by the supplied component reference. If this times out due to a server-imposed long-poll timeout +// then it returns (nil, nil, nil), as an indication that the caller should continue long-polling. +// Otherwise it returns (output, ref, err), where output is the output of the predicate function, +// and ref is a component reference identifying the state at which the predicate was satisfied. The +// predicate must be monotonic: if it returns true at execution state transition s then it must +// return true at all transitions t > s. If the predicate is true at the outset then PollComponent +// returns immediately. opts are currently ignored. 
+func PollComponent[C any, R []byte | ComponentRef, I any, O any]( ctx context.Context, r R, - predicateFn func(C, Context, I) (T, bool, error), - operationFn func(C, MutableContext, I, T) (O, error), + monotonicPredicate func(C, Context, I) (O, bool, error), input I, opts ...TransitionOption, ) (O, []byte, error) { @@ -254,13 +268,12 @@ func PollComponent[C any, R []byte | ComponentRef, I any, O any, T any]( newSerializedRef, err := engineFromContext(ctx).PollComponent( ctx, ref, - func(ctx Context, c Component) (any, bool, error) { - return predicateFn(c.(C), ctx, input) - }, - func(ctx MutableContext, c Component, t any) error { - var err error - output, err = operationFn(c.(C), ctx, input, t.(T)) - return err + func(ctx Context, c Component) (bool, error) { + out, satisfied, err := monotonicPredicate(c.(C), ctx, input) + if satisfied { + output = out + } + return satisfied, err }, opts..., ) diff --git a/chasm/engine_mock.go b/chasm/engine_mock.go index cafdd48efd..9f2e0797eb 100644 --- a/chasm/engine_mock.go +++ b/chasm/engine_mock.go @@ -61,11 +61,23 @@ func (mr *MockEngineMockRecorder) NewExecution(arg0, arg1, arg2 any, arg3 ...any return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewExecution", reflect.TypeOf((*MockEngine)(nil).NewExecution), varargs...) } +// NotifyExecution mocks base method. +func (m *MockEngine) NotifyExecution(arg0 ExecutionKey) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "NotifyExecution", arg0) +} + +// NotifyExecution indicates an expected call of NotifyExecution. +func (mr *MockEngineMockRecorder) NotifyExecution(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NotifyExecution", reflect.TypeOf((*MockEngine)(nil).NotifyExecution), arg0) +} + // PollComponent mocks base method. 
-func (m *MockEngine) PollComponent(arg0 context.Context, arg1 ComponentRef, arg2 func(Context, Component) (any, bool, error), arg3 func(MutableContext, Component, any) error, arg4 ...TransitionOption) ([]byte, error) { +func (m *MockEngine) PollComponent(arg0 context.Context, arg1 ComponentRef, arg2 func(Context, Component) (bool, error), arg3 ...TransitionOption) ([]byte, error) { m.ctrl.T.Helper() - varargs := []any{arg0, arg1, arg2, arg3} - for _, a := range arg4 { + varargs := []any{arg0, arg1, arg2} + for _, a := range arg3 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "PollComponent", varargs...) @@ -75,9 +87,9 @@ func (m *MockEngine) PollComponent(arg0 context.Context, arg1 ComponentRef, arg2 } // PollComponent indicates an expected call of PollComponent. -func (mr *MockEngineMockRecorder) PollComponent(arg0, arg1, arg2, arg3 any, arg4 ...any) *gomock.Call { +func (mr *MockEngineMockRecorder) PollComponent(arg0, arg1, arg2 any, arg3 ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]any{arg0, arg1, arg2, arg3}, arg4...) + varargs := append([]any{arg0, arg1, arg2}, arg3...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PollComponent", reflect.TypeOf((*MockEngine)(nil).PollComponent), varargs...) 
} diff --git a/chasm/lib/activity/activity.go b/chasm/lib/activity/activity.go new file mode 100644 index 0000000000..bb47278286 --- /dev/null +++ b/chasm/lib/activity/activity.go @@ -0,0 +1,912 @@ +package activity + +import ( + "fmt" + "slices" + "time" + + apiactivitypb "go.temporal.io/api/activity/v1" //nolint:importas + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + failurepb "go.temporal.io/api/failure/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/matchingservice/v1" + tokenspb "go.temporal.io/server/api/token/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/tqid" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // WorkflowTypeTag is a required workflow tag for standalone activities to ensure consistent + // metric labeling between workflows and activities. 
+ WorkflowTypeTag = "__temporal_standalone_activity__" + + TypeSAAlias = "ActivityType" + StatusSAAlias = "ActivityStatus" + TaskQueueSAAlias = "ActivityTaskQueue" +) + +var ( + TypeSearchAttribute = chasm.NewSearchAttributeKeyword(TypeSAAlias, chasm.SearchAttributeFieldKeyword01) + StatusSearchAttribute = chasm.NewSearchAttributeKeyword(StatusSAAlias, chasm.SearchAttributeFieldLowCardinalityKeyword01) + TaskQueueSearchAttribute = chasm.NewSearchAttributeKeyword(TaskQueueSAAlias, chasm.SearchAttributeFieldKeyword02) +) + +var _ chasm.VisibilitySearchAttributesProvider = (*Activity)(nil) + +type ActivityStore interface { + // PopulateRecordStartedResponse populates the response for RecordActivityTaskStarted + PopulateRecordStartedResponse(ctx chasm.Context, key chasm.ExecutionKey, response *historyservice.RecordActivityTaskStartedResponse) error + + // RecordCompleted applies the provided function to record activity completion + RecordCompleted(ctx chasm.MutableContext, applyFn func(ctx chasm.MutableContext) error) error +} + +// Activity component represents an activity execution persistence object and can be either standalone activity or one +// embedded within a workflow. +type Activity struct { + chasm.UnimplementedComponent + + *activitypb.ActivityState + + Visibility chasm.Field[*chasm.Visibility] + LastAttempt chasm.Field[*activitypb.ActivityAttemptState] + LastHeartbeat chasm.Field[*activitypb.ActivityHeartbeatState] + // Standalone only + RequestData chasm.Field[*activitypb.ActivityRequestData] + Outcome chasm.Field[*activitypb.ActivityOutcome] + // Pointer to an implementation of the "store". For a workflow activity this would be a parent + // pointer back to the workflow. For a standalone activity this is nil (Activity itself + // implements the ActivityStore interface). + // TODO(saa-preview): revisit a standalone activity pointing to itself once we handle storing it more efficiently. + // TODO(saa-preview): figure out better naming. 
+ Store chasm.Field[ActivityStore] +} + +// WithToken wraps a request with its deserialized task token. +type WithToken[R any] struct { + Token *tokenspb.Task + Request R +} + +// MetricsHandlerBuilderParams contains parameters for building/enriching a metrics handler for activity operations +type MetricsHandlerBuilderParams struct { + Handler metrics.Handler + NamespaceName string + BreakdownMetricsByTaskQueue dynamicconfig.TypedPropertyFnWithTaskQueueFilter[bool] +} + +// RespondCompletedEvent wraps the RespondActivityTaskCompletedRequest with context-specific data. +type RespondCompletedEvent struct { + Request *historyservice.RespondActivityTaskCompletedRequest + Token *tokenspb.Task + MetricsHandlerBuilderParams MetricsHandlerBuilderParams +} + +// RespondFailedEvent wraps the RespondActivityTaskFailedRequest with context-specific data. +type RespondFailedEvent struct { + Request *historyservice.RespondActivityTaskFailedRequest + Token *tokenspb.Task + MetricsHandlerBuilderParams MetricsHandlerBuilderParams +} + +// RespondCancelledEvent wraps the RespondActivityTaskCanceledRequest with context-specific data. +type RespondCancelledEvent struct { + Request *historyservice.RespondActivityTaskCanceledRequest + Token *tokenspb.Task + MetricsHandlerBuilderParams MetricsHandlerBuilderParams +} + +// requestCancelEvent wraps the RequestCancelActivityExecutionRequest with context-specific data. +type requestCancelEvent struct { + request *activitypb.RequestCancelActivityExecutionRequest + MetricsHandlerBuilderParams MetricsHandlerBuilderParams +} + +// terminateEvent wraps the TerminateActivityExecutionRequest with context-specific data. 
+type terminateEvent struct { + request *activitypb.TerminateActivityExecutionRequest + MetricsHandlerBuilderParams MetricsHandlerBuilderParams +} + +func (a *Activity) LifecycleState(_ chasm.Context) chasm.LifecycleState { + switch a.Status { + case activitypb.ACTIVITY_EXECUTION_STATUS_COMPLETED: + return chasm.LifecycleStateCompleted + case activitypb.ACTIVITY_EXECUTION_STATUS_FAILED, + activitypb.ACTIVITY_EXECUTION_STATUS_TERMINATED, + activitypb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT, + activitypb.ACTIVITY_EXECUTION_STATUS_CANCELED: + return chasm.LifecycleStateFailed + default: + return chasm.LifecycleStateRunning + } +} + +// NewStandaloneActivity creates a new activity component and adds associated tasks to start execution. +func NewStandaloneActivity( + ctx chasm.MutableContext, + request *workflowservice.StartActivityExecutionRequest, +) (*Activity, error) { + visibility := chasm.NewVisibilityWithData( + ctx, + request.GetSearchAttributes().GetIndexedFields(), + nil, + ) + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: request.ActivityType, + TaskQueue: request.GetTaskQueue(), + ScheduleToCloseTimeout: request.GetScheduleToCloseTimeout(), + ScheduleToStartTimeout: request.GetScheduleToStartTimeout(), + StartToCloseTimeout: request.GetStartToCloseTimeout(), + HeartbeatTimeout: request.GetHeartbeatTimeout(), + RetryPolicy: request.GetRetryPolicy(), + Priority: request.Priority, + }, + LastAttempt: chasm.NewDataField(ctx, &activitypb.ActivityAttemptState{}), + RequestData: chasm.NewDataField(ctx, &activitypb.ActivityRequestData{ + Input: request.Input, + Header: request.Header, + UserMetadata: request.UserMetadata, + }), + Outcome: chasm.NewDataField(ctx, &activitypb.ActivityOutcome{}), + Visibility: chasm.NewComponentField(ctx, visibility), + } + + activity.ScheduleTime = timestamppb.New(ctx.Now(activity)) + + return activity, nil +} + +func NewEmbeddedActivity( + ctx chasm.MutableContext, + state *activitypb.ActivityState, 
+ parent ActivityStore, +) { +} + +func (a *Activity) createAddActivityTaskRequest(ctx chasm.Context, namespaceID string) (*matchingservice.AddActivityTaskRequest, error) { + // Get latest component ref and unmarshal into proto ref + componentRef, err := ctx.Ref(a) + if err != nil { + return nil, err + } + + // Note: No need to set the vector clock here, as the components track version conflicts for read/write + // TODO: Need to fill in VersionDirective once we decide how to handle versioning for standalone activities + return &matchingservice.AddActivityTaskRequest{ + NamespaceId: namespaceID, + ScheduleToStartTimeout: a.ScheduleToStartTimeout, + TaskQueue: a.GetTaskQueue(), + Priority: a.GetPriority(), + ComponentRef: componentRef, + }, nil +} + +// HandleStarted updates the activity on recording activity task started and populates the response. +func (a *Activity) HandleStarted(ctx chasm.MutableContext, request *historyservice.RecordActivityTaskStartedRequest) ( + *historyservice.RecordActivityTaskStartedResponse, error, +) { + if err := TransitionStarted.Apply(a, ctx, request); err != nil { + return nil, err + } + response := &historyservice.RecordActivityTaskStartedResponse{} + err := a.StoreOrSelf(ctx).PopulateRecordStartedResponse(ctx, ctx.ExecutionKey(), response) + return response, err +} + +// PopulateRecordStartedResponse populates the response for HandleStarted. 
+func (a *Activity) PopulateRecordStartedResponse(ctx chasm.Context, key chasm.ExecutionKey, response *historyservice.RecordActivityTaskStartedResponse) error { + lastHeartbeat, _ := a.LastHeartbeat.TryGet(ctx) + if lastHeartbeat != nil { + response.HeartbeatDetails = lastHeartbeat.GetDetails() + } + requestData := a.RequestData.Get(ctx) + attempt := a.LastAttempt.Get(ctx) + response.StartedTime = attempt.GetStartedTime() + response.Attempt = attempt.GetCount() + response.Priority = a.GetPriority() + response.RetryPolicy = a.GetRetryPolicy() + response.ScheduledEvent = &historypb.HistoryEvent{ + EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, + Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ + ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ + ActivityId: key.BusinessID, + ActivityType: a.GetActivityType(), + Input: requestData.GetInput(), + Header: requestData.GetHeader(), + TaskQueue: a.GetTaskQueue(), + ScheduleToCloseTimeout: a.GetScheduleToCloseTimeout(), + ScheduleToStartTimeout: a.GetScheduleToStartTimeout(), + StartToCloseTimeout: a.GetStartToCloseTimeout(), + HeartbeatTimeout: a.GetHeartbeatTimeout(), + }, + }, + } + return nil +} + +// RecordCompleted applies the provided function to record activity completion. +func (a *Activity) RecordCompleted(ctx chasm.MutableContext, applyFn func(ctx chasm.MutableContext) error) error { + return applyFn(ctx) +} + +// HandleCompleted updates the activity on activity completion. 
+func (a *Activity) HandleCompleted( + ctx chasm.MutableContext, + event RespondCompletedEvent, +) (*historyservice.RespondActivityTaskCompletedResponse, error) { + // TODO(saa-preview): add test coverage for this validation + if err := a.validateActivityTaskToken(ctx, event.Token); err != nil { + return nil, err + } + + metricsHandler := enrichMetricsHandler( + a, + event.MetricsHandlerBuilderParams.Handler, + event.MetricsHandlerBuilderParams.NamespaceName, + metrics.HistoryRespondActivityTaskCompletedScope, + event.MetricsHandlerBuilderParams.BreakdownMetricsByTaskQueue) + + if err := TransitionCompleted.Apply(a, ctx, completeEvent{ + req: event.Request, + metricsHandler: metricsHandler, + }); err != nil { + return nil, err + } + + return &historyservice.RespondActivityTaskCompletedResponse{}, nil +} + +// HandleFailed updates the activity on activity failure. if the activity is retryable, it will be rescheduled +// for retry instead. +func (a *Activity) HandleFailed( + ctx chasm.MutableContext, + event RespondFailedEvent, +) (*historyservice.RespondActivityTaskFailedResponse, error) { + // TODO(saa-preview): add test coverage for this validation + if err := a.validateActivityTaskToken(ctx, event.Token); err != nil { + return nil, err + } + + metricsHandler := enrichMetricsHandler( + a, + event.MetricsHandlerBuilderParams.Handler, + event.MetricsHandlerBuilderParams.NamespaceName, + metrics.HistoryRespondActivityTaskFailedScope, + event.MetricsHandlerBuilderParams.BreakdownMetricsByTaskQueue) + + failure := event.Request.GetFailedRequest().GetFailure() + + appFailure := failure.GetApplicationFailureInfo() + isRetryable := appFailure != nil && + !appFailure.GetNonRetryable() && + !slices.Contains(a.GetRetryPolicy().GetNonRetryableErrorTypes(), appFailure.GetType()) + + if isRetryable { + rescheduled, err := a.tryReschedule(ctx, appFailure.GetNextRetryDelay().AsDuration(), failure) + if err != nil { + return nil, err + } + if rescheduled { + 
a.emitOnAttemptFailedMetrics(ctx, metricsHandler) + + return &historyservice.RespondActivityTaskFailedResponse{}, nil + } + } + + if err := TransitionFailed.Apply(a, ctx, failedEvent{ + req: event.Request, + metricsHandler: metricsHandler, + }); err != nil { + return nil, err + } + + return &historyservice.RespondActivityTaskFailedResponse{}, nil +} + +// HandleCanceled updates the activity on activity canceled. +func (a *Activity) HandleCanceled( + ctx chasm.MutableContext, + event RespondCancelledEvent, +) (*historyservice.RespondActivityTaskCanceledResponse, error) { + // TODO(saa-preview): add test coverage for this validation + if err := a.validateActivityTaskToken(ctx, event.Token); err != nil { + return nil, err + } + + metricsHandler := enrichMetricsHandler( + a, + event.MetricsHandlerBuilderParams.Handler, + event.MetricsHandlerBuilderParams.NamespaceName, + metrics.HistoryRespondActivityTaskCanceledScope, + event.MetricsHandlerBuilderParams.BreakdownMetricsByTaskQueue) + + if err := TransitionCanceled.Apply(a, ctx, cancelEvent{ + details: event.Request.GetCancelRequest().GetDetails(), + handler: metricsHandler, + fromStatus: a.GetStatus(), + }); err != nil { + return nil, err + } + + return &historyservice.RespondActivityTaskCanceledResponse{}, nil +} + +func (a *Activity) handleTerminated(ctx chasm.MutableContext, req terminateEvent) ( + *activitypb.TerminateActivityExecutionResponse, error, +) { + frontendReq := req.request.GetFrontendRequest() + + // If already in terminated state, fail if request ID is different, else no-op + if a.GetStatus() == activitypb.ACTIVITY_EXECUTION_STATUS_TERMINATED { + newReqID := frontendReq.GetRequestId() + existingReqID := a.GetTerminateState().GetRequestId() + + if existingReqID != newReqID { + return nil, serviceerror.NewFailedPrecondition( + fmt.Sprintf("already terminated with request ID %s", existingReqID)) + } + + return &activitypb.TerminateActivityExecutionResponse{}, nil + } + + if err := 
TransitionTerminated.Apply(a, ctx, req); err != nil { + return nil, err + } + + return &activitypb.TerminateActivityExecutionResponse{}, nil +} + +// getOrCreateLastHeartbeat retrieves the last heartbeat state, initializing it if not present. The heartbeat is lazily created +// to avoid unnecessary writes when heartbeats are not used. +func (a *Activity) getOrCreateLastHeartbeat(ctx chasm.MutableContext) *activitypb.ActivityHeartbeatState { + heartbeat, ok := a.LastHeartbeat.TryGet(ctx) + if !ok { + heartbeat = &activitypb.ActivityHeartbeatState{} + a.LastHeartbeat = chasm.NewDataField(ctx, heartbeat) + } + return heartbeat +} + +func (a *Activity) handleCancellationRequested(ctx chasm.MutableContext, event requestCancelEvent) ( + *activitypb.RequestCancelActivityExecutionResponse, error, +) { + req := event.request.GetFrontendRequest() + newReqID := req.GetRequestId() + existingReqID := a.GetCancelState().GetRequestId() + + // If already in cancel requested state, fail if request ID is different, else no-op + if a.GetStatus() == activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED { + if existingReqID != newReqID { + return nil, serviceerror.NewFailedPrecondition( + fmt.Sprintf("cancellation already requested with request ID %s", existingReqID)) + } + + return &activitypb.RequestCancelActivityExecutionResponse{}, nil + } + + // If in scheduled state, cancel immediately right after marking cancel requested + isCancelImmediately := a.GetStatus() == activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED + + if err := TransitionCancelRequested.Apply(a, ctx, req); err != nil { + return nil, err + } + + if isCancelImmediately { + details := &commonpb.Payloads{ + Payloads: []*commonpb.Payload{ + payload.EncodeString(req.GetReason()), + }, + } + + metricsHandler := enrichMetricsHandler( + a, + event.MetricsHandlerBuilderParams.Handler, + event.MetricsHandlerBuilderParams.NamespaceName, + metrics.HistoryRespondActivityTaskCanceledScope, + 
event.MetricsHandlerBuilderParams.BreakdownMetricsByTaskQueue) + + err := TransitionCanceled.Apply(a, ctx, cancelEvent{ + details: details, + handler: metricsHandler, + fromStatus: activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, // if we're here the original status was scheduled + }) + if err != nil { + return nil, err + } + } + + return &activitypb.RequestCancelActivityExecutionResponse{}, nil +} + +// recordScheduleToStartOrCloseTimeoutFailure records schedule-to-start or schedule-to-close timeouts. Such timeouts are not retried so we +// set the outcome failure directly and leave the attempt failure as is. +func (a *Activity) recordScheduleToStartOrCloseTimeoutFailure(ctx chasm.MutableContext, timeoutType enumspb.TimeoutType) error { + outcome := a.Outcome.Get(ctx) + + failure := &failurepb.Failure{ + Message: fmt.Sprintf(common.FailureReasonActivityTimeout, timeoutType.String()), + FailureInfo: &failurepb.Failure_TimeoutFailureInfo{ + TimeoutFailureInfo: &failurepb.TimeoutFailureInfo{ + TimeoutType: timeoutType, + }, + }, + } + + outcome.Variant = &activitypb.ActivityOutcome_Failed_{ + Failed: &activitypb.ActivityOutcome_Failed{ + Failure: failure, + }, + } + + return nil +} + +// recordFailedAttempt records any failures resulting from a tried attempt, including worker application failures and +// start-to-close timeouts. Since the calls come from retried attempts we update the attempt failure info but leave +// the outcome failure empty to avoid duplication. 
+func (a *Activity) recordFailedAttempt( + ctx chasm.MutableContext, + retryInterval time.Duration, + failure *failurepb.Failure, + currentTime time.Time, + noRetriesLeft bool, +) error { + attempt := a.LastAttempt.Get(ctx) + + attempt.LastFailureDetails = &activitypb.ActivityAttemptState_LastFailureDetails{ + Failure: failure, + Time: timestamppb.New(currentTime), + } + attempt.CompleteTime = timestamppb.New(currentTime) + + if noRetriesLeft { + attempt.CurrentRetryInterval = nil + } else { + attempt.CurrentRetryInterval = durationpb.New(retryInterval) + } + return nil +} + +// tryReschedule attempts to reschedule the activity for retry. Returns true if rescheduled, false +// if retry is not possible. +func (a *Activity) tryReschedule( + ctx chasm.MutableContext, + overridingRetryInterval time.Duration, + failure *failurepb.Failure, +) (bool, error) { + shouldRetry, retryInterval := a.shouldRetry(ctx, overridingRetryInterval) + if !shouldRetry { + return false, nil + } + return true, TransitionRescheduled.Apply(a, ctx, rescheduleEvent{ + retryInterval: retryInterval, + failure: failure, + }) +} + +func (a *Activity) shouldRetry(ctx chasm.Context, overridingRetryInterval time.Duration) (bool, time.Duration) { + if !TransitionRescheduled.Possible(a) { + return false, 0 + } + attempt := a.LastAttempt.Get(ctx) + retryPolicy := a.RetryPolicy + + enoughAttempts := retryPolicy.GetMaximumAttempts() == 0 || attempt.GetCount() < retryPolicy.GetMaximumAttempts() + enoughTime, retryInterval := a.hasEnoughTimeForRetry(ctx, overridingRetryInterval) + return enoughAttempts && enoughTime, retryInterval +} + +// hasEnoughTimeForRetry checks if there is enough time left in the schedule-to-close timeout. If sufficient time +// remains, it will also return a valid retry interval. 
+func (a *Activity) hasEnoughTimeForRetry(ctx chasm.Context, overridingRetryInterval time.Duration) (bool, time.Duration) { + attempt := a.LastAttempt.Get(ctx) + + // Use overriding retry interval if provided, else calculate based on retry policy + retryInterval := overridingRetryInterval + if retryInterval <= 0 { + retryInterval = backoff.CalculateExponentialRetryInterval(a.RetryPolicy, attempt.Count) + } + + scheduleToClose := a.GetScheduleToCloseTimeout().AsDuration() + if scheduleToClose == 0 { + return true, retryInterval + } + + deadline := a.ScheduleTime.AsTime().Add(scheduleToClose) + return ctx.Now(a).Add(retryInterval).Before(deadline), retryInterval +} + +func createStartToCloseTimeoutFailure() *failurepb.Failure { + return &failurepb.Failure{ + Message: fmt.Sprintf(common.FailureReasonActivityTimeout, enumspb.TIMEOUT_TYPE_START_TO_CLOSE.String()), + FailureInfo: &failurepb.Failure_TimeoutFailureInfo{ + TimeoutFailureInfo: &failurepb.TimeoutFailureInfo{ + TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, + }, + }, + } +} + +func createHeartbeatTimeoutFailure() *failurepb.Failure { + return &failurepb.Failure{ + Message: fmt.Sprintf(common.FailureReasonActivityTimeout, enumspb.TIMEOUT_TYPE_HEARTBEAT.String()), + FailureInfo: &failurepb.Failure_TimeoutFailureInfo{ + TimeoutFailureInfo: &failurepb.TimeoutFailureInfo{ + TimeoutType: enumspb.TIMEOUT_TYPE_HEARTBEAT, + }, + }, + } +} + +// RecordHeartbeat records a heartbeat for the activity. 
+func (a *Activity) RecordHeartbeat( + ctx chasm.MutableContext, + input WithToken[*historyservice.RecordActivityTaskHeartbeatRequest], +) (*historyservice.RecordActivityTaskHeartbeatResponse, error) { + err := a.validateActivityTaskToken(ctx, input.Token) + if err != nil { + return nil, err + } + a.LastHeartbeat = chasm.NewDataField(ctx, &activitypb.ActivityHeartbeatState{ + RecordedTime: timestamppb.New(ctx.Now(a)), + Details: input.Request.GetHeartbeatRequest().GetDetails(), + }) + ctx.AddTask( + a, + chasm.TaskAttributes{ + ScheduledTime: ctx.Now(a).Add(a.GetHeartbeatTimeout().AsDuration()), + }, + &activitypb.HeartbeatTimeoutTask{ + Attempt: a.LastAttempt.Get(ctx).GetCount(), + }, + ) + return &historyservice.RecordActivityTaskHeartbeatResponse{ + CancelRequested: a.Status == activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, + // TODO(saa-preview): ActivityPaused, ActivityReset + }, nil +} + +// InternalStatusToAPIStatus converts internal activity execution status to API status. 
func InternalStatusToAPIStatus(status activitypb.ActivityExecutionStatus) enumspb.ActivityExecutionStatus {
	switch status {
	// All pre-close statuses map to RUNNING in the public API.
	case activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED,
		activitypb.ACTIVITY_EXECUTION_STATUS_STARTED,
		activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED:
		return enumspb.ACTIVITY_EXECUTION_STATUS_RUNNING
	case activitypb.ACTIVITY_EXECUTION_STATUS_COMPLETED:
		return enumspb.ACTIVITY_EXECUTION_STATUS_COMPLETED
	case activitypb.ACTIVITY_EXECUTION_STATUS_FAILED:
		return enumspb.ACTIVITY_EXECUTION_STATUS_FAILED
	case activitypb.ACTIVITY_EXECUTION_STATUS_CANCELED:
		return enumspb.ACTIVITY_EXECUTION_STATUS_CANCELED
	case activitypb.ACTIVITY_EXECUTION_STATUS_TERMINATED:
		return enumspb.ACTIVITY_EXECUTION_STATUS_TERMINATED
	case activitypb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT:
		return enumspb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT
	case activitypb.ACTIVITY_EXECUTION_STATUS_UNSPECIFIED:
		return enumspb.ACTIVITY_EXECUTION_STATUS_UNSPECIFIED
	default:
		// Programmer error: a new status was added without updating this mapping.
		panic(fmt.Sprintf("unknown activity execution status: %v", status)) //nolint:forbidigo
	}
}

// internalStatusToRunState maps the internal status to the pending-activity run state used in
// describe responses. Closed statuses have no run state and map to UNSPECIFIED.
func internalStatusToRunState(status activitypb.ActivityExecutionStatus) enumspb.PendingActivityState {
	switch status {
	case activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED:
		return enumspb.PENDING_ACTIVITY_STATE_SCHEDULED
	case activitypb.ACTIVITY_EXECUTION_STATUS_STARTED:
		return enumspb.PENDING_ACTIVITY_STATE_STARTED
	case activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED:
		return enumspb.PENDING_ACTIVITY_STATE_CANCEL_REQUESTED
	case activitypb.ACTIVITY_EXECUTION_STATUS_COMPLETED,
		activitypb.ACTIVITY_EXECUTION_STATUS_FAILED,
		activitypb.ACTIVITY_EXECUTION_STATUS_CANCELED,
		activitypb.ACTIVITY_EXECUTION_STATUS_TERMINATED,
		activitypb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT,
		activitypb.ACTIVITY_EXECUTION_STATUS_UNSPECIFIED:
		return enumspb.PENDING_ACTIVITY_STATE_UNSPECIFIED
	default:
		// Programmer error: a new status was added without updating this mapping.
		panic(fmt.Sprintf("unknown activity execution status: %v", status)) //nolint:forbidigo
	}
}

// buildActivityExecutionInfo assembles the ActivityExecutionInfo returned by describe-style
// APIs from the activity's current persisted state.
func (a *Activity) buildActivityExecutionInfo(ctx chasm.Context) (*apiactivitypb.ActivityExecutionInfo, error) {
	// TODO(saa-preview): support pause states
	status := InternalStatusToAPIStatus(a.GetStatus())
	runState := internalStatusToRunState(a.GetStatus())

	requestData := a.RequestData.Get(ctx)
	attempt := a.LastAttempt.Get(ctx)
	heartbeat, _ := a.LastHeartbeat.TryGet(ctx)
	key := ctx.ExecutionKey()

	// The next attempt schedule time is derived (last attempt completion + retry interval)
	// rather than stored.
	// TODO(saa-preview): debating if we should persist next attempt schedule time for stronger consistency
	var nextAttemptScheduleTime *timestamppb.Timestamp
	interval := attempt.GetCurrentRetryInterval()
	completeTime := attempt.GetCompleteTime()
	if interval != nil && interval.AsDuration() > 0 && completeTime != nil {
		nextAttemptScheduleTime = timestamppb.New(completeTime.AsTime().Add(interval.AsDuration()))
	}

	// Close time / execution duration are only populated once the activity is closed; the last
	// attempt's completion time doubles as the activity close time.
	var closeTime *timestamppb.Timestamp
	var executionDuration = durationpb.New(0)
	if a.LifecycleState(ctx) != chasm.LifecycleStateRunning && attempt.GetCompleteTime() != nil {
		closeTime = attempt.GetCompleteTime()
		executionDuration = durationpb.New(closeTime.AsTime().Sub(a.GetScheduleTime().AsTime()))
	}

	// A zero schedule-to-close timeout means no expiration.
	var expirationTime *timestamppb.Timestamp
	if timeout := a.GetScheduleToCloseTimeout().AsDuration(); timeout > 0 {
		expirationTime = timestamppb.New(a.GetScheduleTime().AsTime().Add(timeout))
	}

	sa := &commonpb.SearchAttributes{
		IndexedFields: a.Visibility.Get(ctx).GetSearchAttributes(ctx),
	}

	info := &apiactivitypb.ActivityExecutionInfo{
		ActivityId:              key.BusinessID,
		ActivityType:            a.GetActivityType(),
		Attempt:                 attempt.GetCount(),
		CanceledReason:          a.CancelState.GetReason(),
		CloseTime:               closeTime,
		CurrentRetryInterval:    attempt.GetCurrentRetryInterval(),
		ExecutionDuration:       executionDuration,
		ExpirationTime:          expirationTime,
		Header:                  requestData.GetHeader(),
		HeartbeatDetails:        heartbeat.GetDetails(),
		HeartbeatTimeout:        a.GetHeartbeatTimeout(),
		LastAttemptCompleteTime: attempt.GetCompleteTime(),
		LastFailure:             attempt.GetLastFailureDetails().GetFailure(),
		LastHeartbeatTime:       heartbeat.GetRecordedTime(),
		LastStartedTime:         attempt.GetStartedTime(),
		LastWorkerIdentity:      attempt.GetLastWorkerIdentity(),
		NextAttemptScheduleTime: nextAttemptScheduleTime,
		Priority:                a.GetPriority(),
		RetryPolicy:             a.GetRetryPolicy(),
		RunId:                   key.RunID,
		RunState:                runState,
		ScheduleTime:            a.GetScheduleTime(),
		ScheduleToCloseTimeout:  a.GetScheduleToCloseTimeout(),
		ScheduleToStartTimeout:  a.GetScheduleToStartTimeout(),
		StartToCloseTimeout:     a.GetStartToCloseTimeout(),
		StateTransitionCount:    a.Visibility.Get(ctx).Data.TransitionCount,
		// TODO(saa-preview): StateSizeBytes?
		SearchAttributes: sa,
		Status:           status,
		TaskQueue:        a.GetTaskQueue().GetName(),
		UserMetadata:     requestData.GetUserMetadata(),
	}

	return info, nil
}

// buildDescribeActivityExecutionResponse builds the describe response, including a long-poll
// token referencing this component and, when requested, the input payloads and outcome.
func (a *Activity) buildDescribeActivityExecutionResponse(
	ctx chasm.Context,
	req *activitypb.DescribeActivityExecutionRequest,
) (*activitypb.DescribeActivityExecutionResponse, error) {
	request := req.GetFrontendRequest()

	token, err := ctx.Ref(a)
	if err != nil {
		return nil, err
	}

	info, err := a.buildActivityExecutionInfo(ctx)
	if err != nil {
		return nil, err
	}

	// Input payloads are only returned when explicitly requested.
	var input *commonpb.Payloads
	if request.GetIncludeInput() {
		input = a.RequestData.Get(ctx).GetInput()
	}

	response := &workflowservice.DescribeActivityExecutionResponse{
		Info:          info,
		RunId:         ctx.ExecutionKey().RunID,
		Input:         input,
		LongPollToken: token,
	}

	if request.GetIncludeOutcome() {
		response.Outcome = a.outcome(ctx)
	}

	return &activitypb.DescribeActivityExecutionResponse{
		FrontendResponse: response,
	}, nil
}

// buildPollActivityExecutionResponse builds the poll response carrying the activity outcome
// (nil while the activity is still running).
func (a *Activity) buildPollActivityExecutionResponse(
	ctx chasm.Context,
) (*activitypb.PollActivityExecutionResponse, error) {
	return &activitypb.PollActivityExecutionResponse{
		FrontendResponse: &workflowservice.PollActivityExecutionResponse{
			RunId:   ctx.ExecutionKey().RunID,
			Outcome: a.outcome(ctx),
		},
	}, nil
}

// outcome retrieves the activity outcome (result or failure) if the activity has completed.
// Returns nil if the activity has not completed.
func (a *Activity) outcome(ctx chasm.Context) *apiactivitypb.ActivityExecutionOutcome {
	if !a.LifecycleState(ctx).IsClosed() {
		return nil
	}
	activityOutcome := a.Outcome.Get(ctx)
	if successful := activityOutcome.GetSuccessful(); successful != nil {
		return &apiactivitypb.ActivityExecutionOutcome{
			Value: &apiactivitypb.ActivityExecutionOutcome_Result{Result: successful.GetOutput()},
		}
	}
	if failure := activityOutcome.GetFailed().GetFailure(); failure != nil {
		return &apiactivitypb.ActivityExecutionOutcome{
			Value: &apiactivitypb.ActivityExecutionOutcome_Failure{Failure: failure},
		}
	}
	// Fall back to the last attempt's failure when no terminal outcome failure was recorded.
	if details := a.LastAttempt.Get(ctx).GetLastFailureDetails(); details != nil {
		return &apiactivitypb.ActivityExecutionOutcome{
			Value: &apiactivitypb.ActivityExecutionOutcome_Failure{Failure: details.GetFailure()},
		}
	}
	return nil
}

// StoreOrSelf returns the store for the activity. If the store is not set as a field (e.g.
// standalone activities), it returns the activity itself.
func (a *Activity) StoreOrSelf(ctx chasm.Context) ActivityStore {
	store, ok := a.Store.TryGet(ctx)
	if ok {
		return store
	}
	return a
}

// validateActivityTaskToken validates a task token against the current activity state.
+func (a *Activity) validateActivityTaskToken( + ctx chasm.Context, + token *tokenspb.Task, +) error { + if a.Status != activitypb.ACTIVITY_EXECUTION_STATUS_STARTED && + a.Status != activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED { + return serviceerror.NewNotFound("activity task not found") + } + if token.Attempt != a.LastAttempt.Get(ctx).GetCount() { + return serviceerror.NewNotFound("activity task not found") + } + return nil +} + +func enrichMetricsHandler( + a *Activity, + handler metrics.Handler, + namespaceName string, + operationTag string, + breakdownMetricsByTaskQueue dynamicconfig.TypedPropertyFnWithTaskQueueFilter[bool], +) metrics.Handler { + taskQueueFamily := a.GetTaskQueue().GetName() + return metrics.GetPerTaskQueueFamilyScope( + handler, + namespaceName, + tqid.UnsafeTaskQueueFamily(namespaceName, taskQueueFamily), + breakdownMetricsByTaskQueue(namespaceName, taskQueueFamily, enumspb.TASK_QUEUE_TYPE_ACTIVITY), + metrics.OperationTag(operationTag), + metrics.ActivityTypeTag(a.GetActivityType().GetName()), + metrics.VersioningBehaviorTag(enumspb.VERSIONING_BEHAVIOR_UNSPECIFIED), + metrics.WorkflowTypeTag(WorkflowTypeTag), + ) +} + +func (a *Activity) emitOnAttemptTimedOutMetrics(ctx chasm.Context, handler metrics.Handler, timeoutType enumspb.TimeoutType) { + attempt := a.LastAttempt.Get(ctx) + startedTime := attempt.GetStartedTime().AsTime() + + latency := time.Since(startedTime) + metrics.ActivityStartToCloseLatency.With(handler).Record(latency) + + timeoutTag := metrics.StringTag("timeout_type", timeoutType.String()) + metrics.ActivityTaskTimeout.With(handler).Record(1, timeoutTag) +} + +func (a *Activity) emitOnAttemptFailedMetrics(ctx chasm.Context, handler metrics.Handler) { + attempt := a.LastAttempt.Get(ctx) + startedTime := attempt.GetStartedTime().AsTime() + + latency := time.Since(startedTime) + metrics.ActivityStartToCloseLatency.With(handler).Record(latency) + + metrics.ActivityTaskFail.With(handler).Record(1) +} + +func (a 
*Activity) emitOnCompletedMetrics(ctx chasm.Context, handler metrics.Handler) { + attempt := a.LastAttempt.Get(ctx) + startedTime := attempt.GetStartedTime().AsTime() + + startToCloseLatency := time.Since(startedTime) + metrics.ActivityStartToCloseLatency.With(handler).Record(startToCloseLatency) + + scheduleToCloseLatency := time.Since(a.GetScheduleTime().AsTime()) + metrics.ActivityScheduleToCloseLatency.With(handler).Record(scheduleToCloseLatency) + + metrics.ActivitySuccess.With(handler).Record(1) +} + +func (a *Activity) emitOnFailedMetrics(ctx chasm.Context, handler metrics.Handler) { + attempt := a.LastAttempt.Get(ctx) + startedTime := attempt.GetStartedTime().AsTime() + + startToCloseLatency := time.Since(startedTime) + metrics.ActivityStartToCloseLatency.With(handler).Record(startToCloseLatency) + + scheduleToCloseLatency := time.Since(a.GetScheduleTime().AsTime()) + metrics.ActivityScheduleToCloseLatency.With(handler).Record(scheduleToCloseLatency) + + metrics.ActivityTaskFail.With(handler).Record(1) + metrics.ActivityFail.With(handler).Record(1) +} + +func (a *Activity) emitOnCanceledMetrics( + ctx chasm.Context, + handler metrics.Handler, + fromStatus activitypb.ActivityExecutionStatus, +) { + // Only record start-to-close latency if a current attempt was running. If it in scheduled status, it means the current attempt never started. 
+ if fromStatus != activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED { + startedTime := a.LastAttempt.Get(ctx).GetStartedTime().AsTime() + startToCloseLatency := time.Since(startedTime) + metrics.ActivityStartToCloseLatency.With(handler).Record(startToCloseLatency) + } + + scheduleToCloseLatency := time.Since(a.GetScheduleTime().AsTime()) + metrics.ActivityScheduleToCloseLatency.With(handler).Record(scheduleToCloseLatency) + + metrics.ActivityCancel.With(handler).Record(1) +} + +func (a *Activity) emitOnTimedOutMetrics( + ctx chasm.Context, + handler metrics.Handler, + timeoutType enumspb.TimeoutType, + fromStatus activitypb.ActivityExecutionStatus, +) { + // Only record start-to-close latency if a current attempt was running. If it in scheduled status, it means the current attempt never started. + if fromStatus != activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED { + startedTime := a.LastAttempt.Get(ctx).GetStartedTime().AsTime() + startToCloseLatency := time.Since(startedTime) + metrics.ActivityStartToCloseLatency.With(handler).Record(startToCloseLatency) + } + + scheduleToCloseLatency := time.Since(a.GetScheduleTime().AsTime()) + metrics.ActivityScheduleToCloseLatency.With(handler).Record(scheduleToCloseLatency) + + timeoutTag := metrics.StringTag("timeout_type", timeoutType.String()) + metrics.ActivityTaskTimeout.With(handler).Record(1, timeoutTag) + metrics.ActivityTimeout.With(handler).Record(1, timeoutTag) +} + +// SearchAttributes implements chasm.VisibilitySearchAttributesProvider interface. +// Returns the current search attribute values for this activity execution. 
+func (a *Activity) SearchAttributes(_ chasm.Context) []chasm.SearchAttributeKeyValue { + return []chasm.SearchAttributeKeyValue{ + TypeSearchAttribute.Value(a.GetActivityType().GetName()), + StatusSearchAttribute.Value(InternalStatusToAPIStatus(a.GetStatus()).String()), + TaskQueueSearchAttribute.Value(a.GetTaskQueue().GetName()), + } +} diff --git a/chasm/lib/activity/activity_tasks.go b/chasm/lib/activity/activity_tasks.go new file mode 100644 index 0000000000..0403b9a3b5 --- /dev/null +++ b/chasm/lib/activity/activity_tasks.go @@ -0,0 +1,315 @@ +package activity + +import ( + "context" + + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/resource" + "go.temporal.io/server/common/util" + "go.uber.org/fx" +) + +type activityDispatchTaskExecutorOptions struct { + fx.In + + MatchingClient resource.MatchingClient +} + +type activityDispatchTaskExecutor struct { + opts activityDispatchTaskExecutorOptions +} + +func newActivityDispatchTaskExecutor(opts activityDispatchTaskExecutorOptions) *activityDispatchTaskExecutor { + return &activityDispatchTaskExecutor{ + opts, + } +} + +func (e *activityDispatchTaskExecutor) Validate( + ctx chasm.Context, + activity *Activity, + _ chasm.TaskAttributes, + task *activitypb.ActivityDispatchTask, +) (bool, error) { + // TODO(saa-preview): make sure we handle resets when we support them, as they will reset the attempt count + return (TransitionStarted.Possible(activity) && + task.Attempt == activity.LastAttempt.Get(ctx).GetCount()), nil +} + +func (e *activityDispatchTaskExecutor) Execute( + ctx context.Context, + activityRef chasm.ComponentRef, + _ chasm.TaskAttributes, + _ *activitypb.ActivityDispatchTask, +) error { + request, err := chasm.ReadComponent( + ctx, + activityRef, + (*Activity).createAddActivityTaskRequest, + 
activityRef.NamespaceID, + ) + if err != nil { + return err + } + + _, err = e.opts.MatchingClient.AddActivityTask(ctx, request) + + return err +} + +type timeoutTaskExecutorOptions struct { + fx.In + + Config *Config + MetricsHandler metrics.Handler + NamespaceRegistry namespace.Registry +} + +type scheduleToStartTimeoutTaskExecutor struct { + opts timeoutTaskExecutorOptions +} + +func newScheduleToStartTimeoutTaskExecutor(opts timeoutTaskExecutorOptions) *scheduleToStartTimeoutTaskExecutor { + return &scheduleToStartTimeoutTaskExecutor{ + opts, + } +} + +func (e *scheduleToStartTimeoutTaskExecutor) Validate( + ctx chasm.Context, + activity *Activity, + _ chasm.TaskAttributes, + task *activitypb.ScheduleToStartTimeoutTask, +) (bool, error) { + return (activity.Status == activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED && + task.Attempt == activity.LastAttempt.Get(ctx).GetCount()), nil +} + +func (e *scheduleToStartTimeoutTaskExecutor) Execute( + ctx chasm.MutableContext, + activity *Activity, + _ chasm.TaskAttributes, + _ *activitypb.ScheduleToStartTimeoutTask, +) error { + nsID := namespace.ID(ctx.ExecutionKey().NamespaceID) + namespaceName, err := e.opts.NamespaceRegistry.GetNamespaceName(nsID) + if err != nil { + return err + } + + metricsHandler := enrichMetricsHandler( + activity, + e.opts.MetricsHandler, + namespaceName.String(), + metrics.TimerActiveTaskActivityTimeoutScope, + e.opts.Config.BreakdownMetricsByTaskQueue) + + event := timeoutEvent{ + timeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START, + metricsHandler: metricsHandler, + fromStatus: activity.GetStatus(), + } + + return TransitionTimedOut.Apply(activity, ctx, event) +} + +type scheduleToCloseTimeoutTaskExecutor struct { + opts timeoutTaskExecutorOptions +} + +func newScheduleToCloseTimeoutTaskExecutor(opts timeoutTaskExecutorOptions) *scheduleToCloseTimeoutTaskExecutor { + return &scheduleToCloseTimeoutTaskExecutor{ + opts, + } +} + +func (e *scheduleToCloseTimeoutTaskExecutor) Validate( + _ 
chasm.Context, + activity *Activity, + _ chasm.TaskAttributes, + _ *activitypb.ScheduleToCloseTimeoutTask, +) (bool, error) { + return TransitionTimedOut.Possible(activity), nil +} + +func (e *scheduleToCloseTimeoutTaskExecutor) Execute( + ctx chasm.MutableContext, + activity *Activity, + _ chasm.TaskAttributes, + _ *activitypb.ScheduleToCloseTimeoutTask, +) error { + nsID := namespace.ID(ctx.ExecutionKey().NamespaceID) + namespaceName, err := e.opts.NamespaceRegistry.GetNamespaceName(nsID) + if err != nil { + return err + } + + metricsHandler := enrichMetricsHandler( + activity, + e.opts.MetricsHandler, + namespaceName.String(), + metrics.TimerActiveTaskActivityTimeoutScope, + e.opts.Config.BreakdownMetricsByTaskQueue) + + event := timeoutEvent{ + timeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, + metricsHandler: metricsHandler, + fromStatus: activity.GetStatus(), + } + + return TransitionTimedOut.Apply(activity, ctx, event) +} + +type startToCloseTimeoutTaskExecutor struct { + opts timeoutTaskExecutorOptions +} + +func newStartToCloseTimeoutTaskExecutor(opts timeoutTaskExecutorOptions) *startToCloseTimeoutTaskExecutor { + return &startToCloseTimeoutTaskExecutor{ + opts, + } +} + +func (e *startToCloseTimeoutTaskExecutor) Validate( + ctx chasm.Context, + activity *Activity, + _ chasm.TaskAttributes, + task *activitypb.StartToCloseTimeoutTask, +) (bool, error) { + valid := (activity.Status == activitypb.ACTIVITY_EXECUTION_STATUS_STARTED && + task.Attempt == activity.LastAttempt.Get(ctx).GetCount()) + return valid, nil +} + +// Execute executes a StartToCloseTimeoutTask. It fails the attempt, leading to retry or activity +// failure. 
func (e *startToCloseTimeoutTaskExecutor) Execute(
	ctx chasm.MutableContext,
	activity *Activity,
	_ chasm.TaskAttributes,
	_ *activitypb.StartToCloseTimeoutTask,
) error {
	// First try to schedule a retry; a zero interval means "use the retry policy".
	rescheduled, err := activity.tryReschedule(ctx, 0, createStartToCloseTimeoutFailure())
	if err != nil {
		return err
	}

	nsID := namespace.ID(ctx.ExecutionKey().NamespaceID)
	namespaceName, err := e.opts.NamespaceRegistry.GetNamespaceName(nsID)
	if err != nil {
		return err
	}

	metricsHandler := enrichMetricsHandler(
		activity,
		e.opts.MetricsHandler,
		namespaceName.String(),
		metrics.TimerActiveTaskActivityTimeoutScope,
		e.opts.Config.BreakdownMetricsByTaskQueue)

	if rescheduled {
		// A retry was scheduled: emit attempt-level metrics only and keep the activity open.
		activity.emitOnAttemptTimedOutMetrics(ctx, metricsHandler, enumspb.TIMEOUT_TYPE_START_TO_CLOSE)

		return nil
	}

	// No retries remain: close the whole activity as timed out.
	return TransitionTimedOut.Apply(activity, ctx, timeoutEvent{
		timeoutType:    enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
		metricsHandler: metricsHandler,
		fromStatus:     activity.GetStatus(),
	})
}

// HeartbeatTimeoutTask is a pure task that enforces heartbeat timeouts.
type heartbeatTimeoutTaskExecutor struct {
	opts timeoutTaskExecutorOptions
}

// newHeartbeatTimeoutTaskExecutor constructs the heartbeat timeout task executor.
func newHeartbeatTimeoutTaskExecutor(opts timeoutTaskExecutorOptions) *heartbeatTimeoutTaskExecutor {
	return &heartbeatTimeoutTaskExecutor{
		opts,
	}
}

// Validate validates a HeartbeatTimeoutTask.
func (e *heartbeatTimeoutTaskExecutor) Validate(
	ctx chasm.Context,
	activity *Activity,
	taskAttrs chasm.TaskAttributes,
	task *activitypb.HeartbeatTimeoutTask,
) (bool, error) {
	// Let T = user-configured heartbeat timeout and let hb_i be the time of the ith user-submitted
	// heartbeat request. (hb_0 = 0 since we always start a timer task when an attempt starts).

	// There are two concurrent sequences of events:
	// 1. A worker is sending heartbeats at times hb_i.
	// 2. This task is being executed at (shortly after) times hb_i + T.

	// On the i-th execution of this function, we look back into the past and determine whether the
	// last heartbeat was received after hb_i. If so, we reject this timeout task. Otherwise, the
	// Execute function runs and we fail the attempt.
	if activity.Status != activitypb.ACTIVITY_EXECUTION_STATUS_STARTED &&
		activity.Status != activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED {
		return false, nil
	}
	// Task attempt must still match current attempt.
	attempt := activity.LastAttempt.Get(ctx)
	if attempt.GetCount() != task.Attempt {
		return false, nil
	}

	// Must not have been a heartbeat since this task was created
	hbTimeout := activity.GetHeartbeatTimeout().AsDuration() // T
	attemptStartTime := attempt.GetStartedTime().AsTime()
	lastHb, _ := activity.LastHeartbeat.TryGet(ctx) // could be nil, or from a previous attempt
	// No hbs in attempt so far is equivalent to hb having been sent at attempt start time.
	lastHbTime := util.MaxTime(lastHb.GetRecordedTime().AsTime(), attemptStartTime)
	thisTaskHbTime := taskAttrs.ScheduledTime.Add(-hbTimeout) // hb_i
	if lastHbTime.After(thisTaskHbTime) {
		// another heartbeat has invalidated this task's heartbeat
		return false, nil
	}
	return true, nil
}

// Execute executes a HeartbeatTimeoutTask. It fails the attempt, leading to retry or activity
// failure.
+func (e *heartbeatTimeoutTaskExecutor) Execute( + ctx chasm.MutableContext, + activity *Activity, + _ chasm.TaskAttributes, + _ *activitypb.HeartbeatTimeoutTask, +) error { + rescheduled, err := activity.tryReschedule(ctx, 0, createHeartbeatTimeoutFailure()) + if err != nil { + return err + } + + nsID := namespace.ID(ctx.ExecutionKey().NamespaceID) + namespaceName, err := e.opts.NamespaceRegistry.GetNamespaceName(nsID) + if err != nil { + return err + } + + metricsHandler := enrichMetricsHandler( + activity, + e.opts.MetricsHandler, + namespaceName.String(), + metrics.TimerActiveTaskActivityTimeoutScope, + e.opts.Config.BreakdownMetricsByTaskQueue) + + if rescheduled { + activity.emitOnAttemptTimedOutMetrics(ctx, metricsHandler, enumspb.TIMEOUT_TYPE_HEARTBEAT) + return nil + } + + return TransitionTimedOut.Apply(activity, ctx, timeoutEvent{ + timeoutType: enumspb.TIMEOUT_TYPE_HEARTBEAT, + metricsHandler: metricsHandler, + fromStatus: activity.GetStatus(), + }) +} diff --git a/chasm/lib/activity/config.go b/chasm/lib/activity/config.go new file mode 100644 index 0000000000..4cc8f04c8e --- /dev/null +++ b/chasm/lib/activity/config.go @@ -0,0 +1,54 @@ +package activity + +import ( + "time" + + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/retrypolicy" +) + +var ( + Enabled = dynamicconfig.NewNamespaceBoolSetting( + "activity.enableStandalone", + false, + `Toggles standalone activity functionality on the server.`, + ) + + LongPollTimeout = dynamicconfig.NewNamespaceDurationSetting( + "activity.longPollTimeout", + 20*time.Second, + `Timeout for activity long-poll requests.`, + ) + + LongPollBuffer = dynamicconfig.NewNamespaceDurationSetting( + "activity.longPollBuffer", + time.Second, + `A buffer used to adjust the activity long-poll timeouts. 
+ Specifically, activity long-poll requests are timed out at a time which leaves at least the buffer's duration + remaining before the caller's deadline, if permitted by the caller's deadline.`, + ) +) + +type Config struct { + BlobSizeLimitError dynamicconfig.IntPropertyFnWithNamespaceFilter + BlobSizeLimitWarn dynamicconfig.IntPropertyFnWithNamespaceFilter + BreakdownMetricsByTaskQueue dynamicconfig.TypedPropertyFnWithTaskQueueFilter[bool] + Enabled dynamicconfig.BoolPropertyFnWithNamespaceFilter + LongPollBuffer dynamicconfig.DurationPropertyFnWithNamespaceFilter + LongPollTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter + MaxIDLengthLimit dynamicconfig.IntPropertyFn + DefaultActivityRetryPolicy dynamicconfig.TypedPropertyFnWithNamespaceFilter[retrypolicy.DefaultRetrySettings] +} + +func ConfigProvider(dc *dynamicconfig.Collection) *Config { + return &Config{ + BlobSizeLimitError: dynamicconfig.BlobSizeLimitError.Get(dc), + BlobSizeLimitWarn: dynamicconfig.BlobSizeLimitWarn.Get(dc), + BreakdownMetricsByTaskQueue: dynamicconfig.MetricsBreakdownByTaskQueue.Get(dc), + DefaultActivityRetryPolicy: dynamicconfig.DefaultActivityRetryPolicy.Get(dc), + Enabled: Enabled.Get(dc), + LongPollBuffer: LongPollBuffer.Get(dc), + LongPollTimeout: LongPollTimeout.Get(dc), + MaxIDLengthLimit: dynamicconfig.MaxIDLengthLimit.Get(dc), + } +} diff --git a/chasm/lib/activity/frontend.go b/chasm/lib/activity/frontend.go new file mode 100644 index 0000000000..246195eff3 --- /dev/null +++ b/chasm/lib/activity/frontend.go @@ -0,0 +1,448 @@ +package activity + +import ( + "context" + + "github.com/google/uuid" + apiactivitypb "go.temporal.io/api/activity/v1" //nolint:importas + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/common" + 
"go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/searchattribute" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const StandaloneActivityDisabledError = "Standalone activity is disabled" + +type FrontendHandler interface { + StartActivityExecution(ctx context.Context, req *workflowservice.StartActivityExecutionRequest) (*workflowservice.StartActivityExecutionResponse, error) + DescribeActivityExecution(ctx context.Context, req *workflowservice.DescribeActivityExecutionRequest) (*workflowservice.DescribeActivityExecutionResponse, error) + PollActivityExecution(ctx context.Context, req *workflowservice.PollActivityExecutionRequest) (*workflowservice.PollActivityExecutionResponse, error) + CountActivityExecutions(context.Context, *workflowservice.CountActivityExecutionsRequest) (*workflowservice.CountActivityExecutionsResponse, error) + DeleteActivityExecution(context.Context, *workflowservice.DeleteActivityExecutionRequest) (*workflowservice.DeleteActivityExecutionResponse, error) + ListActivityExecutions(context.Context, *workflowservice.ListActivityExecutionsRequest) (*workflowservice.ListActivityExecutionsResponse, error) + RequestCancelActivityExecution(context.Context, *workflowservice.RequestCancelActivityExecutionRequest) (*workflowservice.RequestCancelActivityExecutionResponse, error) + TerminateActivityExecution(context.Context, *workflowservice.TerminateActivityExecutionRequest) (*workflowservice.TerminateActivityExecutionResponse, error) + IsStandaloneActivityEnabled(namespaceName string) bool +} + +type frontendHandler struct { + FrontendHandler + client activitypb.ActivityServiceClient + config *Config + logger log.Logger + metricsHandler metrics.Handler + namespaceRegistry namespace.Registry + saMapperProvider 
searchattribute.MapperProvider + saValidator *searchattribute.Validator +} + +// NewFrontendHandler creates a new FrontendHandler instance for processing activity frontend requests. +func NewFrontendHandler( + client activitypb.ActivityServiceClient, + config *Config, + logger log.Logger, + metricsHandler metrics.Handler, + namespaceRegistry namespace.Registry, + saMapperProvider searchattribute.MapperProvider, + saValidator *searchattribute.Validator, +) FrontendHandler { + return &frontendHandler{ + client: client, + config: config, + logger: logger, + metricsHandler: metricsHandler, + namespaceRegistry: namespaceRegistry, + saMapperProvider: saMapperProvider, + saValidator: saValidator, + } +} + +// IsStandaloneActivityEnabled checks if standalone activities are enabled for the given namespace +func (h *frontendHandler) IsStandaloneActivityEnabled(namespaceName string) bool { + return h.config.Enabled(namespaceName) +} + +// StartActivityExecution initiates a standalone activity execution in the specified namespace. +// It validates the request, resolves the namespace ID, applies default configurations, +// and forwards the request to the activity service handler. +// +// The method performs the following steps: +// 1. Resolves the namespace name to its internal ID +// 2. Validates and populates request fields (timeouts, retry policies, search attributes). The request is cloned +// before mutation to preserve the original for retries. +// 3. Sends the request to the history activity service. 
+func (h *frontendHandler) StartActivityExecution(ctx context.Context, req *workflowservice.StartActivityExecutionRequest) (*workflowservice.StartActivityExecutionResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, serviceerror.NewUnavailable(StandaloneActivityDisabledError) + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + modifiedReq, err := h.validateAndPopulateStartRequest(req, namespaceID) + if err != nil { + return nil, err + } + + resp, err := h.client.StartActivityExecution(ctx, &activitypb.StartActivityExecutionRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: modifiedReq, + }) + + return resp.GetFrontendResponse(), err +} + +// DescribeActivityExecution queries current activity state, optionally as a long-poll that waits +// for any state change. +func (h *frontendHandler) DescribeActivityExecution( + ctx context.Context, + req *workflowservice.DescribeActivityExecutionRequest, +) (*workflowservice.DescribeActivityExecutionResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, serviceerror.NewUnavailable(StandaloneActivityDisabledError) + } + + err := ValidateDescribeActivityExecutionRequest( + req, + h.config.MaxIDLengthLimit(), + ) + if err != nil { + return nil, err + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + resp, err := h.client.DescribeActivityExecution(ctx, &activitypb.DescribeActivityExecutionRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + return resp.GetFrontendResponse(), err +} + +// PollActivityExecution long-polls for activity outcome. 
+func (h *frontendHandler) PollActivityExecution( + ctx context.Context, + req *workflowservice.PollActivityExecutionRequest, +) (*workflowservice.PollActivityExecutionResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, serviceerror.NewUnavailable(StandaloneActivityDisabledError) + } + + err := ValidatePollActivityExecutionRequest( + req, + h.config.MaxIDLengthLimit(), + ) + if err != nil { + return nil, err + } + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + resp, err := h.client.PollActivityExecution(ctx, &activitypb.PollActivityExecutionRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + return resp.GetFrontendResponse(), err +} + +// ListActivityExecutions lists activity executions matching the query in the request. +func (h *frontendHandler) ListActivityExecutions( + ctx context.Context, + req *workflowservice.ListActivityExecutionsRequest, +) (*workflowservice.ListActivityExecutionsResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, serviceerror.NewUnavailable(StandaloneActivityDisabledError) + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + resp, err := chasm.ListExecutions[*Activity, *emptypb.Empty](ctx, &chasm.ListExecutionsRequest{ + NamespaceID: namespaceID.String(), + NamespaceName: req.GetNamespace(), + PageSize: int(req.GetPageSize()), + NextPageToken: req.GetNextPageToken(), + Query: req.GetQuery(), + }) + if err != nil { + return nil, err + } + + executions := make([]*apiactivitypb.ActivityExecutionListInfo, 0, len(resp.Executions)) + for _, exec := range resp.Executions { + activityType, _ := chasm.GetValue(exec.ChasmSearchAttributes, TypeSearchAttribute) + taskQueue, _ := chasm.GetValue(exec.ChasmSearchAttributes, TaskQueueSearchAttribute) + statusStr, _ := 
chasm.GetValue(exec.ChasmSearchAttributes, StatusSearchAttribute) + status, _ := enumspb.ActivityExecutionStatusFromString(statusStr) + + info := &apiactivitypb.ActivityExecutionListInfo{ + ActivityId: exec.BusinessID, + RunId: exec.RunID, + ScheduleTime: timestamppb.New(exec.StartTime), + StateTransitionCount: exec.StateTransitionCount, + StateSizeBytes: exec.HistorySizeBytes, + SearchAttributes: &commonpb.SearchAttributes{IndexedFields: exec.CustomSearchAttributes}, + ActivityType: &commonpb.ActivityType{Name: activityType}, + TaskQueue: taskQueue, + Status: status, + } + if !exec.CloseTime.IsZero() { + info.CloseTime = timestamppb.New(exec.CloseTime) + if !exec.StartTime.IsZero() { + info.ExecutionDuration = durationpb.New(exec.CloseTime.Sub(exec.StartTime)) + } + } + executions = append(executions, info) + } + + return &workflowservice.ListActivityExecutionsResponse{ + Executions: executions, + NextPageToken: resp.NextPageToken, + }, nil +} + +// CountActivityExecutions counts activity executions matching the query in the request. 
+func (h *frontendHandler) CountActivityExecutions( + ctx context.Context, + req *workflowservice.CountActivityExecutionsRequest, +) (*workflowservice.CountActivityExecutionsResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, serviceerror.NewUnavailable(StandaloneActivityDisabledError) + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + resp, err := chasm.CountExecutions[*Activity](ctx, &chasm.CountExecutionsRequest{ + NamespaceID: namespaceID.String(), + NamespaceName: req.GetNamespace(), + Query: req.GetQuery(), + }) + if err != nil { + return nil, err + } + + groups := make([]*workflowservice.CountActivityExecutionsResponse_AggregationGroup, 0, len(resp.Groups)) + for _, g := range resp.Groups { + groups = append(groups, &workflowservice.CountActivityExecutionsResponse_AggregationGroup{ + GroupValues: g.Values, + Count: g.Count, + }) + } + + return &workflowservice.CountActivityExecutionsResponse{ + Count: resp.Count, + Groups: groups, + }, nil +} + +// TerminateActivityExecution terminates a standalone activity execution +func (h *frontendHandler) TerminateActivityExecution( + ctx context.Context, + req *workflowservice.TerminateActivityExecutionRequest, +) (*workflowservice.TerminateActivityExecutionResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, serviceerror.NewUnavailable(StandaloneActivityDisabledError) + } + + namespaceName := req.GetNamespace() + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(namespaceName)) + if err != nil { + return nil, err + } + + // Since validation potentially mutates the request, we clone it first so that any retries use the original request. 
+ req = common.CloneProto(req) + + maxIDLen := h.config.MaxIDLengthLimit() + if len(req.GetRequestId()) > maxIDLen { + return nil, serviceerror.NewInvalidArgument("RequestID length exceeds limit.") + } + + if req.GetRequestId() == "" { + req.RequestId = uuid.NewString() + } + + if err := validateInputSize( + req.GetActivityId(), + "activity-termination", + h.config.BlobSizeLimitError, + h.config.BlobSizeLimitWarn, + len(req.GetReason()), + h.logger, + namespaceName); err != nil { + return nil, err + } + + _, err = h.client.TerminateActivityExecution(ctx, &activitypb.TerminateActivityExecutionRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + if err != nil { + return nil, err + } + + return &workflowservice.TerminateActivityExecutionResponse{}, nil +} + +func (h *frontendHandler) RequestCancelActivityExecution( + ctx context.Context, + req *workflowservice.RequestCancelActivityExecutionRequest, +) (*workflowservice.RequestCancelActivityExecutionResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, serviceerror.NewUnavailable(StandaloneActivityDisabledError) + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + // Since validation potentially mutates the request, we clone it first so that any retries use the original request. 
+ req = common.CloneProto(req) + + maxIDLen := h.config.MaxIDLengthLimit() + + if len(req.GetRequestId()) > maxIDLen { + return nil, serviceerror.NewInvalidArgument("RequestID length exceeds limit.") + } + + if req.GetRequestId() == "" { + req.RequestId = uuid.NewString() + } + + if len(req.GetReason()) > maxIDLen { + return nil, serviceerror.NewInvalidArgument("Reason length exceeds limit.") + } + + _, err = h.client.RequestCancelActivityExecution(ctx, &activitypb.RequestCancelActivityExecutionRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + if err != nil { + return nil, err + } + + return &workflowservice.RequestCancelActivityExecutionResponse{}, nil +} + +func (h *frontendHandler) validateAndPopulateStartRequest( + req *workflowservice.StartActivityExecutionRequest, + namespaceID namespace.ID, +) (*workflowservice.StartActivityExecutionRequest, error) { + // Since validation includes mutation of the request, we clone it first so that any retries use the original request. + req = common.CloneProto(req) + activityType := req.ActivityType.GetName() + + if req.RetryPolicy == nil { + req.RetryPolicy = &commonpb.RetryPolicy{} + } + + opts := activityOptionsFromStartRequest(req) + err := ValidateAndNormalizeActivityAttributes( + req.ActivityId, + activityType, + h.config.DefaultActivityRetryPolicy, + h.config.MaxIDLengthLimit(), + namespaceID, + opts, + req.Priority, + durationpb.New(0), + ) + if err != nil { + return nil, err + } + applyActivityOptionsToStartRequest(opts, req) + + err = h.validateAndNormalizeStartActivityExecutionRequest(req) + if err != nil { + return nil, err + } + + return req, nil +} + +// validateAndNormalizeStartActivityExecutionRequest validates and normalizes the standalone +// activity specific attributes. Note that this method mutates the input params; the caller must +// clone the request if necessary (e.g. if it may be retried). 
+func (h *frontendHandler) validateAndNormalizeStartActivityExecutionRequest( + req *workflowservice.StartActivityExecutionRequest, +) error { + if req.GetRequestId() == "" { + req.RequestId = uuid.NewString() + } + + if len(req.GetRequestId()) > h.config.MaxIDLengthLimit() { + return serviceerror.NewInvalidArgument("RequestID length exceeds limit.") + } + + if err := normalizeAndValidateIDPolicy(req); err != nil { + return err + } + + if err := validateInputSize( + req.GetActivityId(), + req.GetActivityType().GetName(), + h.config.BlobSizeLimitError, + h.config.BlobSizeLimitWarn, + req.Input.Size(), + h.logger, + req.GetNamespace()); err != nil { + return err + } + + if req.GetSearchAttributes() != nil { + if err := validateAndNormalizeSearchAttributes( + req, + h.saMapperProvider, + h.saValidator); err != nil { + return err + } + } + + return nil +} + +// activityOptionsFromStartRequest builds an ActivityOptions from the inlined fields +// of a StartActivityExecutionRequest for use with shared validation logic. +func activityOptionsFromStartRequest(req *workflowservice.StartActivityExecutionRequest) *apiactivitypb.ActivityOptions { + return &apiactivitypb.ActivityOptions{ + TaskQueue: req.TaskQueue, + ScheduleToCloseTimeout: req.ScheduleToCloseTimeout, + ScheduleToStartTimeout: req.ScheduleToStartTimeout, + StartToCloseTimeout: req.StartToCloseTimeout, + HeartbeatTimeout: req.HeartbeatTimeout, + RetryPolicy: req.RetryPolicy, + } +} + +// applyActivityOptionsToStartRequest copies normalized values from ActivityOptions +// back to the StartActivityExecutionRequest. 
+func applyActivityOptionsToStartRequest(opts *apiactivitypb.ActivityOptions, req *workflowservice.StartActivityExecutionRequest) { + req.TaskQueue = opts.TaskQueue + req.ScheduleToCloseTimeout = opts.ScheduleToCloseTimeout + req.ScheduleToStartTimeout = opts.ScheduleToStartTimeout + req.StartToCloseTimeout = opts.StartToCloseTimeout + req.HeartbeatTimeout = opts.HeartbeatTimeout + req.RetryPolicy = opts.RetryPolicy +} diff --git a/chasm/lib/activity/fx.go b/chasm/lib/activity/fx.go new file mode 100644 index 0000000000..60deaec41d --- /dev/null +++ b/chasm/lib/activity/fx.go @@ -0,0 +1,38 @@ +package activity + +import ( + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/common/resource" + "go.uber.org/fx" +) + +var HistoryModule = fx.Module( + "activity-history", + fx.Provide( + ConfigProvider, + newActivityDispatchTaskExecutor, + newScheduleToStartTimeoutTaskExecutor, + newScheduleToCloseTimeoutTaskExecutor, + newStartToCloseTimeoutTaskExecutor, + newHeartbeatTimeoutTaskExecutor, + newHandler, + newLibrary, + ), + fx.Invoke(func(l *library, registry *chasm.Registry) error { + return registry.Register(l) + }), +) + +var FrontendModule = fx.Module( + "activity-frontend", + fx.Provide(ConfigProvider), + fx.Provide(activitypb.NewActivityServiceLayeredClient), + fx.Provide(NewFrontendHandler), + fx.Provide(resource.SearchAttributeValidatorProvider), + fx.Invoke(func(registry *chasm.Registry) error { + // Frontend needs to register the component in order to serialize ComponentRefs, but doesn't + // need task executors. 
+ return registry.Register(newComponentOnlyLibrary()) + }), +) diff --git a/chasm/lib/activity/gen/activitypb/v1/activity_state.go-helpers.pb.go b/chasm/lib/activity/gen/activitypb/v1/activity_state.go-helpers.pb.go new file mode 100644 index 0000000000..a765e20868 --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/activity_state.go-helpers.pb.go @@ -0,0 +1,292 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package activitypb + +import ( + "fmt" + + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type ActivityState to the protobuf v3 wire format +func (val *ActivityState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityState from the protobuf v3 wire format +func (val *ActivityState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityState + switch t := that.(type) { + case *ActivityState: + that1 = t + case ActivityState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ActivityCancelState to the protobuf v3 wire format +func (val *ActivityCancelState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityCancelState from the protobuf v3 wire format +func (val *ActivityCancelState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityCancelState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityCancelState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityCancelState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityCancelState + switch t := that.(type) { + case *ActivityCancelState: + that1 = t + case ActivityCancelState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ActivityTerminateState to the protobuf v3 wire format +func (val *ActivityTerminateState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityTerminateState from the protobuf v3 wire format +func (val *ActivityTerminateState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityTerminateState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityTerminateState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityTerminateState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityTerminateState + switch t := that.(type) { + case *ActivityTerminateState: + that1 = t + case ActivityTerminateState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ActivityAttemptState to the protobuf v3 wire format +func (val *ActivityAttemptState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityAttemptState from the protobuf v3 wire format +func (val *ActivityAttemptState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityAttemptState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityAttemptState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityAttemptState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityAttemptState + switch t := that.(type) { + case *ActivityAttemptState: + that1 = t + case ActivityAttemptState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ActivityHeartbeatState to the protobuf v3 wire format +func (val *ActivityHeartbeatState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityHeartbeatState from the protobuf v3 wire format +func (val *ActivityHeartbeatState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityHeartbeatState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityHeartbeatState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityHeartbeatState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityHeartbeatState + switch t := that.(type) { + case *ActivityHeartbeatState: + that1 = t + case ActivityHeartbeatState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ActivityRequestData to the protobuf v3 wire format +func (val *ActivityRequestData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityRequestData from the protobuf v3 wire format +func (val *ActivityRequestData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityRequestData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityRequestData values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityRequestData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityRequestData + switch t := that.(type) { + case *ActivityRequestData: + that1 = t + case ActivityRequestData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ActivityOutcome to the protobuf v3 wire format +func (val *ActivityOutcome) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityOutcome from the protobuf v3 wire format +func (val *ActivityOutcome) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityOutcome) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityOutcome values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityOutcome) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityOutcome + switch t := that.(type) { + case *ActivityOutcome: + that1 = t + case ActivityOutcome: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +var ( + ActivityExecutionStatus_shorthandValue = map[string]int32{ + "Unspecified": 0, + "Scheduled": 1, + "Started": 2, + "CancelRequested": 3, + "Completed": 4, + "Failed": 5, + "Canceled": 6, + "Terminated": 7, + "TimedOut": 8, + } +) + +// ActivityExecutionStatusFromString parses a ActivityExecutionStatus value from either the protojson +// canonical SCREAMING_CASE enum or the traditional temporal PascalCase enum to ActivityExecutionStatus +func ActivityExecutionStatusFromString(s string) (ActivityExecutionStatus, error) { + if v, ok := ActivityExecutionStatus_value[s]; ok { + return ActivityExecutionStatus(v), nil + } else if v, ok := ActivityExecutionStatus_shorthandValue[s]; ok { + return ActivityExecutionStatus(v), nil + } + return ActivityExecutionStatus(0), fmt.Errorf("%s is not a valid ActivityExecutionStatus", s) +} diff --git a/chasm/lib/activity/gen/activitypb/v1/activity_state.pb.go b/chasm/lib/activity/gen/activitypb/v1/activity_state.pb.go new file mode 100644 index 0000000000..3906c01a6e --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/activity_state.pb.go @@ -0,0 +1,1038 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/activity/proto/v1/activity_state.proto + +package activitypb + +import ( + reflect "reflect" + "strconv" + sync "sync" + unsafe "unsafe" + + v1 "go.temporal.io/api/common/v1" + v12 "go.temporal.io/api/deployment/v1" + v14 "go.temporal.io/api/failure/v1" + v13 "go.temporal.io/api/sdk/v1" + v11 "go.temporal.io/api/taskqueue/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ActivityExecutionStatus int32 + +const ( + ACTIVITY_EXECUTION_STATUS_UNSPECIFIED ActivityExecutionStatus = 0 + // The activity is not in a terminal status. This does not necessarily mean that there is a currently running + // attempt. The activity may be backing off between attempts or waiting for a worker to pick it up. + ACTIVITY_EXECUTION_STATUS_SCHEDULED ActivityExecutionStatus = 1 + ACTIVITY_EXECUTION_STATUS_STARTED ActivityExecutionStatus = 2 + ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED ActivityExecutionStatus = 3 + // Left as placeholders for when we add pause. + // // PAUSED means activity is paused on the server, and is not running in the worker + // ACTIVITY_EXECUTION_STATUS_PAUSED = 4; + // // PAUSE_REQUESTED means activity is currently running on the worker, but paused on the server + // ACTIVITY_EXECUTION_STATUS_PAUSE_REQUESTED = 5; + // The activity completed successfully. + ACTIVITY_EXECUTION_STATUS_COMPLETED ActivityExecutionStatus = 4 + // The activity completed with failure. 
+	ACTIVITY_EXECUTION_STATUS_FAILED ActivityExecutionStatus = 5
+	// The activity completed as canceled.
+	// Requesting to cancel an activity does not automatically transition the activity to canceled status. If the
+	// activity has a currently running attempt, the activity will only transition to canceled status if the current
+	// attempt is unsuccessful.
+	// TODO: Clarify what happens if there are no more allowed retries after the current attempt.
+	ACTIVITY_EXECUTION_STATUS_CANCELED ActivityExecutionStatus = 6
+	// The activity was terminated. Termination does not reach the worker and the activity code cannot react to it.
+	// A terminated activity may have a running attempt and will be requested to be canceled by the server when it
+	// heartbeats.
+	ACTIVITY_EXECUTION_STATUS_TERMINATED ActivityExecutionStatus = 7
+	// The activity has timed out by reaching the specified schedule-to-start or schedule-to-close timeouts.
+	// TODO: Clarify if there are other conditions where the activity can end up in timed out status.
+	ACTIVITY_EXECUTION_STATUS_TIMED_OUT ActivityExecutionStatus = 8
+)
+
+// Enum value maps for ActivityExecutionStatus.
+var ( + ActivityExecutionStatus_name = map[int32]string{ + 0: "ACTIVITY_EXECUTION_STATUS_UNSPECIFIED", + 1: "ACTIVITY_EXECUTION_STATUS_SCHEDULED", + 2: "ACTIVITY_EXECUTION_STATUS_STARTED", + 3: "ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED", + 4: "ACTIVITY_EXECUTION_STATUS_COMPLETED", + 5: "ACTIVITY_EXECUTION_STATUS_FAILED", + 6: "ACTIVITY_EXECUTION_STATUS_CANCELED", + 7: "ACTIVITY_EXECUTION_STATUS_TERMINATED", + 8: "ACTIVITY_EXECUTION_STATUS_TIMED_OUT", + } + ActivityExecutionStatus_value = map[string]int32{ + "ACTIVITY_EXECUTION_STATUS_UNSPECIFIED": 0, + "ACTIVITY_EXECUTION_STATUS_SCHEDULED": 1, + "ACTIVITY_EXECUTION_STATUS_STARTED": 2, + "ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED": 3, + "ACTIVITY_EXECUTION_STATUS_COMPLETED": 4, + "ACTIVITY_EXECUTION_STATUS_FAILED": 5, + "ACTIVITY_EXECUTION_STATUS_CANCELED": 6, + "ACTIVITY_EXECUTION_STATUS_TERMINATED": 7, + "ACTIVITY_EXECUTION_STATUS_TIMED_OUT": 8, + } +) + +func (x ActivityExecutionStatus) Enum() *ActivityExecutionStatus { + p := new(ActivityExecutionStatus) + *p = x + return p +} + +func (x ActivityExecutionStatus) String() string { + switch x { + case ACTIVITY_EXECUTION_STATUS_UNSPECIFIED: + return "Unspecified" + case ACTIVITY_EXECUTION_STATUS_SCHEDULED: + return "Scheduled" + case ACTIVITY_EXECUTION_STATUS_STARTED: + return "Started" + case ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED: + return "CancelRequested" + case ACTIVITY_EXECUTION_STATUS_COMPLETED: + return "Completed" + case ACTIVITY_EXECUTION_STATUS_FAILED: + return "Failed" + case ACTIVITY_EXECUTION_STATUS_CANCELED: + return "Canceled" + case ACTIVITY_EXECUTION_STATUS_TERMINATED: + return "Terminated" + case ACTIVITY_EXECUTION_STATUS_TIMED_OUT: + return "TimedOut" + + // Deprecated: Use ActivityExecutionStatus.Descriptor instead. 
+ default: + return strconv.Itoa(int(x)) + } + +} + +func (ActivityExecutionStatus) Descriptor() protoreflect.EnumDescriptor { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_enumTypes[0].Descriptor() +} + +func (ActivityExecutionStatus) Type() protoreflect.EnumType { + return &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_enumTypes[0] +} + +func (x ActivityExecutionStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +func (ActivityExecutionStatus) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{0} +} + +type ActivityState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The type of the activity, a string that maps to a registered activity on a worker. + ActivityType *v1.ActivityType `protobuf:"bytes,1,opt,name=activity_type,json=activityType,proto3" json:"activity_type,omitempty"` + TaskQueue *v11.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + // Indicates how long the caller is willing to wait for an activity completion. Limits how long + // retries will be attempted. Either this or `start_to_close_timeout` must be specified. + // + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "to" is used to indicate interval. --) + ScheduleToCloseTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=schedule_to_close_timeout,json=scheduleToCloseTimeout,proto3" json:"schedule_to_close_timeout,omitempty"` + // Limits time an activity task can stay in a task queue before a worker picks it up. This + // timeout is always non retryable, as all a retry would achieve is to put it back into the same + // queue. Defaults to `schedule_to_close_timeout` or workflow execution timeout if not + // specified. 
+ // + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "to" is used to indicate interval. --) + ScheduleToStartTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=schedule_to_start_timeout,json=scheduleToStartTimeout,proto3" json:"schedule_to_start_timeout,omitempty"` + // Maximum time an activity is allowed to execute after being picked up by a worker. This + // timeout is always retryable. Either this or `schedule_to_close_timeout` must be + // specified. + // + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "to" is used to indicate interval. --) + StartToCloseTimeout *durationpb.Duration `protobuf:"bytes,5,opt,name=start_to_close_timeout,json=startToCloseTimeout,proto3" json:"start_to_close_timeout,omitempty"` + // Maximum permitted time between successful worker heartbeats. + HeartbeatTimeout *durationpb.Duration `protobuf:"bytes,6,opt,name=heartbeat_timeout,json=heartbeatTimeout,proto3" json:"heartbeat_timeout,omitempty"` + // The retry policy for the activity. Will never exceed `schedule_to_close_timeout`. + RetryPolicy *v1.RetryPolicy `protobuf:"bytes,7,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` + // All of the possible activity statuses (covers both the public ActivityExecutionStatus and PendingActivityState). + // TODO: consider moving this into ActivityAttemptState and renaming that message. This could save mutating two + // components on each attempt transition. + Status ActivityExecutionStatus `protobuf:"varint,8,opt,name=status,proto3,enum=temporal.server.chasm.lib.activity.proto.v1.ActivityExecutionStatus" json:"status,omitempty"` + // Time the activity was originally scheduled via a StartActivityExecution request. + ScheduleTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=schedule_time,json=scheduleTime,proto3" json:"schedule_time,omitempty"` + // Priority metadata. 
+ Priority *v1.Priority `protobuf:"bytes,10,opt,name=priority,proto3" json:"priority,omitempty"` + // Set if activity cancellation was requested. + CancelState *ActivityCancelState `protobuf:"bytes,11,opt,name=cancel_state,json=cancelState,proto3" json:"cancel_state,omitempty"` + // Set if the activity was terminated + TerminateState *ActivityTerminateState `protobuf:"bytes,12,opt,name=terminate_state,json=terminateState,proto3" json:"terminate_state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityState) Reset() { + *x = ActivityState{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityState) ProtoMessage() {} + +func (x *ActivityState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityState.ProtoReflect.Descriptor instead. 
+func (*ActivityState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{0} +} + +func (x *ActivityState) GetActivityType() *v1.ActivityType { + if x != nil { + return x.ActivityType + } + return nil +} + +func (x *ActivityState) GetTaskQueue() *v11.TaskQueue { + if x != nil { + return x.TaskQueue + } + return nil +} + +func (x *ActivityState) GetScheduleToCloseTimeout() *durationpb.Duration { + if x != nil { + return x.ScheduleToCloseTimeout + } + return nil +} + +func (x *ActivityState) GetScheduleToStartTimeout() *durationpb.Duration { + if x != nil { + return x.ScheduleToStartTimeout + } + return nil +} + +func (x *ActivityState) GetStartToCloseTimeout() *durationpb.Duration { + if x != nil { + return x.StartToCloseTimeout + } + return nil +} + +func (x *ActivityState) GetHeartbeatTimeout() *durationpb.Duration { + if x != nil { + return x.HeartbeatTimeout + } + return nil +} + +func (x *ActivityState) GetRetryPolicy() *v1.RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +func (x *ActivityState) GetStatus() ActivityExecutionStatus { + if x != nil { + return x.Status + } + return ACTIVITY_EXECUTION_STATUS_UNSPECIFIED +} + +func (x *ActivityState) GetScheduleTime() *timestamppb.Timestamp { + if x != nil { + return x.ScheduleTime + } + return nil +} + +func (x *ActivityState) GetPriority() *v1.Priority { + if x != nil { + return x.Priority + } + return nil +} + +func (x *ActivityState) GetCancelState() *ActivityCancelState { + if x != nil { + return x.CancelState + } + return nil +} + +func (x *ActivityState) GetTerminateState() *ActivityTerminateState { + if x != nil { + return x.TerminateState + } + return nil +} + +type ActivityCancelState struct { + state protoimpl.MessageState `protogen:"open.v1"` + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + RequestTime *timestamppb.Timestamp 
`protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` + Identity string `protobuf:"bytes,3,opt,name=identity,proto3" json:"identity,omitempty"` + Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityCancelState) Reset() { + *x = ActivityCancelState{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityCancelState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityCancelState) ProtoMessage() {} + +func (x *ActivityCancelState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityCancelState.ProtoReflect.Descriptor instead. 
+func (*ActivityCancelState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{1} +} + +func (x *ActivityCancelState) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *ActivityCancelState) GetRequestTime() *timestamppb.Timestamp { + if x != nil { + return x.RequestTime + } + return nil +} + +func (x *ActivityCancelState) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *ActivityCancelState) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +type ActivityTerminateState struct { + state protoimpl.MessageState `protogen:"open.v1"` + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityTerminateState) Reset() { + *x = ActivityTerminateState{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityTerminateState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityTerminateState) ProtoMessage() {} + +func (x *ActivityTerminateState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityTerminateState.ProtoReflect.Descriptor instead. 
+func (*ActivityTerminateState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{2} +} + +func (x *ActivityTerminateState) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +type ActivityAttemptState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The attempt this activity is currently on. + // Incremented each time a new attempt is started. + // TODO: Confirm if this is on scheduled or started. + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // Time from the last attempt failure to the next activity retry. + // If the activity is currently running, this represents the next retry interval in case the attempt fails. + // If activity is currently backing off between attempt, this represents the current retry interval. + // If there is no next retry allowed, this field will be null. + // This interval is typically calculated from the specified retry policy, but may be modified if an activity fails + // with a retryable application failure specifying a retry delay. + CurrentRetryInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=current_retry_interval,json=currentRetryInterval,proto3" json:"current_retry_interval,omitempty"` + // Time the last attempt was started. + StartedTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` + // The time when the last activity attempt completed. If activity has not been completed yet, it will be null. + CompleteTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=complete_time,json=completeTime,proto3" json:"complete_time,omitempty"` + // Details about the last failure. This will only be updated when an activity attempt fails, + // including start-to-close timeout. Activity success, termination, schedule-to-start and schedule-to-close timeouts + // will not reset it. 
+ LastFailureDetails *ActivityAttemptState_LastFailureDetails `protobuf:"bytes,5,opt,name=last_failure_details,json=lastFailureDetails,proto3" json:"last_failure_details,omitempty"` + LastWorkerIdentity string `protobuf:"bytes,7,opt,name=last_worker_identity,json=lastWorkerIdentity,proto3" json:"last_worker_identity,omitempty"` + // The Worker Deployment Version this activity was dispatched to most recently. + // If nil, the activity has not yet been dispatched or was last dispatched to an unversioned worker. + LastDeploymentVersion *v12.WorkerDeploymentVersion `protobuf:"bytes,8,opt,name=last_deployment_version,json=lastDeploymentVersion,proto3" json:"last_deployment_version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityAttemptState) Reset() { + *x = ActivityAttemptState{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityAttemptState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityAttemptState) ProtoMessage() {} + +func (x *ActivityAttemptState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityAttemptState.ProtoReflect.Descriptor instead. 
+func (*ActivityAttemptState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{3} +} + +func (x *ActivityAttemptState) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *ActivityAttemptState) GetCurrentRetryInterval() *durationpb.Duration { + if x != nil { + return x.CurrentRetryInterval + } + return nil +} + +func (x *ActivityAttemptState) GetStartedTime() *timestamppb.Timestamp { + if x != nil { + return x.StartedTime + } + return nil +} + +func (x *ActivityAttemptState) GetCompleteTime() *timestamppb.Timestamp { + if x != nil { + return x.CompleteTime + } + return nil +} + +func (x *ActivityAttemptState) GetLastFailureDetails() *ActivityAttemptState_LastFailureDetails { + if x != nil { + return x.LastFailureDetails + } + return nil +} + +func (x *ActivityAttemptState) GetLastWorkerIdentity() string { + if x != nil { + return x.LastWorkerIdentity + } + return "" +} + +func (x *ActivityAttemptState) GetLastDeploymentVersion() *v12.WorkerDeploymentVersion { + if x != nil { + return x.LastDeploymentVersion + } + return nil +} + +type ActivityHeartbeatState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Details provided in the last recorded activity heartbeat. + Details *v1.Payloads `protobuf:"bytes,1,opt,name=details,proto3" json:"details,omitempty"` + // Time the last heartbeat was recorded. 
+ RecordedTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=recorded_time,json=recordedTime,proto3" json:"recorded_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityHeartbeatState) Reset() { + *x = ActivityHeartbeatState{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityHeartbeatState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityHeartbeatState) ProtoMessage() {} + +func (x *ActivityHeartbeatState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityHeartbeatState.ProtoReflect.Descriptor instead. +func (*ActivityHeartbeatState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{4} +} + +func (x *ActivityHeartbeatState) GetDetails() *v1.Payloads { + if x != nil { + return x.Details + } + return nil +} + +func (x *ActivityHeartbeatState) GetRecordedTime() *timestamppb.Timestamp { + if x != nil { + return x.RecordedTime + } + return nil +} + +type ActivityRequestData struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Serialized activity input, passed as arguments to the activity function. + Input *v1.Payloads `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` + Header *v1.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header,omitempty"` + // Metadata for use by user interfaces to display the fixed as-of-start summary and details of the activity. 
+ UserMetadata *v13.UserMetadata `protobuf:"bytes,3,opt,name=user_metadata,json=userMetadata,proto3" json:"user_metadata,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityRequestData) Reset() { + *x = ActivityRequestData{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityRequestData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityRequestData) ProtoMessage() {} + +func (x *ActivityRequestData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityRequestData.ProtoReflect.Descriptor instead. 
+func (*ActivityRequestData) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{5} +} + +func (x *ActivityRequestData) GetInput() *v1.Payloads { + if x != nil { + return x.Input + } + return nil +} + +func (x *ActivityRequestData) GetHeader() *v1.Header { + if x != nil { + return x.Header + } + return nil +} + +func (x *ActivityRequestData) GetUserMetadata() *v13.UserMetadata { + if x != nil { + return x.UserMetadata + } + return nil +} + +type ActivityOutcome struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Variant: + // + // *ActivityOutcome_Successful_ + // *ActivityOutcome_Failed_ + Variant isActivityOutcome_Variant `protobuf_oneof:"variant"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityOutcome) Reset() { + *x = ActivityOutcome{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityOutcome) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityOutcome) ProtoMessage() {} + +func (x *ActivityOutcome) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityOutcome.ProtoReflect.Descriptor instead. 
+func (*ActivityOutcome) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{6} +} + +func (x *ActivityOutcome) GetVariant() isActivityOutcome_Variant { + if x != nil { + return x.Variant + } + return nil +} + +func (x *ActivityOutcome) GetSuccessful() *ActivityOutcome_Successful { + if x != nil { + if x, ok := x.Variant.(*ActivityOutcome_Successful_); ok { + return x.Successful + } + } + return nil +} + +func (x *ActivityOutcome) GetFailed() *ActivityOutcome_Failed { + if x != nil { + if x, ok := x.Variant.(*ActivityOutcome_Failed_); ok { + return x.Failed + } + } + return nil +} + +type isActivityOutcome_Variant interface { + isActivityOutcome_Variant() +} + +type ActivityOutcome_Successful_ struct { + Successful *ActivityOutcome_Successful `protobuf:"bytes,1,opt,name=successful,proto3,oneof"` +} + +type ActivityOutcome_Failed_ struct { + Failed *ActivityOutcome_Failed `protobuf:"bytes,2,opt,name=failed,proto3,oneof"` +} + +func (*ActivityOutcome_Successful_) isActivityOutcome_Variant() {} + +func (*ActivityOutcome_Failed_) isActivityOutcome_Variant() {} + +type ActivityAttemptState_LastFailureDetails struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The last time the activity attempt failed. + Time *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + // Failure details from the last failed attempt. 
+ Failure *v14.Failure `protobuf:"bytes,2,opt,name=failure,proto3" json:"failure,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityAttemptState_LastFailureDetails) Reset() { + *x = ActivityAttemptState_LastFailureDetails{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityAttemptState_LastFailureDetails) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityAttemptState_LastFailureDetails) ProtoMessage() {} + +func (x *ActivityAttemptState_LastFailureDetails) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityAttemptState_LastFailureDetails.ProtoReflect.Descriptor instead. 
+func (*ActivityAttemptState_LastFailureDetails) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *ActivityAttemptState_LastFailureDetails) GetTime() *timestamppb.Timestamp { + if x != nil { + return x.Time + } + return nil +} + +func (x *ActivityAttemptState_LastFailureDetails) GetFailure() *v14.Failure { + if x != nil { + return x.Failure + } + return nil +} + +type ActivityOutcome_Successful struct { + state protoimpl.MessageState `protogen:"open.v1"` + Output *v1.Payloads `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityOutcome_Successful) Reset() { + *x = ActivityOutcome_Successful{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityOutcome_Successful) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityOutcome_Successful) ProtoMessage() {} + +func (x *ActivityOutcome_Successful) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityOutcome_Successful.ProtoReflect.Descriptor instead. 
+func (*ActivityOutcome_Successful) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *ActivityOutcome_Successful) GetOutput() *v1.Payloads { + if x != nil { + return x.Output + } + return nil +} + +type ActivityOutcome_Failed struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Only filled on schedule-to-start timeouts, schedule-to-close timeouts or terminations. All other attempt + // failures will be recorded in ActivityAttemptState.last_failure_details. + Failure *v14.Failure `protobuf:"bytes,1,opt,name=failure,proto3" json:"failure,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityOutcome_Failed) Reset() { + *x = ActivityOutcome_Failed{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityOutcome_Failed) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityOutcome_Failed) ProtoMessage() {} + +func (x *ActivityOutcome_Failed) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityOutcome_Failed.ProtoReflect.Descriptor instead. 
+func (*ActivityOutcome_Failed) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{6, 1} +} + +func (x *ActivityOutcome_Failed) GetFailure() *v14.Failure { + if x != nil { + return x.Failure + } + return nil +} + +var File_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDesc = "" + + "\n" + + "@temporal/server/chasm/lib/activity/proto/v1/activity_state.proto\x12+temporal.server.chasm.lib.activity.proto.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a(temporal/api/deployment/v1/message.proto\x1a%temporal/api/failure/v1/message.proto\x1a'temporal/api/sdk/v1/user_metadata.proto\x1a'temporal/api/taskqueue/v1/message.proto\"\xdb\a\n" + + "\rActivityState\x12I\n" + + "\ractivity_type\x18\x01 \x01(\v2$.temporal.api.common.v1.ActivityTypeR\factivityType\x12C\n" + + "\n" + + "task_queue\x18\x02 \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\x12T\n" + + "\x19schedule_to_close_timeout\x18\x03 \x01(\v2\x19.google.protobuf.DurationR\x16scheduleToCloseTimeout\x12T\n" + + "\x19schedule_to_start_timeout\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\x16scheduleToStartTimeout\x12N\n" + + "\x16start_to_close_timeout\x18\x05 \x01(\v2\x19.google.protobuf.DurationR\x13startToCloseTimeout\x12F\n" + + "\x11heartbeat_timeout\x18\x06 \x01(\v2\x19.google.protobuf.DurationR\x10heartbeatTimeout\x12F\n" + + "\fretry_policy\x18\a \x01(\v2#.temporal.api.common.v1.RetryPolicyR\vretryPolicy\x12\\\n" + + "\x06status\x18\b \x01(\x0e2D.temporal.server.chasm.lib.activity.proto.v1.ActivityExecutionStatusR\x06status\x12?\n" + + "\rschedule_time\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\fscheduleTime\x12<\n" + + "\bpriority\x18\n" + + " \x01(\v2 .temporal.api.common.v1.PriorityR\bpriority\x12c\n" + + 
"\fcancel_state\x18\v \x01(\v2@.temporal.server.chasm.lib.activity.proto.v1.ActivityCancelStateR\vcancelState\x12l\n" + + "\x0fterminate_state\x18\f \x01(\v2C.temporal.server.chasm.lib.activity.proto.v1.ActivityTerminateStateR\x0eterminateState\"\xa7\x01\n" + + "\x13ActivityCancelState\x12\x1d\n" + + "\n" + + "request_id\x18\x01 \x01(\tR\trequestId\x12=\n" + + "\frequest_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\vrequestTime\x12\x1a\n" + + "\bidentity\x18\x03 \x01(\tR\bidentity\x12\x16\n" + + "\x06reason\x18\x04 \x01(\tR\x06reason\"7\n" + + "\x16ActivityTerminateState\x12\x1d\n" + + "\n" + + "request_id\x18\x01 \x01(\tR\trequestId\"\xa8\x05\n" + + "\x14ActivityAttemptState\x12\x14\n" + + "\x05count\x18\x01 \x01(\x05R\x05count\x12O\n" + + "\x16current_retry_interval\x18\x02 \x01(\v2\x19.google.protobuf.DurationR\x14currentRetryInterval\x12=\n" + + "\fstarted_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12?\n" + + "\rcomplete_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\fcompleteTime\x12\x86\x01\n" + + "\x14last_failure_details\x18\x05 \x01(\v2T.temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.LastFailureDetailsR\x12lastFailureDetails\x120\n" + + "\x14last_worker_identity\x18\a \x01(\tR\x12lastWorkerIdentity\x12k\n" + + "\x17last_deployment_version\x18\b \x01(\v23.temporal.api.deployment.v1.WorkerDeploymentVersionR\x15lastDeploymentVersion\x1a\x80\x01\n" + + "\x12LastFailureDetails\x12.\n" + + "\x04time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\x04time\x12:\n" + + "\afailure\x18\x02 \x01(\v2 .temporal.api.failure.v1.FailureR\afailure\"\x95\x01\n" + + "\x16ActivityHeartbeatState\x12:\n" + + "\adetails\x18\x01 \x01(\v2 .temporal.api.common.v1.PayloadsR\adetails\x12?\n" + + "\rrecorded_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\frecordedTime\"\xcd\x01\n" + + "\x13ActivityRequestData\x126\n" + + "\x05input\x18\x01 \x01(\v2 .temporal.api.common.v1.PayloadsR\x05input\x126\n" + + "\x06header\x18\x02 
\x01(\v2\x1e.temporal.api.common.v1.HeaderR\x06header\x12F\n" + + "\ruser_metadata\x18\x03 \x01(\v2!.temporal.api.sdk.v1.UserMetadataR\fuserMetadata\"\xf4\x02\n" + + "\x0fActivityOutcome\x12i\n" + + "\n" + + "successful\x18\x01 \x01(\v2G.temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.SuccessfulH\x00R\n" + + "successful\x12]\n" + + "\x06failed\x18\x02 \x01(\v2C.temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.FailedH\x00R\x06failed\x1aF\n" + + "\n" + + "Successful\x128\n" + + "\x06output\x18\x01 \x01(\v2 .temporal.api.common.v1.PayloadsR\x06output\x1aD\n" + + "\x06Failed\x12:\n" + + "\afailure\x18\x01 \x01(\v2 .temporal.api.failure.v1.FailureR\afailureB\t\n" + + "\avariant*\x8e\x03\n" + + "\x17ActivityExecutionStatus\x12)\n" + + "%ACTIVITY_EXECUTION_STATUS_UNSPECIFIED\x10\x00\x12'\n" + + "#ACTIVITY_EXECUTION_STATUS_SCHEDULED\x10\x01\x12%\n" + + "!ACTIVITY_EXECUTION_STATUS_STARTED\x10\x02\x12.\n" + + "*ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED\x10\x03\x12'\n" + + "#ACTIVITY_EXECUTION_STATUS_COMPLETED\x10\x04\x12$\n" + + " ACTIVITY_EXECUTION_STATUS_FAILED\x10\x05\x12&\n" + + "\"ACTIVITY_EXECUTION_STATUS_CANCELED\x10\x06\x12(\n" + + "$ACTIVITY_EXECUTION_STATUS_TERMINATED\x10\a\x12'\n" + + "#ACTIVITY_EXECUTION_STATUS_TIMED_OUT\x10\bBDZBgo.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDesc), 
len(file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescData +} + +var file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_goTypes = []any{ + (ActivityExecutionStatus)(0), // 0: temporal.server.chasm.lib.activity.proto.v1.ActivityExecutionStatus + (*ActivityState)(nil), // 1: temporal.server.chasm.lib.activity.proto.v1.ActivityState + (*ActivityCancelState)(nil), // 2: temporal.server.chasm.lib.activity.proto.v1.ActivityCancelState + (*ActivityTerminateState)(nil), // 3: temporal.server.chasm.lib.activity.proto.v1.ActivityTerminateState + (*ActivityAttemptState)(nil), // 4: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState + (*ActivityHeartbeatState)(nil), // 5: temporal.server.chasm.lib.activity.proto.v1.ActivityHeartbeatState + (*ActivityRequestData)(nil), // 6: temporal.server.chasm.lib.activity.proto.v1.ActivityRequestData + (*ActivityOutcome)(nil), // 7: temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome + (*ActivityAttemptState_LastFailureDetails)(nil), // 8: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.LastFailureDetails + (*ActivityOutcome_Successful)(nil), // 9: temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.Successful + (*ActivityOutcome_Failed)(nil), // 10: temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.Failed + (*v1.ActivityType)(nil), // 11: temporal.api.common.v1.ActivityType + (*v11.TaskQueue)(nil), // 12: temporal.api.taskqueue.v1.TaskQueue + (*durationpb.Duration)(nil), // 13: google.protobuf.Duration + (*v1.RetryPolicy)(nil), // 14: temporal.api.common.v1.RetryPolicy + (*timestamppb.Timestamp)(nil), // 15: 
google.protobuf.Timestamp + (*v1.Priority)(nil), // 16: temporal.api.common.v1.Priority + (*v12.WorkerDeploymentVersion)(nil), // 17: temporal.api.deployment.v1.WorkerDeploymentVersion + (*v1.Payloads)(nil), // 18: temporal.api.common.v1.Payloads + (*v1.Header)(nil), // 19: temporal.api.common.v1.Header + (*v13.UserMetadata)(nil), // 20: temporal.api.sdk.v1.UserMetadata + (*v14.Failure)(nil), // 21: temporal.api.failure.v1.Failure +} +var file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_depIdxs = []int32{ + 11, // 0: temporal.server.chasm.lib.activity.proto.v1.ActivityState.activity_type:type_name -> temporal.api.common.v1.ActivityType + 12, // 1: temporal.server.chasm.lib.activity.proto.v1.ActivityState.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 13, // 2: temporal.server.chasm.lib.activity.proto.v1.ActivityState.schedule_to_close_timeout:type_name -> google.protobuf.Duration + 13, // 3: temporal.server.chasm.lib.activity.proto.v1.ActivityState.schedule_to_start_timeout:type_name -> google.protobuf.Duration + 13, // 4: temporal.server.chasm.lib.activity.proto.v1.ActivityState.start_to_close_timeout:type_name -> google.protobuf.Duration + 13, // 5: temporal.server.chasm.lib.activity.proto.v1.ActivityState.heartbeat_timeout:type_name -> google.protobuf.Duration + 14, // 6: temporal.server.chasm.lib.activity.proto.v1.ActivityState.retry_policy:type_name -> temporal.api.common.v1.RetryPolicy + 0, // 7: temporal.server.chasm.lib.activity.proto.v1.ActivityState.status:type_name -> temporal.server.chasm.lib.activity.proto.v1.ActivityExecutionStatus + 15, // 8: temporal.server.chasm.lib.activity.proto.v1.ActivityState.schedule_time:type_name -> google.protobuf.Timestamp + 16, // 9: temporal.server.chasm.lib.activity.proto.v1.ActivityState.priority:type_name -> temporal.api.common.v1.Priority + 2, // 10: temporal.server.chasm.lib.activity.proto.v1.ActivityState.cancel_state:type_name -> 
temporal.server.chasm.lib.activity.proto.v1.ActivityCancelState + 3, // 11: temporal.server.chasm.lib.activity.proto.v1.ActivityState.terminate_state:type_name -> temporal.server.chasm.lib.activity.proto.v1.ActivityTerminateState + 15, // 12: temporal.server.chasm.lib.activity.proto.v1.ActivityCancelState.request_time:type_name -> google.protobuf.Timestamp + 13, // 13: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.current_retry_interval:type_name -> google.protobuf.Duration + 15, // 14: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.started_time:type_name -> google.protobuf.Timestamp + 15, // 15: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.complete_time:type_name -> google.protobuf.Timestamp + 8, // 16: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.last_failure_details:type_name -> temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.LastFailureDetails + 17, // 17: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.last_deployment_version:type_name -> temporal.api.deployment.v1.WorkerDeploymentVersion + 18, // 18: temporal.server.chasm.lib.activity.proto.v1.ActivityHeartbeatState.details:type_name -> temporal.api.common.v1.Payloads + 15, // 19: temporal.server.chasm.lib.activity.proto.v1.ActivityHeartbeatState.recorded_time:type_name -> google.protobuf.Timestamp + 18, // 20: temporal.server.chasm.lib.activity.proto.v1.ActivityRequestData.input:type_name -> temporal.api.common.v1.Payloads + 19, // 21: temporal.server.chasm.lib.activity.proto.v1.ActivityRequestData.header:type_name -> temporal.api.common.v1.Header + 20, // 22: temporal.server.chasm.lib.activity.proto.v1.ActivityRequestData.user_metadata:type_name -> temporal.api.sdk.v1.UserMetadata + 9, // 23: temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.successful:type_name -> temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.Successful + 10, // 24: 
temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.failed:type_name -> temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.Failed + 15, // 25: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.LastFailureDetails.time:type_name -> google.protobuf.Timestamp + 21, // 26: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.LastFailureDetails.failure:type_name -> temporal.api.failure.v1.Failure + 18, // 27: temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.Successful.output:type_name -> temporal.api.common.v1.Payloads + 21, // 28: temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.Failed.failure:type_name -> temporal.api.failure.v1.Failure + 29, // [29:29] is the sub-list for method output_type + 29, // [29:29] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 29, // [29:29] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_init() } +func file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_init() { + if File_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto != nil { + return + } + file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[6].OneofWrappers = []any{ + (*ActivityOutcome_Successful_)(nil), + (*ActivityOutcome_Failed_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDesc), len(file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDesc)), + NumEnums: 1, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_goTypes, + DependencyIndexes: 
file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_depIdxs, + EnumInfos: file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_enumTypes, + MessageInfos: file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto = out.File + file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_goTypes = nil + file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_depIdxs = nil +} diff --git a/chasm/lib/activity/gen/activitypb/v1/request_response.go-helpers.pb.go b/chasm/lib/activity/gen/activitypb/v1/request_response.go-helpers.pb.go new file mode 100644 index 0000000000..63311278fa --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/request_response.go-helpers.pb.go @@ -0,0 +1,376 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package activitypb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type StartActivityExecutionRequest to the protobuf v3 wire format +func (val *StartActivityExecutionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartActivityExecutionRequest from the protobuf v3 wire format +func (val *StartActivityExecutionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartActivityExecutionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartActivityExecutionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartActivityExecutionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartActivityExecutionRequest + switch t := that.(type) { + case *StartActivityExecutionRequest: + that1 = t + case StartActivityExecutionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StartActivityExecutionResponse to the protobuf v3 wire format +func (val *StartActivityExecutionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartActivityExecutionResponse from the protobuf v3 wire format +func (val *StartActivityExecutionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartActivityExecutionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartActivityExecutionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartActivityExecutionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartActivityExecutionResponse + switch t := that.(type) { + case *StartActivityExecutionResponse: + that1 = t + case StartActivityExecutionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DescribeActivityExecutionRequest to the protobuf v3 wire format +func (val *DescribeActivityExecutionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DescribeActivityExecutionRequest from the protobuf v3 wire format +func (val *DescribeActivityExecutionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DescribeActivityExecutionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DescribeActivityExecutionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DescribeActivityExecutionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DescribeActivityExecutionRequest + switch t := that.(type) { + case *DescribeActivityExecutionRequest: + that1 = t + case DescribeActivityExecutionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DescribeActivityExecutionResponse to the protobuf v3 wire format +func (val *DescribeActivityExecutionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DescribeActivityExecutionResponse from the protobuf v3 wire format +func (val *DescribeActivityExecutionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DescribeActivityExecutionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DescribeActivityExecutionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DescribeActivityExecutionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DescribeActivityExecutionResponse + switch t := that.(type) { + case *DescribeActivityExecutionResponse: + that1 = t + case DescribeActivityExecutionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PollActivityExecutionRequest to the protobuf v3 wire format +func (val *PollActivityExecutionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PollActivityExecutionRequest from the protobuf v3 wire format +func (val *PollActivityExecutionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PollActivityExecutionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PollActivityExecutionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PollActivityExecutionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PollActivityExecutionRequest + switch t := that.(type) { + case *PollActivityExecutionRequest: + that1 = t + case PollActivityExecutionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PollActivityExecutionResponse to the protobuf v3 wire format +func (val *PollActivityExecutionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PollActivityExecutionResponse from the protobuf v3 wire format +func (val *PollActivityExecutionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PollActivityExecutionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PollActivityExecutionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PollActivityExecutionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PollActivityExecutionResponse + switch t := that.(type) { + case *PollActivityExecutionResponse: + that1 = t + case PollActivityExecutionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TerminateActivityExecutionRequest to the protobuf v3 wire format +func (val *TerminateActivityExecutionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TerminateActivityExecutionRequest from the protobuf v3 wire format +func (val *TerminateActivityExecutionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TerminateActivityExecutionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TerminateActivityExecutionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TerminateActivityExecutionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TerminateActivityExecutionRequest + switch t := that.(type) { + case *TerminateActivityExecutionRequest: + that1 = t + case TerminateActivityExecutionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TerminateActivityExecutionResponse to the protobuf v3 wire format +func (val *TerminateActivityExecutionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TerminateActivityExecutionResponse from the protobuf v3 wire format +func (val *TerminateActivityExecutionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TerminateActivityExecutionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TerminateActivityExecutionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TerminateActivityExecutionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TerminateActivityExecutionResponse + switch t := that.(type) { + case *TerminateActivityExecutionResponse: + that1 = t + case TerminateActivityExecutionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RequestCancelActivityExecutionRequest to the protobuf v3 wire format +func (val *RequestCancelActivityExecutionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RequestCancelActivityExecutionRequest from the protobuf v3 wire format +func (val *RequestCancelActivityExecutionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RequestCancelActivityExecutionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RequestCancelActivityExecutionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RequestCancelActivityExecutionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RequestCancelActivityExecutionRequest + switch t := that.(type) { + case *RequestCancelActivityExecutionRequest: + that1 = t + case RequestCancelActivityExecutionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RequestCancelActivityExecutionResponse to the protobuf v3 wire format +func (val *RequestCancelActivityExecutionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RequestCancelActivityExecutionResponse from the protobuf v3 wire format +func (val *RequestCancelActivityExecutionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RequestCancelActivityExecutionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RequestCancelActivityExecutionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RequestCancelActivityExecutionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RequestCancelActivityExecutionResponse + switch t := that.(type) { + case *RequestCancelActivityExecutionResponse: + that1 = t + case RequestCancelActivityExecutionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/activity/gen/activitypb/v1/request_response.pb.go b/chasm/lib/activity/gen/activitypb/v1/request_response.pb.go new file mode 100644 index 0000000000..44d947db73 --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/request_response.pb.go @@ -0,0 +1,590 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/activity/proto/v1/request_response.proto + +package activitypb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + v1 "go.temporal.io/api/workflowservice/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StartActivityExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.StartActivityExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartActivityExecutionRequest) Reset() { + *x = StartActivityExecutionRequest{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartActivityExecutionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartActivityExecutionRequest) ProtoMessage() {} + +func (x *StartActivityExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartActivityExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*StartActivityExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{0} +} + +func (x *StartActivityExecutionRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *StartActivityExecutionRequest) GetFrontendRequest() *v1.StartActivityExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type StartActivityExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.StartActivityExecutionResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartActivityExecutionResponse) Reset() { + *x = StartActivityExecutionResponse{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartActivityExecutionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartActivityExecutionResponse) ProtoMessage() {} + +func (x *StartActivityExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartActivityExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*StartActivityExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{1} +} + +func (x *StartActivityExecutionResponse) GetFrontendResponse() *v1.StartActivityExecutionResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +type DescribeActivityExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.DescribeActivityExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeActivityExecutionRequest) Reset() { + *x = DescribeActivityExecutionRequest{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeActivityExecutionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescribeActivityExecutionRequest) ProtoMessage() {} + +func (x *DescribeActivityExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescribeActivityExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*DescribeActivityExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{2} +} + +func (x *DescribeActivityExecutionRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *DescribeActivityExecutionRequest) GetFrontendRequest() *v1.DescribeActivityExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type DescribeActivityExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.DescribeActivityExecutionResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeActivityExecutionResponse) Reset() { + *x = DescribeActivityExecutionResponse{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeActivityExecutionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescribeActivityExecutionResponse) ProtoMessage() {} + +func (x *DescribeActivityExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescribeActivityExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*DescribeActivityExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{3} +} + +func (x *DescribeActivityExecutionResponse) GetFrontendResponse() *v1.DescribeActivityExecutionResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +type PollActivityExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.PollActivityExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PollActivityExecutionRequest) Reset() { + *x = PollActivityExecutionRequest{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PollActivityExecutionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PollActivityExecutionRequest) ProtoMessage() {} + +func (x *PollActivityExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PollActivityExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*PollActivityExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{4} +} + +func (x *PollActivityExecutionRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *PollActivityExecutionRequest) GetFrontendRequest() *v1.PollActivityExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type PollActivityExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.PollActivityExecutionResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PollActivityExecutionResponse) Reset() { + *x = PollActivityExecutionResponse{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PollActivityExecutionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PollActivityExecutionResponse) ProtoMessage() {} + +func (x *PollActivityExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PollActivityExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*PollActivityExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{5} +} + +func (x *PollActivityExecutionResponse) GetFrontendResponse() *v1.PollActivityExecutionResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +type TerminateActivityExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.TerminateActivityExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TerminateActivityExecutionRequest) Reset() { + *x = TerminateActivityExecutionRequest{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TerminateActivityExecutionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TerminateActivityExecutionRequest) ProtoMessage() {} + +func (x *TerminateActivityExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TerminateActivityExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*TerminateActivityExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{6} +} + +func (x *TerminateActivityExecutionRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *TerminateActivityExecutionRequest) GetFrontendRequest() *v1.TerminateActivityExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type TerminateActivityExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TerminateActivityExecutionResponse) Reset() { + *x = TerminateActivityExecutionResponse{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TerminateActivityExecutionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TerminateActivityExecutionResponse) ProtoMessage() {} + +func (x *TerminateActivityExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TerminateActivityExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*TerminateActivityExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{7} +} + +type RequestCancelActivityExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.RequestCancelActivityExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RequestCancelActivityExecutionRequest) Reset() { + *x = RequestCancelActivityExecutionRequest{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RequestCancelActivityExecutionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestCancelActivityExecutionRequest) ProtoMessage() {} + +func (x *RequestCancelActivityExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestCancelActivityExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*RequestCancelActivityExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{8} +} + +func (x *RequestCancelActivityExecutionRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *RequestCancelActivityExecutionRequest) GetFrontendRequest() *v1.RequestCancelActivityExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type RequestCancelActivityExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RequestCancelActivityExecutionResponse) Reset() { + *x = RequestCancelActivityExecutionResponse{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RequestCancelActivityExecutionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestCancelActivityExecutionResponse) ProtoMessage() {} + +func (x *RequestCancelActivityExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestCancelActivityExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*RequestCancelActivityExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{9} +} + +var File_temporal_server_chasm_lib_activity_proto_v1_request_response_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDesc = "" + + "\n" + + "Btemporal/server/chasm/lib/activity/proto/v1/request_response.proto\x12+temporal.server.chasm.lib.activity.proto.v1\x1a6temporal/api/workflowservice/v1/request_response.proto\"\xad\x01\n" + + "\x1dStartActivityExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12i\n" + + "\x10frontend_request\x18\x02 \x01(\v2>.temporal.api.workflowservice.v1.StartActivityExecutionRequestR\x0ffrontendRequest\"\x8e\x01\n" + + "\x1eStartActivityExecutionResponse\x12l\n" + + "\x11frontend_response\x18\x01 \x01(\v2?.temporal.api.workflowservice.v1.StartActivityExecutionResponseR\x10frontendResponse\"\xb3\x01\n" + + " DescribeActivityExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12l\n" + + "\x10frontend_request\x18\x02 \x01(\v2A.temporal.api.workflowservice.v1.DescribeActivityExecutionRequestR\x0ffrontendRequest\"\x94\x01\n" + + "!DescribeActivityExecutionResponse\x12o\n" + + "\x11frontend_response\x18\x01 \x01(\v2B.temporal.api.workflowservice.v1.DescribeActivityExecutionResponseR\x10frontendResponse\"\xab\x01\n" + + "\x1cPollActivityExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12h\n" + + "\x10frontend_request\x18\x02 \x01(\v2=.temporal.api.workflowservice.v1.PollActivityExecutionRequestR\x0ffrontendRequest\"\x8c\x01\n" + + "\x1dPollActivityExecutionResponse\x12k\n" + + "\x11frontend_response\x18\x01 \x01(\v2>.temporal.api.workflowservice.v1.PollActivityExecutionResponseR\x10frontendResponse\"\xb5\x01\n" + + "!TerminateActivityExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12m\n" 
+ + "\x10frontend_request\x18\x02 \x01(\v2B.temporal.api.workflowservice.v1.TerminateActivityExecutionRequestR\x0ffrontendRequest\"$\n" + + "\"TerminateActivityExecutionResponse\"\xbd\x01\n" + + "%RequestCancelActivityExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12q\n" + + "\x10frontend_request\x18\x02 \x01(\v2F.temporal.api.workflowservice.v1.RequestCancelActivityExecutionRequestR\x0ffrontendRequest\"(\n" + + "&RequestCancelActivityExecutionResponseBDZBgo.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDesc), len(file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescData +} + +var file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_goTypes = []any{ + (*StartActivityExecutionRequest)(nil), // 0: temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionRequest + (*StartActivityExecutionResponse)(nil), // 1: temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionResponse + (*DescribeActivityExecutionRequest)(nil), // 2: temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionRequest + 
(*DescribeActivityExecutionResponse)(nil), // 3: temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionResponse + (*PollActivityExecutionRequest)(nil), // 4: temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionRequest + (*PollActivityExecutionResponse)(nil), // 5: temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionResponse + (*TerminateActivityExecutionRequest)(nil), // 6: temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionRequest + (*TerminateActivityExecutionResponse)(nil), // 7: temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionResponse + (*RequestCancelActivityExecutionRequest)(nil), // 8: temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionRequest + (*RequestCancelActivityExecutionResponse)(nil), // 9: temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionResponse + (*v1.StartActivityExecutionRequest)(nil), // 10: temporal.api.workflowservice.v1.StartActivityExecutionRequest + (*v1.StartActivityExecutionResponse)(nil), // 11: temporal.api.workflowservice.v1.StartActivityExecutionResponse + (*v1.DescribeActivityExecutionRequest)(nil), // 12: temporal.api.workflowservice.v1.DescribeActivityExecutionRequest + (*v1.DescribeActivityExecutionResponse)(nil), // 13: temporal.api.workflowservice.v1.DescribeActivityExecutionResponse + (*v1.PollActivityExecutionRequest)(nil), // 14: temporal.api.workflowservice.v1.PollActivityExecutionRequest + (*v1.PollActivityExecutionResponse)(nil), // 15: temporal.api.workflowservice.v1.PollActivityExecutionResponse + (*v1.TerminateActivityExecutionRequest)(nil), // 16: temporal.api.workflowservice.v1.TerminateActivityExecutionRequest + (*v1.RequestCancelActivityExecutionRequest)(nil), // 17: temporal.api.workflowservice.v1.RequestCancelActivityExecutionRequest +} +var file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_depIdxs = []int32{ + 10, // 0: 
temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.StartActivityExecutionRequest + 11, // 1: temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionResponse.frontend_response:type_name -> temporal.api.workflowservice.v1.StartActivityExecutionResponse + 12, // 2: temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.DescribeActivityExecutionRequest + 13, // 3: temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionResponse.frontend_response:type_name -> temporal.api.workflowservice.v1.DescribeActivityExecutionResponse + 14, // 4: temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.PollActivityExecutionRequest + 15, // 5: temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionResponse.frontend_response:type_name -> temporal.api.workflowservice.v1.PollActivityExecutionResponse + 16, // 6: temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.TerminateActivityExecutionRequest + 17, // 7: temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.RequestCancelActivityExecutionRequest + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_init() } +func file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_init() { + if File_temporal_server_chasm_lib_activity_proto_v1_request_response_proto != nil { + return + 
} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDesc), len(file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDesc)), + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_depIdxs, + MessageInfos: file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_activity_proto_v1_request_response_proto = out.File + file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_goTypes = nil + file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_depIdxs = nil +} diff --git a/chasm/lib/activity/gen/activitypb/v1/service.pb.go b/chasm/lib/activity/gen/activitypb/v1/service.pb.go new file mode 100644 index 0000000000..2b58d3ec64 --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/service.pb.go @@ -0,0 +1,89 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/activity/proto/v1/service.proto + +package activitypb + +import ( + reflect "reflect" + unsafe "unsafe" + + _ "go.temporal.io/server/api/routing/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_temporal_server_chasm_lib_activity_proto_v1_service_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_activity_proto_v1_service_proto_rawDesc = "" + + "\n" + + "9temporal/server/chasm/lib/activity/proto/v1/service.proto\x12+temporal.server.chasm.lib.activity.proto.v1\x1aBtemporal/server/chasm/lib/activity/proto/v1/request_response.proto\x1a.temporal/server/api/routing/v1/extension.proto2\xf3\b\n" + + "\x0fActivityService\x12\xd5\x01\n" + + "\x16StartActivityExecution\x12J.temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionRequest\x1aK.temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionResponse\"\"\x92\xc4\x03\x1e\x1a\x1cfrontend_request.activity_id\x12\xde\x01\n" + + "\x19DescribeActivityExecution\x12M.temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionRequest\x1aN.temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionResponse\"\"\x92\xc4\x03\x1e\x1a\x1cfrontend_request.activity_id\x12\xd2\x01\n" + + "\x15PollActivityExecution\x12I.temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionRequest\x1aJ.temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionResponse\"\"\x92\xc4\x03\x1e\x1a\x1cfrontend_request.activity_id\x12\xe1\x01\n" + + "\x1aTerminateActivityExecution\x12N.temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionRequest\x1aO.temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionResponse\"\"\x92\xc4\x03\x1e\x1a\x1cfrontend_request.activity_id\x12\xed\x01\n" + + "\x1eRequestCancelActivityExecution\x12R.temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionRequest\x1aS.temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionResponse\"\"\x92\xc4\x03\x1e\x1a\x1cfrontend_request.activity_idBDZBgo.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypbb\x06proto3" + +var 
file_temporal_server_chasm_lib_activity_proto_v1_service_proto_goTypes = []any{ + (*StartActivityExecutionRequest)(nil), // 0: temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionRequest + (*DescribeActivityExecutionRequest)(nil), // 1: temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionRequest + (*PollActivityExecutionRequest)(nil), // 2: temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionRequest + (*TerminateActivityExecutionRequest)(nil), // 3: temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionRequest + (*RequestCancelActivityExecutionRequest)(nil), // 4: temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionRequest + (*StartActivityExecutionResponse)(nil), // 5: temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionResponse + (*DescribeActivityExecutionResponse)(nil), // 6: temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionResponse + (*PollActivityExecutionResponse)(nil), // 7: temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionResponse + (*TerminateActivityExecutionResponse)(nil), // 8: temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionResponse + (*RequestCancelActivityExecutionResponse)(nil), // 9: temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionResponse +} +var file_temporal_server_chasm_lib_activity_proto_v1_service_proto_depIdxs = []int32{ + 0, // 0: temporal.server.chasm.lib.activity.proto.v1.ActivityService.StartActivityExecution:input_type -> temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionRequest + 1, // 1: temporal.server.chasm.lib.activity.proto.v1.ActivityService.DescribeActivityExecution:input_type -> temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionRequest + 2, // 2: temporal.server.chasm.lib.activity.proto.v1.ActivityService.PollActivityExecution:input_type -> temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionRequest + 
3, // 3: temporal.server.chasm.lib.activity.proto.v1.ActivityService.TerminateActivityExecution:input_type -> temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionRequest + 4, // 4: temporal.server.chasm.lib.activity.proto.v1.ActivityService.RequestCancelActivityExecution:input_type -> temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionRequest + 5, // 5: temporal.server.chasm.lib.activity.proto.v1.ActivityService.StartActivityExecution:output_type -> temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionResponse + 6, // 6: temporal.server.chasm.lib.activity.proto.v1.ActivityService.DescribeActivityExecution:output_type -> temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionResponse + 7, // 7: temporal.server.chasm.lib.activity.proto.v1.ActivityService.PollActivityExecution:output_type -> temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionResponse + 8, // 8: temporal.server.chasm.lib.activity.proto.v1.ActivityService.TerminateActivityExecution:output_type -> temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionResponse + 9, // 9: temporal.server.chasm.lib.activity.proto.v1.ActivityService.RequestCancelActivityExecution:output_type -> temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionResponse + 5, // [5:10] is the sub-list for method output_type + 0, // [0:5] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_activity_proto_v1_service_proto_init() } +func file_temporal_server_chasm_lib_activity_proto_v1_service_proto_init() { + if File_temporal_server_chasm_lib_activity_proto_v1_service_proto != nil { + return + } + file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_activity_proto_v1_service_proto_rawDesc), len(file_temporal_server_chasm_lib_activity_proto_v1_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_temporal_server_chasm_lib_activity_proto_v1_service_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_activity_proto_v1_service_proto_depIdxs, + }.Build() + File_temporal_server_chasm_lib_activity_proto_v1_service_proto = out.File + file_temporal_server_chasm_lib_activity_proto_v1_service_proto_goTypes = nil + file_temporal_server_chasm_lib_activity_proto_v1_service_proto_depIdxs = nil +} diff --git a/chasm/lib/activity/gen/activitypb/v1/service_client.pb.go b/chasm/lib/activity/gen/activitypb/v1/service_client.pb.go new file mode 100644 index 0000000000..cef8a2b47e --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/service_client.pb.go @@ -0,0 +1,275 @@ +// Code generated by protoc-gen-go-chasm. DO NOT EDIT. +package activitypb + +import ( + "context" + "time" + + "go.temporal.io/server/client/history" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/membership" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/primitives" + "google.golang.org/grpc" +) + +// ActivityServiceLayeredClient is a client for ActivityService. +type ActivityServiceLayeredClient struct { + metricsHandler metrics.Handler + numShards int32 + redirector history.Redirector[ActivityServiceClient] + retryPolicy backoff.RetryPolicy +} + +// NewActivityServiceLayeredClient initializes a new ActivityServiceLayeredClient. 
+func NewActivityServiceLayeredClient( + dc *dynamicconfig.Collection, + rpcFactory common.RPCFactory, + monitor membership.Monitor, + config *config.Persistence, + logger log.Logger, + metricsHandler metrics.Handler, +) (ActivityServiceClient, error) { + resolver, err := monitor.GetResolver(primitives.HistoryService) + if err != nil { + return nil, err + } + connections := history.NewConnectionPool(resolver, rpcFactory, NewActivityServiceClient) + var redirector history.Redirector[ActivityServiceClient] + if dynamicconfig.HistoryClientOwnershipCachingEnabled.Get(dc)() { + redirector = history.NewCachingRedirector( + connections, + resolver, + logger, + dynamicconfig.HistoryClientOwnershipCachingStaleTTL.Get(dc), + ) + } else { + redirector = history.NewBasicRedirector(connections, resolver) + } + return &ActivityServiceLayeredClient{ + metricsHandler: metricsHandler, + redirector: redirector, + numShards: config.NumHistoryShards, + retryPolicy: common.CreateHistoryClientRetryPolicy(), + }, nil +} +func (c *ActivityServiceLayeredClient) callStartActivityExecutionNoRetry( + ctx context.Context, + request *StartActivityExecutionRequest, + opts ...grpc.CallOption, +) (*StartActivityExecutionResponse, error) { + var response *StartActivityExecutionResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("ActivityService.StartActivityExecution"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetActivityId(), c.numShards) + op := func(ctx context.Context, client ActivityServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.StartActivityExecution(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *ActivityServiceLayeredClient) StartActivityExecution( + ctx context.Context, + request *StartActivityExecutionRequest, + opts ...grpc.CallOption, +) (*StartActivityExecutionResponse, error) { + call := func(ctx context.Context) (*StartActivityExecutionResponse, error) { + return c.callStartActivityExecutionNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *ActivityServiceLayeredClient) callDescribeActivityExecutionNoRetry( + ctx context.Context, + request *DescribeActivityExecutionRequest, + opts ...grpc.CallOption, +) (*DescribeActivityExecutionResponse, error) { + var response *DescribeActivityExecutionResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("ActivityService.DescribeActivityExecution"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetActivityId(), c.numShards) + op := func(ctx context.Context, client ActivityServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.DescribeActivityExecution(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *ActivityServiceLayeredClient) DescribeActivityExecution( + ctx context.Context, + request *DescribeActivityExecutionRequest, + opts ...grpc.CallOption, +) (*DescribeActivityExecutionResponse, error) { + call := func(ctx context.Context) (*DescribeActivityExecutionResponse, error) { + return c.callDescribeActivityExecutionNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *ActivityServiceLayeredClient) callPollActivityExecutionNoRetry( + ctx context.Context, + request *PollActivityExecutionRequest, + opts ...grpc.CallOption, +) (*PollActivityExecutionResponse, error) { + var response *PollActivityExecutionResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("ActivityService.PollActivityExecution"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetActivityId(), c.numShards) + op := func(ctx context.Context, client ActivityServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.PollActivityExecution(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *ActivityServiceLayeredClient) PollActivityExecution( + ctx context.Context, + request *PollActivityExecutionRequest, + opts ...grpc.CallOption, +) (*PollActivityExecutionResponse, error) { + call := func(ctx context.Context) (*PollActivityExecutionResponse, error) { + return c.callPollActivityExecutionNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *ActivityServiceLayeredClient) callTerminateActivityExecutionNoRetry( + ctx context.Context, + request *TerminateActivityExecutionRequest, + opts ...grpc.CallOption, +) (*TerminateActivityExecutionResponse, error) { + var response *TerminateActivityExecutionResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("ActivityService.TerminateActivityExecution"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetActivityId(), c.numShards) + op := func(ctx context.Context, client ActivityServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.TerminateActivityExecution(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *ActivityServiceLayeredClient) TerminateActivityExecution( + ctx context.Context, + request *TerminateActivityExecutionRequest, + opts ...grpc.CallOption, +) (*TerminateActivityExecutionResponse, error) { + call := func(ctx context.Context) (*TerminateActivityExecutionResponse, error) { + return c.callTerminateActivityExecutionNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *ActivityServiceLayeredClient) callRequestCancelActivityExecutionNoRetry( + ctx context.Context, + request *RequestCancelActivityExecutionRequest, + opts ...grpc.CallOption, +) (*RequestCancelActivityExecutionResponse, error) { + var response *RequestCancelActivityExecutionResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("ActivityService.RequestCancelActivityExecution"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetActivityId(), c.numShards) + op := func(ctx context.Context, client ActivityServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.RequestCancelActivityExecution(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *ActivityServiceLayeredClient) RequestCancelActivityExecution( + ctx context.Context, + request *RequestCancelActivityExecutionRequest, + opts ...grpc.CallOption, +) (*RequestCancelActivityExecutionResponse, error) { + call := func(ctx context.Context) (*RequestCancelActivityExecutionResponse, error) { + return c.callRequestCancelActivityExecutionNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} diff --git a/chasm/lib/activity/gen/activitypb/v1/service_grpc.pb.go b/chasm/lib/activity/gen/activitypb/v1/service_grpc.pb.go new file mode 100644 index 0000000000..bc90af3872 --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/service_grpc.pb.go @@ -0,0 +1,258 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// plugins: +// - protoc-gen-go-grpc +// - protoc +// source: temporal/server/chasm/lib/activity/proto/v1/service.proto + +package activitypb + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ActivityService_StartActivityExecution_FullMethodName = "/temporal.server.chasm.lib.activity.proto.v1.ActivityService/StartActivityExecution" + ActivityService_DescribeActivityExecution_FullMethodName = "/temporal.server.chasm.lib.activity.proto.v1.ActivityService/DescribeActivityExecution" + ActivityService_PollActivityExecution_FullMethodName = "/temporal.server.chasm.lib.activity.proto.v1.ActivityService/PollActivityExecution" + ActivityService_TerminateActivityExecution_FullMethodName = "/temporal.server.chasm.lib.activity.proto.v1.ActivityService/TerminateActivityExecution" + ActivityService_RequestCancelActivityExecution_FullMethodName = "/temporal.server.chasm.lib.activity.proto.v1.ActivityService/RequestCancelActivityExecution" +) + +// ActivityServiceClient is the client API for ActivityService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type ActivityServiceClient interface { + StartActivityExecution(ctx context.Context, in *StartActivityExecutionRequest, opts ...grpc.CallOption) (*StartActivityExecutionResponse, error) + DescribeActivityExecution(ctx context.Context, in *DescribeActivityExecutionRequest, opts ...grpc.CallOption) (*DescribeActivityExecutionResponse, error) + PollActivityExecution(ctx context.Context, in *PollActivityExecutionRequest, opts ...grpc.CallOption) (*PollActivityExecutionResponse, error) + TerminateActivityExecution(ctx context.Context, in *TerminateActivityExecutionRequest, opts ...grpc.CallOption) (*TerminateActivityExecutionResponse, error) + RequestCancelActivityExecution(ctx context.Context, in *RequestCancelActivityExecutionRequest, opts ...grpc.CallOption) (*RequestCancelActivityExecutionResponse, error) +} + +type activityServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewActivityServiceClient(cc grpc.ClientConnInterface) ActivityServiceClient { + return &activityServiceClient{cc} +} + +func (c *activityServiceClient) StartActivityExecution(ctx context.Context, in *StartActivityExecutionRequest, opts ...grpc.CallOption) (*StartActivityExecutionResponse, error) { + out := new(StartActivityExecutionResponse) + err := c.cc.Invoke(ctx, ActivityService_StartActivityExecution_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *activityServiceClient) DescribeActivityExecution(ctx context.Context, in *DescribeActivityExecutionRequest, opts ...grpc.CallOption) (*DescribeActivityExecutionResponse, error) { + out := new(DescribeActivityExecutionResponse) + err := c.cc.Invoke(ctx, ActivityService_DescribeActivityExecution_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *activityServiceClient) PollActivityExecution(ctx context.Context, in *PollActivityExecutionRequest, opts ...grpc.CallOption) (*PollActivityExecutionResponse, error) { + out := new(PollActivityExecutionResponse) + err := c.cc.Invoke(ctx, ActivityService_PollActivityExecution_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *activityServiceClient) TerminateActivityExecution(ctx context.Context, in *TerminateActivityExecutionRequest, opts ...grpc.CallOption) (*TerminateActivityExecutionResponse, error) { + out := new(TerminateActivityExecutionResponse) + err := c.cc.Invoke(ctx, ActivityService_TerminateActivityExecution_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *activityServiceClient) RequestCancelActivityExecution(ctx context.Context, in *RequestCancelActivityExecutionRequest, opts ...grpc.CallOption) (*RequestCancelActivityExecutionResponse, error) { + out := new(RequestCancelActivityExecutionResponse) + err := c.cc.Invoke(ctx, ActivityService_RequestCancelActivityExecution_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ActivityServiceServer is the server API for ActivityService service. 
+// All implementations must embed UnimplementedActivityServiceServer +// for forward compatibility +type ActivityServiceServer interface { + StartActivityExecution(context.Context, *StartActivityExecutionRequest) (*StartActivityExecutionResponse, error) + DescribeActivityExecution(context.Context, *DescribeActivityExecutionRequest) (*DescribeActivityExecutionResponse, error) + PollActivityExecution(context.Context, *PollActivityExecutionRequest) (*PollActivityExecutionResponse, error) + TerminateActivityExecution(context.Context, *TerminateActivityExecutionRequest) (*TerminateActivityExecutionResponse, error) + RequestCancelActivityExecution(context.Context, *RequestCancelActivityExecutionRequest) (*RequestCancelActivityExecutionResponse, error) + mustEmbedUnimplementedActivityServiceServer() +} + +// UnimplementedActivityServiceServer must be embedded to have forward compatible implementations. +type UnimplementedActivityServiceServer struct { +} + +func (UnimplementedActivityServiceServer) StartActivityExecution(context.Context, *StartActivityExecutionRequest) (*StartActivityExecutionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartActivityExecution not implemented") +} +func (UnimplementedActivityServiceServer) DescribeActivityExecution(context.Context, *DescribeActivityExecutionRequest) (*DescribeActivityExecutionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DescribeActivityExecution not implemented") +} +func (UnimplementedActivityServiceServer) PollActivityExecution(context.Context, *PollActivityExecutionRequest) (*PollActivityExecutionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PollActivityExecution not implemented") +} +func (UnimplementedActivityServiceServer) TerminateActivityExecution(context.Context, *TerminateActivityExecutionRequest) (*TerminateActivityExecutionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method 
TerminateActivityExecution not implemented") +} +func (UnimplementedActivityServiceServer) RequestCancelActivityExecution(context.Context, *RequestCancelActivityExecutionRequest) (*RequestCancelActivityExecutionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RequestCancelActivityExecution not implemented") +} +func (UnimplementedActivityServiceServer) mustEmbedUnimplementedActivityServiceServer() {} + +// UnsafeActivityServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ActivityServiceServer will +// result in compilation errors. +type UnsafeActivityServiceServer interface { + mustEmbedUnimplementedActivityServiceServer() +} + +func RegisterActivityServiceServer(s grpc.ServiceRegistrar, srv ActivityServiceServer) { + s.RegisterService(&ActivityService_ServiceDesc, srv) +} + +func _ActivityService_StartActivityExecution_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartActivityExecutionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ActivityServiceServer).StartActivityExecution(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ActivityService_StartActivityExecution_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ActivityServiceServer).StartActivityExecution(ctx, req.(*StartActivityExecutionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ActivityService_DescribeActivityExecution_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DescribeActivityExecutionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ActivityServiceServer).DescribeActivityExecution(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ActivityService_DescribeActivityExecution_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ActivityServiceServer).DescribeActivityExecution(ctx, req.(*DescribeActivityExecutionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ActivityService_PollActivityExecution_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PollActivityExecutionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ActivityServiceServer).PollActivityExecution(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ActivityService_PollActivityExecution_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ActivityServiceServer).PollActivityExecution(ctx, req.(*PollActivityExecutionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ActivityService_TerminateActivityExecution_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TerminateActivityExecutionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ActivityServiceServer).TerminateActivityExecution(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ActivityService_TerminateActivityExecution_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ActivityServiceServer).TerminateActivityExecution(ctx, req.(*TerminateActivityExecutionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ActivityService_RequestCancelActivityExecution_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestCancelActivityExecutionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ActivityServiceServer).RequestCancelActivityExecution(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ActivityService_RequestCancelActivityExecution_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ActivityServiceServer).RequestCancelActivityExecution(ctx, req.(*RequestCancelActivityExecutionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ActivityService_ServiceDesc is the grpc.ServiceDesc for ActivityService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ActivityService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "temporal.server.chasm.lib.activity.proto.v1.ActivityService", + HandlerType: (*ActivityServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "StartActivityExecution", + Handler: _ActivityService_StartActivityExecution_Handler, + }, + { + MethodName: "DescribeActivityExecution", + Handler: _ActivityService_DescribeActivityExecution_Handler, + }, + { + MethodName: "PollActivityExecution", + Handler: _ActivityService_PollActivityExecution_Handler, + }, + { + MethodName: "TerminateActivityExecution", + Handler: _ActivityService_TerminateActivityExecution_Handler, + }, + { + MethodName: "RequestCancelActivityExecution", + Handler: _ActivityService_RequestCancelActivityExecution_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "temporal/server/chasm/lib/activity/proto/v1/service.proto", +} diff --git a/chasm/lib/activity/gen/activitypb/v1/tasks.go-helpers.pb.go b/chasm/lib/activity/gen/activitypb/v1/tasks.go-helpers.pb.go new file mode 100644 index 
0000000000..d7628a6e9e --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/tasks.go-helpers.pb.go @@ -0,0 +1,191 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package activitypb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type ActivityDispatchTask to the protobuf v3 wire format +func (val *ActivityDispatchTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityDispatchTask from the protobuf v3 wire format +func (val *ActivityDispatchTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityDispatchTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityDispatchTask values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityDispatchTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityDispatchTask + switch t := that.(type) { + case *ActivityDispatchTask: + that1 = t + case ActivityDispatchTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ScheduleToStartTimeoutTask to the protobuf v3 wire format +func (val *ScheduleToStartTimeoutTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ScheduleToStartTimeoutTask from the protobuf v3 wire format +func (val *ScheduleToStartTimeoutTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ScheduleToStartTimeoutTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ScheduleToStartTimeoutTask values are equivalent by recursively +// comparing the message's 
fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ScheduleToStartTimeoutTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ScheduleToStartTimeoutTask + switch t := that.(type) { + case *ScheduleToStartTimeoutTask: + that1 = t + case ScheduleToStartTimeoutTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ScheduleToCloseTimeoutTask to the protobuf v3 wire format +func (val *ScheduleToCloseTimeoutTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ScheduleToCloseTimeoutTask from the protobuf v3 wire format +func (val *ScheduleToCloseTimeoutTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ScheduleToCloseTimeoutTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ScheduleToCloseTimeoutTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ScheduleToCloseTimeoutTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ScheduleToCloseTimeoutTask + switch t := that.(type) { + case *ScheduleToCloseTimeoutTask: + that1 = t + case ScheduleToCloseTimeoutTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StartToCloseTimeoutTask to the protobuf v3 wire format +func (val *StartToCloseTimeoutTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartToCloseTimeoutTask from the protobuf v3 wire format +func (val *StartToCloseTimeoutTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartToCloseTimeoutTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartToCloseTimeoutTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartToCloseTimeoutTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartToCloseTimeoutTask + switch t := that.(type) { + case *StartToCloseTimeoutTask: + that1 = t + case StartToCloseTimeoutTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type HeartbeatTimeoutTask to the protobuf v3 wire format +func (val *HeartbeatTimeoutTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type HeartbeatTimeoutTask from the protobuf v3 wire format +func (val *HeartbeatTimeoutTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *HeartbeatTimeoutTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two HeartbeatTimeoutTask values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *HeartbeatTimeoutTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *HeartbeatTimeoutTask + switch t := that.(type) { + case *HeartbeatTimeoutTask: + that1 = t + case HeartbeatTimeoutTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/activity/gen/activitypb/v1/tasks.pb.go b/chasm/lib/activity/gen/activitypb/v1/tasks.pb.go new file mode 100644 index 0000000000..279d1944be --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/tasks.pb.go @@ -0,0 +1,307 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/activity/proto/v1/tasks.proto + +package activitypb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ActivityDispatchTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The current attempt number for this activity execution. Used for task validation. + Attempt int32 `protobuf:"varint,1,opt,name=attempt,proto3" json:"attempt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityDispatchTask) Reset() { + *x = ActivityDispatchTask{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityDispatchTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityDispatchTask) ProtoMessage() {} + +func (x *ActivityDispatchTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityDispatchTask.ProtoReflect.Descriptor instead. 
+func (*ActivityDispatchTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescGZIP(), []int{0} +} + +func (x *ActivityDispatchTask) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +type ScheduleToStartTimeoutTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The current attempt number for this activity execution. Used for task validation. + Attempt int32 `protobuf:"varint,1,opt,name=attempt,proto3" json:"attempt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ScheduleToStartTimeoutTask) Reset() { + *x = ScheduleToStartTimeoutTask{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ScheduleToStartTimeoutTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScheduleToStartTimeoutTask) ProtoMessage() {} + +func (x *ScheduleToStartTimeoutTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScheduleToStartTimeoutTask.ProtoReflect.Descriptor instead. 
+func (*ScheduleToStartTimeoutTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescGZIP(), []int{1} +} + +func (x *ScheduleToStartTimeoutTask) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +type ScheduleToCloseTimeoutTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ScheduleToCloseTimeoutTask) Reset() { + *x = ScheduleToCloseTimeoutTask{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ScheduleToCloseTimeoutTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScheduleToCloseTimeoutTask) ProtoMessage() {} + +func (x *ScheduleToCloseTimeoutTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScheduleToCloseTimeoutTask.ProtoReflect.Descriptor instead. +func (*ScheduleToCloseTimeoutTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescGZIP(), []int{2} +} + +type StartToCloseTimeoutTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The current attempt number for this activity execution. Used for task validation. 
+ Attempt int32 `protobuf:"varint,1,opt,name=attempt,proto3" json:"attempt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartToCloseTimeoutTask) Reset() { + *x = StartToCloseTimeoutTask{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartToCloseTimeoutTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartToCloseTimeoutTask) ProtoMessage() {} + +func (x *StartToCloseTimeoutTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartToCloseTimeoutTask.ProtoReflect.Descriptor instead. +func (*StartToCloseTimeoutTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescGZIP(), []int{3} +} + +func (x *StartToCloseTimeoutTask) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +// HeartbeatTimeoutTask is a pure task that enforces heartbeat timeouts. +type HeartbeatTimeoutTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The current attempt number for this activity execution. Used for task validation. 
+ Attempt int32 `protobuf:"varint,1,opt,name=attempt,proto3" json:"attempt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HeartbeatTimeoutTask) Reset() { + *x = HeartbeatTimeoutTask{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HeartbeatTimeoutTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeartbeatTimeoutTask) ProtoMessage() {} + +func (x *HeartbeatTimeoutTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeartbeatTimeoutTask.ProtoReflect.Descriptor instead. +func (*HeartbeatTimeoutTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescGZIP(), []int{4} +} + +func (x *HeartbeatTimeoutTask) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +var File_temporal_server_chasm_lib_activity_proto_v1_tasks_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDesc = "" + + "\n" + + "7temporal/server/chasm/lib/activity/proto/v1/tasks.proto\x12+temporal.server.chasm.lib.activity.proto.v1\"0\n" + + "\x14ActivityDispatchTask\x12\x18\n" + + "\aattempt\x18\x01 \x01(\x05R\aattempt\"6\n" + + "\x1aScheduleToStartTimeoutTask\x12\x18\n" + + "\aattempt\x18\x01 \x01(\x05R\aattempt\"\x1c\n" + + "\x1aScheduleToCloseTimeoutTask\"3\n" + + "\x17StartToCloseTimeoutTask\x12\x18\n" + + "\aattempt\x18\x01 \x01(\x05R\aattempt\"0\n" + + "\x14HeartbeatTimeoutTask\x12\x18\n" + + "\aattempt\x18\x01 
\x01(\x05R\aattemptBDZBgo.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDesc), len(file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescData +} + +var file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_goTypes = []any{ + (*ActivityDispatchTask)(nil), // 0: temporal.server.chasm.lib.activity.proto.v1.ActivityDispatchTask + (*ScheduleToStartTimeoutTask)(nil), // 1: temporal.server.chasm.lib.activity.proto.v1.ScheduleToStartTimeoutTask + (*ScheduleToCloseTimeoutTask)(nil), // 2: temporal.server.chasm.lib.activity.proto.v1.ScheduleToCloseTimeoutTask + (*StartToCloseTimeoutTask)(nil), // 3: temporal.server.chasm.lib.activity.proto.v1.StartToCloseTimeoutTask + (*HeartbeatTimeoutTask)(nil), // 4: temporal.server.chasm.lib.activity.proto.v1.HeartbeatTimeoutTask +} +var file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { 
file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_init() } +func file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_init() { + if File_temporal_server_chasm_lib_activity_proto_v1_tasks_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDesc), len(file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDesc)), + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_depIdxs, + MessageInfos: file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_activity_proto_v1_tasks_proto = out.File + file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_goTypes = nil + file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_depIdxs = nil +} diff --git a/chasm/lib/activity/handler.go b/chasm/lib/activity/handler.go new file mode 100644 index 0000000000..e1c8c70e60 --- /dev/null +++ b/chasm/lib/activity/handler.go @@ -0,0 +1,324 @@ +package activity + +import ( + "context" + "errors" + "fmt" + + enumspb "go.temporal.io/api/enums/v1" + errordetailspb "go.temporal.io/api/errordetails/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/common/contextutil" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + businessIDReusePolicyMap = 
map[enumspb.ActivityIdReusePolicy]chasm.BusinessIDReusePolicy{ + enumspb.ACTIVITY_ID_REUSE_POLICY_ALLOW_DUPLICATE: chasm.BusinessIDReusePolicyAllowDuplicate, + enumspb.ACTIVITY_ID_REUSE_POLICY_ALLOW_DUPLICATE_FAILED_ONLY: chasm.BusinessIDReusePolicyAllowDuplicateFailedOnly, + enumspb.ACTIVITY_ID_REUSE_POLICY_REJECT_DUPLICATE: chasm.BusinessIDReusePolicyRejectDuplicate, + } + + businessIDConflictPolicyMap = map[enumspb.ActivityIdConflictPolicy]chasm.BusinessIDConflictPolicy{ + enumspb.ACTIVITY_ID_CONFLICT_POLICY_FAIL: chasm.BusinessIDConflictPolicyFail, + enumspb.ACTIVITY_ID_CONFLICT_POLICY_USE_EXISTING: chasm.BusinessIDConflictPolicyUseExisting, + } +) + +type handler struct { + activitypb.UnimplementedActivityServiceServer + config *Config + logger log.Logger + metricsHandler metrics.Handler + namespaceRegistry namespace.Registry +} + +func newHandler(config *Config, metricsHandler metrics.Handler, logger log.Logger, namespaceRegistry namespace.Registry) *handler { + return &handler{ + config: config, + logger: logger, + metricsHandler: metricsHandler, + namespaceRegistry: namespaceRegistry, + } +} + +// StartActivityExecution schedules an activity execution. Note that while external callers refer to +// this as "start", the start transition in fact happens later, in response to the activity task in +// matching being delivered to a worker poll request. 
+func (h *handler) StartActivityExecution(ctx context.Context, req *activitypb.StartActivityExecutionRequest) (*activitypb.StartActivityExecutionResponse, error) { + frontendReq := req.GetFrontendRequest() + + reusePolicy, ok := businessIDReusePolicyMap[frontendReq.GetIdReusePolicy()] + if !ok { + return nil, serviceerror.NewFailedPrecondition(fmt.Sprintf("unsupported ID reuse policy: %v", frontendReq.GetIdReusePolicy())) + } + + conflictPolicy, ok := businessIDConflictPolicyMap[frontendReq.GetIdConflictPolicy()] + if !ok { + return nil, serviceerror.NewFailedPrecondition(fmt.Sprintf("unsupported ID conflict policy: %v", frontendReq.GetIdConflictPolicy())) + } + + response, key, _, err := chasm.NewExecution( + ctx, + chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: req.GetFrontendRequest().GetActivityId(), + }, + func(mutableContext chasm.MutableContext, request *workflowservice.StartActivityExecutionRequest) (*Activity, *workflowservice.StartActivityExecutionResponse, error) { + newActivity, err := NewStandaloneActivity(mutableContext, request) + if err != nil { + return nil, nil, err + } + + err = TransitionScheduled.Apply(newActivity, mutableContext, nil) + if err != nil { + return nil, nil, err + } + + return newActivity, &workflowservice.StartActivityExecutionResponse{ + Started: true, // TODO for ACTIVITY_ID_CONFLICT_POLICY_USE_EXISTING, we need a know from chasm the execution is existing and to set false + // EagerTask: TODO when supported, need to call the same code that would handle the HandleStarted API + }, nil + }, + req.GetFrontendRequest(), + chasm.WithRequestID(req.GetFrontendRequest().GetRequestId()), + chasm.WithBusinessIDPolicy(reusePolicy, conflictPolicy), + ) + + if err != nil { + var alreadyStartedErr *chasm.ExecutionAlreadyStartedError + if errors.As(err, &alreadyStartedErr) { + details := &errordetailspb.ActivityExecutionAlreadyStartedFailure{ + StartRequestId: alreadyStartedErr.CurrentRequestID, + RunId: 
alreadyStartedErr.CurrentRunID, + } + + errStatus := status.New(codes.AlreadyExists, "activity execution already started") + + errStatusWithDetails, errDetail := status.New(codes.AlreadyExists, "activity execution already started").WithDetails(details) + if errDetail != nil { + h.logger.Error("Failed to add error details to ActivityExecutionAlreadyStartedFailure", + tag.Error(errDetail), tag.ActivityID(frontendReq.GetActivityId())) + return nil, errStatus.Err() + } + + return nil, errStatusWithDetails.Err() + } + + return nil, err + } + + response.RunId = key.RunID + + return &activitypb.StartActivityExecutionResponse{ + FrontendResponse: response, + }, nil +} + +// DescribeActivityExecution queries current activity state, optionally as a long-poll that waits +// for any state change. When used to long-poll, it returns an empty non-error response on context +// deadline expiry, to indicate that the state being waited for was not reached. Callers should +// interpret this as an invitation to resubmit their long-poll request. This response is sent before +// the caller's deadline (see chasm.activity.longPollBuffer) so that it is likely that the caller +// does indeed receive the non-error response. +func (h *handler) DescribeActivityExecution( + ctx context.Context, + req *activitypb.DescribeActivityExecutionRequest, +) (response *activitypb.DescribeActivityExecutionResponse, err error) { + ref := chasm.NewComponentRef[*Activity](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: req.GetFrontendRequest().GetActivityId(), + RunID: req.GetFrontendRequest().GetRunId(), + }) + defer func() { + var notFound *serviceerror.NotFound + if errors.As(err, ¬Found) { + err = serviceerror.NewNotFound("activity execution not found") + } + }() + + // Below, we send an empty non-error response on context deadline expiry. Here we compute a + // deadline that causes us to send that response before the caller's own deadline (see + // chasm.activity.longPollBuffer). 
We also cap the caller's deadline at + // chasm.activity.longPollTimeout. + ns := req.GetFrontendRequest().GetNamespace() + ctx, cancel := contextutil.WithDeadlineBuffer( + ctx, + h.config.LongPollTimeout(ns), + h.config.LongPollBuffer(ns), + ) + defer cancel() + + token := req.GetFrontendRequest().GetLongPollToken() + if len(token) == 0 { + return chasm.ReadComponent(ctx, ref, (*Activity).buildDescribeActivityExecutionResponse, req, nil) + } + response, _, err = chasm.PollComponent(ctx, ref, func( + a *Activity, + ctx chasm.Context, + req *activitypb.DescribeActivityExecutionRequest, + ) (*activitypb.DescribeActivityExecutionResponse, bool, error) { + changed, err := chasm.ExecutionStateChanged(a, ctx, token) + if err != nil { + if errors.Is(err, chasm.ErrMalformedComponentRef) { + return nil, false, serviceerror.NewInvalidArgument("invalid long poll token") + } + if errors.Is(err, chasm.ErrInvalidComponentRef) { + return nil, false, serviceerror.NewInvalidArgument("long poll token does not match execution") + } + return nil, false, err + } + if changed { + response, err := a.buildDescribeActivityExecutionResponse(ctx, req) + return response, true, err + } + return nil, false, nil + }, req) + + if err != nil && ctx.Err() != nil { + // Send empty non-error response on deadline expiry: caller should continue long-polling. + return &activitypb.DescribeActivityExecutionResponse{ + FrontendResponse: &workflowservice.DescribeActivityExecutionResponse{}, + }, nil + } + return response, err +} + +// PollActivityExecution long-polls for activity outcome. It returns an empty non-error response on +// context deadline expiry, to indicate that the state being waited for was not reached. Callers +// should interpret this as an invitation to resubmit their long-poll request. This response is sent +// before the caller's deadline (see chasm.activity.longPollBuffer) so that it is likely that the +// caller does indeed receive the non-error response. 
+func (h *handler) PollActivityExecution( + ctx context.Context, + req *activitypb.PollActivityExecutionRequest, +) (response *activitypb.PollActivityExecutionResponse, err error) { + ref := chasm.NewComponentRef[*Activity](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: req.GetFrontendRequest().GetActivityId(), + RunID: req.GetFrontendRequest().GetRunId(), + }) + defer func() { + var notFound *serviceerror.NotFound + if errors.As(err, ¬Found) { + err = serviceerror.NewNotFound("activity execution not found") + } + }() + + // Below, we send an empty non-error response on context deadline expiry. Here we compute a + // deadline that causes us to send that response before the caller's own deadline (see + // chasm.activity.longPollBuffer). We also cap the caller's deadline at + // chasm.activity.longPollTimeout. + ns := req.GetFrontendRequest().GetNamespace() + ctx, cancel := contextutil.WithDeadlineBuffer( + ctx, + h.config.LongPollTimeout(ns), + h.config.LongPollBuffer(ns), + ) + defer cancel() + + response, _, err = chasm.PollComponent(ctx, ref, func( + a *Activity, + ctx chasm.Context, + req *activitypb.PollActivityExecutionRequest, + ) (*activitypb.PollActivityExecutionResponse, bool, error) { + if a.LifecycleState(ctx) != chasm.LifecycleStateRunning { + response, err := a.buildPollActivityExecutionResponse(ctx) + return response, true, err + } + return nil, false, nil + }, req) + + if err != nil && ctx.Err() != nil { + // Send an empty non-error response as an invitation to resubmit the long-poll. + return &activitypb.PollActivityExecutionResponse{ + FrontendResponse: &workflowservice.PollActivityExecutionResponse{}, + }, nil + } + return response, err +} + +// TerminateActivityExecution terminates an activity execution. 
+func (h *handler) TerminateActivityExecution( + ctx context.Context, + req *activitypb.TerminateActivityExecutionRequest, +) (response *activitypb.TerminateActivityExecutionResponse, err error) { + frontendReq := req.GetFrontendRequest() + + ref := chasm.NewComponentRef[*Activity](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: frontendReq.GetActivityId(), + RunID: frontendReq.GetRunId(), + }) + + namespaceName, err := h.namespaceRegistry.GetNamespaceName(namespace.ID(req.GetNamespaceId())) + if err != nil { + return nil, err + } + + response, _, err = chasm.UpdateComponent( + ctx, + ref, + (*Activity).handleTerminated, + terminateEvent{ + request: req, + MetricsHandlerBuilderParams: MetricsHandlerBuilderParams{ + Handler: h.metricsHandler, + NamespaceName: namespaceName.String(), + BreakdownMetricsByTaskQueue: h.config.BreakdownMetricsByTaskQueue, + }, + }, + ) + + if err != nil { + return nil, err + } + + return response, nil +} + +// RequestCancelActivityExecution requests cancellation of an activity execution. 
+func (h *handler) RequestCancelActivityExecution( + ctx context.Context, + req *activitypb.RequestCancelActivityExecutionRequest, +) (response *activitypb.RequestCancelActivityExecutionResponse, err error) { + frontendReq := req.GetFrontendRequest() + + ref := chasm.NewComponentRef[*Activity](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: frontendReq.GetActivityId(), + RunID: frontendReq.GetRunId(), + }) + + namespaceName, err := h.namespaceRegistry.GetNamespaceName(namespace.ID(req.GetNamespaceId())) + if err != nil { + return nil, err + } + + response, _, err = chasm.UpdateComponent( + ctx, + ref, + (*Activity).handleCancellationRequested, + requestCancelEvent{ + request: req, + MetricsHandlerBuilderParams: MetricsHandlerBuilderParams{ + Handler: h.metricsHandler, + NamespaceName: namespaceName.String(), + BreakdownMetricsByTaskQueue: h.config.BreakdownMetricsByTaskQueue, + }, + }, + ) + if err != nil { + return nil, err + } + + return response, nil +} diff --git a/chasm/lib/activity/library.go b/chasm/lib/activity/library.go new file mode 100644 index 0000000000..59ad0ac7d1 --- /dev/null +++ b/chasm/lib/activity/library.go @@ -0,0 +1,94 @@ +package activity + +import ( + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "google.golang.org/grpc" +) + +type componentOnlyLibrary struct { + chasm.UnimplementedLibrary +} + +func newComponentOnlyLibrary() *componentOnlyLibrary { + return &componentOnlyLibrary{} +} + +func (l *componentOnlyLibrary) Name() string { + return "activity" +} + +func (l *componentOnlyLibrary) Components() []*chasm.RegistrableComponent { + return []*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*Activity]("activity", + chasm.WithSearchAttributes( + TypeSearchAttribute, + StatusSearchAttribute, + TaskQueueSearchAttribute, + ), + ), + } +} + +type library struct { + componentOnlyLibrary + + handler *handler + activityDispatchTaskExecutor 
*activityDispatchTaskExecutor + scheduleToStartTimeoutTaskExecutor *scheduleToStartTimeoutTaskExecutor + scheduleToCloseTimeoutTaskExecutor *scheduleToCloseTimeoutTaskExecutor + startToCloseTimeoutTaskExecutor *startToCloseTimeoutTaskExecutor + heartbeatTimeoutTaskExecutor *heartbeatTimeoutTaskExecutor +} + +func newLibrary( + handler *handler, + activityDispatchTaskExecutor *activityDispatchTaskExecutor, + scheduleToStartTimeoutTaskExecutor *scheduleToStartTimeoutTaskExecutor, + scheduleToCloseTimeoutTaskExecutor *scheduleToCloseTimeoutTaskExecutor, + startToCloseTimeoutTaskExecutor *startToCloseTimeoutTaskExecutor, + heartbeatTimeoutTaskExecutor *heartbeatTimeoutTaskExecutor, +) *library { + return &library{ + handler: handler, + activityDispatchTaskExecutor: activityDispatchTaskExecutor, + scheduleToStartTimeoutTaskExecutor: scheduleToStartTimeoutTaskExecutor, + scheduleToCloseTimeoutTaskExecutor: scheduleToCloseTimeoutTaskExecutor, + startToCloseTimeoutTaskExecutor: startToCloseTimeoutTaskExecutor, + heartbeatTimeoutTaskExecutor: heartbeatTimeoutTaskExecutor, + } +} + +func (l *library) RegisterServices(server *grpc.Server) { + server.RegisterService(&activitypb.ActivityService_ServiceDesc, l.handler) +} + +func (l *library) Tasks() []*chasm.RegistrableTask { + return []*chasm.RegistrableTask{ + chasm.NewRegistrableSideEffectTask( + "dispatch", + l.activityDispatchTaskExecutor, + l.activityDispatchTaskExecutor, + ), + chasm.NewRegistrablePureTask( + "scheduleToStartTimer", + l.scheduleToStartTimeoutTaskExecutor, + l.scheduleToStartTimeoutTaskExecutor, + ), + chasm.NewRegistrablePureTask( + "scheduleToCloseTimer", + l.scheduleToCloseTimeoutTaskExecutor, + l.scheduleToCloseTimeoutTaskExecutor, + ), + chasm.NewRegistrablePureTask( + "startToCloseTimer", + l.startToCloseTimeoutTaskExecutor, + l.startToCloseTimeoutTaskExecutor, + ), + chasm.NewRegistrablePureTask( + "heartbeatTimer", + l.heartbeatTimeoutTaskExecutor, + l.heartbeatTimeoutTaskExecutor, + ), + } +} 
diff --git a/chasm/lib/activity/proto/v1/activity_state.proto b/chasm/lib/activity/proto/v1/activity_state.proto new file mode 100644 index 0000000000..02ae9379ab --- /dev/null +++ b/chasm/lib/activity/proto/v1/activity_state.proto @@ -0,0 +1,186 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.activity.proto.v1; + +option go_package = "go.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypb"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "temporal/api/common/v1/message.proto"; +import "temporal/api/deployment/v1/message.proto"; +import "temporal/api/failure/v1/message.proto"; +import "temporal/api/sdk/v1/user_metadata.proto"; +import "temporal/api/taskqueue/v1/message.proto"; + +enum ActivityExecutionStatus { + ACTIVITY_EXECUTION_STATUS_UNSPECIFIED = 0; + // The activity is not in a terminal status. This does not necessarily mean that there is a currently running + // attempt. The activity may be backing off between attempts or waiting for a worker to pick it up. + ACTIVITY_EXECUTION_STATUS_SCHEDULED = 1; + ACTIVITY_EXECUTION_STATUS_STARTED = 2; + ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED = 3; + // Left as placeholders for when we add pause. + // // PAUSED means activity is paused on the server, and is not running in the worker + // ACTIVITY_EXECUTION_STATUS_PAUSED = 4; + // // PAUSE_REQUESTED means activity is currently running on the worker, but paused on the server + // ACTIVITY_EXECUTION_STATUS_PAUSE_REQUESTED = 5; + // The activity completed successfully. + ACTIVITY_EXECUTION_STATUS_COMPLETED = 4; + // The activity completed with failure. + ACTIVITY_EXECUTION_STATUS_FAILED = 5; + // The activity completed as canceled. + // Requesting to cancel an activity does not automatically transition the activity to canceled status. If the + // activity has a currently running attempt, the activity will only transition to canceled status if the current + // attempt is unsuccessful. 
+ // TODO: Clarify what happens if there are no more allowed retries after the current attempt. + ACTIVITY_EXECUTION_STATUS_CANCELED = 6; + // The activity was terminated. Termination does not reach the worker and the activity code cannot react to it. + // A terminated activity may have a running attempt and will be requested to be canceled by the server when it + // heartbeats. + ACTIVITY_EXECUTION_STATUS_TERMINATED = 7; + // The activity has timed out by reaching the specified schedule-to-start or schedule-to-close timeouts. + // TODO: Clarify if there are other conditions where the activity can end up in timed out status. + ACTIVITY_EXECUTION_STATUS_TIMED_OUT = 8; +} + +message ActivityState { + // === Should be covered by CHASM + // Unique identifier of this activity within its namespace along with run ID (below). + // string activity_id = 1; + // string run_id = 2; + // Incremented each time the activity's state is mutated in persistence. + // int64 state_transition_count = 22; TODO: we would need this if we had conflict resolution functionality. + // === END Should be covered by CHASM + + // The type of the activity, a string that maps to a registered activity on a worker. + temporal.api.common.v1.ActivityType activity_type = 1; + + temporal.api.taskqueue.v1.TaskQueue task_queue = 2; + + // Indicates how long the caller is willing to wait for an activity completion. Limits how long + // retries will be attempted. Either this or `start_to_close_timeout` must be specified. + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration schedule_to_close_timeout = 3; + // Limits time an activity task can stay in a task queue before a worker picks it up. This + // timeout is always non-retryable, as all a retry would achieve is to put it back into the same + // queue. Defaults to `schedule_to_close_timeout` or workflow execution timeout if not + // specified. 
+ // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration schedule_to_start_timeout = 4; + // Maximum time an activity is allowed to execute after being picked up by a worker. This + // timeout is always retryable. Either this or `schedule_to_close_timeout` must be + // specified. + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration start_to_close_timeout = 5; + // Maximum permitted time between successful worker heartbeats. + google.protobuf.Duration heartbeat_timeout = 6; + // The retry policy for the activity. Will never exceed `schedule_to_close_timeout`. + temporal.api.common.v1.RetryPolicy retry_policy = 7; + + // All of the possible activity statuses (covers both the public ActivityExecutionStatus and PendingActivityState). + // TODO: consider moving this into ActivityAttemptState and renaming that message. This could save mutating two + // components on each attempt transition. + ActivityExecutionStatus status = 8; + + // Time the activity was originally scheduled via a StartActivityExecution request. + google.protobuf.Timestamp schedule_time = 9; + + // Priority metadata. + temporal.api.common.v1.Priority priority = 10; + + // Set if activity cancellation was requested. + ActivityCancelState cancel_state = 11; + + // Set if the activity was terminated + ActivityTerminateState terminate_state = 12; +} + +message ActivityCancelState { + string request_id = 1; + google.protobuf.Timestamp request_time = 2; + string identity = 3; + string reason = 4; +} + +message ActivityTerminateState { + string request_id = 1; +} + +message ActivityAttemptState { + // The attempt this activity is currently on. + // Incremented each time a new attempt is started. + // TODO: Confirm if this is on scheduled or started. 
+ int32 count = 1; + + // Time from the last attempt failure to the next activity retry. + // If the activity is currently running, this represents the next retry interval in case the attempt fails. + // If the activity is currently backing off between attempts, this represents the current retry interval. + // If there is no next retry allowed, this field will be null. + // This interval is typically calculated from the specified retry policy, but may be modified if an activity fails + // with a retryable application failure specifying a retry delay. + google.protobuf.Duration current_retry_interval = 2; + + // Time the last attempt was started. + google.protobuf.Timestamp started_time = 3; + + // The time when the last activity attempt completed. If activity has not been completed yet, it will be null. + google.protobuf.Timestamp complete_time = 4; + + message LastFailureDetails { + // The last time the activity attempt failed. + google.protobuf.Timestamp time = 1; + + // Failure details from the last failed attempt. + temporal.api.failure.v1.Failure failure = 2; + } + + // Details about the last failure. This will only be updated when an activity attempt fails, + // including start-to-close timeout. Activity success, termination, schedule-to-start and schedule-to-close timeouts + // will not reset it. + LastFailureDetails last_failure_details = 5; + + string last_worker_identity = 7; + + // The Worker Deployment Version this activity was dispatched to most recently. + // If nil, the activity has not yet been dispatched or was last dispatched to an unversioned worker. + temporal.api.deployment.v1.WorkerDeploymentVersion last_deployment_version = 8; +} + +message ActivityHeartbeatState { + // Details provided in the last recorded activity heartbeat. + temporal.api.common.v1.Payloads details = 1; + // Time the last heartbeat was recorded. 
+ google.protobuf.Timestamp recorded_time = 2; +} + +message ActivityRequestData { + // Serialized activity input, passed as arguments to the activity function. + temporal.api.common.v1.Payloads input = 1; + temporal.api.common.v1.Header header = 2; + + // Metadata for use by user interfaces to display the fixed as-of-start summary and details of the activity. + temporal.api.sdk.v1.UserMetadata user_metadata = 3; +} + +message ActivityOutcome { + message Successful { + temporal.api.common.v1.Payloads output = 1; + } + + message Failed { + // Only filled on schedule-to-start timeouts, schedule-to-close timeouts or terminations. All other attempt + // failures will be recorded in ActivityAttemptState.last_failure_details. + temporal.api.failure.v1.Failure failure = 1; + } + + oneof variant { + Successful successful = 1; + Failed failed = 2; + } +} diff --git a/chasm/lib/activity/proto/v1/request_response.proto b/chasm/lib/activity/proto/v1/request_response.proto new file mode 100644 index 0000000000..835071815b --- /dev/null +++ b/chasm/lib/activity/proto/v1/request_response.proto @@ -0,0 +1,55 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.activity.proto.v1; + +option go_package = "go.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypb"; + +import "temporal/api/workflowservice/v1/request_response.proto"; + +message StartActivityExecutionRequest { + string namespace_id = 1; + + temporal.api.workflowservice.v1.StartActivityExecutionRequest frontend_request = 2; +} + +message StartActivityExecutionResponse { + temporal.api.workflowservice.v1.StartActivityExecutionResponse frontend_response = 1; +} + +message DescribeActivityExecutionRequest { + string namespace_id = 1; + + temporal.api.workflowservice.v1.DescribeActivityExecutionRequest frontend_request = 2; +} + +message DescribeActivityExecutionResponse { + temporal.api.workflowservice.v1.DescribeActivityExecutionResponse frontend_response = 1; +} + +message PollActivityExecutionRequest { + 
string namespace_id = 1; + + temporal.api.workflowservice.v1.PollActivityExecutionRequest frontend_request = 2; +} + +message PollActivityExecutionResponse { + temporal.api.workflowservice.v1.PollActivityExecutionResponse frontend_response = 1; +} + +message TerminateActivityExecutionRequest { + string namespace_id = 1; + + temporal.api.workflowservice.v1.TerminateActivityExecutionRequest frontend_request = 2; +} + +message TerminateActivityExecutionResponse { +} + +message RequestCancelActivityExecutionRequest { + string namespace_id = 1; + + temporal.api.workflowservice.v1.RequestCancelActivityExecutionRequest frontend_request = 2; +} + +message RequestCancelActivityExecutionResponse { +} \ No newline at end of file diff --git a/chasm/lib/activity/proto/v1/service.proto b/chasm/lib/activity/proto/v1/service.proto new file mode 100644 index 0000000000..f669b065de --- /dev/null +++ b/chasm/lib/activity/proto/v1/service.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.activity.proto.v1; + +option go_package = "go.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypb"; + +import "chasm/lib/activity/proto/v1/request_response.proto"; +import "temporal/server/api/routing/v1/extension.proto"; + +service ActivityService { + rpc StartActivityExecution(StartActivityExecutionRequest) returns (StartActivityExecutionResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.activity_id"; + } + + rpc DescribeActivityExecution(DescribeActivityExecutionRequest) returns (DescribeActivityExecutionResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.activity_id"; + } + + rpc PollActivityExecution(PollActivityExecutionRequest) returns (PollActivityExecutionResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.activity_id"; + } + + rpc TerminateActivityExecution(TerminateActivityExecutionRequest) returns 
(TerminateActivityExecutionResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.activity_id"; + } + + rpc RequestCancelActivityExecution(RequestCancelActivityExecutionRequest) returns (RequestCancelActivityExecutionResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.activity_id"; + } +} diff --git a/chasm/lib/activity/proto/v1/tasks.proto b/chasm/lib/activity/proto/v1/tasks.proto new file mode 100644 index 0000000000..36185c6d26 --- /dev/null +++ b/chasm/lib/activity/proto/v1/tasks.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.activity.proto.v1; + +option go_package = "go.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypb"; + +message ActivityDispatchTask { + // The current attempt number for this activity execution. Used for task validation. + int32 attempt = 1; +} + +message ScheduleToStartTimeoutTask { + // The current attempt number for this activity execution. Used for task validation. + int32 attempt = 1; +} + +message ScheduleToCloseTimeoutTask { +} + +message StartToCloseTimeoutTask { + // The current attempt number for this activity execution. Used for task validation. + int32 attempt = 1; +} + +// HeartbeatTimeoutTask is a pure task that enforces heartbeat timeouts. +message HeartbeatTimeoutTask { + // The current attempt number for this activity execution. Used for task validation. 
+ int32 attempt = 1; +} \ No newline at end of file diff --git a/chasm/lib/activity/statemachine.go b/chasm/lib/activity/statemachine.go new file mode 100644 index 0000000000..b54a953027 --- /dev/null +++ b/chasm/lib/activity/statemachine.go @@ -0,0 +1,372 @@ +package activity + +import ( + "fmt" + "time" + + commonpb "go.temporal.io/api/common/v1" + deploymentpb "go.temporal.io/api/deployment/v1" + enumspb "go.temporal.io/api/enums/v1" + failurepb "go.temporal.io/api/failure/v1" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/common/metrics" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// Ensure that Activity implements chasm.StateMachine interface +var _ chasm.StateMachine[activitypb.ActivityExecutionStatus] = (*Activity)(nil) + +// StateMachineState returns the current status of the activity. +func (a *Activity) StateMachineState() activitypb.ActivityExecutionStatus { + if a.ActivityState == nil { + return activitypb.ACTIVITY_EXECUTION_STATUS_UNSPECIFIED + } + return a.Status +} + +// SetStateMachineState sets the status of the activity. +func (a *Activity) SetStateMachineState(state activitypb.ActivityExecutionStatus) { + a.Status = state +} + +// TransitionScheduled transitions to Scheduled status. This is only called on the initial +// scheduling of the activity. 
+var TransitionScheduled = chasm.NewTransition( + []activitypb.ActivityExecutionStatus{ + activitypb.ACTIVITY_EXECUTION_STATUS_UNSPECIFIED, + }, + activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + func(a *Activity, ctx chasm.MutableContext, _ any) error { + attempt := a.LastAttempt.Get(ctx) + currentTime := ctx.Now(a) + attempt.Count += 1 + + if timeout := a.GetScheduleToStartTimeout().AsDuration(); timeout > 0 { + ctx.AddTask( + a, + chasm.TaskAttributes{ + ScheduledTime: currentTime.Add(timeout), + }, + &activitypb.ScheduleToStartTimeoutTask{ + Attempt: attempt.GetCount(), + }) + } + + if timeout := a.GetScheduleToCloseTimeout().AsDuration(); timeout > 0 { + ctx.AddTask( + a, + chasm.TaskAttributes{ + ScheduledTime: currentTime.Add(timeout), + }, + &activitypb.ScheduleToCloseTimeoutTask{}) + } + + ctx.AddTask( + a, + chasm.TaskAttributes{}, + &activitypb.ActivityDispatchTask{ + Attempt: attempt.GetCount(), + }) + + return nil + }, +) + +type rescheduleEvent struct { + retryInterval time.Duration + failure *failurepb.Failure + timeoutType enumspb.TimeoutType +} + +// TransitionRescheduled transitions to Scheduled from Started, which happens on retries. The event +// to pass in is the failure to be recorded from the previously failed attempt. 
+var TransitionRescheduled = chasm.NewTransition( + []activitypb.ActivityExecutionStatus{ + activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, // For retries the activity will be in started status + }, + activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + func(a *Activity, ctx chasm.MutableContext, event rescheduleEvent) error { + attempt := a.LastAttempt.Get(ctx) + currentTime := ctx.Now(a) + attempt.Count += 1 + + err := a.recordFailedAttempt(ctx, event.retryInterval, event.failure, currentTime, false) + if err != nil { + return err + } + + if timeout := a.GetScheduleToStartTimeout().AsDuration(); timeout > 0 { + ctx.AddTask( + a, + chasm.TaskAttributes{ + ScheduledTime: currentTime.Add(timeout).Add(event.retryInterval), + }, + &activitypb.ScheduleToStartTimeoutTask{ + Attempt: attempt.GetCount(), + }) + } + + ctx.AddTask( + a, + chasm.TaskAttributes{ + ScheduledTime: currentTime.Add(event.retryInterval), + }, + &activitypb.ActivityDispatchTask{ + Attempt: attempt.GetCount(), + }) + + return nil + }, +) + +// TransitionStarted transitions to Started status. 
+var TransitionStarted = chasm.NewTransition( + []activitypb.ActivityExecutionStatus{ + activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + }, + activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + func(a *Activity, ctx chasm.MutableContext, request *historyservice.RecordActivityTaskStartedRequest) error { + attempt := a.LastAttempt.Get(ctx) + attempt.StartedTime = timestamppb.New(ctx.Now(a)) + attempt.LastWorkerIdentity = request.GetPollRequest().GetIdentity() + if versionDirective := request.GetVersionDirective().GetDeploymentVersion(); versionDirective != nil { + attempt.LastDeploymentVersion = &deploymentpb.WorkerDeploymentVersion{ + BuildId: versionDirective.GetBuildId(), + DeploymentName: versionDirective.GetDeploymentName(), + } + } + startTime := attempt.GetStartedTime().AsTime() + ctx.AddTask( + a, + chasm.TaskAttributes{ + ScheduledTime: startTime.Add(a.GetStartToCloseTimeout().AsDuration()), + }, + &activitypb.StartToCloseTimeoutTask{ + Attempt: a.LastAttempt.Get(ctx).GetCount(), + }) + + if heartbeatTimeout := a.GetHeartbeatTimeout().AsDuration(); heartbeatTimeout > 0 { + ctx.AddTask( + a, + chasm.TaskAttributes{ + ScheduledTime: startTime.Add(heartbeatTimeout), + }, + &activitypb.HeartbeatTimeoutTask{ + Attempt: attempt.GetCount(), + }) + } + + return nil + }, +) + +type completeEvent struct { + req *historyservice.RespondActivityTaskCompletedRequest + metricsHandler metrics.Handler +} + +// TransitionCompleted transitions to Completed status. 
+var TransitionCompleted = chasm.NewTransition( + []activitypb.ActivityExecutionStatus{ + activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, + }, + activitypb.ACTIVITY_EXECUTION_STATUS_COMPLETED, + func(a *Activity, ctx chasm.MutableContext, event completeEvent) error { + return a.StoreOrSelf(ctx).RecordCompleted(ctx, func(ctx chasm.MutableContext) error { + req := event.req.GetCompleteRequest() + + attempt := a.LastAttempt.Get(ctx) + attempt.CompleteTime = timestamppb.New(ctx.Now(a)) + attempt.LastWorkerIdentity = req.GetIdentity() + outcome := a.Outcome.Get(ctx) + outcome.Variant = &activitypb.ActivityOutcome_Successful_{ + Successful: &activitypb.ActivityOutcome_Successful{ + Output: req.GetResult(), + }, + } + + a.emitOnCompletedMetrics(ctx, event.metricsHandler) + + return nil + }) + }, +) + +type failedEvent struct { + req *historyservice.RespondActivityTaskFailedRequest + metricsHandler metrics.Handler +} + +// TransitionFailed transitions to Failed status. 
+var TransitionFailed = chasm.NewTransition( + []activitypb.ActivityExecutionStatus{ + activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, + }, + activitypb.ACTIVITY_EXECUTION_STATUS_FAILED, + func(a *Activity, ctx chasm.MutableContext, event failedEvent) error { + return a.StoreOrSelf(ctx).RecordCompleted(ctx, func(ctx chasm.MutableContext) error { + req := event.req.GetFailedRequest() + + if details := req.GetLastHeartbeatDetails(); details != nil { + heartbeat := a.getOrCreateLastHeartbeat(ctx) + heartbeat.Details = details + heartbeat.RecordedTime = timestamppb.New(ctx.Now(a)) + } + attempt := a.LastAttempt.Get(ctx) + attempt.LastWorkerIdentity = req.GetIdentity() + + if err := a.recordFailedAttempt(ctx, 0, req.GetFailure(), ctx.Now(a), true); err != nil { + return err + } + + a.emitOnFailedMetrics(ctx, event.metricsHandler) + + return nil + }) + }, +) + +// TransitionTerminated transitions to Terminated status. +var TransitionTerminated = chasm.NewTransition( + []activitypb.ActivityExecutionStatus{ + activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, + }, + activitypb.ACTIVITY_EXECUTION_STATUS_TERMINATED, + func(a *Activity, ctx chasm.MutableContext, event terminateEvent) error { + return a.StoreOrSelf(ctx).RecordCompleted(ctx, func(ctx chasm.MutableContext) error { + req := event.request.GetFrontendRequest() + + a.TerminateState = &activitypb.ActivityTerminateState{ + RequestId: req.GetRequestId(), + } + outcome := a.Outcome.Get(ctx) + failure := &failurepb.Failure{ + // TODO(saa-preview): if the reason isn't provided, perhaps set a default reason. 
Also see if we should prefix with "Activity terminated: " + Message: req.GetReason(), + FailureInfo: &failurepb.Failure_TerminatedFailureInfo{}, + } + outcome.Variant = &activitypb.ActivityOutcome_Failed_{ + Failed: &activitypb.ActivityOutcome_Failed{ + Failure: failure, + }, + } + + metricsHandler := enrichMetricsHandler( + a, + event.MetricsHandlerBuilderParams.Handler, + event.MetricsHandlerBuilderParams.NamespaceName, + metrics.ActivityTerminatedScope, + event.MetricsHandlerBuilderParams.BreakdownMetricsByTaskQueue) + + metrics.ActivityTerminate.With(metricsHandler).Record(1) + + return nil + }) + }, +) + +// TransitionCancelRequested transitions to CancelRequested status. +var TransitionCancelRequested = chasm.NewTransition( + []activitypb.ActivityExecutionStatus{ + activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, + }, + activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, + func(a *Activity, ctx chasm.MutableContext, req *workflowservice.RequestCancelActivityExecutionRequest) error { + a.CancelState = &activitypb.ActivityCancelState{ + Identity: req.GetIdentity(), + RequestId: req.GetRequestId(), + Reason: req.GetReason(), + RequestTime: timestamppb.New(ctx.Now(a)), + } + + return nil + }, +) + +type cancelEvent struct { + details *commonpb.Payloads + handler metrics.Handler + fromStatus activitypb.ActivityExecutionStatus +} + +// TransitionCanceled transitions to Canceled status. 
+var TransitionCanceled = chasm.NewTransition( + []activitypb.ActivityExecutionStatus{ + activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, + }, + activitypb.ACTIVITY_EXECUTION_STATUS_CANCELED, + func(a *Activity, ctx chasm.MutableContext, event cancelEvent) error { + return a.StoreOrSelf(ctx).RecordCompleted(ctx, func(ctx chasm.MutableContext) error { + outcome := a.Outcome.Get(ctx) + failure := &failurepb.Failure{ + Message: "Activity canceled", + FailureInfo: &failurepb.Failure_CanceledFailureInfo{ + CanceledFailureInfo: &failurepb.CanceledFailureInfo{ + Details: event.details, + }, + }, + } + outcome.Variant = &activitypb.ActivityOutcome_Failed_{ + Failed: &activitypb.ActivityOutcome_Failed{ + Failure: failure, + }, + } + + a.emitOnCanceledMetrics(ctx, event.handler, event.fromStatus) + + return nil + }) + }, +) + +type timeoutEvent struct { + metricsHandler metrics.Handler + timeoutType enumspb.TimeoutType + fromStatus activitypb.ActivityExecutionStatus +} + +// TransitionTimedOut transitions to TimedOut status. 
+var TransitionTimedOut = chasm.NewTransition( + []activitypb.ActivityExecutionStatus{ + activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, + }, + activitypb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT, + func(a *Activity, ctx chasm.MutableContext, event timeoutEvent) error { + timeoutType := event.timeoutType + + return a.StoreOrSelf(ctx).RecordCompleted(ctx, func(ctx chasm.MutableContext) error { + var err error + switch timeoutType { + case enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START, + enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE: + err = a.recordScheduleToStartOrCloseTimeoutFailure(ctx, timeoutType) + case enumspb.TIMEOUT_TYPE_START_TO_CLOSE: + failure := createStartToCloseTimeoutFailure() + err = a.recordFailedAttempt(ctx, 0, failure, ctx.Now(a), true) + case enumspb.TIMEOUT_TYPE_HEARTBEAT: + failure := createHeartbeatTimeoutFailure() + err = a.recordFailedAttempt(ctx, 0, failure, ctx.Now(a), true) + default: + err = fmt.Errorf("unhandled activity timeout: %v", timeoutType) + } + if err != nil { + return err + } + + a.emitOnTimedOutMetrics(ctx, event.metricsHandler, timeoutType, event.fromStatus) + + return nil + }) + }, +) diff --git a/chasm/lib/activity/statemachine_test.go b/chasm/lib/activity/statemachine_test.go new file mode 100644 index 0000000000..68d1219182 --- /dev/null +++ b/chasm/lib/activity/statemachine_test.go @@ -0,0 +1,725 @@ +package activity + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + failurepb "go.temporal.io/api/failure/v1" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/payloads" + 
"go.temporal.io/server/common/testing/protorequire" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var ( + defaultTime = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + defaultRetryPolicy = &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Second), + BackoffCoefficient: 2.0, + MaximumAttempts: 5, + MaximumInterval: durationpb.New(100 * time.Second), + } + defaultScheduleToCloseTimeout = 10 * time.Minute + defaultScheduleToStartTimeout = 2 * time.Minute + defaultStartToCloseTimeout = 3 * time.Minute +) + +func TestTransitionScheduled(t *testing.T) { + testCases := []struct { + name string + startingAttemptCount int32 + expectedTasks []chasm.MockTask + scheduleToStartTimeout time.Duration + scheduleToCloseTimeout time.Duration + }{ + { + name: "all timeouts set", + startingAttemptCount: 0, + expectedTasks: []chasm.MockTask{ + {Payload: &activitypb.ScheduleToStartTimeoutTask{}}, + {Payload: &activitypb.ScheduleToCloseTimeoutTask{}}, + {Payload: &activitypb.ActivityDispatchTask{}}, + }, + scheduleToStartTimeout: defaultScheduleToStartTimeout, + scheduleToCloseTimeout: defaultScheduleToCloseTimeout, + }, + { + name: "schedule to start timeout not set", + startingAttemptCount: 0, + expectedTasks: []chasm.MockTask{ + {Payload: &activitypb.ScheduleToCloseTimeoutTask{}}, + {Payload: &activitypb.ActivityDispatchTask{}}, + }, + scheduleToStartTimeout: 0, + scheduleToCloseTimeout: defaultScheduleToCloseTimeout, + }, + { + name: "schedule to close timeout not set", + startingAttemptCount: 0, + expectedTasks: []chasm.MockTask{ + {Payload: &activitypb.ScheduleToStartTimeoutTask{}}, + {Payload: &activitypb.ActivityDispatchTask{}}, + }, + scheduleToStartTimeout: defaultScheduleToStartTimeout, + scheduleToCloseTimeout: 0, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + 
HandleNow: func(chasm.Component) time.Time { return defaultTime }, + }, + } + attemptState := &activitypb.ActivityAttemptState{Count: tc.startingAttemptCount} + outcome := &activitypb.ActivityOutcome{} + input := payloads.EncodeString("test-input") + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(tc.scheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(tc.scheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_UNSPECIFIED, + TaskQueue: &taskqueuepb.TaskQueue{Name: "test-task-queue"}, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + Outcome: chasm.NewDataField(ctx, outcome), + RequestData: chasm.NewDataField(ctx, &activitypb.ActivityRequestData{ + Input: input, + }), + } + + err := TransitionScheduled.Apply(activity, ctx, nil) + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, activity.Status) + require.EqualValues(t, 1, attemptState.Count) + + // Verify added tasks + require.Len(t, ctx.Tasks, len(tc.expectedTasks)) + for i, expectedTask := range tc.expectedTasks { + actualTask := ctx.Tasks[i] + + require.IsType(t, expectedTask.Payload, actualTask.Payload, "expected %T at index %d, got %T", + expectedTask.Payload, i, actualTask.Payload) + + switch expectedTask.Payload.(type) { + case *activitypb.ActivityDispatchTask: + require.Empty(t, actualTask.Attributes.ScheduledTime) + case *activitypb.ScheduleToStartTimeoutTask: + require.Equal(t, defaultTime.Add(tc.scheduleToStartTimeout), actualTask.Attributes.ScheduledTime) + case *activitypb.ScheduleToCloseTimeoutTask: + require.Equal(t, defaultTime.Add(tc.scheduleToCloseTimeout), actualTask.Attributes.ScheduledTime) + default: + t.Fatalf("unexpected task payload type at index %d: %T", i, actualTask.Payload) + } + 
+ } + }) + } +} + +func TestTransitionRescheduled(t *testing.T) { + testCases := []struct { + name string + startingAttemptCount int32 + expectedTasks []chasm.MockTask + expectedRetryInterval time.Duration + retryPolicy *commonpb.RetryPolicy + scheduleToStartTimeout time.Duration + operationTag string + counterMetric string + timeoutType enumspb.TimeoutType + }{ + { + name: "second attempt - timeout recorded", + startingAttemptCount: 1, + expectedTasks: []chasm.MockTask{ + {Payload: &activitypb.ScheduleToStartTimeoutTask{}}, + {Payload: &activitypb.ActivityDispatchTask{}}, + }, + expectedRetryInterval: 2 * time.Second, + retryPolicy: defaultRetryPolicy, + scheduleToStartTimeout: defaultScheduleToStartTimeout, + operationTag: metrics.TimerActiveTaskActivityTimeoutScope, + counterMetric: metrics.ActivityTaskTimeout.Name(), + timeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, + }, + { + name: "third attempt - timeout recorded", + startingAttemptCount: 2, + expectedTasks: []chasm.MockTask{ + {Payload: &activitypb.ScheduleToStartTimeoutTask{}}, + {Payload: &activitypb.ActivityDispatchTask{}}, + }, + expectedRetryInterval: 4 * time.Second, + retryPolicy: defaultRetryPolicy, + scheduleToStartTimeout: defaultScheduleToStartTimeout, + operationTag: metrics.TimerActiveTaskActivityTimeoutScope, + counterMetric: metrics.ActivityTaskTimeout.Name(), + timeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, + }, + { + name: "no schedule to start timeout", + startingAttemptCount: 1, + expectedTasks: []chasm.MockTask{ + {Payload: &activitypb.ActivityDispatchTask{}}, + }, + expectedRetryInterval: 2 * time.Second, + retryPolicy: defaultRetryPolicy, + scheduleToStartTimeout: 0, + operationTag: metrics.TimerActiveTaskActivityTimeoutScope, + counterMetric: metrics.ActivityTaskTimeout.Name(), + timeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, + }, + { + name: "heartbeat timeout - timeout recorded", + startingAttemptCount: 1, + expectedTasks: []chasm.MockTask{ + {Payload: 
&activitypb.ScheduleToStartTimeoutTask{}}, + {Payload: &activitypb.ActivityDispatchTask{}}, + }, + expectedRetryInterval: 2 * time.Second, + retryPolicy: defaultRetryPolicy, + scheduleToStartTimeout: defaultScheduleToStartTimeout, + operationTag: metrics.TimerActiveTaskActivityTimeoutScope, + counterMetric: metrics.ActivityTaskTimeout.Name(), + timeoutType: enumspb.TIMEOUT_TYPE_HEARTBEAT, + }, + + { + name: "reschedule from failure", + startingAttemptCount: 1, + expectedTasks: []chasm.MockTask{ + {Payload: &activitypb.ScheduleToStartTimeoutTask{}}, + {Payload: &activitypb.ActivityDispatchTask{}}, + }, + expectedRetryInterval: 2 * time.Second, + retryPolicy: defaultRetryPolicy, + scheduleToStartTimeout: defaultScheduleToStartTimeout, + operationTag: metrics.HistoryRespondActivityTaskFailedScope, + counterMetric: metrics.ActivityTaskFail.Name(), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := &chasm.MockMutableContext{} + ctx.HandleNow = func(chasm.Component) time.Time { return defaultTime } + attemptState := &activitypb.ActivityAttemptState{Count: tc.startingAttemptCount} + outcome := &activitypb.ActivityOutcome{} + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(tc.scheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + TaskQueue: &taskqueuepb.TaskQueue{Name: "test-task-queue"}, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + Outcome: chasm.NewDataField(ctx, outcome), + } + + event := rescheduleEvent{ + retryInterval: tc.expectedRetryInterval, + failure: createStartToCloseTimeoutFailure(), + timeoutType: tc.timeoutType, + } + + err := TransitionRescheduled.Apply(activity, ctx, event) 
+ require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, activity.Status) + require.Equal(t, tc.startingAttemptCount+1, attemptState.Count) + protorequire.ProtoEqual(t, durationpb.New(tc.expectedRetryInterval), attemptState.GetCurrentRetryInterval()) + + // Verify attempt state failure details updated correctly + lastFailureDetails := attemptState.GetLastFailureDetails() + require.NotNil(t, lastFailureDetails.GetFailure()) + require.Equal(t, lastFailureDetails.GetTime(), attemptState.GetCompleteTime()) + // This should remain nil on intermediate retry attempts. The final attempt goes directly via TransitionTimedOut. + require.Nil(t, outcome.GetVariant()) + + // Verify added tasks + require.Len(t, ctx.Tasks, len(tc.expectedTasks)) + for i, expectedTask := range tc.expectedTasks { + actualTask := ctx.Tasks[i] + + switch expectedTask.Payload.(type) { + case *activitypb.ActivityDispatchTask: + _, ok := actualTask.Payload.(*activitypb.ActivityDispatchTask) + require.True(t, ok, "expected ActivityDispatchTask at index %d", i) + require.Equal(t, defaultTime.Add(tc.expectedRetryInterval), actualTask.Attributes.ScheduledTime) + case *activitypb.ScheduleToStartTimeoutTask: + _, ok := actualTask.Payload.(*activitypb.ScheduleToStartTimeoutTask) + require.True(t, ok, "expected ScheduleToStartTimeoutTask at index %d", i) + require.Equal(t, defaultTime.Add(tc.scheduleToStartTimeout).Add(tc.expectedRetryInterval), actualTask.Attributes.ScheduledTime) + default: + t.Fatalf("unexpected task payload type at index %d: %T", i, actualTask.Payload) + } + + } + }) + } +} + +func TestTransitionStarted(t *testing.T) { + ctx := &chasm.MockMutableContext{} + ctx.HandleNow = func(chasm.Component) time.Time { return defaultTime } + attemptState := &activitypb.ActivityAttemptState{ + Count: 1, + StartedTime: timestamppb.New(defaultTime), + } + outcome := &activitypb.ActivityOutcome{} + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + 
RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(defaultScheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + Outcome: chasm.NewDataField(ctx, outcome), + } + + err := TransitionStarted.Apply(activity, ctx, &historyservice.RecordActivityTaskStartedRequest{ + PollRequest: &workflowservice.PollActivityTaskQueueRequest{ + Identity: "test-worker", + }, + }) + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, activity.Status) + require.EqualValues(t, 1, attemptState.Count) + require.Equal(t, defaultTime, attemptState.StartedTime.AsTime()) + require.Equal(t, "test-worker", attemptState.LastWorkerIdentity) + + // Verify added tasks + require.Len(t, ctx.Tasks, 1) + _, ok := ctx.Tasks[0].Payload.(*activitypb.StartToCloseTimeoutTask) + require.True(t, ok, "expected ScheduleToStartTimeoutTask") + require.Equal(t, defaultTime.Add(defaultStartToCloseTimeout), ctx.Tasks[0].Attributes.ScheduledTime) +} + +func TestTransitionTimedout(t *testing.T) { + testCases := []struct { + name string + startStatus activitypb.ActivityExecutionStatus + timeoutType enumspb.TimeoutType + attemptCount int32 + }{ + { + name: "schedule to start timeout", + startStatus: activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + timeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START, + attemptCount: 2, + }, + { + name: "schedule to close timeout from scheduled status", + startStatus: activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + timeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, + attemptCount: 3, + }, + { + name: "schedule to close timeout from started status", + startStatus: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + timeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, + attemptCount: 4, + }, + { + name: "start 
to close timeout", + startStatus: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + timeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, + attemptCount: 5, + }, + { + name: "heartbeat timeout", + startStatus: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + timeoutType: enumspb.TIMEOUT_TYPE_HEARTBEAT, + attemptCount: 2, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := &chasm.MockMutableContext{} + attemptState := &activitypb.ActivityAttemptState{Count: tc.attemptCount} + outcome := &activitypb.ActivityOutcome{} + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(defaultScheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: tc.startStatus, + TaskQueue: &taskqueuepb.TaskQueue{Name: "test-task-queue"}, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + Outcome: chasm.NewDataField(ctx, outcome), + } + + controller := gomock.NewController(t) + metricsHandler := metrics.NewMockHandler(controller) + + timerStartToCloseLatency := metrics.NewMockTimerIface(controller) + timerStartToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityStartToCloseLatency.Name()).Return(timerStartToCloseLatency) + + timerScheduleToCloseLatency := metrics.NewMockTimerIface(controller) + timerScheduleToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityScheduleToCloseLatency.Name()).Return(timerScheduleToCloseLatency) + + timeoutTag := metrics.StringTag("timeout_type", tc.timeoutType.String()) + + counterTimeout := metrics.NewMockCounterIface(controller) + counterTimeout.EXPECT().Record(int64(1), timeoutTag).Times(1) + 
metricsHandler.EXPECT().Counter(metrics.ActivityTimeout.Name()).Return(counterTimeout) + + counterTaskTimeout := metrics.NewMockCounterIface(controller) + counterTaskTimeout.EXPECT().Record(int64(1), timeoutTag).Times(1) + metricsHandler.EXPECT().Counter(metrics.ActivityTaskTimeout.Name()).Return(counterTaskTimeout) + + event := timeoutEvent{ + timeoutType: tc.timeoutType, + metricsHandler: metricsHandler, + } + + err := TransitionTimedOut.Apply(activity, ctx, event) + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT, activity.Status) + require.Equal(t, tc.attemptCount, attemptState.Count) + + switch tc.timeoutType { + case enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START, + enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE: + // Timeout failure is recorded in outcome but not attempt state + require.Nil(t, attemptState.GetLastFailureDetails()) + require.Nil(t, attemptState.GetCompleteTime()) + require.NotNil(t, outcome.GetFailed().GetFailure()) + // do something + case enumspb.TIMEOUT_TYPE_START_TO_CLOSE, + enumspb.TIMEOUT_TYPE_HEARTBEAT: + // Timeout failure is recorded in attempt state only. TransitionTimedOut should only be called when there + // are no more retries. Retries go through TransitionRescheduled. 
+ require.NotNil(t, attemptState.GetLastFailureDetails().GetFailure()) + require.NotNil(t, attemptState.GetLastFailureDetails().GetTime()) + require.NotNil(t, attemptState.GetCompleteTime()) + require.Nil(t, attemptState.GetCurrentRetryInterval()) + require.Nil(t, outcome.GetVariant()) + + default: + t.Fatalf("unexpected timeout type: %v", tc.timeoutType) + } + + require.Empty(t, ctx.Tasks) + }) + } +} + +func TestTransitionCompleted(t *testing.T) { + ctx := &chasm.MockMutableContext{} + ctx.HandleNow = func(chasm.Component) time.Time { return defaultTime } + attemptState := &activitypb.ActivityAttemptState{Count: 1} + outcome := &activitypb.ActivityOutcome{} + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(defaultScheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + TaskQueue: &taskqueuepb.TaskQueue{Name: "test-task-queue"}, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + Outcome: chasm.NewDataField(ctx, outcome), + } + + payload := payloads.EncodeString("Done") + + controller := gomock.NewController(t) + metricsHandler := metrics.NewMockHandler(controller) + + timerStartToCloseLatency := metrics.NewMockTimerIface(controller) + timerStartToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityStartToCloseLatency.Name()).Return(timerStartToCloseLatency) + + timerScheduleToCloseLatency := metrics.NewMockTimerIface(controller) + timerScheduleToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityScheduleToCloseLatency.Name()).Return(timerScheduleToCloseLatency) + + counterSuccess := metrics.NewMockCounterIface(controller) + 
counterSuccess.EXPECT().Record(int64(1)).Times(1) + metricsHandler.EXPECT().Counter(metrics.ActivitySuccess.Name()).Return(counterSuccess) + + req := &historyservice.RespondActivityTaskCompletedRequest{ + CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ + Result: payload, + Identity: "worker", + }, + } + + err := TransitionCompleted.Apply(activity, ctx, completeEvent{ + req: req, + metricsHandler: metricsHandler, + }) + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_COMPLETED, activity.Status) + require.EqualValues(t, 1, attemptState.Count) + require.Equal(t, "worker", attemptState.GetLastWorkerIdentity()) + require.NotNil(t, attemptState.GetCompleteTime()) + protorequire.ProtoEqual(t, payload, outcome.GetSuccessful().GetOutput()) +} + +func TestTransitionFailed(t *testing.T) { + ctx := &chasm.MockMutableContext{} + ctx.HandleNow = func(chasm.Component) time.Time { return defaultTime } + attemptState := &activitypb.ActivityAttemptState{Count: 1} + heartbeatState := &activitypb.ActivityHeartbeatState{} + outcome := &activitypb.ActivityOutcome{} + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(defaultScheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + TaskQueue: &taskqueuepb.TaskQueue{Name: "test-task-queue"}, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + LastHeartbeat: chasm.NewDataField(ctx, heartbeatState), + Outcome: chasm.NewDataField(ctx, outcome), + } + + heartbeatDetails := payloads.EncodeString("Heartbeat") + failure := &failurepb.Failure{ + Message: "Failed Activity", + FailureInfo: &failurepb.Failure_ApplicationFailureInfo{ApplicationFailureInfo: 
&failurepb.ApplicationFailureInfo{ + Type: "Test", + NonRetryable: true, + }}, + } + + controller := gomock.NewController(t) + metricsHandler := metrics.NewMockHandler(controller) + + timerStartToCloseLatency := metrics.NewMockTimerIface(controller) + timerStartToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityStartToCloseLatency.Name()).Return(timerStartToCloseLatency) + + timerScheduleToCloseLatency := metrics.NewMockTimerIface(controller) + timerScheduleToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityScheduleToCloseLatency.Name()).Return(timerScheduleToCloseLatency) + + counterFail := metrics.NewMockCounterIface(controller) + counterFail.EXPECT().Record(int64(1)).Times(1) + metricsHandler.EXPECT().Counter(metrics.ActivityFail.Name()).Return(counterFail) + + counterTaskFail := metrics.NewMockCounterIface(controller) + counterTaskFail.EXPECT().Record(int64(1)).Times(1) + metricsHandler.EXPECT().Counter(metrics.ActivityTaskFail.Name()).Return(counterTaskFail) + + req := &historyservice.RespondActivityTaskFailedRequest{ + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + Failure: failure, + LastHeartbeatDetails: heartbeatDetails, + Identity: "worker", + }, + } + + err := TransitionFailed.Apply(activity, ctx, failedEvent{ + req: req, + metricsHandler: metricsHandler, + }) + + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_FAILED, activity.Status) + require.EqualValues(t, 1, attemptState.Count) + require.Equal(t, "worker", attemptState.GetLastWorkerIdentity()) + require.NotNil(t, attemptState.GetCompleteTime()) + protorequire.ProtoEqual(t, heartbeatDetails, heartbeatState.GetDetails()) + require.NotNil(t, heartbeatState.GetRecordedTime()) + protorequire.ProtoEqual(t, failure, attemptState.GetLastFailureDetails().GetFailure()) + require.NotNil(t, attemptState.GetLastFailureDetails().GetTime()) + require.Nil(t, 
outcome.GetFailed()) +} + +func TestTransitionTerminated(t *testing.T) { + ctx := &chasm.MockMutableContext{} + ctx.HandleNow = func(chasm.Component) time.Time { return defaultTime } + attemptState := &activitypb.ActivityAttemptState{ + Count: 1, + LastWorkerIdentity: "worker", + } + outcome := &activitypb.ActivityOutcome{} + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(defaultScheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + TaskQueue: &taskqueuepb.TaskQueue{Name: "test-task-queue"}, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + Outcome: chasm.NewDataField(ctx, outcome), + } + + controller := gomock.NewController(t) + metricsHandler := metrics.NewMockHandler(controller) + enrichedMetricsHandler := metrics.NewMockHandler(controller) + + tags := []metrics.Tag{ + metrics.OperationTag(metrics.ActivityTerminatedScope), + metrics.ActivityTypeTag("test-activity-type"), + metrics.VersioningBehaviorTag(enumspb.VERSIONING_BEHAVIOR_UNSPECIFIED), + metrics.WorkflowTypeTag(WorkflowTypeTag), + metrics.NamespaceTag("test-namespace"), + metrics.UnsafeTaskQueueTag("test-task-queue"), + } + metricsHandler.EXPECT().WithTags(tags).Return(enrichedMetricsHandler) + + counterTerminate := metrics.NewMockCounterIface(controller) + counterTerminate.EXPECT().Record(int64(1)).Times(1) + enrichedMetricsHandler.EXPECT().Counter(metrics.ActivityTerminate.Name()).Return(counterTerminate) + + req := &activitypb.TerminateActivityExecutionRequest{ + FrontendRequest: &workflowservice.TerminateActivityExecutionRequest{ + Reason: "Test Termination", + Identity: "terminator", + RequestId: "test-request-id", + }, + } + + err := 
TransitionTerminated.Apply(activity, ctx, terminateEvent{ + request: req, + MetricsHandlerBuilderParams: MetricsHandlerBuilderParams{ + Handler: metricsHandler, + NamespaceName: "test-namespace", + BreakdownMetricsByTaskQueue: func(namespace string, taskQueue string, taskQueueType enumspb.TaskQueueType) bool { + return namespace == "test-namespace" && taskQueue == "test-task-queue" && taskQueueType == enumspb.TASK_QUEUE_TYPE_ACTIVITY + }, + }, + }) + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_TERMINATED, activity.Status) + require.EqualValues(t, 1, attemptState.Count) + require.Equal(t, "worker", attemptState.GetLastWorkerIdentity()) + require.Equal(t, "test-request-id", activity.GetTerminateState().RequestId) + + expectedFailure := &failurepb.Failure{ + Message: "Test Termination", + FailureInfo: &failurepb.Failure_TerminatedFailureInfo{}, + } + protorequire.ProtoEqual(t, expectedFailure, outcome.GetFailed().GetFailure()) +} + +func TestTransitionCancelRequested(t *testing.T) { + ctx := &chasm.MockMutableContext{} + ctx.HandleNow = func(chasm.Component) time.Time { return defaultTime } + attemptState := &activitypb.ActivityAttemptState{Count: 1} + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(defaultScheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + } + + err := TransitionCancelRequested.Apply(activity, ctx, &workflowservice.RequestCancelActivityExecutionRequest{ + RequestId: "cancel-request", + Reason: "Test Cancel Requested", + Identity: "worker", + }) + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, activity.Status) + + cancelState := 
activity.CancelState + + require.Equal(t, "cancel-request", cancelState.GetRequestId()) + require.Equal(t, "worker", cancelState.GetIdentity()) + require.Equal(t, "Test Cancel Requested", cancelState.GetReason()) + require.NotNil(t, cancelState.GetRequestTime()) +} + +func TestTransitionCanceled(t *testing.T) { + ctx := &chasm.MockMutableContext{} + ctx.HandleNow = func(chasm.Component) time.Time { return defaultTime } + attemptState := &activitypb.ActivityAttemptState{Count: 1} + outcome := &activitypb.ActivityOutcome{} + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(defaultScheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, + TaskQueue: &taskqueuepb.TaskQueue{Name: "test-task-queue"}, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + Outcome: chasm.NewDataField(ctx, outcome), + } + + controller := gomock.NewController(t) + metricsHandler := metrics.NewMockHandler(controller) + + timerStartToCloseLatency := metrics.NewMockTimerIface(controller) + timerStartToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityStartToCloseLatency.Name()).Return(timerStartToCloseLatency) + + timerScheduleToCloseLatency := metrics.NewMockTimerIface(controller) + timerScheduleToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityScheduleToCloseLatency.Name()).Return(timerScheduleToCloseLatency) + + counterCancel := metrics.NewMockCounterIface(controller) + counterCancel.EXPECT().Record(int64(1)).Times(1) + metricsHandler.EXPECT().Counter(metrics.ActivityCancel.Name()).Return(counterCancel) + + event := cancelEvent{ + details: 
payloads.EncodeString("Details"), + handler: metricsHandler, + } + + err := TransitionCanceled.Apply(activity, ctx, event) + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_CANCELED, activity.Status) + + expectedFailure := &failurepb.Failure{ + Message: "Activity canceled", + FailureInfo: &failurepb.Failure_CanceledFailureInfo{ + CanceledFailureInfo: &failurepb.CanceledFailureInfo{ + Details: payloads.EncodeString("Details"), + }, + }, + } + protorequire.ProtoEqual(t, expectedFailure, outcome.GetFailed().GetFailure()) +} diff --git a/chasm/lib/activity/validator.go b/chasm/lib/activity/validator.go new file mode 100644 index 0000000000..3126312234 --- /dev/null +++ b/chasm/lib/activity/validator.go @@ -0,0 +1,293 @@ +package activity + +import ( + "github.com/google/uuid" + activitypb "go.temporal.io/api/activity/v1" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + tokenspb "go.temporal.io/server/api/token/v1" + "go.temporal.io/server/chasm" + activitystatepb "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/priorities" + "go.temporal.io/server/common/retrypolicy" + "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/common/tqid" + "google.golang.org/protobuf/types/known/durationpb" +) + +// ValidateAndNormalizeActivityAttributes validates and normalizes the common activity request attributes. +// This validation is shared by both standalone and embedded activities. 
+// IMPORTANT: this method mutates the input params; in cases where it's critical to maintain immutability +// (i.e., when incoming request can potentially be retried), clone the params first before passing it in. +// +// The timeout normalization logic is as follows: +// 1. If ScheduleToClose is set, fill in missing ScheduleToStart and StartToClose from ScheduleToClose +// 2. If StartToClose is set but ScheduleToClose is not set, set ScheduleToClose to runTimeout, and fill in missing ScheduleToStart from runTimeout +// 3. If neither ScheduleToClose nor StartToClose is set, return error +// 4. Ensure all timeouts do not exceed runTimeout if runTimeout is set (>0) +// 5. Ensure HeartbeatTimeout does not exceed StartToClose +func ValidateAndNormalizeActivityAttributes( + activityID string, + activityType string, + getDefaultActivityRetrySettings dynamicconfig.TypedPropertyFnWithNamespaceFilter[retrypolicy.DefaultRetrySettings], + maxIDLengthLimit int, + namespaceID namespace.ID, + options *activitypb.ActivityOptions, + priority *commonpb.Priority, + runTimeout *durationpb.Duration, +) error { + if err := tqid.NormalizeAndValidate(options.TaskQueue, "", maxIDLengthLimit); err != nil { + return err + } + + if activityID == "" { + return serviceerror.NewInvalidArgumentf("ActivityId is not set. ActivityType=%s", activityType) + } + if activityType == "" { + return serviceerror.NewInvalidArgumentf("ActivityType is not set. ActivityID=%s", activityID) + } + + if err := validateActivityRetryPolicy(namespaceID, options.RetryPolicy, getDefaultActivityRetrySettings); err != nil { + return err + } + + if len(activityID) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("ActivityId exceeds length limit. ActivityId=%s ActivityType=%s Length=%d Limit=%d", + activityID, activityType, len(activityID), maxIDLengthLimit) + } + if len(activityType) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("ActivityType exceeds length limit. 
ActivityId=%s ActivityType=%s Length=%d Limit=%d", + activityID, activityType, len(activityType), maxIDLengthLimit) + } + + if err := priorities.Validate(priority); err != nil { + return serviceerror.NewInvalidArgumentf("Invalid Priorities: %v ActivityId=%s ActivityType=%s", + err, activityID, activityType) + } + + return normalizeAndValidateTimeouts(activityID, + activityType, + runTimeout, + options) +} + +func validateActivityRetryPolicy( + namespaceID namespace.ID, + retryPolicy *commonpb.RetryPolicy, + getDefaultActivityRetrySettings dynamicconfig.TypedPropertyFnWithNamespaceFilter[retrypolicy.DefaultRetrySettings], +) error { + if retryPolicy == nil { + return nil + } + // TODO(saa-preview): this is a namespace setting, not a namespace id setting + defaultActivityRetrySettings := getDefaultActivityRetrySettings(namespaceID.String()) + retrypolicy.EnsureDefaults(retryPolicy, defaultActivityRetrySettings) + return retrypolicy.Validate(retryPolicy) +} + +func normalizeAndValidateTimeouts( + activityID string, + activityType string, + runTimeout *durationpb.Duration, + options *activitypb.ActivityOptions, +) error { + // Only attempt to deduce and fill in unspecified timeouts only when all timeouts are non-negative. 
+	if err := timestamp.ValidateAndCapProtoDuration(options.GetScheduleToCloseTimeout()); err != nil {
+		return serviceerror.NewInvalidArgumentf("Invalid ScheduleToCloseTimeout: %v ActivityId=%s ActivityType=%s",
+			err, activityID, activityType)
+	}
+	if err := timestamp.ValidateAndCapProtoDuration(options.GetScheduleToStartTimeout()); err != nil {
+		return serviceerror.NewInvalidArgumentf("Invalid ScheduleToStartTimeout: %v ActivityId=%s ActivityType=%s",
+			err, activityID, activityType)
+	}
+	if err := timestamp.ValidateAndCapProtoDuration(options.GetStartToCloseTimeout()); err != nil {
+		return serviceerror.NewInvalidArgumentf("Invalid StartToCloseTimeout: %v ActivityId=%s ActivityType=%s",
+			err, activityID, activityType)
+	}
+	if err := timestamp.ValidateAndCapProtoDuration(options.GetHeartbeatTimeout()); err != nil {
+		return serviceerror.NewInvalidArgumentf("Invalid HeartbeatTimeout: %v ActivityId=%s ActivityType=%s",
+			err, activityID, activityType)
+	}
+
+	scheduleToCloseSet := options.GetScheduleToCloseTimeout().AsDuration() > 0
+	scheduleToStartSet := options.GetScheduleToStartTimeout().AsDuration() > 0
+	startToCloseSet := options.GetStartToCloseTimeout().AsDuration() > 0
+
+	if scheduleToCloseSet {
+		if scheduleToStartSet {
+			options.ScheduleToStartTimeout = timestamp.MinDurationPtr(options.ScheduleToStartTimeout, options.ScheduleToCloseTimeout)
+		} else {
+			options.ScheduleToStartTimeout = options.ScheduleToCloseTimeout
+		}
+		if startToCloseSet {
+			options.StartToCloseTimeout = timestamp.MinDurationPtr(options.StartToCloseTimeout, options.ScheduleToCloseTimeout)
+		} else {
+			options.StartToCloseTimeout = options.ScheduleToCloseTimeout
+		}
+	} else if startToCloseSet {
+		// scheduleToCloseSet is false here (ruled out by the first branch above), so derive ScheduleToClose from runTimeout.
+		options.ScheduleToCloseTimeout = runTimeout
+		if !scheduleToStartSet {
+			options.ScheduleToStartTimeout = runTimeout
+		}
+	} else {
+		// Deduction failed as there's not enough information to fill in missing timeouts.
+ return serviceerror.NewInvalidArgumentf("A valid StartToClose or ScheduleToCloseTimeout is not set on ScheduleActivityTaskCommand. ActivityId=%s ActivityType=%s", + activityID, activityType) + } + // ensure activity timeout never larger than workflow timeout + if runTimeout.AsDuration() > 0 { + runTimeoutDur := runTimeout.AsDuration() + if options.ScheduleToCloseTimeout.AsDuration() > runTimeoutDur { + options.ScheduleToCloseTimeout = runTimeout + } + if options.ScheduleToStartTimeout.AsDuration() > runTimeoutDur { + options.ScheduleToStartTimeout = runTimeout + } + if options.StartToCloseTimeout.AsDuration() > runTimeoutDur { + options.StartToCloseTimeout = runTimeout + } + if options.HeartbeatTimeout.AsDuration() > runTimeoutDur { + options.HeartbeatTimeout = runTimeout + } + } + + options.HeartbeatTimeout = timestamp.MinDurationPtr(options.HeartbeatTimeout, options.StartToCloseTimeout) + + return nil +} + +func normalizeAndValidateIDPolicy(req *workflowservice.StartActivityExecutionRequest) error { + if req.GetIdReusePolicy() == enumspb.ACTIVITY_ID_REUSE_POLICY_UNSPECIFIED { + req.IdReusePolicy = enumspb.ACTIVITY_ID_REUSE_POLICY_ALLOW_DUPLICATE + } + + if req.GetIdConflictPolicy() == enumspb.ACTIVITY_ID_CONFLICT_POLICY_UNSPECIFIED { + req.IdConflictPolicy = enumspb.ACTIVITY_ID_CONFLICT_POLICY_FAIL + } + + return nil +} + +func validateInputSize( + activityID string, + blobSizeViolationTagValue string, + blobSizeLimitError dynamicconfig.IntPropertyFnWithNamespaceFilter, + blobSizeLimitWarn dynamicconfig.IntPropertyFnWithNamespaceFilter, + inputSize int, + logger log.Logger, + namespaceName string, +) error { + sizeWarnLimit := blobSizeLimitWarn(namespaceName) + sizeErrorLimit := blobSizeLimitError(namespaceName) + + if inputSize > sizeWarnLimit { + logger.Warn("Activity input size exceeds the warning limit.", + tag.WorkflowNamespace(namespaceName), + tag.ActivityID(activityID), + tag.ActivitySize(int64(inputSize)), + 
tag.BlobSizeViolationOperation(blobSizeViolationTagValue)) + + if inputSize > sizeErrorLimit { + return common.ErrBlobSizeExceedsLimit + } + } + + return nil +} + +func validateAndNormalizeSearchAttributes( + req *workflowservice.StartActivityExecutionRequest, + saMapperProvider searchattribute.MapperProvider, + saValidator *searchattribute.Validator, +) error { + namespaceName := req.GetNamespace() + + // Unalias search attributes for validation. + saToValidate := req.SearchAttributes + if saMapperProvider != nil && saToValidate != nil { + var err error + saToValidate, err = searchattribute.UnaliasFields(saMapperProvider, saToValidate, namespaceName) + if err != nil { + return err + } + } + + if err := saValidator.Validate(saToValidate, namespaceName); err != nil { + return err + } + + return saValidator.ValidateSize(saToValidate, namespaceName) +} + +// ValidateDescribeActivityExecutionRequest validates DescribeActivityExecutionRequest. +func ValidateDescribeActivityExecutionRequest( + req *workflowservice.DescribeActivityExecutionRequest, + maxIDLengthLimit int, +) error { + if req.GetActivityId() == "" { + return serviceerror.NewInvalidArgument("activity ID is required") + } + if len(req.GetActivityId()) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("activity ID exceeds length limit. Length=%d Limit=%d", + len(req.GetActivityId()), maxIDLengthLimit) + } + hasRunID := req.GetRunId() != "" + hasLongPollToken := len(req.GetLongPollToken()) > 0 + + if hasLongPollToken && !hasRunID { + return serviceerror.NewInvalidArgument("run id is required when long poll token is provided") + } + if hasRunID { + _, err := uuid.Parse(req.GetRunId()) + if err != nil { + return serviceerror.NewInvalidArgument("invalid run id: must be a valid UUID") + } + } + return nil +} + +// ValidatePollActivityExecutionRequest validates PollActivityExecutionRequest. 
+func ValidatePollActivityExecutionRequest( + req *workflowservice.PollActivityExecutionRequest, + maxIDLengthLimit int, +) error { + if req.GetActivityId() == "" { + return serviceerror.NewInvalidArgument("activity ID is required") + } + if len(req.GetActivityId()) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("activity ID exceeds length limit. Length=%d Limit=%d", + len(req.GetActivityId()), maxIDLengthLimit) + } + if runID := req.GetRunId(); runID != "" { + _, err := uuid.Parse(runID) + if err != nil { + return serviceerror.NewInvalidArgument("invalid run id: must be a valid UUID") + } + } + return nil +} + +// ValidateActivityTaskToken validates a task token against the current activity state. +func ValidateActivityTaskToken( + ctx chasm.Context, + a *Activity, + token *tokenspb.Task, +) error { + if a.Status != activitystatepb.ACTIVITY_EXECUTION_STATUS_STARTED && + a.Status != activitystatepb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED { + return serviceerror.NewNotFound("activity task not found") + } + if token.Attempt != a.LastAttempt.Get(ctx).GetCount() { + return serviceerror.NewNotFound("activity task not found") + } + return nil +} diff --git a/chasm/lib/activity/validator_test.go b/chasm/lib/activity/validator_test.go new file mode 100644 index 0000000000..4da016a369 --- /dev/null +++ b/chasm/lib/activity/validator_test.go @@ -0,0 +1,469 @@ +package activity + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + activitypb "go.temporal.io/api/activity/v1" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/payloads" + "go.temporal.io/server/common/retrypolicy" + 
"google.golang.org/protobuf/types/known/durationpb" +) + +const ( + defaultActivityID = "test-activity-id" + defaultActivityType = "test-activity-type" + defaultTaskQueue = "test-task-queue" + defaultMaxIDLengthLimit = 1000 + defaultNamespaceID = "default" +) + +var ( + defaultActivityOptions = activitypb.ActivityOptions{ + RetryPolicy: &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Second), + }, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + } + + defaultPriority = commonpb.Priority{FairnessKey: "normal"} + + defaultBlobSizeLimitError = func(ns string) int { + return 64 + } + defaultBlobSizeLimitWarn = func(ns string) int { + return 32 + } +) + +func TestValidateSuccess(t *testing.T) { + err := ValidateAndNormalizeActivityAttributes( + defaultActivityID, + defaultActivityType, + getDefaultRetrySettings, + defaultMaxIDLengthLimit, + defaultNamespaceID, + &defaultActivityOptions, + &defaultPriority, + durationpb.New(0)) + require.NoError(t, err) +} + +func TestValidateFailures(t *testing.T) { + cases := []struct { + name string + activityID string + activityType string + getDefaultActivityRetrySettings dynamicconfig.TypedPropertyFnWithNamespaceFilter[retrypolicy.DefaultRetrySettings] + maxIDLengthLimit int + namespaceID namespace.ID + options *activitypb.ActivityOptions + priority *commonpb.Priority + runTimeout *durationpb.Duration + }{ + { + name: "Empty ActivityId", + activityID: "", + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &defaultActivityOptions, + priority: &defaultPriority, + runTimeout: nil, + }, + { + name: "Empty ActivityType", + activityID: defaultActivityID, + activityType: "", + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + 
options: &defaultActivityOptions, + priority: &defaultPriority, + runTimeout: nil, + }, + { + name: "ActivityId exceeds length limit", + activityID: string(make([]byte, 1001)), + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &defaultActivityOptions, + priority: &defaultPriority, + runTimeout: nil, + }, + { + name: "ActivityType exceeds length limit", + activityID: defaultActivityID, + activityType: string(make([]byte, 1001)), + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &defaultActivityOptions, + priority: &defaultPriority, + runTimeout: nil, + }, + { + name: "Invalid TaskQueue", + activityID: defaultActivityID, + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: ""}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + }, + priority: &defaultPriority, + runTimeout: nil, + }, + { + name: "Negative ScheduleToCloseTimeout", + activityID: defaultActivityID, + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + ScheduleToCloseTimeout: durationpb.New(-1 * time.Second), + }, + priority: &defaultPriority, + runTimeout: nil, + }, + { + name: "Negative ScheduleToStartTimeout", + activityID: defaultActivityID, + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: 
&activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + ScheduleToStartTimeout: durationpb.New(-1 * time.Second), + }, + priority: &defaultPriority, + runTimeout: nil, + }, + { + name: "Negative StartToCloseTimeout", + activityID: defaultActivityID, + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + StartToCloseTimeout: durationpb.New(-1 * time.Second), + }, + priority: &defaultPriority, + runTimeout: nil, + }, + { + name: "Negative HeartbeatTimeout", + activityID: defaultActivityID, + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + HeartbeatTimeout: durationpb.New(-1 * time.Second), + }, + priority: &defaultPriority, + runTimeout: nil, + }, + { + name: "Invalid Priority", + activityID: defaultActivityID, + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &defaultActivityOptions, + priority: &commonpb.Priority{FairnessKey: string(make([]byte, 1001))}, + runTimeout: nil, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + err := ValidateAndNormalizeActivityAttributes( + tc.activityID, + tc.activityType, + tc.getDefaultActivityRetrySettings, + tc.maxIDLengthLimit, + tc.namespaceID, + tc.options, + tc.priority, + durationpb.New(0)) + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, 
err, &invalidArgErr) + }) + } +} + +func newTestFrontendHandler( + blobSizeLimitError func(string) int, + blobSizeLimitWarn func(string) int, + maxIDLengthLimit int, +) *frontendHandler { + return &frontendHandler{ + config: &Config{ + BlobSizeLimitError: blobSizeLimitError, + BlobSizeLimitWarn: blobSizeLimitWarn, + MaxIDLengthLimit: func() int { return maxIDLengthLimit }, + }, + logger: log.NewNoopLogger(), + } +} + +func TestValidateStandAloneRequestIDTooLong(t *testing.T) { + req := &workflowservice.StartActivityExecutionRequest{ + ActivityId: defaultActivityID, + ActivityType: &commonpb.ActivityType{Name: defaultActivityType}, + RetryPolicy: &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Second), + }, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + Namespace: "default", + RequestId: string(make([]byte, 1001)), + Input: payloads.EncodeString("test-input"), + } + + h := newTestFrontendHandler(defaultBlobSizeLimitError, defaultBlobSizeLimitWarn, defaultMaxIDLengthLimit) + err := h.validateAndNormalizeStartActivityExecutionRequest(req) + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) +} + +func TestValidateStandAloneInputTooLarge(t *testing.T) { + req := &workflowservice.StartActivityExecutionRequest{ + ActivityId: defaultActivityID, + ActivityType: &commonpb.ActivityType{Name: defaultActivityType}, + RetryPolicy: &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Second), + }, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + Namespace: "default", + RequestId: "test-request-id", + Input: payloads.EncodeString(string(make([]byte, 1000))), + } + + h := newTestFrontendHandler(defaultBlobSizeLimitError, defaultBlobSizeLimitWarn, defaultMaxIDLengthLimit) + err := h.validateAndNormalizeStartActivityExecutionRequest(req) + var invalidArgErr 
*serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) +} + +func TestValidateStandAloneInputWarningSizeShouldSucceed(t *testing.T) { + payload := payloads.EncodeString("test-input") + payloadSize := payload.Size() + + req := &workflowservice.StartActivityExecutionRequest{ + ActivityId: defaultActivityID, + ActivityType: &commonpb.ActivityType{Name: defaultActivityType}, + RetryPolicy: &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Second), + }, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + Namespace: "default", + RequestId: "test-request-id", + Input: payload, + } + + h := newTestFrontendHandler( + func(ns string) int { return payloadSize + 1 }, + func(ns string) int { return payloadSize }, + defaultMaxIDLengthLimit, + ) + err := h.validateAndNormalizeStartActivityExecutionRequest(req) + require.NoError(t, err) +} + +func TestValidateStandAlone_IDPolicyShouldDefault(t *testing.T) { + req := &workflowservice.StartActivityExecutionRequest{ + ActivityId: defaultActivityID, + ActivityType: &commonpb.ActivityType{Name: defaultActivityType}, + RetryPolicy: &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Second), + }, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + Namespace: "default", + RequestId: "test-request-id", + } + + h := newTestFrontendHandler(defaultBlobSizeLimitError, defaultBlobSizeLimitWarn, defaultMaxIDLengthLimit) + err := h.validateAndNormalizeStartActivityExecutionRequest(req) + + require.NoError(t, err) + require.Equal(t, enumspb.ACTIVITY_ID_REUSE_POLICY_ALLOW_DUPLICATE, req.IdReusePolicy) + require.Equal(t, enumspb.ACTIVITY_ID_CONFLICT_POLICY_FAIL, req.IdConflictPolicy) +} + +func TestModifiedActivityTimeouts(t *testing.T) { + cases := []struct { + name string + options *activitypb.ActivityOptions + runTimeout *durationpb.Duration + isErr bool + 
validate func(t *testing.T, options *activitypb.ActivityOptions) + }{ + { + name: "ScheduleToClose set - fills in missing timeouts", + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + }, + runTimeout: durationpb.New(0), + isErr: false, + validate: func(t *testing.T, options *activitypb.ActivityOptions) { + require.Equal(t, 10*time.Second, options.ScheduleToCloseTimeout.AsDuration()) + require.Equal(t, 10*time.Second, options.ScheduleToStartTimeout.AsDuration()) + require.Equal(t, 10*time.Second, options.StartToCloseTimeout.AsDuration()) + require.Equal(t, 0*time.Second, options.HeartbeatTimeout.AsDuration()) + }, + }, + { + name: "StartToClose set but not ScheduleToClose - fills from runTimeout", + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + StartToCloseTimeout: durationpb.New(5 * time.Second), + }, + runTimeout: durationpb.New(20 * time.Second), + isErr: false, + validate: func(t *testing.T, options *activitypb.ActivityOptions) { + require.Equal(t, 20*time.Second, options.ScheduleToCloseTimeout.AsDuration()) + require.Equal(t, 20*time.Second, options.ScheduleToStartTimeout.AsDuration()) + require.Equal(t, 5*time.Second, options.StartToCloseTimeout.AsDuration()) + require.Equal(t, 0*time.Second, options.HeartbeatTimeout.AsDuration()) + }, + }, + { + name: "Neither ScheduleToClose nor StartToClose set - returns error", + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + }, + runTimeout: durationpb.New(0), + isErr: true, + validate: func(t *testing.T, options *activitypb.ActivityOptions) {}, + }, + { + name: "ScheduleToClose and StartToClose set - StartToClose capped by ScheduleToClose", + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + 
StartToCloseTimeout: durationpb.New(15 * time.Second), + }, + runTimeout: durationpb.New(0), + isErr: false, + validate: func(t *testing.T, options *activitypb.ActivityOptions) { + require.Equal(t, 10*time.Second, options.ScheduleToCloseTimeout.AsDuration()) + require.Equal(t, 10*time.Second, options.ScheduleToStartTimeout.AsDuration()) + require.Equal(t, 10*time.Second, options.StartToCloseTimeout.AsDuration()) + require.Equal(t, 0*time.Second, options.HeartbeatTimeout.AsDuration()) + }, + }, + { + name: "ScheduleToStart capped by ScheduleToClose", + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + ScheduleToStartTimeout: durationpb.New(20 * time.Second), + }, + runTimeout: durationpb.New(0), + isErr: false, + validate: func(t *testing.T, options *activitypb.ActivityOptions) { + require.Equal(t, 10*time.Second, options.ScheduleToCloseTimeout.AsDuration()) + require.Equal(t, 10*time.Second, options.ScheduleToStartTimeout.AsDuration()) + require.Equal(t, 10*time.Second, options.StartToCloseTimeout.AsDuration()) + require.Equal(t, 0*time.Second, options.HeartbeatTimeout.AsDuration()) + }, + }, + { + name: "HeartbeatTimeout capped by StartToClose", + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + ScheduleToCloseTimeout: durationpb.New(20 * time.Second), + StartToCloseTimeout: durationpb.New(10 * time.Second), + HeartbeatTimeout: durationpb.New(15 * time.Second), + }, + runTimeout: durationpb.New(0), + isErr: false, + validate: func(t *testing.T, options *activitypb.ActivityOptions) { + require.Equal(t, 20*time.Second, options.ScheduleToCloseTimeout.AsDuration()) + require.Equal(t, 20*time.Second, options.ScheduleToStartTimeout.AsDuration()) + require.Equal(t, 10*time.Second, options.StartToCloseTimeout.AsDuration()) + require.Equal(t, 10*time.Second, options.HeartbeatTimeout.AsDuration()) + }, + }, + { + 
name: "All timeouts capped by runTimeout", + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + ScheduleToCloseTimeout: durationpb.New(30 * time.Second), + ScheduleToStartTimeout: durationpb.New(25 * time.Second), + StartToCloseTimeout: durationpb.New(20 * time.Second), + HeartbeatTimeout: durationpb.New(15 * time.Second), + }, + runTimeout: durationpb.New(10 * time.Second), + isErr: false, + validate: func(t *testing.T, options *activitypb.ActivityOptions) { + require.Equal(t, 10*time.Second, options.ScheduleToCloseTimeout.AsDuration()) + require.Equal(t, 10*time.Second, options.ScheduleToStartTimeout.AsDuration()) + require.Equal(t, 10*time.Second, options.StartToCloseTimeout.AsDuration()) + require.Equal(t, 10*time.Second, options.HeartbeatTimeout.AsDuration()) + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + err := ValidateAndNormalizeActivityAttributes( + defaultActivityID, + defaultActivityType, + getDefaultRetrySettings, + defaultMaxIDLengthLimit, + defaultNamespaceID, + tc.options, + &defaultPriority, + tc.runTimeout) + + if tc.isErr { + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + return + } + + require.NoError(t, err) + tc.validate(t, tc.options) + }) + } +} + +func getDefaultRetrySettings(_ string) retrypolicy.DefaultRetrySettings { + return retrypolicy.DefaultRetrySettings{ + InitialInterval: time.Second, + MaximumIntervalCoefficient: 100.0, + BackoffCoefficient: 2.0, + MaximumAttempts: 0, + } +} diff --git a/chasm/lib/callback/component.go b/chasm/lib/callback/component.go index 9c6aa4face..a10734e19a 100644 --- a/chasm/lib/callback/component.go +++ b/chasm/lib/callback/component.go @@ -119,17 +119,17 @@ func (c *Callback) saveResult( ) (chasm.NoValue, error) { switch r := input.result.(type) { case invocationResultOK: - err := TransitionSucceeded.Apply(ctx, c, EventSucceeded{Time: ctx.Now(c)}) + err := 
TransitionSucceeded.Apply(c, ctx, EventSucceeded{Time: ctx.Now(c)}) return nil, err case invocationResultRetry: - err := TransitionAttemptFailed.Apply(ctx, c, EventAttemptFailed{ + err := TransitionAttemptFailed.Apply(c, ctx, EventAttemptFailed{ Time: ctx.Now(c), Err: r.err, RetryPolicy: input.retryPolicy, }) return nil, err case invocationResultFail: - err := TransitionFailed.Apply(ctx, c, EventFailed{ + err := TransitionFailed.Apply(c, ctx, EventFailed{ Time: ctx.Now(c), Err: r.err, }) diff --git a/chasm/lib/callback/executors.go b/chasm/lib/callback/executors.go index dac4042b96..9ab9e199e5 100644 --- a/chasm/lib/callback/executors.go +++ b/chasm/lib/callback/executors.go @@ -179,7 +179,7 @@ func (e *BackoffTaskExecutor) Execute( taskAttrs chasm.TaskAttributes, task *callbackspb.BackoffTask, ) error { - return TransitionRescheduled.Apply(ctx, callback, EventRescheduled{}) + return TransitionRescheduled.Apply(callback, ctx, EventRescheduled{}) } func (e *BackoffTaskExecutor) Validate( diff --git a/chasm/lib/callback/statemachine_test.go b/chasm/lib/callback/statemachine_test.go index fbb6595088..dbefaf5d96 100644 --- a/chasm/lib/callback/statemachine_test.go +++ b/chasm/lib/callback/statemachine_test.go @@ -30,7 +30,7 @@ func TestValidTransitions(t *testing.T) { // AttemptFailed mctx := &chasm.MockMutableContext{} - err := TransitionAttemptFailed.Apply(mctx, callback, EventAttemptFailed{ + err := TransitionAttemptFailed.Apply(callback, mctx, EventAttemptFailed{ Time: currentTime, Err: errors.New("test"), RetryPolicy: backoff.NewExponentialRetryPolicy(time.Second), @@ -52,7 +52,7 @@ func TestValidTransitions(t *testing.T) { // Rescheduled mctx = &chasm.MockMutableContext{} - err = TransitionRescheduled.Apply(mctx, callback, EventRescheduled{}) + err = TransitionRescheduled.Apply(callback, mctx, EventRescheduled{}) require.NoError(t, err) // Assert info object is updated only where needed @@ -76,7 +76,7 @@ func TestValidTransitions(t *testing.T) { // Succeeded 
currentTime = currentTime.Add(time.Second) mctx = &chasm.MockMutableContext{} - err = TransitionSucceeded.Apply(mctx, callback, EventSucceeded{Time: currentTime}) + err = TransitionSucceeded.Apply(callback, mctx, EventSucceeded{Time: currentTime}) require.NoError(t, err) // Assert info object is updated only where needed @@ -96,7 +96,7 @@ func TestValidTransitions(t *testing.T) { // failed mctx = &chasm.MockMutableContext{} - err = TransitionFailed.Apply(mctx, callback, EventFailed{Time: currentTime, Err: errors.New("failed")}) + err = TransitionFailed.Apply(callback, mctx, EventFailed{Time: currentTime, Err: errors.New("failed")}) require.NoError(t, err) // Assert info object is updated only where needed diff --git a/chasm/lib/workflow/workflow.go b/chasm/lib/workflow/workflow.go index 337de487a7..6e0acdd063 100644 --- a/chasm/lib/workflow/workflow.go +++ b/chasm/lib/workflow/workflow.go @@ -57,7 +57,7 @@ func (w *Workflow) ProcessCloseCallbacks(ctx chasm.MutableContext) error { continue } // Trigger the callback by transitioning to SCHEDULED state - if err := callback.TransitionScheduled.Apply(ctx, cb, callback.EventScheduled{}); err != nil { + if err := callback.TransitionScheduled.Apply(cb, ctx, callback.EventScheduled{}); err != nil { return err } } diff --git a/chasm/ref.go b/chasm/ref.go index d4a91466d5..6731808022 100644 --- a/chasm/ref.go +++ b/chasm/ref.go @@ -7,9 +7,13 @@ import ( persistencespb "go.temporal.io/server/api/persistence/v1" ) -var ( - defaultShardingFn = func(key ExecutionKey) string { return key.NamespaceID + "_" + key.BusinessID } -) +// ErrMalformedComponentRef is returned when component ref bytes cannot be deserialized. +var ErrMalformedComponentRef = serviceerror.NewInvalidArgument("malformed component ref") + +// ErrInvalidComponentRef is returned when component ref bytes deserialize to an invalid component ref. 
+var ErrInvalidComponentRef = serviceerror.NewInvalidArgument("invalid component ref") + +var defaultShardingFn = func(key ExecutionKey) string { return key.NamespaceID + "_" + key.BusinessID } // ExecutionKey uniquely identifies a CHASM execution in the system. type ExecutionKey struct { @@ -128,12 +132,19 @@ func (r *ComponentRef) Serialize( // DeserializeComponentRef deserializes a byte slice into a ComponentRef. // Provides caller the access to information including ExecutionKey, Archetype, and ShardingKey. func DeserializeComponentRef(data []byte) (ComponentRef, error) { + if len(data) == 0 { + return ComponentRef{}, ErrInvalidComponentRef + } var pRef persistencespb.ChasmComponentRef if err := pRef.Unmarshal(data); err != nil { - return ComponentRef{}, err + return ComponentRef{}, ErrMalformedComponentRef } - return ProtoRefToComponentRef(&pRef), nil + ref := ProtoRefToComponentRef(&pRef) + if ref.BusinessID == "" || ref.NamespaceID == "" { + return ComponentRef{}, ErrInvalidComponentRef + } + return ref, nil } // ProtoRefToComponentRef converts a persistence ChasmComponentRef reference to a diff --git a/chasm/ref_test.go b/chasm/ref_test.go index 98e252c6ff..07f3e3af87 100644 --- a/chasm/ref_test.go +++ b/chasm/ref_test.go @@ -75,6 +75,11 @@ func (s *componentRefSuite) TestShardingKey() { } func (s *componentRefSuite) TestSerializeDeserialize() { + _, err := DeserializeComponentRef(nil) + s.ErrorIs(err, ErrInvalidComponentRef) + _, err = DeserializeComponentRef([]byte{}) + s.ErrorIs(err, ErrInvalidComponentRef) + executionKey := ExecutionKey{ NamespaceID: primitives.NewUUID().String(), BusinessID: primitives.NewUUID().String(), diff --git a/chasm/statemachine.go b/chasm/statemachine.go index 73bc825e63..14ac198050 100644 --- a/chasm/statemachine.go +++ b/chasm/statemachine.go @@ -4,6 +4,8 @@ import ( "errors" "fmt" "slices" + + "go.temporal.io/api/serviceerror" ) // ErrInvalidTransition is returned from [Transition.Apply] on an invalid state transition. 
@@ -28,7 +30,7 @@ type Transition[S comparable, SM StateMachine[S], E any] struct { // NewTransition creates a new [Transition] from the given source states to a destination state for a given event. // The apply function is called after verifying the transition is possible and setting the destination state. -func NewTransition[S comparable, SM StateMachine[S], E any](src []S, dst S, apply func(sm SM, ctx MutableContext, event E) error) Transition[S, SM, E] { +func NewTransition[S comparable, SM StateMachine[S], E any](src []S, dst S, apply func(SM, MutableContext, E) error) Transition[S, SM, E] { return Transition[S, SM, E]{ Sources: src, Destination: dst, @@ -43,10 +45,11 @@ func (t Transition[S, SM, E]) Possible(sm SM) bool { // Apply applies a transition event to the given state machine changing the state machine's state to the transition's // Destination on success. -func (t Transition[S, SM, E]) Apply(ctx MutableContext, sm SM, event E) error { +func (t Transition[S, SM, E]) Apply(sm SM, ctx MutableContext, event E) error { prevState := sm.StateMachineState() if !t.Possible(sm) { - return fmt.Errorf("%w from %v: %v", ErrInvalidTransition, prevState, event) + err := fmt.Errorf("%w from %v: %v", ErrInvalidTransition, prevState, event) + return serviceerror.NewFailedPrecondition(err.Error()) } sm.SetStateMachineState(t.Destination) diff --git a/chasm/task.go b/chasm/task.go index 153195cf68..7fc0bec636 100644 --- a/chasm/task.go +++ b/chasm/task.go @@ -22,6 +22,25 @@ type ( } TaskValidator[C any, T any] interface { + // Validate determines whether a task should proceed with execution based on the current context, component + // state, task attributes, and task data. + // + // This function serves as a gate to prevent unnecessary task execution in several scenarios: + // 1. Standby cluster deduplication: When state is replicated to standby clusters, tasks are also replicated. 
+ // Validate allows standby clusters to check if a task was already completed on the active cluster and + // skip execution if so (e.g., checking if an activity already transitioned from scheduled to started state). + // 2. Task obsolescence: Tasks can become irrelevant when state changes invalidate them (e.g., when a scheduler + // is updated to run at a different time, making the previously scheduled task invalid for the new state). + // For pure tasks that can run in a single transaction, Validate is called before execution to avoid + // unnecessary work. + // + // The framework automatically calls Validate at key points, such as after closing transactions, to check all + // generated tasks before they execute. + // + // Returns: + // - (true, nil) if the task is valid and should be executed + // - (false, nil) if the task should be silently dropped (it's no longer relevant) + // - (anything, error) if validation fails with an error Validate(Context, C, TaskAttributes, T) (bool, error) } ) diff --git a/chasm/transition_history.go b/chasm/transition_history.go new file mode 100644 index 0000000000..02b4cde8b3 --- /dev/null +++ b/chasm/transition_history.go @@ -0,0 +1,35 @@ +package chasm + +import ( + "go.temporal.io/server/common/persistence/transitionhistory" + "go.temporal.io/server/service/history/consts" +) + +// ExecutionStateChanged returns true if execution state has advanced beyond the state encoded in +// refBytes. It may return ErrInvalidComponentRef or ErrMalformedComponentRef. Callers should +// consider converting these to serviceerror.NewInvalidArgument. 
+func ExecutionStateChanged(c Component, ctx Context, refBytes []byte) (bool, error) {
+	ref, err := DeserializeComponentRef(refBytes)
+	if err != nil {
+		// Propagate the sentinel chosen by DeserializeComponentRef: empty input yields
+		// ErrInvalidComponentRef, undecodable bytes yield ErrMalformedComponentRef.
+		return false, err
+	}
+	currentRef, err := ctx.structuredRef(c)
+	if err != nil {
+		return false, err
+	}
+	if ref.ExecutionKey != currentRef.ExecutionKey {
+		return false, ErrInvalidComponentRef
+	}
+	switch transitionhistory.Compare(ref.executionLastUpdateVT, currentRef.executionLastUpdateVT) {
+	case -1:
+		// Execution state has advanced beyond submitted ref
+		return true, nil
+	case 0:
+		// Execution state has not advanced beyond submitted ref
+		return false, nil
+	case 1:
+		// Execution state is behind submitted ref
+		return false, consts.ErrStaleState
+	}
+	panic("unexpected result from transitionhistory.Compare") //nolint:forbidigo
+}
diff --git a/chasm/tree.go b/chasm/tree.go
index 0df539e763..2190ac0ba9 100644
--- a/chasm/tree.go
+++ b/chasm/tree.go
@@ -1225,6 +1225,17 @@ func unmarshalProto(
 func (n *Node) Ref(
 	component Component,
 ) ([]byte, error) {
+	ref, err := n.structuredRef(component)
+	if err != nil {
+		return nil, err
+	}
+	return ref.Serialize(n.registry)
+}
+
+// structuredRef returns a ComponentRef for the node.
+func (n *Node) structuredRef(
+	component Component,
+) (ComponentRef, error) {
 	// No need to update tree structure here. If a Component can only be found after
 	// syncSubComponents() is called, it means the component is created in the
 	// current transition and don't have a reference yet.
@@ -1232,7 +1243,7 @@ func (n *Node) Ref( for path, node := range n.andAllChildren() { if node.value == component { workflowKey := node.backend.GetWorkflowKey() - ref := ComponentRef{ + return ComponentRef{ ExecutionKey: ExecutionKey{ NamespaceID: workflowKey.NamespaceID, BusinessID: workflowKey.WorkflowID, @@ -1244,11 +1255,10 @@ func (n *Node) Ref( executionLastUpdateVT: transitionhistory.CopyVersionedTransition(node.backend.CurrentVersionedTransition()), componentPath: path, componentInitialVT: node.serializedNode.GetMetadata().GetInitialVersionedTransition(), - } - return ref.Serialize(n.registry) + }, nil } } - return nil, errComponentNotFound + return ComponentRef{}, errComponentNotFound } // componentNodePath implements the CHASM Context interface diff --git a/client/frontend/client_gen.go b/client/frontend/client_gen.go index 857afd0775..312a0f0d03 100644 --- a/client/frontend/client_gen.go +++ b/client/frontend/client_gen.go @@ -9,6 +9,16 @@ import ( "google.golang.org/grpc" ) +func (c *clientImpl) CountActivityExecutions( + ctx context.Context, + request *workflowservice.CountActivityExecutionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.CountActivityExecutionsResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.CountActivityExecutions(ctx, request, opts...) +} + func (c *clientImpl) CountWorkflowExecutions( ctx context.Context, request *workflowservice.CountWorkflowExecutionsRequest, @@ -39,6 +49,16 @@ func (c *clientImpl) CreateWorkflowRule( return c.client.CreateWorkflowRule(ctx, request, opts...) } +func (c *clientImpl) DeleteActivityExecution( + ctx context.Context, + request *workflowservice.DeleteActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DeleteActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DeleteActivityExecution(ctx, request, opts...) 
+} + func (c *clientImpl) DeleteSchedule( ctx context.Context, request *workflowservice.DeleteScheduleRequest, @@ -99,6 +119,16 @@ func (c *clientImpl) DeprecateNamespace( return c.client.DeprecateNamespace(ctx, request, opts...) } +func (c *clientImpl) DescribeActivityExecution( + ctx context.Context, + request *workflowservice.DescribeActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DescribeActivityExecution(ctx, request, opts...) +} + func (c *clientImpl) DescribeBatchOperation( ctx context.Context, request *workflowservice.DescribeBatchOperationRequest, @@ -319,6 +349,16 @@ func (c *clientImpl) GetWorkflowExecutionHistoryReverse( return c.client.GetWorkflowExecutionHistoryReverse(ctx, request, opts...) } +func (c *clientImpl) ListActivityExecutions( + ctx context.Context, + request *workflowservice.ListActivityExecutionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.ListActivityExecutionsResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.ListActivityExecutions(ctx, request, opts...) +} + func (c *clientImpl) ListArchivedWorkflowExecutions( ctx context.Context, request *workflowservice.ListArchivedWorkflowExecutionsRequest, @@ -479,6 +519,16 @@ func (c *clientImpl) PauseWorkflowExecution( return c.client.PauseWorkflowExecution(ctx, request, opts...) } +func (c *clientImpl) PollActivityExecution( + ctx context.Context, + request *workflowservice.PollActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.PollActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.PollActivityExecution(ctx, request, opts...) 
+} + func (c *clientImpl) PollActivityTaskQueue( ctx context.Context, request *workflowservice.PollActivityTaskQueueRequest, @@ -569,6 +619,16 @@ func (c *clientImpl) RegisterNamespace( return c.client.RegisterNamespace(ctx, request, opts...) } +func (c *clientImpl) RequestCancelActivityExecution( + ctx context.Context, + request *workflowservice.RequestCancelActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.RequestCancelActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.RequestCancelActivityExecution(ctx, request, opts...) +} + func (c *clientImpl) RequestCancelWorkflowExecution( ctx context.Context, request *workflowservice.RequestCancelWorkflowExecutionRequest, @@ -799,6 +859,16 @@ func (c *clientImpl) SignalWorkflowExecution( return c.client.SignalWorkflowExecution(ctx, request, opts...) } +func (c *clientImpl) StartActivityExecution( + ctx context.Context, + request *workflowservice.StartActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.StartActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.StartActivityExecution(ctx, request, opts...) +} + func (c *clientImpl) StartBatchOperation( ctx context.Context, request *workflowservice.StartBatchOperationRequest, @@ -829,6 +899,16 @@ func (c *clientImpl) StopBatchOperation( return c.client.StopBatchOperation(ctx, request, opts...) } +func (c *clientImpl) TerminateActivityExecution( + ctx context.Context, + request *workflowservice.TerminateActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.TerminateActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.TerminateActivityExecution(ctx, request, opts...) 
+} + func (c *clientImpl) TerminateWorkflowExecution( ctx context.Context, request *workflowservice.TerminateWorkflowExecutionRequest, diff --git a/client/frontend/metric_client_gen.go b/client/frontend/metric_client_gen.go index 9abc88676a..87ce92a444 100644 --- a/client/frontend/metric_client_gen.go +++ b/client/frontend/metric_client_gen.go @@ -9,6 +9,20 @@ import ( "google.golang.org/grpc" ) +func (c *metricClient) CountActivityExecutions( + ctx context.Context, + request *workflowservice.CountActivityExecutionsRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.CountActivityExecutionsResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientCountActivityExecutions") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.CountActivityExecutions(ctx, request, opts...) +} + func (c *metricClient) CountWorkflowExecutions( ctx context.Context, request *workflowservice.CountWorkflowExecutionsRequest, @@ -51,6 +65,20 @@ func (c *metricClient) CreateWorkflowRule( return c.client.CreateWorkflowRule(ctx, request, opts...) } +func (c *metricClient) DeleteActivityExecution( + ctx context.Context, + request *workflowservice.DeleteActivityExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.DeleteActivityExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientDeleteActivityExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DeleteActivityExecution(ctx, request, opts...) +} + func (c *metricClient) DeleteSchedule( ctx context.Context, request *workflowservice.DeleteScheduleRequest, @@ -135,6 +163,20 @@ func (c *metricClient) DeprecateNamespace( return c.client.DeprecateNamespace(ctx, request, opts...) 
} +func (c *metricClient) DescribeActivityExecution( + ctx context.Context, + request *workflowservice.DescribeActivityExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.DescribeActivityExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientDescribeActivityExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DescribeActivityExecution(ctx, request, opts...) +} + func (c *metricClient) DescribeBatchOperation( ctx context.Context, request *workflowservice.DescribeBatchOperationRequest, @@ -443,6 +485,20 @@ func (c *metricClient) GetWorkflowExecutionHistoryReverse( return c.client.GetWorkflowExecutionHistoryReverse(ctx, request, opts...) } +func (c *metricClient) ListActivityExecutions( + ctx context.Context, + request *workflowservice.ListActivityExecutionsRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.ListActivityExecutionsResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientListActivityExecutions") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ListActivityExecutions(ctx, request, opts...) +} + func (c *metricClient) ListArchivedWorkflowExecutions( ctx context.Context, request *workflowservice.ListArchivedWorkflowExecutionsRequest, @@ -667,6 +723,20 @@ func (c *metricClient) PauseWorkflowExecution( return c.client.PauseWorkflowExecution(ctx, request, opts...) 
} +func (c *metricClient) PollActivityExecution( + ctx context.Context, + request *workflowservice.PollActivityExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.PollActivityExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientPollActivityExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.PollActivityExecution(ctx, request, opts...) +} + func (c *metricClient) PollActivityTaskQueue( ctx context.Context, request *workflowservice.PollActivityTaskQueueRequest, @@ -793,6 +863,20 @@ func (c *metricClient) RegisterNamespace( return c.client.RegisterNamespace(ctx, request, opts...) } +func (c *metricClient) RequestCancelActivityExecution( + ctx context.Context, + request *workflowservice.RequestCancelActivityExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.RequestCancelActivityExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientRequestCancelActivityExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.RequestCancelActivityExecution(ctx, request, opts...) +} + func (c *metricClient) RequestCancelWorkflowExecution( ctx context.Context, request *workflowservice.RequestCancelWorkflowExecutionRequest, @@ -1115,6 +1199,20 @@ func (c *metricClient) SignalWorkflowExecution( return c.client.SignalWorkflowExecution(ctx, request, opts...) 
} +func (c *metricClient) StartActivityExecution( + ctx context.Context, + request *workflowservice.StartActivityExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.StartActivityExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientStartActivityExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.StartActivityExecution(ctx, request, opts...) +} + func (c *metricClient) StartBatchOperation( ctx context.Context, request *workflowservice.StartBatchOperationRequest, @@ -1157,6 +1255,20 @@ func (c *metricClient) StopBatchOperation( return c.client.StopBatchOperation(ctx, request, opts...) } +func (c *metricClient) TerminateActivityExecution( + ctx context.Context, + request *workflowservice.TerminateActivityExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.TerminateActivityExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientTerminateActivityExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.TerminateActivityExecution(ctx, request, opts...) 
+} + func (c *metricClient) TerminateWorkflowExecution( ctx context.Context, request *workflowservice.TerminateWorkflowExecutionRequest, diff --git a/client/frontend/retryable_client_gen.go b/client/frontend/retryable_client_gen.go index 5b8e05b540..036883dade 100644 --- a/client/frontend/retryable_client_gen.go +++ b/client/frontend/retryable_client_gen.go @@ -11,6 +11,21 @@ import ( "go.temporal.io/server/common/backoff" ) +func (c *retryableClient) CountActivityExecutions( + ctx context.Context, + request *workflowservice.CountActivityExecutionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.CountActivityExecutionsResponse, error) { + var resp *workflowservice.CountActivityExecutionsResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.CountActivityExecutions(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) CountWorkflowExecutions( ctx context.Context, request *workflowservice.CountWorkflowExecutionsRequest, @@ -56,6 +71,21 @@ func (c *retryableClient) CreateWorkflowRule( return resp, err } +func (c *retryableClient) DeleteActivityExecution( + ctx context.Context, + request *workflowservice.DeleteActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DeleteActivityExecutionResponse, error) { + var resp *workflowservice.DeleteActivityExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DeleteActivityExecution(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) DeleteSchedule( ctx context.Context, request *workflowservice.DeleteScheduleRequest, @@ -146,6 +176,21 @@ func (c *retryableClient) DeprecateNamespace( return resp, err } +func (c *retryableClient) DescribeActivityExecution( + ctx context.Context, + request *workflowservice.DescribeActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeActivityExecutionResponse, error) { + var resp *workflowservice.DescribeActivityExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DescribeActivityExecution(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) DescribeBatchOperation( ctx context.Context, request *workflowservice.DescribeBatchOperationRequest, @@ -476,6 +521,21 @@ func (c *retryableClient) GetWorkflowExecutionHistoryReverse( return resp, err } +func (c *retryableClient) ListActivityExecutions( + ctx context.Context, + request *workflowservice.ListActivityExecutionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.ListActivityExecutionsResponse, error) { + var resp *workflowservice.ListActivityExecutionsResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ListActivityExecutions(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) ListArchivedWorkflowExecutions( ctx context.Context, request *workflowservice.ListArchivedWorkflowExecutionsRequest, @@ -716,6 +776,21 @@ func (c *retryableClient) PauseWorkflowExecution( return resp, err } +func (c *retryableClient) PollActivityExecution( + ctx context.Context, + request *workflowservice.PollActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.PollActivityExecutionResponse, error) { + var resp *workflowservice.PollActivityExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.PollActivityExecution(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) PollActivityTaskQueue( ctx context.Context, request *workflowservice.PollActivityTaskQueueRequest, @@ -851,6 +926,21 @@ func (c *retryableClient) RegisterNamespace( return resp, err } +func (c *retryableClient) RequestCancelActivityExecution( + ctx context.Context, + request *workflowservice.RequestCancelActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.RequestCancelActivityExecutionResponse, error) { + var resp *workflowservice.RequestCancelActivityExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.RequestCancelActivityExecution(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) RequestCancelWorkflowExecution( ctx context.Context, request *workflowservice.RequestCancelWorkflowExecutionRequest, @@ -1196,6 +1286,21 @@ func (c *retryableClient) SignalWorkflowExecution( return resp, err } +func (c *retryableClient) StartActivityExecution( + ctx context.Context, + request *workflowservice.StartActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.StartActivityExecutionResponse, error) { + var resp *workflowservice.StartActivityExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.StartActivityExecution(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) StartBatchOperation( ctx context.Context, request *workflowservice.StartBatchOperationRequest, @@ -1241,6 +1346,21 @@ func (c *retryableClient) StopBatchOperation( return resp, err } +func (c *retryableClient) TerminateActivityExecution( + ctx context.Context, + request *workflowservice.TerminateActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.TerminateActivityExecutionResponse, error) { + var resp *workflowservice.TerminateActivityExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.TerminateActivityExecution(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) TerminateWorkflowExecution( ctx context.Context, request *workflowservice.TerminateWorkflowExecutionRequest, diff --git a/client/history/client.go b/client/history/client.go index 6ad9ea839a..6e9d53232d 100644 --- a/client/history/client.go +++ b/client/history/client.go @@ -218,6 +218,40 @@ func (c *clientImpl) GetReplicationStatus( return response, nil } +func (c *clientImpl) RecordActivityTaskStarted( + ctx context.Context, + request *historyservice.RecordActivityTaskStartedRequest, + opts ...grpc.CallOption, +) (*historyservice.RecordActivityTaskStartedResponse, error) { + var shardID int32 + + // For Chasm components we need to route the shard based on business ID. Note that shardIDFromWorkflowID simply + // calculates the hash from the ID so it works for both workflowID and businessID. + if len(request.GetComponentRef()) == 0 { + shardID = c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetWorkflowExecution().GetWorkflowId()) + } else { + componentRef, err := c.tokenSerializer.DeserializeChasmComponentRef(request.GetComponentRef()) + if err != nil { + return nil, err + } + + shardID = c.shardIDFromWorkflowID(componentRef.GetNamespaceId(), componentRef.GetBusinessId()) + } + + var response *historyservice.RecordActivityTaskStartedResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.RecordActivityTaskStarted(ctx, request, opts...) 
+ return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + func (c *clientImpl) StreamWorkflowReplicationMessages( ctx context.Context, opts ...grpc.CallOption, diff --git a/client/history/client_gen.go b/client/history/client_gen.go index f288309f52..36fa4c0bd1 100644 --- a/client/history/client_gen.go +++ b/client/history/client_gen.go @@ -764,26 +764,6 @@ func (c *clientImpl) RecordActivityTaskHeartbeat( return response, nil } -func (c *clientImpl) RecordActivityTaskStarted( - ctx context.Context, - request *historyservice.RecordActivityTaskStartedRequest, - opts ...grpc.CallOption, -) (*historyservice.RecordActivityTaskStartedResponse, error) { - shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetWorkflowExecution().GetWorkflowId()) - var response *historyservice.RecordActivityTaskStartedResponse - op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { - var err error - ctx, cancel := c.createContext(ctx) - defer cancel() - response, err = client.RecordActivityTaskStarted(ctx, request, opts...) - return err - } - if err := c.executeWithRedirect(ctx, shardID, op); err != nil { - return nil, err - } - return response, nil -} - func (c *clientImpl) RecordChildExecutionCompleted( ctx context.Context, request *historyservice.RecordChildExecutionCompletedRequest, diff --git a/cmd/tools/getproto/files.go b/cmd/tools/getproto/files.go index 7d95246a84..5a269db008 100644 --- a/cmd/tools/getproto/files.go +++ b/cmd/tools/getproto/files.go @@ -1,4 +1,3 @@ - // Code generated by getproto. DO NOT EDIT. // If you get build errors in this file, just delete it. It will be regenerated. 
@@ -51,6 +50,7 @@ func init() { importMap["temporal/api/command/v1/message.proto"] = command.File_temporal_api_command_v1_message_proto importMap["temporal/api/common/v1/message.proto"] = common.File_temporal_api_common_v1_message_proto importMap["temporal/api/deployment/v1/message.proto"] = deployment.File_temporal_api_deployment_v1_message_proto + importMap["temporal/api/enums/v1/activity.proto"] = enums.File_temporal_api_enums_v1_activity_proto importMap["temporal/api/enums/v1/batch_operation.proto"] = enums.File_temporal_api_enums_v1_batch_operation_proto importMap["temporal/api/enums/v1/command_type.proto"] = enums.File_temporal_api_enums_v1_command_type_proto importMap["temporal/api/enums/v1/common.proto"] = enums.File_temporal_api_enums_v1_common_proto diff --git a/common/api/metadata.go b/common/api/metadata.go index ca62cfbbb8..d730c8c609 100644 --- a/common/api/metadata.go +++ b/common/api/metadata.go @@ -88,6 +88,14 @@ var ( "RespondActivityTaskFailedById": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, "RespondActivityTaskCanceled": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, "RespondActivityTaskCanceledById": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "CountActivityExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "DeleteActivityExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "DescribeActivityExecution": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingCapable}, + "PollActivityExecution": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingAlways}, + "ListActivityExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "RequestCancelActivityExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "StartActivityExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "TerminateActivityExecution": {Scope: 
ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, "PollNexusTaskQueue": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingAlways}, "RespondNexusTaskCompleted": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, "RespondNexusTaskFailed": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, diff --git a/common/backoff/retry.go b/common/backoff/retry.go index fb5853f532..5192a0f52d 100644 --- a/common/backoff/retry.go +++ b/common/backoff/retry.go @@ -2,10 +2,13 @@ package backoff import ( "context" + "math" "time" + commonpb "go.temporal.io/api/common/v1" "go.temporal.io/api/serviceerror" "go.temporal.io/server/common/clock" + "google.golang.org/protobuf/types/known/durationpb" ) const ( @@ -169,3 +172,40 @@ func IgnoreErrors(errorsToExclude []error) func(error) bool { return true } } + +// BackoffCalculatorAlgorithmFunc is a function type that calculates backoff duration based on +// initial duration, coefficient, and current attempt number. +type BackoffCalculatorAlgorithmFunc func(duration *durationpb.Duration, coefficient float64, currentAttempt int32) time.Duration + +// ExponentialBackoffAlgorithm calculates the backoff duration using exponential algorithm. +// The result is initInterval * (backoffCoefficient ^ (currentAttempt - 1)). +// If the calculation overflows int64, it returns the maximum possible duration. A negative result will also never be returned. +func ExponentialBackoffAlgorithm(initInterval *durationpb.Duration, backoffCoefficient float64, currentAttempt int32) time.Duration { + result := float64(initInterval.AsDuration().Nanoseconds()) * math.Pow(backoffCoefficient, float64(currentAttempt-1)) + return time.Duration(max(0, min(int64(result), math.MaxInt64))) +} + +// MakeBackoffAlgorithm creates a BackoffCalculatorAlgorithmFunc that returns a fixed delay if requestedDelay is non-nil, +// otherwise falls back to exponential backoff algorithm. 
+func MakeBackoffAlgorithm(requestedDelay *time.Duration) BackoffCalculatorAlgorithmFunc { + return func(duration *durationpb.Duration, coefficient float64, currentAttempt int32) time.Duration { + if requestedDelay != nil { + return *requestedDelay + } + return ExponentialBackoffAlgorithm(duration, coefficient, currentAttempt) + } +} + +// CalculateExponentialRetryInterval calculates the retry interval using exponential backoff algorithm +func CalculateExponentialRetryInterval(retryPolicy *commonpb.RetryPolicy, attempt int32) time.Duration { + interval := ExponentialBackoffAlgorithm(retryPolicy.GetInitialInterval(), retryPolicy.GetBackoffCoefficient(), attempt) + + maxInterval := retryPolicy.GetMaximumInterval() + + // Cap interval to maximum if it's set + if maxInterval.AsDuration() != 0 && interval > maxInterval.AsDuration() { + interval = maxInterval.AsDuration() + } + + return interval +} diff --git a/common/log/tag/tags.go b/common/log/tag/tags.go index f8d1067dc9..08edfcec88 100644 --- a/common/log/tag/tags.go +++ b/common/log/tag/tags.go @@ -919,7 +919,17 @@ func ActivityInfo(activityInfo interface{}) ZapTag { return NewAnyTag("activity-info", activityInfo) } -// WorkflowTaskRequestId returns tag for workflow task RequestId +// ActivityID returns tag for a standalone activity ID +func ActivityID(id string) ZapTag { + return NewStringTag("activity-id", id) +} + +// ActivitySize returns a tag for a standalone activity size +func ActivitySize(activitySize int64) ZapTag { + return NewInt64("activity-size", activitySize) +} + +// WorkflowTaskRequestId returns a tag for workflow task RequestId func WorkflowTaskRequestId(s string) ZapTag { return NewStringTag("workflow-task-request-id", s) } diff --git a/common/metrics/metric_defs.go b/common/metrics/metric_defs.go index 354c79d653..cd2776b6b7 100644 --- a/common/metrics/metric_defs.go +++ b/common/metrics/metric_defs.go @@ -339,6 +339,8 @@ const ( HistoryRespondActivityTaskFailedScope = "RespondActivityTaskFailed" 
// HistoryRespondActivityTaskCanceledScope tracks RespondActivityTaskCanceled API calls received by service HistoryRespondActivityTaskCanceledScope = "RespondActivityTaskCanceled" + // ActivityTerminatedScope tracks TerminateActivityExecution API calls received by service + ActivityTerminatedScope = "ActivityTerminated" // HistoryGetWorkflowExecutionHistoryScope is the metric scope for non-long-poll frontend.GetWorkflowExecutionHistory HistoryGetWorkflowExecutionHistoryScope = "GetWorkflowExecutionHistory" // HistoryPollWorkflowExecutionHistoryScope is the metric scope for long poll case of frontend.GetWorkflowExecutionHistory @@ -861,7 +863,8 @@ var ( ActivitySuccess = NewCounterDef("activity_success", WithDescription("Number of activities that succeeded (doesn't include retries).")) ActivityFail = NewCounterDef("activity_fail", WithDescription("Number of activities that failed and won't be retried anymore.")) ActivityTaskFail = NewCounterDef("activity_task_fail", WithDescription("Number of activity task failures (includes retries).")) - ActivityCancel = NewCounterDef("activity_cancel") + ActivityCancel = NewCounterDef("activity_cancel", WithDescription("Number of activities that are cancelled.")) + ActivityTerminate = NewCounterDef("activity_terminate", WithDescription("Number of activities that are terminated.")) ActivityTaskTimeout = NewCounterDef("activity_task_timeout", WithDescription("Number of activity task timeouts (including retries).")) ActivityTimeout = NewCounterDef("activity_timeout", WithDescription("Number of terminal activity timeouts.")) ActivityPayloadSize = NewCounterDef("activity_payload_size", WithDescription("Size of activity payloads in bytes.")) diff --git a/common/persistence/sql/sqlplugin/visibility.go b/common/persistence/sql/sqlplugin/visibility.go index 25e98e5e8f..672114944c 100644 --- a/common/persistence/sql/sqlplugin/visibility.go +++ b/common/persistence/sql/sqlplugin/visibility.go @@ -210,6 +210,10 @@ func 
parseCountGroupByGroupValue(fieldName string, value any) (any, error) { ), ) default: + // MySQL driver returns VARCHAR columns as []byte when scanning into *any. + if bs, ok := value.([]byte); ok { + return string(bs), nil + } return value, nil } } diff --git a/common/persistence/visibility/chasm_visibility_manager_test.go b/common/persistence/visibility/chasm_visibility_manager_test.go index c8a214f640..ee6173e3df 100644 --- a/common/persistence/visibility/chasm_visibility_manager_test.go +++ b/common/persistence/visibility/chasm_visibility_manager_test.go @@ -11,14 +11,10 @@ import ( commonpb "go.temporal.io/api/common/v1" persistencespb "go.temporal.io/server/api/persistence/v1" "go.temporal.io/server/chasm" - "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/log" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/payload" "go.temporal.io/server/common/persistence/visibility/manager" - "go.temporal.io/server/service/history/configs" - historyi "go.temporal.io/server/service/history/interfaces" - "go.temporal.io/server/service/history/tests" "go.uber.org/mock/gomock" "google.golang.org/protobuf/proto" ) @@ -31,8 +27,6 @@ type ( registry *chasm.Registry visibilityManager *manager.MockVisibilityManager - shardContext *historyi.MockShardContext - config *configs.Config visibilityMgr *ChasmVisibilityManager } @@ -97,11 +91,6 @@ func (s *ChasmVisibilityManagerSuite) SetupTest() { s.NoError(err) s.visibilityManager = manager.NewMockVisibilityManager(s.controller) - s.shardContext = historyi.NewMockShardContext(s.controller) - s.shardContext.EXPECT().ChasmRegistry().Return(s.registry).AnyTimes() - - s.config = tests.NewDynamicConfig() - s.config.HistoryMaxPageSize = dynamicconfig.GetIntPropertyFnFilteredByNamespace(1000) s.visibilityMgr = NewChasmVisibilityManager( s.registry, diff --git a/common/persistence/visibility/store/query/resolve.go b/common/persistence/visibility/store/query/resolve.go index b829cd8f86..990899bcf7 
100644 --- a/common/persistence/visibility/store/query/resolve.go +++ b/common/persistence/visibility/store/query/resolve.go @@ -38,6 +38,13 @@ func ResolveSearchAttributeAlias( return sadefs.WorkflowID, saType, nil } + // Handle ActivityId → WorkflowID transformation for standalone activities. + // TODO: Remove this hardcoded transformation. + if name == sadefs.ActivityID { + saType, _ := saTypeMap.GetType(sadefs.WorkflowID) + return sadefs.WorkflowID, saType, nil + } + fieldName, fieldType = tryChasmMapper(name, chasmMapper) if fieldName != "" { return fieldName, fieldType, nil diff --git a/common/resource/fx.go b/common/resource/fx.go index ce8dc0e643..76598aa183 100644 --- a/common/resource/fx.go +++ b/common/resource/fx.go @@ -32,6 +32,8 @@ import ( "go.temporal.io/server/common/persistence" persistenceClient "go.temporal.io/server/common/persistence/client" "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/common/persistence/visibility" + "go.temporal.io/server/common/persistence/visibility/manager" "go.temporal.io/server/common/pingable" "go.temporal.io/server/common/primitives" "go.temporal.io/server/common/quotas" @@ -185,6 +187,30 @@ func SearchAttributeManagerProvider( dynamicconfig.ForceSearchAttributesCacheRefreshOnRead.Get(dynamicCollection)) } +// SearchAttributeValidatorProvider creates a new search attribute validator with the given dependencies. It configures +// the validator with dynamic config values for key limits, value size limits, total size limits, visibility allowlist, +// and system search attribute error suppression. 
+func SearchAttributeValidatorProvider( + saProvider searchattribute.Provider, + saMapperProvider searchattribute.MapperProvider, + visibilityMgr manager.VisibilityManager, + dynamicCollection *dynamicconfig.Collection, +) *searchattribute.Validator { + return searchattribute.NewValidator( + saProvider, + saMapperProvider, + dynamicconfig.SearchAttributesNumberOfKeysLimit.Get(dynamicCollection), + dynamicconfig.SearchAttributesSizeOfValueLimit.Get(dynamicCollection), + dynamicconfig.SearchAttributesTotalSizeLimit.Get(dynamicCollection), + visibilityMgr, + visibility.AllowListForValidation( + visibilityMgr.GetStoreNames(), + dynamicconfig.VisibilityAllowList.Get(dynamicCollection), + ), + dynamicconfig.SuppressErrorSetSystemSearchAttribute.Get(dynamicCollection), + ) +} + func NamespaceRegistryProvider( logger log.SnTaggedLogger, metricsHandler metrics.Handler, diff --git a/common/retrypolicy/retry_policy_test.go b/common/retrypolicy/retry_policy_test.go index 8d1506b877..27ec1493f8 100644 --- a/common/retrypolicy/retry_policy_test.go +++ b/common/retrypolicy/retry_policy_test.go @@ -96,6 +96,16 @@ func TestEnsureRetryPolicyDefaults(t *testing.T) { NonRetryableErrorTypes: []string{"testFailureType"}, }, }, + { + name: "empty policy has non-zero defaults", + input: &commonpb.RetryPolicy{}, + want: &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Second), + MaximumInterval: durationpb.New(100 * time.Second), + BackoffCoefficient: 2, + MaximumAttempts: 120, + }, + }, } for _, tt := range testCases { diff --git a/common/rpc/interceptor/logtags/workflow_service_server_gen.go b/common/rpc/interceptor/logtags/workflow_service_server_gen.go index fe6bb14a78..8300fae41d 100644 --- a/common/rpc/interceptor/logtags/workflow_service_server_gen.go +++ b/common/rpc/interceptor/logtags/workflow_service_server_gen.go @@ -9,6 +9,10 @@ import ( func (wt *WorkflowTags) extractFromWorkflowServiceServerMessage(message any) []tag.Tag { switch r := message.(type) { + case 
*workflowservice.CountActivityExecutionsRequest: + return nil + case *workflowservice.CountActivityExecutionsResponse: + return nil case *workflowservice.CountWorkflowExecutionsRequest: return nil case *workflowservice.CountWorkflowExecutionsResponse: @@ -21,6 +25,12 @@ func (wt *WorkflowTags) extractFromWorkflowServiceServerMessage(message any) []t return nil case *workflowservice.CreateWorkflowRuleResponse: return nil + case *workflowservice.DeleteActivityExecutionRequest: + return []tag.Tag{ + tag.WorkflowRunID(r.GetRunId()), + } + case *workflowservice.DeleteActivityExecutionResponse: + return nil case *workflowservice.DeleteScheduleRequest: return nil case *workflowservice.DeleteScheduleResponse: @@ -48,6 +58,14 @@ func (wt *WorkflowTags) extractFromWorkflowServiceServerMessage(message any) []t return nil case *workflowservice.DeprecateNamespaceResponse: return nil + case *workflowservice.DescribeActivityExecutionRequest: + return []tag.Tag{ + tag.WorkflowRunID(r.GetRunId()), + } + case *workflowservice.DescribeActivityExecutionResponse: + return []tag.Tag{ + tag.WorkflowRunID(r.GetRunId()), + } case *workflowservice.DescribeBatchOperationRequest: return nil case *workflowservice.DescribeBatchOperationResponse: @@ -145,6 +163,10 @@ func (wt *WorkflowTags) extractFromWorkflowServiceServerMessage(message any) []t } case *workflowservice.GetWorkflowExecutionHistoryReverseResponse: return nil + case *workflowservice.ListActivityExecutionsRequest: + return nil + case *workflowservice.ListActivityExecutionsResponse: + return nil case *workflowservice.ListArchivedWorkflowExecutionsRequest: return nil case *workflowservice.ListArchivedWorkflowExecutionsResponse: @@ -215,6 +237,14 @@ func (wt *WorkflowTags) extractFromWorkflowServiceServerMessage(message any) []t } case *workflowservice.PauseWorkflowExecutionResponse: return nil + case *workflowservice.PollActivityExecutionRequest: + return []tag.Tag{ + tag.WorkflowRunID(r.GetRunId()), + } + case 
*workflowservice.PollActivityExecutionResponse: + return []tag.Tag{ + tag.WorkflowRunID(r.GetRunId()), + } case *workflowservice.PollActivityTaskQueueRequest: return nil case *workflowservice.PollActivityTaskQueueResponse: @@ -269,6 +299,12 @@ func (wt *WorkflowTags) extractFromWorkflowServiceServerMessage(message any) []t return nil case *workflowservice.RegisterNamespaceResponse: return nil + case *workflowservice.RequestCancelActivityExecutionRequest: + return []tag.Tag{ + tag.WorkflowRunID(r.GetRunId()), + } + case *workflowservice.RequestCancelActivityExecutionResponse: + return nil case *workflowservice.RequestCancelWorkflowExecutionRequest: return []tag.Tag{ tag.WorkflowID(r.GetWorkflowExecution().GetWorkflowId()), @@ -391,6 +427,12 @@ func (wt *WorkflowTags) extractFromWorkflowServiceServerMessage(message any) []t } case *workflowservice.SignalWorkflowExecutionResponse: return nil + case *workflowservice.StartActivityExecutionRequest: + return nil + case *workflowservice.StartActivityExecutionResponse: + return []tag.Tag{ + tag.WorkflowRunID(r.GetRunId()), + } case *workflowservice.StartBatchOperationRequest: return nil case *workflowservice.StartBatchOperationResponse: @@ -407,6 +449,12 @@ func (wt *WorkflowTags) extractFromWorkflowServiceServerMessage(message any) []t return nil case *workflowservice.StopBatchOperationResponse: return nil + case *workflowservice.TerminateActivityExecutionRequest: + return []tag.Tag{ + tag.WorkflowRunID(r.GetRunId()), + } + case *workflowservice.TerminateActivityExecutionResponse: + return nil case *workflowservice.TerminateWorkflowExecutionRequest: return []tag.Tag{ tag.WorkflowID(r.GetWorkflowExecution().GetWorkflowId()), diff --git a/common/rpc/interceptor/namespace_rate_limit.go b/common/rpc/interceptor/namespace_rate_limit.go index e4ca7f9615..a67ee7782d 100644 --- a/common/rpc/interceptor/namespace_rate_limit.go +++ b/common/rpc/interceptor/namespace_rate_limit.go @@ -64,8 +64,10 @@ func (ni 
*NamespaceRateLimitInterceptorImpl) Intercept( ) (interface{}, error) { if ns := MustGetNamespaceName(ni.namespaceRegistry, req); ns != namespace.EmptyName { method := info.FullMethod - if IsLongPollGetHistoryRequest(req) { + if IsLongPollGetWorkflowExecutionHistoryRequest(req) { method = configs.PollWorkflowHistoryAPIName + } else if IsLongPollDescribeActivityExecutionRequest(req) { + method = configs.PollActivityExecutionAPIName } if err := ni.Allow(ns, method, headers.NewGRPCHeaderGetter(ctx)); err != nil { return nil, err @@ -94,7 +96,7 @@ func (ni *NamespaceRateLimitInterceptorImpl) Allow(namespaceName namespace.Name, return nil } -func IsLongPollGetHistoryRequest( +func IsLongPollGetWorkflowExecutionHistoryRequest( req interface{}, ) bool { switch request := req.(type) { @@ -103,3 +105,13 @@ func IsLongPollGetHistoryRequest( } return false } + +func IsLongPollDescribeActivityExecutionRequest( + req interface{}, +) bool { + switch request := req.(type) { + case *workflowservice.DescribeActivityExecutionRequest: + return len(request.GetLongPollToken()) > 0 + } + return false +} diff --git a/common/rpc/interceptor/redirection.go b/common/rpc/interceptor/redirection.go index dced9c1251..47f193f28d 100644 --- a/common/rpc/interceptor/redirection.go +++ b/common/rpc/interceptor/redirection.go @@ -137,6 +137,15 @@ var ( "UpdateTaskQueueConfig": func() any { return &workflowservice.UpdateTaskQueueConfigResponse{} }, "FetchWorkerConfig": func() any { return &workflowservice.FetchWorkerConfigResponse{} }, "UpdateWorkerConfig": func() any { return &workflowservice.UpdateWorkerConfigResponse{} }, + + "StartActivityExecution": func() any { return &workflowservice.StartActivityExecutionResponse{} }, + "CountActivityExecutions": func() any { return &workflowservice.CountActivityExecutionsResponse{} }, + "ListActivityExecutions": func() any { return &workflowservice.ListActivityExecutionsResponse{} }, + "DescribeActivityExecution": func() any { return 
&workflowservice.DescribeActivityExecutionResponse{} }, + "PollActivityExecution": func() any { return &workflowservice.PollActivityExecutionResponse{} }, + "RequestCancelActivityExecution": func() any { return &workflowservice.RequestCancelActivityExecutionResponse{} }, + "TerminateActivityExecution": func() any { return &workflowservice.TerminateActivityExecutionResponse{} }, + "DeleteActivityExecution": func() any { return &workflowservice.DeleteActivityExecutionResponse{} }, } ) diff --git a/common/rpc/interceptor/redirection_test.go b/common/rpc/interceptor/redirection_test.go index 47d5570cfa..86b745305d 100644 --- a/common/rpc/interceptor/redirection_test.go +++ b/common/rpc/interceptor/redirection_test.go @@ -192,6 +192,15 @@ func (s *redirectionInterceptorSuite) TestGlobalAPI() { "UpdateTaskQueueConfig": {}, "FetchWorkerConfig": {}, "UpdateWorkerConfig": {}, + + "StartActivityExecution": {}, + "CountActivityExecutions": {}, + "ListActivityExecutions": {}, + "DescribeActivityExecution": {}, + "PollActivityExecution": {}, + "RequestCancelActivityExecution": {}, + "TerminateActivityExecution": {}, + "DeleteActivityExecution": {}, }, apis) } @@ -210,6 +219,7 @@ func (s *redirectionInterceptorSuite) TestAPIResultMapping() { for api, respAllocFn := range globalAPIResponses { actualAPIs[api] = reflect.TypeOf(respAllocFn()) } + s.Equal(expectedAPIs, actualAPIs) } diff --git a/common/searchattribute/sadefs/constants.go b/common/searchattribute/sadefs/constants.go index efc760ef04..dee24ca923 100644 --- a/common/searchattribute/sadefs/constants.go +++ b/common/searchattribute/sadefs/constants.go @@ -59,6 +59,9 @@ const ( // any other custom search attribute. ScheduleID = "ScheduleId" + // TODO: Remove this hardcoded constant. + ActivityID = "ActivityId" + // TemporalPauseInfo is a search attribute that stores the information about paused entities in the workflow. // Format of a single paused entity: ":". 
// * is something that can be used to identify the filtering condition diff --git a/common/tasktoken/token.go b/common/tasktoken/token.go index 88e0e907f3..4212b4f93c 100644 --- a/common/tasktoken/token.go +++ b/common/tasktoken/token.go @@ -41,6 +41,7 @@ func NewActivityTaskToken( clock *clockspb.VectorClock, version int64, startVersion int64, + componentRef []byte, ) *tokenspb.Task { return &tokenspb.Task{ NamespaceId: namespaceID, @@ -53,5 +54,6 @@ func NewActivityTaskToken( Clock: clock, Version: version, StartVersion: startVersion, + ComponentRef: componentRef, } } diff --git a/common/testing/mockapi/workflowservicemock/v1/service_grpc.pb.mock.go b/common/testing/mockapi/workflowservicemock/v1/service_grpc.pb.mock.go index 15837e0bb9..6242b452ba 100644 --- a/common/testing/mockapi/workflowservicemock/v1/service_grpc.pb.mock.go +++ b/common/testing/mockapi/workflowservicemock/v1/service_grpc.pb.mock.go @@ -42,6 +42,26 @@ func (m *MockWorkflowServiceClient) EXPECT() *MockWorkflowServiceClientMockRecor return m.recorder } +// CountActivityExecutions mocks base method. +func (m *MockWorkflowServiceClient) CountActivityExecutions(ctx context.Context, in *workflowservice.CountActivityExecutionsRequest, opts ...grpc.CallOption) (*workflowservice.CountActivityExecutionsResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CountActivityExecutions", varargs...) + ret0, _ := ret[0].(*workflowservice.CountActivityExecutionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CountActivityExecutions indicates an expected call of CountActivityExecutions. +func (mr *MockWorkflowServiceClientMockRecorder) CountActivityExecutions(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountActivityExecutions", reflect.TypeOf((*MockWorkflowServiceClient)(nil).CountActivityExecutions), varargs...) +} + // CountWorkflowExecutions mocks base method. func (m *MockWorkflowServiceClient) CountWorkflowExecutions(ctx context.Context, in *workflowservice.CountWorkflowExecutionsRequest, opts ...grpc.CallOption) (*workflowservice.CountWorkflowExecutionsResponse, error) { m.ctrl.T.Helper() @@ -102,6 +122,26 @@ func (mr *MockWorkflowServiceClientMockRecorder) CreateWorkflowRule(ctx, in any, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateWorkflowRule", reflect.TypeOf((*MockWorkflowServiceClient)(nil).CreateWorkflowRule), varargs...) } +// DeleteActivityExecution mocks base method. +func (m *MockWorkflowServiceClient) DeleteActivityExecution(ctx context.Context, in *workflowservice.DeleteActivityExecutionRequest, opts ...grpc.CallOption) (*workflowservice.DeleteActivityExecutionResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteActivityExecution", varargs...) + ret0, _ := ret[0].(*workflowservice.DeleteActivityExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteActivityExecution indicates an expected call of DeleteActivityExecution. +func (mr *MockWorkflowServiceClientMockRecorder) DeleteActivityExecution(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteActivityExecution", reflect.TypeOf((*MockWorkflowServiceClient)(nil).DeleteActivityExecution), varargs...) +} + // DeleteSchedule mocks base method. 
func (m *MockWorkflowServiceClient) DeleteSchedule(ctx context.Context, in *workflowservice.DeleteScheduleRequest, opts ...grpc.CallOption) (*workflowservice.DeleteScheduleResponse, error) { m.ctrl.T.Helper() @@ -222,6 +262,26 @@ func (mr *MockWorkflowServiceClientMockRecorder) DeprecateNamespace(ctx, in any, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeprecateNamespace", reflect.TypeOf((*MockWorkflowServiceClient)(nil).DeprecateNamespace), varargs...) } +// DescribeActivityExecution mocks base method. +func (m *MockWorkflowServiceClient) DescribeActivityExecution(ctx context.Context, in *workflowservice.DescribeActivityExecutionRequest, opts ...grpc.CallOption) (*workflowservice.DescribeActivityExecutionResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeActivityExecution", varargs...) + ret0, _ := ret[0].(*workflowservice.DescribeActivityExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeActivityExecution indicates an expected call of DescribeActivityExecution. +func (mr *MockWorkflowServiceClientMockRecorder) DescribeActivityExecution(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeActivityExecution", reflect.TypeOf((*MockWorkflowServiceClient)(nil).DescribeActivityExecution), varargs...) +} + // DescribeBatchOperation mocks base method. 
func (m *MockWorkflowServiceClient) DescribeBatchOperation(ctx context.Context, in *workflowservice.DescribeBatchOperationRequest, opts ...grpc.CallOption) (*workflowservice.DescribeBatchOperationResponse, error) { m.ctrl.T.Helper() @@ -662,6 +722,26 @@ func (mr *MockWorkflowServiceClientMockRecorder) GetWorkflowExecutionHistoryReve return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecutionHistoryReverse", reflect.TypeOf((*MockWorkflowServiceClient)(nil).GetWorkflowExecutionHistoryReverse), varargs...) } +// ListActivityExecutions mocks base method. +func (m *MockWorkflowServiceClient) ListActivityExecutions(ctx context.Context, in *workflowservice.ListActivityExecutionsRequest, opts ...grpc.CallOption) (*workflowservice.ListActivityExecutionsResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListActivityExecutions", varargs...) + ret0, _ := ret[0].(*workflowservice.ListActivityExecutionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListActivityExecutions indicates an expected call of ListActivityExecutions. +func (mr *MockWorkflowServiceClientMockRecorder) ListActivityExecutions(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListActivityExecutions", reflect.TypeOf((*MockWorkflowServiceClient)(nil).ListActivityExecutions), varargs...) +} + // ListArchivedWorkflowExecutions mocks base method. 
func (m *MockWorkflowServiceClient) ListArchivedWorkflowExecutions(ctx context.Context, in *workflowservice.ListArchivedWorkflowExecutionsRequest, opts ...grpc.CallOption) (*workflowservice.ListArchivedWorkflowExecutionsResponse, error) { m.ctrl.T.Helper() @@ -982,6 +1062,26 @@ func (mr *MockWorkflowServiceClientMockRecorder) PauseWorkflowExecution(ctx, in return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PauseWorkflowExecution", reflect.TypeOf((*MockWorkflowServiceClient)(nil).PauseWorkflowExecution), varargs...) } +// PollActivityExecution mocks base method. +func (m *MockWorkflowServiceClient) PollActivityExecution(ctx context.Context, in *workflowservice.PollActivityExecutionRequest, opts ...grpc.CallOption) (*workflowservice.PollActivityExecutionResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PollActivityExecution", varargs...) + ret0, _ := ret[0].(*workflowservice.PollActivityExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PollActivityExecution indicates an expected call of PollActivityExecution. +func (mr *MockWorkflowServiceClientMockRecorder) PollActivityExecution(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PollActivityExecution", reflect.TypeOf((*MockWorkflowServiceClient)(nil).PollActivityExecution), varargs...) +} + // PollActivityTaskQueue mocks base method. 
func (m *MockWorkflowServiceClient) PollActivityTaskQueue(ctx context.Context, in *workflowservice.PollActivityTaskQueueRequest, opts ...grpc.CallOption) (*workflowservice.PollActivityTaskQueueResponse, error) { m.ctrl.T.Helper() @@ -1162,6 +1262,26 @@ func (mr *MockWorkflowServiceClientMockRecorder) RegisterNamespace(ctx, in any, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterNamespace", reflect.TypeOf((*MockWorkflowServiceClient)(nil).RegisterNamespace), varargs...) } +// RequestCancelActivityExecution mocks base method. +func (m *MockWorkflowServiceClient) RequestCancelActivityExecution(ctx context.Context, in *workflowservice.RequestCancelActivityExecutionRequest, opts ...grpc.CallOption) (*workflowservice.RequestCancelActivityExecutionResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "RequestCancelActivityExecution", varargs...) + ret0, _ := ret[0].(*workflowservice.RequestCancelActivityExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RequestCancelActivityExecution indicates an expected call of RequestCancelActivityExecution. +func (mr *MockWorkflowServiceClientMockRecorder) RequestCancelActivityExecution(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestCancelActivityExecution", reflect.TypeOf((*MockWorkflowServiceClient)(nil).RequestCancelActivityExecution), varargs...) +} + // RequestCancelWorkflowExecution mocks base method. 
func (m *MockWorkflowServiceClient) RequestCancelWorkflowExecution(ctx context.Context, in *workflowservice.RequestCancelWorkflowExecutionRequest, opts ...grpc.CallOption) (*workflowservice.RequestCancelWorkflowExecutionResponse, error) { m.ctrl.T.Helper() @@ -1622,6 +1742,26 @@ func (mr *MockWorkflowServiceClientMockRecorder) SignalWorkflowExecution(ctx, in return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignalWorkflowExecution", reflect.TypeOf((*MockWorkflowServiceClient)(nil).SignalWorkflowExecution), varargs...) } +// StartActivityExecution mocks base method. +func (m *MockWorkflowServiceClient) StartActivityExecution(ctx context.Context, in *workflowservice.StartActivityExecutionRequest, opts ...grpc.CallOption) (*workflowservice.StartActivityExecutionResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "StartActivityExecution", varargs...) + ret0, _ := ret[0].(*workflowservice.StartActivityExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StartActivityExecution indicates an expected call of StartActivityExecution. +func (mr *MockWorkflowServiceClientMockRecorder) StartActivityExecution(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartActivityExecution", reflect.TypeOf((*MockWorkflowServiceClient)(nil).StartActivityExecution), varargs...) +} + // StartBatchOperation mocks base method. 
func (m *MockWorkflowServiceClient) StartBatchOperation(ctx context.Context, in *workflowservice.StartBatchOperationRequest, opts ...grpc.CallOption) (*workflowservice.StartBatchOperationResponse, error) { m.ctrl.T.Helper() @@ -1682,6 +1822,26 @@ func (mr *MockWorkflowServiceClientMockRecorder) StopBatchOperation(ctx, in any, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopBatchOperation", reflect.TypeOf((*MockWorkflowServiceClient)(nil).StopBatchOperation), varargs...) } +// TerminateActivityExecution mocks base method. +func (m *MockWorkflowServiceClient) TerminateActivityExecution(ctx context.Context, in *workflowservice.TerminateActivityExecutionRequest, opts ...grpc.CallOption) (*workflowservice.TerminateActivityExecutionResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "TerminateActivityExecution", varargs...) + ret0, _ := ret[0].(*workflowservice.TerminateActivityExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TerminateActivityExecution indicates an expected call of TerminateActivityExecution. +func (mr *MockWorkflowServiceClientMockRecorder) TerminateActivityExecution(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TerminateActivityExecution", reflect.TypeOf((*MockWorkflowServiceClient)(nil).TerminateActivityExecution), varargs...) +} + // TerminateWorkflowExecution mocks base method. 
func (m *MockWorkflowServiceClient) TerminateWorkflowExecution(ctx context.Context, in *workflowservice.TerminateWorkflowExecutionRequest, opts ...grpc.CallOption) (*workflowservice.TerminateWorkflowExecutionResponse, error) { m.ctrl.T.Helper() diff --git a/common/util.go b/common/util.go index 8b450fbde7..5b3cbadd08 100644 --- a/common/util.go +++ b/common/util.go @@ -89,6 +89,8 @@ const ( contextExpireThreshold = 10 * time.Millisecond + // FailureReasonActivityTimeout is failureReason for when an activity times out, with %v as the timeout type. + FailureReasonActivityTimeout = "activity %v timeout" // FailureReasonCompleteResultExceedsLimit is failureReason for complete result exceeds limit FailureReasonCompleteResultExceedsLimit = "Complete result exceeds size limit." // FailureReasonFailureDetailsExceedsLimit is failureReason for failure details exceeds limit diff --git a/config/dynamicconfig/development-cass.yaml b/config/dynamicconfig/development-cass.yaml index 8e0c42cfcf..68e787a663 100644 --- a/config/dynamicconfig/development-cass.yaml +++ b/config/dynamicconfig/development-cass.yaml @@ -55,3 +55,7 @@ history.hostLevelCacheMaxSize: - value: 8192 history.enableTransitionHistory: - value: true +history.enableChasm: + - value: true +activity.enableStandalone: + - value: true diff --git a/config/dynamicconfig/development-sql.yaml b/config/dynamicconfig/development-sql.yaml index ea01750910..f921470b95 100644 --- a/config/dynamicconfig/development-sql.yaml +++ b/config/dynamicconfig/development-sql.yaml @@ -73,3 +73,7 @@ history.hostLevelCacheMaxSize: - value: 8192 history.enableTransitionHistory: - value: true +history.enableChasm: + - value: true +activity.enableStandalone: + - value: true diff --git a/config/dynamicconfig/development-xdc.yaml b/config/dynamicconfig/development-xdc.yaml index 00ca5f28b4..e148320fe4 100644 --- a/config/dynamicconfig/development-xdc.yaml +++ b/config/dynamicconfig/development-xdc.yaml @@ -46,4 +46,8 @@ 
history.ReplicationEnableUpdateWithNewTaskMerge: history.enableTransitionHistory: - value: true history.EnableReplicationTaskTieredProcessing: - - value: true \ No newline at end of file + - value: true +history.enableChasm: + - value: true +activity.enableStandalone: + - value: true diff --git a/go.mod b/go.mod index 3a0f8a2c15..d97aaa36aa 100644 --- a/go.mod +++ b/go.mod @@ -58,7 +58,7 @@ require ( go.opentelemetry.io/otel/sdk v1.34.0 go.opentelemetry.io/otel/sdk/metric v1.34.0 go.opentelemetry.io/otel/trace v1.34.0 - go.temporal.io/api v1.59.1-0.20251205215512-1b49af22324a + go.temporal.io/api v1.59.1-0.20251219010759-931cb9ad0dea go.temporal.io/sdk v1.35.0 go.uber.org/fx v1.24.0 go.uber.org/mock v0.6.0 diff --git a/go.sum b/go.sum index 4d74230c5a..f5f4433962 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= -go.temporal.io/api v1.59.1-0.20251205215512-1b49af22324a h1:eGsL32R3c2rlc6v56XI+vNtUViPONS5kyXW2OPh3d0U= -go.temporal.io/api v1.59.1-0.20251205215512-1b49af22324a/go.mod h1:iaxoP/9OXMJcQkETTECfwYq4cw/bj4nwov8b3ZLVnXM= +go.temporal.io/api v1.59.1-0.20251219010759-931cb9ad0dea h1:afZnrr9PBcQEzrA3eXFNREg/S8zcEKGc7jTdrUc4+NY= +go.temporal.io/api v1.59.1-0.20251219010759-931cb9ad0dea/go.mod h1:iaxoP/9OXMJcQkETTECfwYq4cw/bj4nwov8b3ZLVnXM= go.temporal.io/sdk v1.35.0 h1:lRNAQ5As9rLgYa7HBvnmKyzxLcdElTuoFJ0FXM/AsLQ= go.temporal.io/sdk v1.35.0/go.mod h1:1q5MuLc2MEJ4lneZTHJzpVebW2oZnyxoIOWX3oFVebw= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= diff --git a/proto/internal/temporal/server/api/historyservice/v1/request_response.proto 
b/proto/internal/temporal/server/api/historyservice/v1/request_response.proto index af2d853b4b..92d7c24727 100644 --- a/proto/internal/temporal/server/api/historyservice/v1/request_response.proto +++ b/proto/internal/temporal/server/api/historyservice/v1/request_response.proto @@ -95,7 +95,7 @@ message StartWorkflowExecutionRequest { temporal.api.deployment.v1.WorkerDeploymentVersion inherited_pinned_version = 15; // Passes deployment version and revision number from a parent/previous workflow with AutoUpgrade behavior // to its child/continued-as-new workflow. The first workflow task of the child/CAN workflow is dispatched to - // either this deployment version or the current version of the task queue, depending on which is the more recent version. + // either this deployment version or the current version of the task queue, depending on which is the more recent version. // After the first workflow task, the effective behavior of the workflow is determined by worker-sent values in // subsequent workflow tasks. temporal.api.deployment.v1.InheritedAutoUpgradeInfo inherited_auto_upgrade_info = 16; @@ -300,7 +300,7 @@ message RecordWorkflowTaskStartedResponseWithRawHistory { } message RecordActivityTaskStartedRequest { - option (routing).workflow_id = "workflow_execution.workflow_id"; + option (routing).custom = true; string namespace_id = 1; temporal.api.common.v1.WorkflowExecution workflow_execution = 2; @@ -323,6 +323,10 @@ message RecordActivityTaskStartedRequest { // Revision number that was sent by matching when the task was dispatched. Used to resolve eventual consistency issues // that may arise due to stale routing configs in task queue partitions. int64 task_dispatch_revision_number = 13; + // Reference to the Chasm component for activity execution (if applicable). 
For standalone activities, all necessary + // start information is carried within this component, obviating the need to use the fields that apply to embedded + // activities with the exception of version_directive. + bytes component_ref = 14; } message RecordActivityTaskStartedResponse { diff --git a/proto/internal/temporal/server/api/matchingservice/v1/request_response.proto b/proto/internal/temporal/server/api/matchingservice/v1/request_response.proto index 697396f346..7627db3513 100644 --- a/proto/internal/temporal/server/api/matchingservice/v1/request_response.proto +++ b/proto/internal/temporal/server/api/matchingservice/v1/request_response.proto @@ -132,6 +132,10 @@ message AddActivityTaskRequest { temporal.server.api.taskqueue.v1.TaskForwardInfo forward_info = 11; int32 stamp = 12; temporal.api.common.v1.Priority priority = 13; + // Reference to the Chasm component for activity execution (if applicable). For standalone activities, all + // necessary start information is carried within this component, obviating the need to use the fields that apply to + // embedded activities. 
+ bytes component_ref = 14; } message AddActivityTaskResponse { diff --git a/proto/internal/temporal/server/api/persistence/v1/tasks.proto b/proto/internal/temporal/server/api/persistence/v1/tasks.proto index 183fc9a2c3..c2a6c0b5d3 100644 --- a/proto/internal/temporal/server/api/persistence/v1/tasks.proto +++ b/proto/internal/temporal/server/api/persistence/v1/tasks.proto @@ -32,6 +32,8 @@ message TaskInfo { // Stamp field allows to differentiate between different instances of the same task int32 stamp = 9; temporal.api.common.v1.Priority priority = 10; + // Reference to any chasm component associated with this task + bytes component_ref = 11; } // task_queue column diff --git a/proto/internal/temporal/server/api/token/v1/message.proto b/proto/internal/temporal/server/api/token/v1/message.proto index df1531cc63..ffb2302c5d 100644 --- a/proto/internal/temporal/server/api/token/v1/message.proto +++ b/proto/internal/temporal/server/api/token/v1/message.proto @@ -49,6 +49,8 @@ message Task { int64 version = 11; google.protobuf.Timestamp started_time = 12; int64 start_version = 13; + // Reference to the associated Chasm component, if provided. + bytes component_ref = 14; } message QueryTask { diff --git a/service/frontend/configs/quotas.go b/service/frontend/configs/quotas.go index 11f131173e..3bbc45961a 100644 --- a/service/frontend/configs/quotas.go +++ b/service/frontend/configs/quotas.go @@ -24,25 +24,34 @@ const ( CompleteNexusOperation = "/temporal.api.nexusservice.v1.NexusService/CompleteNexusOperation" // PollWorkflowHistoryAPIName is used instead of GetWorkflowExecutionHistory if WaitNewEvent is true in request. PollWorkflowHistoryAPIName = "/temporal.api.workflowservice.v1.WorkflowService/PollWorkflowExecutionHistory" + // PollActivityExecutionAPIName is used instead of DescribeActivityExecution if LongPollToken is set in request. 
+ PollActivityExecutionAPIName = "/temporal.api.workflowservice.v1.WorkflowService/PollActivityExecutionDescription" ) var ( - // ExecutionAPICountLimitOverride determines how many tokens each of these API calls consumes from their - // corresponding quota, which is determined by dynamicconfig.FrontendMaxConcurrentLongRunningRequestsPerInstance. If - // the value is not set, then the method is not considered a long-running request and the number of concurrent - // requests will not be throttled. The Poll* methods here are long-running because they block until there is a task - // available. The GetWorkflowExecutionHistory method is blocking only if WaitNewEvent is true, otherwise it is not - // long-running. The QueryWorkflow and UpdateWorkflowExecution methods are long-running because they both block - // until a background WFT is complete. + // ExecutionAPICountLimitOverride determines how many tokens each of these API calls consumes + // from their corresponding quota, which is determined by + // dynamicconfig.FrontendMaxConcurrentLongRunningRequestsPerInstance. If the value is not set, + // then the method is not considered a long-running request and the number of concurrent + // requests will not be throttled. The Poll* methods here are long-running because they block + // until there is a task available. GetWorkflowExecutionHistory and DescribeActivityExecution + // methods are blocking only if WaitNewEvent/LongPollToken are set, otherwise they are not + // long-running. The QueryWorkflow and UpdateWorkflowExecution methods are long-running because + // they both block until a background WFT is complete. 
ExecutionAPICountLimitOverride = map[string]int{ "/temporal.api.workflowservice.v1.WorkflowService/PollActivityTaskQueue": 1, "/temporal.api.workflowservice.v1.WorkflowService/PollWorkflowTaskQueue": 1, "/temporal.api.workflowservice.v1.WorkflowService/PollWorkflowExecutionUpdate": 1, "/temporal.api.workflowservice.v1.WorkflowService/QueryWorkflow": 1, "/temporal.api.workflowservice.v1.WorkflowService/UpdateWorkflowExecution": 1, - "/temporal.api.workflowservice.v1.WorkflowService/GetWorkflowExecutionHistory": 1, "/temporal.api.workflowservice.v1.WorkflowService/PollNexusTaskQueue": 1, + // Long-running if activity outcome is not already available + "/temporal.api.workflowservice.v1.WorkflowService/PollActivityExecution": 1, + // Long-running if certain request parameters are set + "/temporal.api.workflowservice.v1.WorkflowService/GetWorkflowExecutionHistory": 1, + "/temporal.api.workflowservice.v1.WorkflowService/DescribeActivityExecution": 1, + // potentially long-running, depending on the operations "/temporal.api.workflowservice.v1.WorkflowService/ExecuteMultiOperation": 1, @@ -71,6 +80,7 @@ var ( "/temporal.api.workflowservice.v1.WorkflowService/ExecuteMultiOperation": 1, "/temporal.api.workflowservice.v1.WorkflowService/CreateSchedule": 1, "/temporal.api.workflowservice.v1.WorkflowService/StartBatchOperation": 1, + "/temporal.api.workflowservice.v1.WorkflowService/StartActivityExecution": 1, DispatchNexusTaskByNamespaceAndTaskQueueAPIName: 1, DispatchNexusTaskByEndpointAPIName: 1, @@ -114,11 +124,15 @@ var ( "/temporal.api.workflowservice.v1.WorkflowService/ListWorkflowRules": 2, "/temporal.api.workflowservice.v1.WorkflowService/TriggerWorkflowRule": 2, "/temporal.api.workflowservice.v1.WorkflowService/UpdateTaskQueueConfig": 2, + "/temporal.api.workflowservice.v1.WorkflowService/RequestCancelActivityExecution": 2, + "/temporal.api.workflowservice.v1.WorkflowService/TerminateActivityExecution": 2, + 
"/temporal.api.workflowservice.v1.WorkflowService/DeleteActivityExecution": 2, "/temporal.api.workflowservice.v1.WorkflowService/PauseWorkflowExecution": 2, "/temporal.api.workflowservice.v1.WorkflowService/UnpauseWorkflowExecution": 2, // P3: Status Querying APIs "/temporal.api.workflowservice.v1.WorkflowService/DescribeWorkflowExecution": 3, + "/temporal.api.workflowservice.v1.WorkflowService/DescribeActivityExecution": 3, "/temporal.api.workflowservice.v1.WorkflowService/DescribeTaskQueue": 3, "/temporal.api.workflowservice.v1.WorkflowService/GetWorkerBuildIdCompatibility": 3, "/temporal.api.workflowservice.v1.WorkflowService/GetWorkerVersioningRules": 3, @@ -143,6 +157,7 @@ var ( "/temporal.api.workflowservice.v1.WorkflowService/RespondNexusTaskFailed": 3, // P4: Poll APIs and other low priority APIs + "/temporal.api.workflowservice.v1.WorkflowService/PollActivityExecution": 4, // TODO(saa-preview): should it be 4 or 3? "/temporal.api.workflowservice.v1.WorkflowService/PollWorkflowTaskQueue": 4, "/temporal.api.workflowservice.v1.WorkflowService/PollActivityTaskQueue": 4, "/temporal.api.workflowservice.v1.WorkflowService/PollWorkflowExecutionUpdate": 4, @@ -154,9 +169,12 @@ var ( "/temporal.api.workflowservice.v1.WorkflowService/FetchWorkerConfig": 4, "/temporal.api.workflowservice.v1.WorkflowService/UpdateWorkerConfig": 4, - // P5: GetWorkflowExecutionHistory with WaitNewEvent set to true is a long poll API. - // Treat as long-poll but lower priority (5) so spikes don’t block Poll* APIs. - PollWorkflowHistoryAPIName: 5, + // P5: Low priority APIs + // GetWorkflowExecutionHistory with WaitNewEvent set to true is a long poll API. + // Similarly, DescribeActivityExecution is a long poll API if LongPollToken is set. + // Treat these as long-poll but lower priority (5) so spikes don’t block Poll* APIs. 
+ PollWorkflowHistoryAPIName: 5, + PollActivityExecutionAPIName: 5, // Informational API that aren't required for the temporal service to function OpenAPIV3APIName: 5, OpenAPIV2APIName: 5, @@ -173,6 +191,8 @@ var ( "/temporal.api.workflowservice.v1.WorkflowService/ListArchivedWorkflowExecutions": 1, "/temporal.api.workflowservice.v1.WorkflowService/ListWorkers": 1, "/temporal.api.workflowservice.v1.WorkflowService/DescribeWorker": 1, + "/temporal.api.workflowservice.v1.WorkflowService/CountActivityExecutions": 1, + "/temporal.api.workflowservice.v1.WorkflowService/ListActivityExecutions": 1, // APIs that rely on visibility "/temporal.api.workflowservice.v1.WorkflowService/GetWorkerTaskReachability": 1, diff --git a/service/frontend/configs/quotas_test.go b/service/frontend/configs/quotas_test.go index cf9c35e27d..4d64ab6b53 100644 --- a/service/frontend/configs/quotas_test.go +++ b/service/frontend/configs/quotas_test.go @@ -101,6 +101,9 @@ func (s *quotasSuite) TestVisibilityAPIs() { "/temporal.api.workflowservice.v1.WorkflowService/DescribeTaskQueueWithReachability": {}, "/temporal.api.workflowservice.v1.WorkflowService/ListDeployments": {}, "/temporal.api.workflowservice.v1.WorkflowService/GetDeploymentReachability": {}, + + "/temporal.api.workflowservice.v1.WorkflowService/CountActivityExecutions": {}, + "/temporal.api.workflowservice.v1.WorkflowService/ListActivityExecutions": {}, } var service workflowservice.WorkflowServiceServer diff --git a/service/frontend/fx.go b/service/frontend/fx.go index 4aa4d30efa..4615699479 100644 --- a/service/frontend/fx.go +++ b/service/frontend/fx.go @@ -7,6 +7,7 @@ import ( "github.com/gorilla/mux" "go.temporal.io/server/api/adminservice/v1" "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity" "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" "go.temporal.io/server/client" "go.temporal.io/server/common" @@ -111,6 +112,7 @@ var Module = fx.Options( 
fx.Invoke(EndpointRegistryLifetimeHooks), fx.Provide(schedulerpb.NewSchedulerServiceLayeredClient), nexusfrontend.Module, + activity.FrontendModule, fx.Provide(visibility.ChasmVisibilityManagerProvider), fx.Provide(chasm.ChasmVisibilityInterceptorProvider), ) @@ -764,6 +766,8 @@ func HandlerProvider( membershipMonitor membership.Monitor, healthInterceptor *interceptor.HealthInterceptor, scheduleSpecBuilder *scheduler.SpecBuilder, + activityHandler activity.FrontendHandler, + registry *chasm.Registry, ) Handler { wfHandler := NewWorkflowHandler( serviceConfig, @@ -791,6 +795,8 @@ func HandlerProvider( healthInterceptor, scheduleSpecBuilder, httpEnabled(cfg, serviceName), + activityHandler, + registry, ) return wfHandler } diff --git a/service/frontend/workflow_handler.go b/service/frontend/workflow_handler.go index f4e1e43da3..ff7af51af5 100644 --- a/service/frontend/workflow_handler.go +++ b/service/frontend/workflow_handler.go @@ -33,6 +33,8 @@ import ( "go.temporal.io/server/api/matchingservice/v1" schedulespb "go.temporal.io/server/api/schedule/v1" taskqueuespb "go.temporal.io/server/api/taskqueue/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity" "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" "go.temporal.io/server/client/frontend" "go.temporal.io/server/common" @@ -112,7 +114,9 @@ const ( type ( // WorkflowHandler - gRPC handler interface for workflowservice WorkflowHandler struct { - workflowservice.UnimplementedWorkflowServiceServer + workflowservice.UnsafeWorkflowServiceServer + activity.FrontendHandler + status int32 tokenSerializer *tasktoken.Serializer @@ -144,6 +148,7 @@ type ( scheduleSpecBuilder *scheduler.SpecBuilder outstandingPollers collection.SyncMap[string, collection.SyncMap[string, context.CancelFunc]] httpEnabled bool + registry *chasm.Registry } ) @@ -174,8 +179,11 @@ func NewWorkflowHandler( healthInterceptor *interceptor.HealthInterceptor, scheduleSpecBuilder *scheduler.SpecBuilder, httpEnabled 
bool, + activityHandler activity.FrontendHandler, + registry *chasm.Registry, ) *WorkflowHandler { handler := &WorkflowHandler{ + FrontendHandler: activityHandler, status: common.DaemonStatusInitialized, config: config, tokenSerializer: tasktoken.NewSerializer(), @@ -227,6 +235,7 @@ func NewWorkflowHandler( scheduleSpecBuilder: scheduleSpecBuilder, outstandingPollers: collection.NewSyncMap[string, collection.SyncMap[string, context.CancelFunc]](), httpEnabled: httpEnabled, + registry: registry, } return handler @@ -1222,6 +1231,11 @@ func (wh *WorkflowHandler) RecordActivityTaskHeartbeat(ctx context.Context, requ if err != nil { return nil, err } + namespaceName := namespaceEntry.Name().String() + + if len(taskToken.GetComponentRef()) > 0 && !wh.IsStandaloneActivityEnabled(namespaceName) { + return nil, serviceerror.NewUnavailable(activity.StandaloneActivityDisabledError) + } sizeLimitError := wh.config.BlobSizeLimitError(namespaceEntry.Name().String()) sizeLimitWarn := wh.config.BlobSizeLimitWarn(namespaceEntry.Name().String()) @@ -1289,13 +1303,30 @@ func (wh *WorkflowHandler) RecordActivityTaskHeartbeatById(ctx context.Context, runID := request.GetRunId() // runID is optional so can be empty activityID := request.GetActivityId() - if workflowID == "" { - return nil, errWorkflowIDNotSet - } if activityID == "" { return nil, errActivityIDNotSet } + // If workflowID is empty, it means the activity is a standalone activity and we need to set the component ref. + // Else this should be a validation error. 
+ var componentRef []byte + if workflowID == "" { + if !wh.IsStandaloneActivityEnabled(request.GetNamespace()) { + return nil, errWorkflowIDNotSet + } + + ref := chasm.NewComponentRef[*activity.Activity](chasm.ExecutionKey{ + NamespaceID: namespaceID.String(), + BusinessID: activityID, + RunID: runID, + }) + + componentRef, err = ref.Serialize(wh.registry) + if err != nil { + return nil, err + } + } + taskToken := tasktoken.NewActivityTaskToken( namespaceID.String(), workflowID, @@ -1307,6 +1338,7 @@ func (wh *WorkflowHandler) RecordActivityTaskHeartbeatById(ctx context.Context, nil, common.EmptyVersion, common.EmptyVersion, + componentRef, ) token, err := wh.tokenSerializer.Serialize(taskToken) if err != nil { @@ -1392,13 +1424,18 @@ func (wh *WorkflowHandler) RespondActivityTaskCompleted( if err != nil { return nil, err } + namespaceName := namespaceEntry.Name().String() + + if len(taskToken.GetComponentRef()) > 0 && !wh.IsStandaloneActivityEnabled(namespaceName) { + return nil, serviceerror.NewUnavailable(activity.StandaloneActivityDisabledError) + } if len(request.GetIdentity()) > wh.config.MaxIDLengthLimit() { return nil, errIdentityTooLong } - sizeLimitError := wh.config.BlobSizeLimitError(namespaceEntry.Name().String()) - sizeLimitWarn := wh.config.BlobSizeLimitWarn(namespaceEntry.Name().String()) + sizeLimitError := wh.config.BlobSizeLimitError(namespaceName) + sizeLimitWarn := wh.config.BlobSizeLimitWarn(namespaceName) if err := common.CheckEventBlobSizeLimit( request.GetResult().Size(), @@ -1457,9 +1494,6 @@ func (wh *WorkflowHandler) RespondActivityTaskCompletedById(ctx context.Context, runID := request.GetRunId() // runID is optional so can be empty activityID := request.GetActivityId() - if workflowID == "" { - return nil, errWorkflowIDNotSet - } if activityID == "" { return nil, errActivityIDNotSet } @@ -1468,6 +1502,26 @@ func (wh *WorkflowHandler) RespondActivityTaskCompletedById(ctx context.Context, return nil, errIdentityTooLong } + // If 
workflowID is empty, it means the activity is a standalone activity and we need to set the component ref. + // Else this should be a validation error. + var componentRef []byte + if workflowID == "" { + if !wh.IsStandaloneActivityEnabled(request.GetNamespace()) { + return nil, errWorkflowIDNotSet + } + + ref := chasm.NewComponentRef[*activity.Activity](chasm.ExecutionKey{ + NamespaceID: namespaceID.String(), + BusinessID: activityID, + RunID: runID, + }) + + componentRef, err = ref.Serialize(wh.registry) + if err != nil { + return nil, err + } + } + taskToken := tasktoken.NewActivityTaskToken( namespaceID.String(), workflowID, @@ -1479,6 +1533,7 @@ func (wh *WorkflowHandler) RespondActivityTaskCompletedById(ctx context.Context, nil, common.EmptyVersion, common.EmptyVersion, + componentRef, ) token, err := wh.tokenSerializer.Serialize(taskToken) if err != nil { @@ -1561,6 +1616,11 @@ func (wh *WorkflowHandler) RespondActivityTaskFailed( if err != nil { return nil, err } + namespaceName := namespaceEntry.Name().String() + + if len(taskToken.GetComponentRef()) > 0 && !wh.IsStandaloneActivityEnabled(namespaceName) { + return nil, serviceerror.NewUnavailable(activity.StandaloneActivityDisabledError) + } if request.GetFailure() != nil && request.GetFailure().GetApplicationFailureInfo() == nil { return nil, errFailureMustHaveApplicationFailureInfo @@ -1570,8 +1630,8 @@ func (wh *WorkflowHandler) RespondActivityTaskFailed( return nil, errIdentityTooLong } - sizeLimitError := wh.config.BlobSizeLimitError(namespaceEntry.Name().String()) - sizeLimitWarn := wh.config.BlobSizeLimitWarn(namespaceEntry.Name().String()) + sizeLimitError := wh.config.BlobSizeLimitError(namespaceName) + sizeLimitWarn := wh.config.BlobSizeLimitWarn(namespaceName) response := workflowservice.RespondActivityTaskFailedResponse{} @@ -1643,9 +1703,6 @@ func (wh *WorkflowHandler) RespondActivityTaskFailedById(ctx context.Context, re runID := request.GetRunId() // runID is optional so can be empty 
activityID := request.GetActivityId() - if workflowID == "" { - return nil, errWorkflowIDNotSet - } if activityID == "" { return nil, errActivityIDNotSet } @@ -1653,6 +1710,26 @@ func (wh *WorkflowHandler) RespondActivityTaskFailedById(ctx context.Context, re return nil, errIdentityTooLong } + // If workflowID is empty, it means the activity is a standalone activity and we need to set the component ref. + // Else this should be a validation error. + var componentRef []byte + if workflowID == "" { + if !wh.IsStandaloneActivityEnabled(request.GetNamespace()) { + return nil, errWorkflowIDNotSet + } + + ref := chasm.NewComponentRef[*activity.Activity](chasm.ExecutionKey{ + NamespaceID: namespaceID.String(), + BusinessID: activityID, + RunID: runID, + }) + + componentRef, err = ref.Serialize(wh.registry) + if err != nil { + return nil, err + } + } + taskToken := tasktoken.NewActivityTaskToken( namespaceID.String(), workflowID, @@ -1664,6 +1741,7 @@ func (wh *WorkflowHandler) RespondActivityTaskFailedById(ctx context.Context, re nil, common.EmptyVersion, common.EmptyVersion, + componentRef, ) token, err := wh.tokenSerializer.Serialize(taskToken) if err != nil { @@ -1755,13 +1833,18 @@ func (wh *WorkflowHandler) RespondActivityTaskCanceled(ctx context.Context, requ if err != nil { return nil, err } + namespaceName := namespaceEntry.Name().String() + + if len(taskToken.GetComponentRef()) > 0 && !wh.IsStandaloneActivityEnabled(namespaceName) { + return nil, serviceerror.NewUnavailable(activity.StandaloneActivityDisabledError) + } if len(request.GetIdentity()) > wh.config.MaxIDLengthLimit() { return nil, errIdentityTooLong } - sizeLimitError := wh.config.BlobSizeLimitError(namespaceEntry.Name().String()) - sizeLimitWarn := wh.config.BlobSizeLimitWarn(namespaceEntry.Name().String()) + sizeLimitError := wh.config.BlobSizeLimitError(namespaceName) + sizeLimitWarn := wh.config.BlobSizeLimitWarn(namespaceName) if err := common.CheckEventBlobSizeLimit( request.GetDetails().Size(), 
@@ -1820,9 +1903,6 @@ func (wh *WorkflowHandler) RespondActivityTaskCanceledById(ctx context.Context, runID := request.GetRunId() // runID is optional so can be empty activityID := request.GetActivityId() - if workflowID == "" { - return nil, errWorkflowIDNotSet - } if activityID == "" { return nil, errActivityIDNotSet } @@ -1830,6 +1910,26 @@ func (wh *WorkflowHandler) RespondActivityTaskCanceledById(ctx context.Context, return nil, errIdentityTooLong } + // If workflowID is empty, it means the activity is a standalone activity and we need to set the component ref. + // Else this should be a validation error. + var componentRef []byte + if workflowID == "" { + if !wh.IsStandaloneActivityEnabled(request.GetNamespace()) { + return nil, errWorkflowIDNotSet + } + + ref := chasm.NewComponentRef[*activity.Activity](chasm.ExecutionKey{ + NamespaceID: namespaceID.String(), + BusinessID: activityID, + RunID: runID, + }) + + componentRef, err = ref.Serialize(wh.registry) + if err != nil { + return nil, err + } + } + taskToken := tasktoken.NewActivityTaskToken( namespaceID.String(), workflowID, @@ -1841,6 +1941,7 @@ func (wh *WorkflowHandler) RespondActivityTaskCanceledById(ctx context.Context, nil, common.EmptyVersion, common.EmptyVersion, + componentRef, ) token, err := wh.tokenSerializer.Serialize(taskToken) if err != nil { @@ -6400,6 +6501,10 @@ func (wh *WorkflowHandler) DescribeWorker(ctx context.Context, request *workflow }, nil } +func (wh *WorkflowHandler) TriggerWorkflowRule(context.Context, *workflowservice.TriggerWorkflowRuleRequest) (*workflowservice.TriggerWorkflowRuleResponse, error) { + return nil, serviceerror.NewUnimplemented("method TriggerWorkflowRule not supported") +} + // PauseWorkflowExecution pauses a workflow execution. 
func (wh *WorkflowHandler) PauseWorkflowExecution(ctx context.Context, request *workflowservice.PauseWorkflowExecutionRequest) (_ *workflowservice.PauseWorkflowExecutionResponse, retError error) { defer log.CapturePanic(wh.logger, &retError) diff --git a/service/frontend/workflow_handler_test.go b/service/frontend/workflow_handler_test.go index d617360c4b..3b3bbc0bc6 100644 --- a/service/frontend/workflow_handler_test.go +++ b/service/frontend/workflow_handler_test.go @@ -192,6 +192,8 @@ func (s *WorkflowHandlerSuite) getWorkflowHandler(config *Config) *WorkflowHandl healthInterceptor, scheduler.NewSpecBuilder(), true, + nil, // Not testing activity handler here + nil, ) } diff --git a/service/history/api/command_attr_validator.go b/service/history/api/command_attr_validator.go index 73fcca2b5f..4d86a81138 100644 --- a/service/history/api/command_attr_validator.go +++ b/service/history/api/command_attr_validator.go @@ -5,12 +5,14 @@ import ( "strings" "github.com/google/uuid" + activitypb "go.temporal.io/api/activity/v1" commandpb "go.temporal.io/api/command/v1" commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" taskqueuepb "go.temporal.io/api/taskqueue/v1" persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/chasm/lib/activity" "go.temporal.io/server/common/backoff" "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/namespace" @@ -93,92 +95,38 @@ func (v *CommandAttrValidator) ValidateActivityScheduleAttributes( activityType = attributes.ActivityType.GetName() } - if err := tqid.NormalizeAndValidate(attributes.TaskQueue, "", v.maxIDLengthLimit); err != nil { - return failedCause, fmt.Errorf("invalid TaskQueue on ScheduleActivityTaskCommand: %w. ActivityId=%s ActivityType=%s", err, activityID, activityType) - } - - if activityID == "" { - return failedCause, serviceerror.NewInvalidArgumentf("ActivityId is not set on ScheduleActivityTaskCommand. 
ActivityType=%s", activityType) - } - if activityType == "" { - return failedCause, serviceerror.NewInvalidArgumentf("ActivityType is not set on ScheduleActivityTaskCommand. ActivityID=%s", activityID) - } if attributes.RetryPolicy == nil { attributes.RetryPolicy = &commonpb.RetryPolicy{} } - if err := v.validateActivityRetryPolicy(namespaceID, attributes.RetryPolicy); err != nil { - return failedCause, fmt.Errorf("invalid ActivityRetryPolicy on SechduleActivityTaskCommand: %w. ActivityId=%s ActivityType=%s", err, activityID, activityType) - } - if len(activityID) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgumentf("ActivityId on ScheduleActivityTaskCommand exceeds length limit. ActivityId=%s ActivityType=%s Length=%d Limit=%d", activityID, activityType, len(activityID), v.maxIDLengthLimit) - } - if len(activityType) > v.maxIDLengthLimit { - return failedCause, serviceerror.NewInvalidArgumentf("ActivityType on ScheduleActivityTaskCommand exceeds length limit. ActivityId=%s ActivityType=%s Length=%d Limit=%d", activityID, activityType, len(activityType), v.maxIDLengthLimit) + opts := &activitypb.ActivityOptions{ + TaskQueue: attributes.TaskQueue, + ScheduleToCloseTimeout: attributes.GetScheduleToCloseTimeout(), + ScheduleToStartTimeout: attributes.GetScheduleToStartTimeout(), + StartToCloseTimeout: attributes.GetStartToCloseTimeout(), + HeartbeatTimeout: attributes.GetHeartbeatTimeout(), + RetryPolicy: attributes.RetryPolicy, } - // Only attempt to deduce and fill in unspecified timeouts only when all timeouts are non-negative. - if err := timestamp.ValidateAndCapProtoDuration(attributes.GetScheduleToCloseTimeout()); err != nil { - return failedCause, serviceerror.NewInvalidArgumentf("Invalid ScheduleToCloseTimeout for ScheduleActivityTaskCommand: %v. 
ActivityId=%s ActivityType=%s", err, activityID, activityType) - } - if err := timestamp.ValidateAndCapProtoDuration(attributes.GetScheduleToStartTimeout()); err != nil { - return failedCause, serviceerror.NewInvalidArgumentf("Invalid ScheduleToStartTimeout for ScheduleActivityTaskCommand: %v. ActivityId=%s ActivityType=%s", err, activityID, activityType) - } - if err := timestamp.ValidateAndCapProtoDuration(attributes.GetStartToCloseTimeout()); err != nil { - return failedCause, serviceerror.NewInvalidArgumentf("Invalid StartToCloseTimeout for ScheduleActivityTaskCommand: %v. ActivityId=%s ActivityType=%s", err, activityID, activityType) - } - if err := timestamp.ValidateAndCapProtoDuration(attributes.GetHeartbeatTimeout()); err != nil { - return failedCause, serviceerror.NewInvalidArgumentf("Invalid HeartbeatTimeout for ScheduleActivityTaskCommand: %v. ActivityId=%s ActivityType=%s", err, activityID, activityType) - } + err := activity.ValidateAndNormalizeActivityAttributes( + activityID, + activityType, + v.getDefaultActivityRetrySettings, + v.maxIDLengthLimit, + namespaceID, + opts, + attributes.GetPriority(), + runTimeout) - if err := priorities.Validate(attributes.Priority); err != nil { + if err != nil { return failedCause, err } - ScheduleToCloseSet := attributes.GetScheduleToCloseTimeout().AsDuration() > 0 - ScheduleToStartSet := attributes.GetScheduleToStartTimeout().AsDuration() > 0 - StartToCloseSet := attributes.GetStartToCloseTimeout().AsDuration() > 0 - - if ScheduleToCloseSet { - if ScheduleToStartSet { - attributes.ScheduleToStartTimeout = timestamp.MinDurationPtr(attributes.GetScheduleToStartTimeout(), - attributes.GetScheduleToCloseTimeout()) - } else { - attributes.ScheduleToStartTimeout = attributes.GetScheduleToCloseTimeout() - } - if StartToCloseSet { - attributes.StartToCloseTimeout = timestamp.MinDurationPtr(attributes.GetStartToCloseTimeout(), - attributes.GetScheduleToCloseTimeout()) - } else { - attributes.StartToCloseTimeout = 
attributes.GetScheduleToCloseTimeout() - } - } else if StartToCloseSet { - // We are in !validScheduleToClose due to the first if above - attributes.ScheduleToCloseTimeout = runTimeout - if !ScheduleToStartSet { - attributes.ScheduleToStartTimeout = runTimeout - } - } else { - // Deduction failed as there's not enough information to fill in missing timeouts. - return failedCause, serviceerror.NewInvalidArgumentf("A valid StartToClose or ScheduleToCloseTimeout is not set on ScheduleActivityTaskCommand. ActivityId=%s ActivityType=%s", activityID, activityType) - } - // ensure activity timeout never larger than workflow timeout - if runTimeout.AsDuration() > 0 { - runTimeoutDur := runTimeout.AsDuration() - if attributes.GetScheduleToCloseTimeout().AsDuration() > runTimeoutDur { - attributes.ScheduleToCloseTimeout = runTimeout - } - if attributes.GetScheduleToStartTimeout().AsDuration() > runTimeoutDur { - attributes.ScheduleToStartTimeout = runTimeout - } - if attributes.GetStartToCloseTimeout().AsDuration() > runTimeoutDur { - attributes.StartToCloseTimeout = runTimeout - } - if attributes.GetHeartbeatTimeout().AsDuration() > runTimeoutDur { - attributes.HeartbeatTimeout = runTimeout - } - } - attributes.HeartbeatTimeout = timestamp.MinDurationPtr(attributes.GetHeartbeatTimeout(), attributes.GetStartToCloseTimeout()) + attributes.ScheduleToCloseTimeout = opts.ScheduleToCloseTimeout + attributes.ScheduleToStartTimeout = opts.ScheduleToStartTimeout + attributes.StartToCloseTimeout = opts.StartToCloseTimeout + attributes.HeartbeatTimeout = opts.HeartbeatTimeout + attributes.RetryPolicy = opts.RetryPolicy return enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED, nil } diff --git a/service/history/api/recordactivitytaskheartbeat/api.go b/service/history/api/recordactivitytaskheartbeat/api.go index 637cf99e4f..936e3752f0 100644 --- a/service/history/api/recordactivitytaskheartbeat/api.go +++ b/service/history/api/recordactivitytaskheartbeat/api.go @@ -4,6 +4,8 @@ import ( 
"context" "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity" "go.temporal.io/server/common" "go.temporal.io/server/common/definition" "go.temporal.io/server/common/metrics" @@ -27,6 +29,20 @@ func Invoke( return nil, consts.ErrDeserializingToken } + // Handle as standalone activity if token has component ref. + if componentRef := token.GetComponentRef(); len(componentRef) > 0 { + response, _, err := chasm.UpdateComponent( + ctx, + componentRef, + (*activity.Activity).RecordHeartbeat, + activity.WithToken[*historyservice.RecordActivityTaskHeartbeatRequest]{ + Token: token, + Request: req, + }, + ) + return response, err + } + _, err := api.GetActiveNamespace(shard, namespace.ID(req.GetNamespaceId()), token.WorkflowId) if err != nil { return nil, err diff --git a/service/history/api/recordactivitytaskstarted/api.go b/service/history/api/recordactivitytaskstarted/api.go index 85f0309e27..51504e14bd 100644 --- a/service/history/api/recordactivitytaskstarted/api.go +++ b/service/history/api/recordactivitytaskstarted/api.go @@ -12,6 +12,8 @@ import ( "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/api/matchingservice/v1" persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity" "go.temporal.io/server/common" "go.temporal.io/server/common/definition" "go.temporal.io/server/common/metrics" @@ -42,6 +44,20 @@ func Invoke( workflowConsistencyChecker api.WorkflowConsistencyChecker, matchingClient matchingservice.MatchingServiceClient, ) (resp *historyservice.RecordActivityTaskStartedResponse, retError error) { + if activityRefProto := request.GetComponentRef(); len(activityRefProto) > 0 { + response, _, err := chasm.UpdateComponent( + ctx, + activityRefProto, + (*activity.Activity).HandleStarted, + request, + ) + + if err != nil { + return nil, err + } + + return response, nil + } var err error response := 
&historyservice.RecordActivityTaskStartedResponse{} diff --git a/service/history/api/respondactivitytaskcanceled/api.go b/service/history/api/respondactivitytaskcanceled/api.go index 66d1af600e..0d286c58c2 100644 --- a/service/history/api/respondactivitytaskcanceled/api.go +++ b/service/history/api/respondactivitytaskcanceled/api.go @@ -6,6 +6,8 @@ import ( enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity" "go.temporal.io/server/common" "go.temporal.io/server/common/definition" "go.temporal.io/server/common/metrics" @@ -30,6 +32,34 @@ func Invoke( return nil, consts.ErrDeserializingToken } + // Handle standalone activity if component ref is present in the token + if componentRef := token.GetComponentRef(); len(componentRef) > 0 { + namespaceEntry, err := api.GetActiveNamespace(shard, namespace.ID(req.GetNamespaceId()), token.ActivityId) + if err != nil { + return nil, err + } + response, _, err := chasm.UpdateComponent( + ctx, + componentRef, + (*activity.Activity).HandleCanceled, + activity.RespondCancelledEvent{ + Request: req, + Token: token, + MetricsHandlerBuilderParams: activity.MetricsHandlerBuilderParams{ + Handler: shard.GetMetricsHandler(), + NamespaceName: namespaceEntry.Name().String(), + BreakdownMetricsByTaskQueue: shard.GetConfig().BreakdownMetricsByTaskQueue, + }, + }, + ) + + if err != nil { + return nil, err + } + + return response, nil + } + namespaceEntry, err := api.GetActiveNamespace(shard, namespace.ID(req.GetNamespaceId()), token.WorkflowId) if err != nil { return nil, err diff --git a/service/history/api/respondactivitytaskcompleted/api.go b/service/history/api/respondactivitytaskcompleted/api.go index 69764f9fd3..917c616cf2 100644 --- a/service/history/api/respondactivitytaskcompleted/api.go +++ b/service/history/api/respondactivitytaskcompleted/api.go @@ -6,6 +6,8 @@ import ( enumspb "go.temporal.io/api/enums/v1" 
"go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity" "go.temporal.io/server/common" "go.temporal.io/server/common/definition" "go.temporal.io/server/common/metrics" @@ -30,6 +32,34 @@ func Invoke( return nil, consts.ErrDeserializingToken } + // Handle standalone activity if component ref is present in the token + if componentRef := token.GetComponentRef(); len(componentRef) > 0 { + namespaceEntry, err := api.GetActiveNamespace(shard, namespace.ID(req.GetNamespaceId()), token.ActivityId) + if err != nil { + return nil, err + } + response, _, err := chasm.UpdateComponent( + ctx, + componentRef, + (*activity.Activity).HandleCompleted, + activity.RespondCompletedEvent{ + Request: req, + Token: token, + MetricsHandlerBuilderParams: activity.MetricsHandlerBuilderParams{ + Handler: shard.GetMetricsHandler(), + NamespaceName: namespaceEntry.Name().String(), + BreakdownMetricsByTaskQueue: shard.GetConfig().BreakdownMetricsByTaskQueue, + }, + }, + ) + + if err != nil { + return nil, err + } + + return response, nil + } + namespaceEntry, err := api.GetActiveNamespace(shard, namespace.ID(req.GetNamespaceId()), token.WorkflowId) if err != nil { return nil, err diff --git a/service/history/api/respondactivitytaskfailed/api.go b/service/history/api/respondactivitytaskfailed/api.go index 0f70117d93..a622e90716 100644 --- a/service/history/api/respondactivitytaskfailed/api.go +++ b/service/history/api/respondactivitytaskfailed/api.go @@ -7,6 +7,8 @@ import ( enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/workflowservice/v1" "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity" "go.temporal.io/server/common" "go.temporal.io/server/common/definition" "go.temporal.io/server/common/metrics" @@ -31,6 +33,34 @@ func Invoke( return nil, consts.ErrDeserializingToken } + // Handle standalone activity if component ref is present in the token + 
if componentRef := token.GetComponentRef(); len(componentRef) > 0 { + namespaceEntry, err := api.GetActiveNamespace(shard, namespace.ID(req.GetNamespaceId()), token.ActivityId) + if err != nil { + return nil, err + } + response, _, err := chasm.UpdateComponent( + ctx, + componentRef, + (*activity.Activity).HandleFailed, + activity.RespondFailedEvent{ + Request: req, + Token: token, + MetricsHandlerBuilderParams: activity.MetricsHandlerBuilderParams{ + Handler: shard.GetMetricsHandler(), + NamespaceName: namespaceEntry.Name().String(), + BreakdownMetricsByTaskQueue: shard.GetConfig().BreakdownMetricsByTaskQueue, + }, + }, + ) + + if err != nil { + return nil, err + } + + return response, nil + } + namespaceEntry, err := api.GetActiveNamespace(shard, namespace.ID(req.GetNamespaceId()), token.WorkflowId) if err != nil { return nil, err diff --git a/service/history/api/respondworkflowtaskcompleted/workflow_task_completed_handler.go b/service/history/api/respondworkflowtaskcompleted/workflow_task_completed_handler.go index 6697da876a..5f8902fe73 100644 --- a/service/history/api/respondworkflowtaskcompleted/workflow_task_completed_handler.go +++ b/service/history/api/respondworkflowtaskcompleted/workflow_task_completed_handler.go @@ -580,6 +580,7 @@ func (handler *workflowTaskCompletedHandler) handlePostCommandEagerExecuteActivi shardClock, ai.Version, ai.StartVersion, + nil, ) serializedToken, err := handler.tokenSerializer.Serialize(taskToken) if err != nil { diff --git a/service/history/chasm_engine.go b/service/history/chasm_engine.go index ad394283f1..3cc9af59a1 100644 --- a/service/history/chasm_engine.go +++ b/service/history/chasm_engine.go @@ -32,6 +32,7 @@ type ( shardController shard.Controller registry *chasm.Registry config *configs.Config + notifier *ChasmNotifier } newExecutionParams struct { @@ -56,6 +57,7 @@ var defaultTransitionOptions = chasm.TransitionOptions{ } var ChasmEngineModule = fx.Options( + fx.Provide(NewChasmNotifier), 
fx.Provide(newChasmEngine), fx.Provide(func(impl *ChasmEngine) chasm.Engine { return impl }), fx.Invoke(func(impl *ChasmEngine, shardController shard.Controller) { @@ -67,11 +69,13 @@ func newChasmEngine( executionCache cache.Cache, registry *chasm.Registry, config *configs.Config, + notifier *ChasmNotifier, ) *ChasmEngine { return &ChasmEngine{ executionCache: executionCache, registry: registry, config: config, + notifier: notifier, } } @@ -83,6 +87,10 @@ func (e *ChasmEngine) SetShardController( e.shardController = shardController } +func (e *ChasmEngine) NotifyExecution(key chasm.ExecutionKey) { + e.notifier.Notify(key) +} + func (e *ChasmEngine) NewExecution( ctx context.Context, executionRef chasm.ComponentRef, @@ -162,6 +170,10 @@ func (e *ChasmEngine) UpdateWithNewExecution( return chasm.ExecutionKey{}, nil, serviceerror.NewUnimplemented("UpdateWithNewExecution is not yet supported") } +// UpdateComponent applies updateFn to the component identified by the supplied component reference, +// returning the new component reference corresponding to the transition. An error is returned if +// the state transition specified by the supplied component reference is inconsistent with execution +// transition history. opts are currently ignored. func (e *ChasmEngine) UpdateComponent( ctx context.Context, ref chasm.ComponentRef, @@ -214,6 +226,9 @@ func (e *ChasmEngine) UpdateComponent( return newSerializedRef, nil } +// ReadComponent evaluates readFn against the current state of the component identified by the +// supplied component reference. An error is returned if the state transition specified by the +// component reference is inconsistent with execution transition history. opts are currently ignored. 
func (e *ChasmEngine) ReadComponent( ctx context.Context, ref chasm.ComponentRef, @@ -248,14 +263,121 @@ func (e *ChasmEngine) ReadComponent( return readFn(chasmContext, component) } +// PollComponent waits until the supplied predicate is satisfied when evaluated against the +// component identified by the supplied component reference. If there is no error, it returns (ref, +// nil) where ref is a component reference identifying the state at which the predicate was +// satisfied. It's possible that multiple state transitions (multiple notifications) occur between +// predicate checks, therefore the predicate must be monotonic: if it returns true at execution +// state transition s it must return true at all transitions t > s. It is an error if execution +// transition history is (after reloading from persistence) behind the requested ref, or if the ref +// is inconsistent with execution transition history. Thus when the predicate function is evaluated, +// it is guaranteed that the execution VT >= requestRef VT. opts are currently ignored. +// PollComponent subscribes to execution-level notifications. Suppose that an execution consists of +// one component A, and A has subcomponent B. Subscribers interested only in component B may be +// woken up unnecessarily (and thus evaluate the predicate unnecessarily) due to changes in parts of +// A that do not also belong to B. 
func (e *ChasmEngine) PollComponent( ctx context.Context, - executionRef chasm.ComponentRef, - predicateFn func(chasm.Context, chasm.Component) (any, bool, error), - operationFn func(chasm.MutableContext, chasm.Component, any) error, + requestRef chasm.ComponentRef, + monotonicPredicate func(chasm.Context, chasm.Component) (bool, error), opts ...chasm.TransitionOption, -) (newExecutionRef []byte, retError error) { - return nil, serviceerror.NewUnimplemented("PollComponent is not yet supported") +) (retRef []byte, retError error) { + + var ch <-chan struct{} + var unsubscribe func() + defer func() { + if unsubscribe != nil { + unsubscribe() + } + }() + + checkPredicateOrSubscribe := func() ([]byte, error) { + _, executionLease, err := e.getExecutionLease(ctx, requestRef) + if err != nil { + return nil, err + } + defer executionLease.GetReleaseFn()(nil) //nolint:revive + + ref, err := e.predicateSatisfied(ctx, monotonicPredicate, requestRef, executionLease) + if err != nil { + if errors.Is(err, consts.ErrStaleState) { + err = serviceerror.NewUnavailable("please retry") + } + return nil, err + } + if ref != nil { + return ref, nil + } + // Predicate not satisfied; subscribe before releasing the lock. + workflowKey := executionLease.GetContext().GetWorkflowKey() + ch, unsubscribe = e.notifier.Subscribe(chasm.ExecutionKey{ + NamespaceID: workflowKey.NamespaceID, + BusinessID: workflowKey.WorkflowID, + RunID: workflowKey.RunID, + }) + return nil, nil + } + + ref, err := checkPredicateOrSubscribe() + if err != nil || ref != nil { + return ref, err + } + + for { + select { + case <-ch: + ref, err = checkPredicateOrSubscribe() + if err != nil || ref != nil { + return ref, err + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +// predicateSatisfied is a helper function for PollComponent. It returns (ref, err) where ref is non-nil +// iff there's no error and predicate evaluates to true. 
+func (e *ChasmEngine) predicateSatisfied( + ctx context.Context, + predicate func(chasm.Context, chasm.Component) (bool, error), + ref chasm.ComponentRef, + executionLease api.WorkflowLease, +) ([]byte, error) { + chasmTree, ok := executionLease.GetMutableState().ChasmTree().(*chasm.Node) + if !ok { + return nil, serviceerror.NewInternalf( + "CHASM tree implementation not properly wired up, encountered type: %T, expected type: %T", + executionLease.GetMutableState().ChasmTree(), + &chasm.Node{}, + ) + } + + // It is not acceptable to declare the predicate to be satisfied against execution state that is + // behind the requested reference. However, getExecutionLease does not currently guarantee that + // execution VT >= ref VT, therefore we call IsStale() again here and return any error (which at + // this point must be ErrStaleState; ErrStaleReference has already been eliminated). + err := chasmTree.IsStale(ref) + if err != nil { + // ErrStaleState + // TODO(saa-yichao): this should be retryable if it is the failover version that is stale + return nil, err + } + // We know now that execution VT >= ref VT + + chasmContext := chasm.NewContext(ctx, chasmTree) + component, err := chasmTree.Component(chasmContext, ref) + if err != nil { + return nil, err + } + satisfied, err := predicate(chasmContext, component) + if err != nil { + return nil, err + } + if !satisfied { + return nil, nil + } + return chasmContext.Ref(component) } func (e *ChasmEngine) constructTransitionOptions( @@ -573,6 +695,14 @@ func (e *ChasmEngine) getShardContext( return e.shardController.GetShardByID(shardID) } +// getExecutionLease returns shard context and mutable state for the execution identified by the +// supplied component reference, with the lock held. An error is returned if the state transition +// specified by the component reference is inconsistent with mutable state transition history. 
If +// the state transition specified by the component reference is consistent with mutable state being +// stale, then mutable state is reloaded from persistence before returning. It does not check that +// mutable state is non-stale after reload. +// TODO(saa-yichao): if mutable state is stale after reload, return an error (retryable iff the failover +// version is stale since that is expected under some multi-cluster scenarios). func (e *ChasmEngine) getExecutionLease( ctx context.Context, ref chasm.ComponentRef, diff --git a/service/history/chasm_engine_test.go b/service/history/chasm_engine_test.go index 8f0e42a6e9..898f76a3ea 100644 --- a/service/history/chasm_engine_test.go +++ b/service/history/chasm_engine_test.go @@ -120,6 +120,7 @@ func (s *chasmEngineSuite) SetupTest() { s.executionCache, s.registry, s.config, + NewChasmNotifier(), ) s.engine.SetShardController(s.mockShardController) } @@ -579,6 +580,7 @@ func (s *chasmEngineSuite) TestUpdateComponent_Success() { return tests.UpdateWorkflowExecutionResponse, nil }, ).Times(1) + s.mockEngine.EXPECT().NotifyChasmExecution(ref.ExecutionKey, gomock.Any()).Return().Times(1) // TODO: validate returned component once Ref() method of chasm tree is implememented. _, err := s.engine.UpdateComponent( @@ -633,6 +635,246 @@ func (s *chasmEngineSuite) TestReadComponent_Success() { s.NoError(err) } +// TestPollComponent_Success_NoWait tests the behavior of PollComponent when the predicate is +// satisfied at the outset. +func (s *chasmEngineSuite) TestPollComponent_Success_NoWait() { + tv := testvars.New(s.T()) + tv = tv.WithRunID(tv.Any().RunID()) + + ref := chasm.NewComponentRef[*testComponent]( + chasm.ExecutionKey{ + NamespaceID: string(tests.NamespaceID), + BusinessID: tv.WorkflowID(), + RunID: tv.RunID(), + }, + ) + expectedActivityID := tv.ActivityID() + + s.mockExecutionManager.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()). 
+ Return(&persistence.GetWorkflowExecutionResponse{ + State: s.buildPersistenceMutableState(ref.ExecutionKey, &persistencespb.ActivityInfo{ + ActivityId: expectedActivityID, + }), + }, nil).Times(1) + + newSerializedRef, err := s.engine.PollComponent( + context.Background(), + ref, + func(ctx chasm.Context, component chasm.Component) (bool, error) { + return true, nil + }, + ) + s.NoError(err) + + newRef, err := chasm.DeserializeComponentRef(newSerializedRef) + s.NoError(err) + s.Equal(ref.BusinessID, newRef.BusinessID) +} + +// TestPollComponent_Success_Wait tests the waiting behavior of PollComponent. +func (s *chasmEngineSuite) TestPollComponent_Success_Wait() { + testCases := []struct { + name string + useEmptyRunID bool + }{ + {"NonEmptyRunID", false}, + {"EmptyRunID", true}, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.testPollComponentWait(tc.useEmptyRunID) + }) + } +} + +func (s *chasmEngineSuite) testPollComponentWait(useEmptyRunID bool) { + // The predicate is not satisfied at the outset, so the call blocks waiting for notifications. + // UpdateComponent is used twice to update the execution in a way which does not satisfy the + // predicate, and a final third time in a way that does satisfy the predicate, causing the + // long-poll to return. + const numUpdatesTotal = 3 + + tv := testvars.New(s.T()) + tv = tv.WithRunID(tv.Any().RunID()) + + activityID := tv.ActivityID() + + // The poll ref may have empty RunID + pollRunID := tv.RunID() + if useEmptyRunID { + pollRunID = "" + } + pollRef := chasm.NewComponentRef[*testComponent]( + chasm.ExecutionKey{ + NamespaceID: string(tests.NamespaceID), + BusinessID: tv.WorkflowID(), + RunID: pollRunID, + }, + ) + + // The resolved execution key always has the actual RunID. + resolvedKey := chasm.ExecutionKey{ + NamespaceID: string(tests.NamespaceID), + BusinessID: tv.WorkflowID(), + RunID: tv.RunID(), + } + + // The update ref always uses the resolved key. 
+ updateRef := chasm.NewComponentRef[*testComponent](resolvedKey) + + // For empty RunID, GetCurrentExecution is called to resolve it. + if useEmptyRunID { + s.mockExecutionManager.EXPECT().GetCurrentExecution(gomock.Any(), gomock.Any()). + Return(&persistence.GetCurrentExecutionResponse{ + RunID: tv.RunID(), + }, nil).AnyTimes() + } + + s.mockExecutionManager.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()). + Return(&persistence.GetWorkflowExecutionResponse{ + State: s.buildPersistenceMutableState(resolvedKey, &persistencespb.ActivityInfo{}), + }, nil). + Times(1) // subsequent reads during UpdateComponent and PollComponent are from cache + s.mockExecutionManager.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()). + Return(tests.UpdateWorkflowExecutionResponse, nil). + Times(numUpdatesTotal) + s.mockEngine.EXPECT().NotifyChasmExecution(resolvedKey, gomock.Any()).DoAndReturn( + func(key chasm.ExecutionKey, ref []byte) { + s.engine.notifier.Notify(key) + }, + ).Times(numUpdatesTotal) + + pollErr := make(chan error) + pollResult := make(chan []byte) + pollComponent := func() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + newSerializedRef, err := s.engine.PollComponent( + ctx, + pollRef, + func(ctx chasm.Context, component chasm.Component) (bool, error) { + tc, ok := component.(*testComponent) + s.True(ok) + satisfied := tc.ActivityInfo.ActivityId == activityID + return satisfied, nil + }, + ) + pollErr <- err + pollResult <- newSerializedRef + } + updateComponent := func(satisfyPredicate bool) { + _, err := s.engine.UpdateComponent( + context.Background(), + updateRef, + func(ctx chasm.MutableContext, component chasm.Component) error { + tc, ok := component.(*testComponent) + s.True(ok) + if satisfyPredicate { + tc.ActivityInfo.ActivityId = activityID + } + return nil + }, + ) + s.NoError(err) + } + assertEmptyChan := func(ch chan []byte) { + select { + case <-ch: + s.FailNow("expected channel to be 
empty") + default: + } + } + + // Start a PollComponent call. It will not return until the third execution update. + go pollComponent() + + // Perform two execution updates that do not satisfy the predicate followed by one that does. + for range 2 { + updateComponent(false) + time.Sleep(100 * time.Millisecond) //nolint:forbidigo + assertEmptyChan(pollResult) + } + updateComponent(true) + // The poll call has returned. + s.NoError(<-pollErr) + newSerializedRef := <-pollResult + s.NotNil(newSerializedRef) + + newRef, err := chasm.DeserializeComponentRef(newSerializedRef) + s.NoError(err) + s.Equal(tests.NamespaceID.String(), newRef.NamespaceID) + s.Equal(tv.WorkflowID(), newRef.BusinessID) + s.Equal(tv.RunID(), newRef.RunID) + + newActivityID := make(chan string, 1) + err = s.engine.ReadComponent( + context.Background(), + newRef, + func( + ctx chasm.Context, + component chasm.Component, + ) error { + tc, ok := component.(*testComponent) + s.True(ok) + newActivityID <- tc.ActivityInfo.ActivityId + return nil + }, + ) + s.NoError(err) + s.Equal(activityID, <-newActivityID) +} + +// TestPollComponent_StaleState tests that PollComponent returns a user-friendly Unavailable error +// when the submitted component reference is ahead of persisted state (e.g. due to namespace +// failover). +func (s *chasmEngineSuite) TestPollComponent_StaleState() { + tv := testvars.New(s.T()) + tv = tv.WithRunID(tv.Any().RunID()) + + executionKey := chasm.ExecutionKey{ + NamespaceID: string(tests.NamespaceID), + BusinessID: tv.WorkflowID(), + RunID: tv.RunID(), + } + + testComponentTypeID, ok := s.mockShard.ChasmRegistry().ComponentIDFor(&testComponent{}) + s.True(ok) + + s.mockExecutionManager.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()). 
+ Return(&persistence.GetWorkflowExecutionResponse{ + State: s.buildPersistenceMutableState(executionKey, &persistencespb.ActivityInfo{}), + }, nil).AnyTimes() + + pRef := &persistencespb.ChasmComponentRef{ + NamespaceId: executionKey.NamespaceID, + BusinessId: executionKey.BusinessID, + RunId: executionKey.RunID, + ArchetypeId: uint32(testComponentTypeID), + ExecutionVersionedTransition: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: s.namespaceEntry.FailoverVersion() + 1, // ahead of persisted state + TransitionCount: testTransitionCount, + }, + } + staleToken, err := pRef.Marshal() + s.NoError(err) + staleRef, err := chasm.DeserializeComponentRef(staleToken) + s.NoError(err) + + _, err = s.engine.PollComponent( + context.Background(), + staleRef, + func(ctx chasm.Context, component chasm.Component) (bool, error) { + s.Fail("predicate should not be called with stale ref") + return false, nil + }, + ) + s.Error(err) + var unavailable *serviceerror.Unavailable + s.ErrorAs(err, &unavailable) + s.Equal("please retry", unavailable.Message) +} + func (s *chasmEngineSuite) buildPersistenceMutableState( key chasm.ExecutionKey, componentState proto.Message, @@ -654,7 +896,7 @@ func (s *chasmEngineSuite) buildPersistenceMutableState( TransitionHistory: []*persistencespb.VersionedTransition{ { NamespaceFailoverVersion: s.namespaceEntry.FailoverVersion(), - TransitionCount: 10, + TransitionCount: testTransitionCount, }, }, ExecutionStats: &persistencespb.ExecutionStats{}, @@ -674,7 +916,7 @@ func (s *chasmEngineSuite) buildPersistenceMutableState( }, LastUpdateVersionedTransition: &persistencespb.VersionedTransition{ NamespaceFailoverVersion: s.namespaceEntry.FailoverVersion(), - TransitionCount: 10, + TransitionCount: testTransitionCount, }, Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ ComponentAttributes: &persistencespb.ChasmComponentAttributes{ @@ -699,6 +941,7 @@ func (s *chasmEngineSuite) serializeComponentState( const ( 
testComponentPausedSAName = "PausedSA" testComponentPausedMemoName = "PausedMemo" + testTransitionCount = 10 ) var ( diff --git a/service/history/chasm_notifier.go b/service/history/chasm_notifier.go new file mode 100644 index 0000000000..5707f3195b --- /dev/null +++ b/service/history/chasm_notifier.go @@ -0,0 +1,64 @@ +package history + +import ( + "sync" + + "go.temporal.io/server/chasm" +) + +type subscriptionTracker struct { + ch chan struct{} + numSubscribers int +} + +// ChasmNotifier allows subscribers to receive notifications relating to a CHASM execution. +type ChasmNotifier struct { + // TODO(saa-preview): use ShardedConcurrentTxMap + executions map[chasm.ExecutionKey]*subscriptionTracker + // TODO(saa-preview): consider RWMutex + lock sync.Mutex +} + +// NewChasmNotifier creates a new instance of ChasmNotifier. +func NewChasmNotifier() *ChasmNotifier { + return &ChasmNotifier{ + executions: make(map[chasm.ExecutionKey]*subscriptionTracker), + } +} + +// Subscribe returns a channel that will be closed when there is a notification relating to the +// execution, along with an unsubscribe function. No data will be written to the channel: on +// notification, the caller should determine whether the execution state they are waiting for has +// been reached and resubscribe if necessary, while holding a lock on the execution. The caller must +// arrange for the unsubscribe function to be called when they have finished monitoring the channel +// for notifications. It is safe to call the unsubscribe function multiple times and concurrently. 
+func (n *ChasmNotifier) Subscribe(key chasm.ExecutionKey) (<-chan struct{}, func()) { + n.lock.Lock() + defer n.lock.Unlock() + s, ok := n.executions[key] + if !ok { + s = &subscriptionTracker{ch: make(chan struct{})} + n.executions[key] = s + } + s.numSubscribers++ + return s.ch, sync.OnceFunc(func() { + n.lock.Lock() + defer n.lock.Unlock() + if n.executions[key] == s { + s.numSubscribers-- + if s.numSubscribers == 0 { + delete(n.executions, key) + } + } + }) +} + +// Notify notifies all subscribers subscribed to key by closing the channel. +func (n *ChasmNotifier) Notify(key chasm.ExecutionKey) { + n.lock.Lock() + defer n.lock.Unlock() + if s, ok := n.executions[key]; ok { + close(s.ch) + delete(n.executions, key) + } +} diff --git a/service/history/chasm_notifier_test.go b/service/history/chasm_notifier_test.go new file mode 100644 index 0000000000..7171945b37 --- /dev/null +++ b/service/history/chasm_notifier_test.go @@ -0,0 +1,137 @@ +package history + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.temporal.io/server/chasm" + "go.temporal.io/server/common/testing/testvars" +) + +func TestChasmNotifier_SubscribeAndNotify(t *testing.T) { + tv := testvars.New(t) + + notifier := NewChasmNotifier() + + executionKey := chasm.ExecutionKey{ + NamespaceID: tv.NamespaceID().String(), + BusinessID: tv.WorkflowID(), + RunID: tv.RunID(), + } + + // Multiple subscribers + subscriberCount := 100 + subscribers := make([]struct { + channel <-chan struct{} + }, subscriberCount) + + for i := range subscriberCount { + ch, unsubscribe := notifier.Subscribe(executionKey) + defer unsubscribe() //nolint:revive + subscribers[i].channel = ch + } + + // Single notification + notifier.Notify(executionKey) + + // All subscribers should receive it + for i, sub := range subscribers { + select { + case <-sub.channel: + case <-time.After(1 * time.Second): + t.Fatalf("subscriber %d: timeout waiting for notification", i) + } + } +} + +func 
TestChasmNotifier_KeyIsolation(t *testing.T) { + tv := testvars.New(t) + + notifier := NewChasmNotifier() + + executionKey1 := chasm.ExecutionKey{ + NamespaceID: tv.NamespaceID().String(), + BusinessID: tv.WorkflowID(), + RunID: tv.RunID(), + } + executionKey2 := chasm.ExecutionKey{ + NamespaceID: "different-namespace-id", + BusinessID: "different-workflow-id", + RunID: "different-run-id", + } + + channel, unsubscribe := notifier.Subscribe(executionKey1) + defer unsubscribe() + notifier.Notify(executionKey2) + select { + case <-channel: + t.Fatal("should not receive notification for different entity") + case <-time.After(50 * time.Millisecond): + } +} + +func TestChasmNotifier_ConstantMemory(t *testing.T) { + key := chasm.ExecutionKey{ + NamespaceID: "ns", + BusinessID: "wf", + RunID: "run", + } + notifier := NewChasmNotifier() + require.Empty(t, notifier.executions) + notifier.Subscribe(key) + require.Len(t, notifier.executions, 1) + notifier.Notify(key) + require.Empty(t, notifier.executions) + // Ignored: no subscribers + notifier.Notify(key) + require.Empty(t, notifier.executions) +} + +func TestChasmNotifier_Unsubscribe(t *testing.T) { + key := chasm.ExecutionKey{ + NamespaceID: "ns", + BusinessID: "wf", + RunID: "run", + } + + t.Run("StaleUnsubscribeIsSafe", func(t *testing.T) { + notifier := NewChasmNotifier() + _, u1 := notifier.Subscribe(key) + notifier.Notify(key) + // The notify call closed and deleted the original channel. + ch2, u2 := notifier.Subscribe(key) + defer u2() + // u1 should be a no-op. 
+ u1() + select { + case <-ch2: + t.Fatal("notification channel was closed by stale unsubscribe function") + case <-time.After(1 * time.Second): + } + notifier.Notify(key) + select { + case <-ch2: + case <-time.After(1 * time.Second): + t.Fatal("notification channel should have been closed") + } + }) + + t.Run("IsIdempotent", func(t *testing.T) { + notifier := NewChasmNotifier() + _, u1 := notifier.Subscribe(key) + ch2, u2 := notifier.Subscribe(key) + defer u2() + + u1() + u1() + + select { + case <-ch2: + t.Fatal("unsubscribe should be idempotent; notification channel was closed by second call") + default: + } + notifier.Notify(key) + <-ch2 + }) +} diff --git a/service/history/fx.go b/service/history/fx.go index b22f554a78..b53f4bb7bc 100644 --- a/service/history/fx.go +++ b/service/history/fx.go @@ -6,6 +6,7 @@ import ( "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity" "go.temporal.io/server/common" commoncache "go.temporal.io/server/common/cache" "go.temporal.io/server/common/clock" @@ -86,6 +87,7 @@ var Module = fx.Options( callbacks.Module, nexusoperations.Module, fx.Invoke(nexusworkflow.RegisterCommandHandlers), + activity.HistoryModule, ) func ServerProvider(grpcServerOptions []grpc.ServerOption) *grpc.Server { diff --git a/service/history/handler.go b/service/history/handler.go index 0acf206af2..2fe506361a 100644 --- a/service/history/handler.go +++ b/service/history/handler.go @@ -150,6 +150,7 @@ var ( errWorkflowExecutionNotSet = serviceerror.NewInvalidArgument("WorkflowExecution not set on request.") errTaskQueueNotSet = serviceerror.NewInvalidArgument("Task queue not set.") errWorkflowIDNotSet = serviceerror.NewInvalidArgument("WorkflowId is not set on request.") + errBusinessIDNotSet = serviceerror.NewInvalidArgument("Business ID is not set on request.") errRunIDNotValid = serviceerror.NewInvalidArgument("RunId is not valid UUID.") errSourceClusterNotSet = 
serviceerror.NewInvalidArgument("Source Cluster not set on request.") errShardIDNotSet = serviceerror.NewInvalidArgument("ShardId not set on request.") @@ -2435,9 +2436,10 @@ func (h *Handler) convertError(err error) error { } func validateTaskToken(taskToken *tokenspb.Task) error { - if taskToken.GetWorkflowId() == "" { - return errWorkflowIDNotSet + if len(taskToken.GetComponentRef()) == 0 && taskToken.GetWorkflowId() == "" { + return errBusinessIDNotSet } + return nil } diff --git a/service/history/history_engine.go b/service/history/history_engine.go index 8b3b57db5d..7793ed6f52 100644 --- a/service/history/history_engine.go +++ b/service/history/history_engine.go @@ -132,6 +132,7 @@ type ( workflowDeleteManager deletemanager.DeleteManager eventSerializer serialization.Serializer workflowConsistencyChecker api.WorkflowConsistencyChecker + chasmEngine chasm.Engine versionChecker headers.VersionChecker versionMembershipCache cache.Cache tracer trace.Tracer @@ -171,6 +172,7 @@ func NewEngineWithShardContext( outboundQueueCBPool *circuitbreakerpool.OutboundQueueCircuitBreakerPool, persistenceRateLimiter quotas.RequestRateLimiter, testHooks testhooks.TestHooks, + chasmEngine chasm.Engine, ) historyi.Engine { currentClusterName := shard.GetClusterMetadata().GetCurrentClusterName() @@ -221,6 +223,7 @@ func NewEngineWithShardContext( syncStateRetriever: syncStateRetriever, outboundQueueCBPool: outboundQueueCBPool, testHooks: testHooks, + chasmEngine: chasmEngine, versionMembershipCache: versionMembershipCache, } @@ -855,6 +858,12 @@ func (e *historyEngineImpl) NotifyNewHistoryEvent( e.eventNotifier.NotifyNewHistoryEvent(notification) } +func (e *historyEngineImpl) NotifyChasmExecution(executionKey chasm.ExecutionKey, componentRef []byte) { + if e.chasmEngine != nil { + e.chasmEngine.NotifyExecution(executionKey) + } +} + func (e *historyEngineImpl) NotifyNewTasks( newTasks map[tasks.Category][]tasks.Task, ) { diff --git a/service/history/history_engine_factory.go 
b/service/history/history_engine_factory.go index 7b3280e3e9..b5ab55fae6 100644 --- a/service/history/history_engine_factory.go +++ b/service/history/history_engine_factory.go @@ -2,6 +2,7 @@ package history import ( "go.opentelemetry.io/otel/trace" + "go.temporal.io/server/chasm" "go.temporal.io/server/client" "go.temporal.io/server/common/cache" "go.temporal.io/server/common/persistence" @@ -47,6 +48,7 @@ type ( OutboundQueueCBPool *circuitbreakerpool.OutboundQueueCircuitBreakerPool PersistenceRateLimiter replication.PersistenceRateLimiter TestHooks testhooks.TestHooks + ChasmEngine chasm.Engine VersionMembershipCache cache.Cache } @@ -83,5 +85,6 @@ func (f *historyEngineFactory) CreateEngine( f.OutboundQueueCBPool, f.PersistenceRateLimiter, f.TestHooks, + f.ChasmEngine, ) } diff --git a/service/history/interfaces/engine.go b/service/history/interfaces/engine.go index 417c0e3688..a7dd9055d5 100644 --- a/service/history/interfaces/engine.go +++ b/service/history/interfaces/engine.go @@ -101,6 +101,7 @@ type ( NotifyNewHistoryEvent(event *events.Notification) NotifyNewTasks(tasks map[tasks.Category][]tasks.Task) + NotifyChasmExecution(executionKey chasm.ExecutionKey, componentRef []byte) // TODO(bergundy): This Environment should be host level once shard level workflow cache is deprecated. StateMachineEnvironment(operationTag metrics.Tag) hsm.Environment diff --git a/service/history/interfaces/engine_mock.go b/service/history/interfaces/engine_mock.go index 04c67cc03b..f976c352bf 100644 --- a/service/history/interfaces/engine_mock.go +++ b/service/history/interfaces/engine_mock.go @@ -414,6 +414,18 @@ func (mr *MockEngineMockRecorder) MergeDLQMessages(ctx, messagesRequest any) *go return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MergeDLQMessages", reflect.TypeOf((*MockEngine)(nil).MergeDLQMessages), ctx, messagesRequest) } +// NotifyChasmExecution mocks base method. 
+func (m *MockEngine) NotifyChasmExecution(executionKey chasm.ExecutionKey, componentRef []byte) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "NotifyChasmExecution", executionKey, componentRef) +} + +// NotifyChasmExecution indicates an expected call of NotifyChasmExecution. +func (mr *MockEngineMockRecorder) NotifyChasmExecution(executionKey, componentRef any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NotifyChasmExecution", reflect.TypeOf((*MockEngine)(nil).NotifyChasmExecution), executionKey, componentRef) +} + // NotifyNewHistoryEvent mocks base method. func (m *MockEngine) NotifyNewHistoryEvent(event *events.Notification) { m.ctrl.T.Helper() diff --git a/service/history/timer_queue_active_task_executor.go b/service/history/timer_queue_active_task_executor.go index f38fc23757..769edc5712 100644 --- a/service/history/timer_queue_active_task_executor.go +++ b/service/history/timer_queue_active_task_executor.go @@ -296,7 +296,7 @@ func (t *timerQueueActiveTaskExecutor) processSingleActivityTimeoutTask( // Note: we don't need to check activity Stamps. // This is because for the same attempts calls are idempotent. - failureMsg := fmt.Sprintf("activity %v timeout", timerSequenceID.TimerType.String()) + failureMsg := fmt.Sprintf(common.FailureReasonActivityTimeout, timerSequenceID.TimerType.String()) timeoutFailure := failure.NewTimeoutFailure(failureMsg, timerSequenceID.TimerType) retryState, err := mutableState.RetryActivity(ai, timeoutFailure) if err != nil { @@ -330,7 +330,7 @@ func (t *timerQueueActiveTaskExecutor) processSingleActivityTimeoutTask( // If retryState is Timeout then it means that expirationTime is expired. // ExpirationTime is expired when ScheduleToClose timeout is expired. 
const timeoutType = enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE - var failureMsg = fmt.Sprintf("activity %v timeout", timeoutType.String()) + var failureMsg = fmt.Sprintf(common.FailureReasonActivityTimeout, timeoutType.String()) timeoutFailure = failure.NewTimeoutFailure(failureMsg, timeoutType) } timeoutFailure.GetTimeoutFailureInfo().LastHeartbeatDetails = ai.LastHeartbeatDetails diff --git a/service/history/workflow/activity.go b/service/history/workflow/activity.go index fa60026825..de13209e20 100644 --- a/service/history/workflow/activity.go +++ b/service/history/workflow/activity.go @@ -41,6 +41,7 @@ import ( "go.temporal.io/server/api/historyservice/v1" persistencespb "go.temporal.io/server/api/persistence/v1" "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" "go.temporal.io/server/service/history/consts" historyi "go.temporal.io/server/service/history/interfaces" "go.temporal.io/server/service/history/workflow/matcher" @@ -59,15 +60,6 @@ func GetActivityState(ai *persistencespb.ActivityInfo) enumspb.PendingActivitySt return enumspb.PENDING_ACTIVITY_STATE_SCHEDULED } -func makeBackoffAlgorithm(requestedDelay *time.Duration) BackoffCalculatorAlgorithmFunc { - return func(duration *durationpb.Duration, coefficient float64, currentAttempt int32) time.Duration { - if requestedDelay != nil { - return *requestedDelay - } - return ExponentialBackoffAlgorithm(duration, coefficient, currentAttempt) - } -} - func UpdateActivityInfoForRetries( ai *persistencespb.ActivityInfo, version int64, @@ -145,7 +137,7 @@ func GetPendingActivityInfo( // in this case activity is at least scheduled p.NextAttemptScheduleTime = nil // we rely on the fact that ExponentialBackoffAlgorithm is deterministic, and there's no random jitter - interval := ExponentialBackoffAlgorithm(ai.RetryInitialInterval, ai.RetryBackoffCoefficient, p.Attempt) + interval := backoff.ExponentialBackoffAlgorithm(ai.RetryInitialInterval, ai.RetryBackoffCoefficient, p.Attempt) 
p.CurrentRetryInterval = durationpb.New(interval) } } @@ -247,7 +239,7 @@ func GetNextScheduledTime(ai *persistencespb.ActivityInfo) time.Time { nextScheduledTime := ai.ScheduledTime.AsTime() if ai.Attempt > 1 { // calculate new schedule time - interval := ExponentialBackoffAlgorithm(ai.RetryInitialInterval, ai.RetryBackoffCoefficient, ai.Attempt) + interval := backoff.ExponentialBackoffAlgorithm(ai.RetryInitialInterval, ai.RetryBackoffCoefficient, ai.Attempt) if ai.RetryMaximumInterval.AsDuration() != 0 && (interval <= 0 || interval > ai.RetryMaximumInterval.AsDuration()) { interval = ai.RetryMaximumInterval.AsDuration() diff --git a/service/history/workflow/mutable_state_impl.go b/service/history/workflow/mutable_state_impl.go index ccbf825714..653d39da7a 100644 --- a/service/history/workflow/mutable_state_impl.go +++ b/service/history/workflow/mutable_state_impl.go @@ -6094,7 +6094,7 @@ func (ms *MutableStateImpl) RetryActivity( retryMaxInterval, ai.RetryExpirationTime, ai.RetryBackoffCoefficient, - makeBackoffAlgorithm(delay), + backoff.MakeBackoffAlgorithm(delay), ) if retryState != enumspb.RETRY_STATE_IN_PROGRESS { return retryState, nil diff --git a/service/history/workflow/mutable_state_impl_restart_activity_test.go b/service/history/workflow/mutable_state_impl_restart_activity_test.go index dcb59720dd..2f96d2aa44 100644 --- a/service/history/workflow/mutable_state_impl_restart_activity_test.go +++ b/service/history/workflow/mutable_state_impl_restart_activity_test.go @@ -16,6 +16,7 @@ import ( taskqueuepb "go.temporal.io/api/taskqueue/v1" enumsspb "go.temporal.io/server/api/enums/v1" persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/backoff" commonclock "go.temporal.io/server/common/clock" "go.temporal.io/server/common/log" "go.temporal.io/server/common/primitives/timestamp" @@ -283,7 +284,7 @@ func (nbis *nextBackoffIntervalStub) nextBackoffInterval( maxInterval *durationpb.Duration, expirationTime 
*timestamppb.Timestamp, backoffCoefficient float64, - _ BackoffCalculatorAlgorithmFunc, + _ backoff.BackoffCalculatorAlgorithmFunc, ) (time.Duration, enumspb.RetryState) { nbis.recorded = fmt.Sprintf( nextBackoffIntervalParametersFormat, diff --git a/service/history/workflow/retry.go b/service/history/workflow/retry.go index aa3fa9527b..66f8348f9e 100644 --- a/service/history/workflow/retry.go +++ b/service/history/workflow/retry.go @@ -2,7 +2,6 @@ package workflow import ( "context" - "math" "slices" "time" @@ -28,12 +27,6 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" ) -type BackoffCalculatorAlgorithmFunc func(duration *durationpb.Duration, coefficient float64, currentAttempt int32) time.Duration - -func ExponentialBackoffAlgorithm(initInterval *durationpb.Duration, backoffCoefficient float64, currentAttempt int32) time.Duration { - return time.Duration(int64(float64(initInterval.AsDuration().Nanoseconds()) * math.Pow(backoffCoefficient, float64(currentAttempt-1)))) -} - // TODO treat 0 as 0, not infinite func getBackoffInterval( @@ -55,9 +48,9 @@ func getBackoffInterval( // Check if the remote worker sent an application failure indicating a custom backoff duration. 
delayedRetryDuration := nextRetryDelayFrom(failure) if delayedRetryDuration != nil { - return nextBackoffInterval(now, currentAttempt, maxAttempts, initInterval, maxInterval, expirationTime, backoffCoefficient, makeBackoffAlgorithm(delayedRetryDuration)) + return nextBackoffInterval(now, currentAttempt, maxAttempts, initInterval, maxInterval, expirationTime, backoffCoefficient, backoff.MakeBackoffAlgorithm(delayedRetryDuration)) } - return nextBackoffInterval(now, currentAttempt, maxAttempts, initInterval, maxInterval, expirationTime, backoffCoefficient, ExponentialBackoffAlgorithm) + return nextBackoffInterval(now, currentAttempt, maxAttempts, initInterval, maxInterval, expirationTime, backoffCoefficient, backoff.ExponentialBackoffAlgorithm) } func nextRetryDelayFrom(failure *failurepb.Failure) *time.Duration { @@ -82,7 +75,7 @@ func nextBackoffInterval( maxInterval *durationpb.Duration, expirationTime *timestamppb.Timestamp, backoffCoefficient float64, - intervalCalculator BackoffCalculatorAlgorithmFunc, + intervalCalculator backoff.BackoffCalculatorAlgorithmFunc, ) (time.Duration, enumspb.RetryState) { // TODO remove below checks, most are already set with correct values if currentAttempt < 1 { diff --git a/service/history/workflow/retry_test.go b/service/history/workflow/retry_test.go index 3b96bce6b5..c0f76d2af4 100644 --- a/service/history/workflow/retry_test.go +++ b/service/history/workflow/retry_test.go @@ -189,7 +189,7 @@ func Test_nextBackoffInterval(t *testing.T) { doNotCare(maxInterval(10*time.Second)), doNotCare(expirationIn(30*time.Second)), doNotCare[float64](2), - ExponentialBackoffAlgorithm, + backoff.ExponentialBackoffAlgorithm, ) assert.Equal(t, initialDelay, interval) assert.Equal(t, enumspb.RETRY_STATE_IN_PROGRESS, retryState) @@ -207,7 +207,7 @@ func Test_nextBackoffInterval(t *testing.T) { doNotCare(maxInterval(10*time.Second)), doNotCare(expirationIn(30*time.Second)), doNotCare[float64](2), - ExponentialBackoffAlgorithm, + 
backoff.ExponentialBackoffAlgorithm, ) assert.Equal(t, initialDelay, interval) assert.Equal(t, enumspb.RETRY_STATE_IN_PROGRESS, retryState) @@ -225,7 +225,7 @@ func Test_nextBackoffInterval(t *testing.T) { doNotCare(maxInterval(200*time.Second)), doNotCare(expirationIn(600*time.Second)), 3, - ExponentialBackoffAlgorithm, + backoff.ExponentialBackoffAlgorithm, ) assert.Equal(t, initialDelay*pow(3, int32(attempt)-1), interval) assert.Equal(t, enumspb.RETRY_STATE_IN_PROGRESS, retryState) @@ -241,7 +241,7 @@ func Test_nextBackoffInterval(t *testing.T) { maxInterval(maxBackoff), doNotCare(expirationIn(600*time.Second)), doNotCare[float64](2), - ExponentialBackoffAlgorithm, + backoff.ExponentialBackoffAlgorithm, ) assert.Equal(t, maxBackoff, interval) assert.Equal(t, enumspb.RETRY_STATE_IN_PROGRESS, retryState) @@ -256,7 +256,7 @@ func Test_nextBackoffInterval(t *testing.T) { doNotCare(maxInterval(10*time.Second)), doNotCare(expirationIn(600*time.Second)), doNotCare[float64](2), - ExponentialBackoffAlgorithm, + backoff.ExponentialBackoffAlgorithm, ) assert.Equal(t, backoff.NoBackoff, interval) assert.Equal(t, enumspb.RETRY_STATE_MAXIMUM_ATTEMPTS_REACHED, retryState) @@ -272,7 +272,7 @@ func Test_nextBackoffInterval(t *testing.T) { doNotCare(maxInterval(30*time.Minute)), doNotCare(expirationIn(60*time.Minute)), 2, - ExponentialBackoffAlgorithm, + backoff.ExponentialBackoffAlgorithm, ) assert.Equal(t, initialDelay*pow(2, 10-1), interval) assert.Equal(t, enumspb.RETRY_STATE_IN_PROGRESS, retryState) @@ -288,7 +288,7 @@ func Test_nextBackoffInterval(t *testing.T) { maxInterval(30*time.Minute), expirationIn(1*time.Minute), 2, - ExponentialBackoffAlgorithm, + backoff.ExponentialBackoffAlgorithm, ) assert.Equal(t, backoff.NoBackoff, interval) assert.Equal(t, enumspb.RETRY_STATE_TIMEOUT, retryState) @@ -304,7 +304,7 @@ func Test_nextBackoffInterval(t *testing.T) { maxInterval(30*time.Minute), expirationIn(0), 2, - ExponentialBackoffAlgorithm, + 
backoff.ExponentialBackoffAlgorithm, ) assert.Equal(t, backoff.NoBackoff, interval) assert.Equal(t, enumspb.RETRY_STATE_TIMEOUT, retryState) diff --git a/service/history/workflow/transaction_impl.go b/service/history/workflow/transaction_impl.go index 6301e4a591..3265c22967 100644 --- a/service/history/workflow/transaction_impl.go +++ b/service/history/workflow/transaction_impl.go @@ -197,6 +197,27 @@ func (t *TransactionImpl) UpdateWorkflowExecution( if persistence.OperationPossiblySucceeded(err) { NotifyWorkflowMutationTasks(engine, currentWorkflowMutation) NotifyWorkflowSnapshotTasks(engine, newWorkflowSnapshot) + + // TODO(dan): there is no test coverage for on-delete or on-create CHASM notifications. + + // Notify for current workflow if it has CHASM updates + if len(currentWorkflowMutation.UpsertChasmNodes) > 0 || + len(currentWorkflowMutation.DeleteChasmNodes) > 0 { + engine.NotifyChasmExecution(chasm.ExecutionKey{ + NamespaceID: currentWorkflowMutation.ExecutionInfo.NamespaceId, + BusinessID: currentWorkflowMutation.ExecutionInfo.WorkflowId, + RunID: currentWorkflowMutation.ExecutionState.RunId, + }, nil) + } + + // Notify for new workflow if it has CHASM nodes + if newWorkflowSnapshot != nil && len(newWorkflowSnapshot.ChasmNodes) > 0 { + engine.NotifyChasmExecution(chasm.ExecutionKey{ + NamespaceID: newWorkflowSnapshot.ExecutionInfo.NamespaceId, + BusinessID: newWorkflowSnapshot.ExecutionInfo.WorkflowId, + RunID: newWorkflowSnapshot.ExecutionState.RunId, + }, nil) + } } if err != nil { return 0, 0, err diff --git a/service/matching/forwarder.go b/service/matching/forwarder.go index e817b637f2..6a9b3da968 100644 --- a/service/matching/forwarder.go +++ b/service/matching/forwarder.go @@ -150,6 +150,7 @@ func (fwdr *Forwarder) ForwardTask(ctx context.Context, task *internalTask) erro VersionDirective: task.event.Data.GetVersionDirective(), Stamp: task.event.Data.GetStamp(), Priority: task.event.Data.GetPriority(), + ComponentRef: 
task.event.Data.GetComponentRef(), }, ) default: diff --git a/service/matching/matching_engine.go b/service/matching/matching_engine.go index 1cf61acd4e..e31ee3b719 100644 --- a/service/matching/matching_engine.go +++ b/service/matching/matching_engine.go @@ -579,6 +579,7 @@ func (e *matchingEngineImpl) AddActivityTask( VersionDirective: addRequest.VersionDirective, Stamp: addRequest.Stamp, Priority: addRequest.Priority, + ComponentRef: addRequest.ComponentRef, } return pm.AddTask(ctx, addTaskParams{ @@ -2902,7 +2903,6 @@ func (e *matchingEngineImpl) createPollActivityTaskQueueResponse( historyResponse *historyservice.RecordActivityTaskStartedResponse, metricsHandler metrics.Handler, ) *matchingservice.PollActivityTaskQueueResponse { - scheduledEvent := historyResponse.ScheduledEvent if scheduledEvent.GetActivityTaskScheduledEventAttributes() == nil { panic("GetActivityTaskScheduledEventAttributes is not set") @@ -2927,6 +2927,7 @@ func (e *matchingEngineImpl) createPollActivityTaskQueueResponse( historyResponse.GetClock(), historyResponse.GetVersion(), historyResponse.GetStartVersion(), + task.event.GetData().GetComponentRef(), ) serializedToken, _ := e.tokenSerializer.Serialize(taskToken) @@ -3059,6 +3060,7 @@ func (e *matchingEngineImpl) recordActivityTaskStarted( ScheduledDeployment: worker_versioning.DirectiveDeployment(task.event.Data.VersionDirective), VersionDirective: task.event.Data.VersionDirective, TaskDispatchRevisionNumber: task.taskDispatchRevisionNumber, + ComponentRef: task.event.Data.GetComponentRef(), } return e.historyClient.RecordActivityTaskStarted(ctx, recordStartedRequest) diff --git a/service/matching/pri_forwarder.go b/service/matching/pri_forwarder.go index f4b0f40229..f6c2672ba2 100644 --- a/service/matching/pri_forwarder.go +++ b/service/matching/pri_forwarder.go @@ -108,6 +108,7 @@ func (f *priForwarder) ForwardTask(ctx context.Context, task *internalTask) erro VersionDirective: task.event.Data.GetVersionDirective(), Stamp: 
task.event.Data.GetStamp(), Priority: task.event.Data.GetPriority(), + ComponentRef: task.event.Data.GetComponentRef(), }, ) default: diff --git a/tests/chasm_test.go b/tests/chasm_test.go index 56f62fa4d6..67541d1bf5 100644 --- a/tests/chasm_test.go +++ b/tests/chasm_test.go @@ -418,6 +418,9 @@ func (s *ChasmTestSuite) TestCountExecutions_GroupBy() { for _, group := range countResp.Groups { s.Len(group.Values, 1) totalCount += group.Count + var groupValue string + s.NoError(payload.Decode(group.Values[0], &groupValue)) + s.Contains([]string{"Running", "Completed"}, groupValue) } s.Equal(int64(5), totalCount) diff --git a/tests/standalone_activity_test.go b/tests/standalone_activity_test.go new file mode 100644 index 0000000000..9bebcf3b6c --- /dev/null +++ b/tests/standalone_activity_test.go @@ -0,0 +1,2955 @@ +package tests + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + activitypb "go.temporal.io/api/activity/v1" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + errordetailspb "go.temporal.io/api/errordetails/v1" + failurepb "go.temporal.io/api/failure/v1" + "go.temporal.io/api/operatorservice/v1" + sdkpb "go.temporal.io/api/sdk/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/chasm/lib/activity" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/payloads" + "go.temporal.io/server/common/testing/protorequire" + "go.temporal.io/server/common/testing/testvars" + "go.temporal.io/server/tests/testcore" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + defaultStartToCloseTimeout 
= 1 * time.Minute + defaultIdentity = "test-worker" +) + +var ( + defaultInput = payloads.EncodeString("Input") + defaultHeartbeatDetails = payloads.EncodeString("Heartbeat Details") + defaultResult = payloads.EncodeString("Done") + defaultRetryPolicy = &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(time.Second), + BackoffCoefficient: 2.0, + MaximumAttempts: 0, + MaximumInterval: durationpb.New(100 * time.Second), + } + defaultFailure = &failurepb.Failure{ + Message: "Failed Activity", + FailureInfo: &failurepb.Failure_ApplicationFailureInfo{ApplicationFailureInfo: &failurepb.ApplicationFailureInfo{ + Type: "Test", + NonRetryable: true, + }}, + } + defaultHeader = &commonpb.Header{ + Fields: map[string]*commonpb.Payload{ + "key-1": payload.EncodeString("value-1"), + "key-2": payload.EncodeString("value-2"), + }, + } + defaultSearchAttributes = &commonpb.SearchAttributes{ + IndexedFields: map[string]*commonpb.Payload{ + "CustomKeywordField": payload.EncodeString("value1"), + }, + } + defaultUserMetadata = &sdkpb.UserMetadata{ + Summary: payload.EncodeString("test-summary"), + Details: payload.EncodeString("test-details"), + } +) + +type standaloneActivityTestSuite struct { + testcore.FunctionalTestBase + tv *testvars.TestVars +} + +func TestStandaloneActivityTestSuite(t *testing.T) { + t.Parallel() + suite.Run(t, new(standaloneActivityTestSuite)) +} + +func (s *standaloneActivityTestSuite) SetupSuite() { + s.FunctionalTestBase.SetupSuite() + s.OverrideDynamicConfig( + dynamicconfig.EnableChasm, + true, + ) + s.OverrideDynamicConfig( + activity.Enabled, + true, + ) +} + +func (s *standaloneActivityTestSuite) SetupTest() { + s.FunctionalTestBase.SetupTest() + s.tv = testvars.New(s.T()) +} + +func (s *standaloneActivityTestSuite) TestIDReusePolicy() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + t.Run("RejectDuplicate", func(t *testing.T) { + activityID := testcore.RandomizeStr(t.Name()) + taskQueue := 
testcore.RandomizeStr(t.Name()) + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + pollTaskResp := s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().RespondActivityTaskCompleted(ctx, &workflowservice.RespondActivityTaskCompletedRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + Result: defaultResult, + Identity: "new-worker", + }) + require.NoError(t, err) + + s.validateCompletion(ctx, t, activityID, runID, "new-worker") + + _, err = s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: s.tv.ActivityType(), + Identity: s.tv.WorkerIdentity(), + Input: defaultInput, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue, + }, + StartToCloseTimeout: durationpb.New(1 * time.Minute), + IdReusePolicy: enumspb.ACTIVITY_ID_REUSE_POLICY_REJECT_DUPLICATE, + }) + require.Error(t, err) + }) + + t.Run("AllowDuplicateFailedOnly", func(t *testing.T) { + activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + pollTaskResp := s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().RespondActivityTaskFailed(ctx, &workflowservice.RespondActivityTaskFailedRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + Failure: defaultFailure, + Identity: "new-worker", + }) + require.NoError(t, err) + + s.validateFailure(ctx, t, activityID, runID, nil, "new-worker") + + _, err = s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: s.tv.ActivityType(), + Identity: s.tv.WorkerIdentity(), + Input: defaultInput, 
+ TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue, + }, + StartToCloseTimeout: durationpb.New(1 * time.Minute), + IdReusePolicy: enumspb.ACTIVITY_ID_REUSE_POLICY_ALLOW_DUPLICATE_FAILED_ONLY, + }) + require.NoError(t, err) + }) +} + +func (s *standaloneActivityTestSuite) TestIDConflictPolicy() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue().String() + + t.Run("FailsIfExists", func(t *testing.T) { + startResponse := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + + // By default, unspecified conflict policy should be set to ACTIVITY_ID_CONFLICT_POLICY_FAIL, so no need to set explicitly + _, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: s.tv.ActivityType(), + Identity: s.tv.WorkerIdentity(), + Input: defaultInput, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue, + }, + StartToCloseTimeout: durationpb.New(1 * time.Minute), + }) + + require.Error(t, err) + statusErr := serviceerror.ToStatus(err) + require.Equal(t, codes.AlreadyExists, statusErr.Code()) + + var details *errordetailspb.ActivityExecutionAlreadyStartedFailure + for _, detail := range statusErr.Details() { + if d, ok := detail.(*errordetailspb.ActivityExecutionAlreadyStartedFailure); ok { + details = d + break + } + } + require.NotNil(t, details, "expected ActivityExecutionAlreadyStartedFailure in error details") + require.Equal(t, s.tv.RequestID(), details.StartRequestId) + require.Equal(t, startResponse.GetRunId(), details.RunId) + }) + + t.Run("UseExistingNoError", func(t *testing.T) { + firstStartResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + + secondStartResp, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: 
activityID, + ActivityType: s.tv.ActivityType(), + Identity: s.tv.WorkerIdentity(), + Input: defaultInput, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue, + }, + StartToCloseTimeout: durationpb.New(1 * time.Minute), + IdConflictPolicy: enumspb.ACTIVITY_ID_CONFLICT_POLICY_USE_EXISTING, + RequestId: s.tv.RequestID(), + }) + require.NoError(t, err) + require.Equal(t, firstStartResp.RunId, secondStartResp.RunId) + // require.Equal(t, false, secondStartResp.GetStarted()) TODO enable this when we can set the flag correctly + }) +} + +func (s *standaloneActivityTestSuite) TestActivityCompleted() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue().String() + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + pollTaskResp := s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().RespondActivityTaskCompleted(ctx, &workflowservice.RespondActivityTaskCompletedRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + Result: defaultResult, + Identity: "new-worker", + }) + require.NoError(t, err) + + s.validateCompletion(ctx, t, activityID, runID, "new-worker") +} + +func (s *standaloneActivityTestSuite) TestActivityCompletedByID() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + t.Run("WithRunID", func(t *testing.T) { + activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().RespondActivityTaskCompletedById(ctx, &workflowservice.RespondActivityTaskCompletedByIdRequest{ + Namespace: s.Namespace().String(), + RunId: runID, + ActivityId: 
activityID, + Result: defaultResult, + Identity: s.tv.WorkerIdentity(), + }) + require.NoError(t, err) + + s.validateCompletion(ctx, t, activityID, runID, s.tv.WorkerIdentity()) + }) + + t.Run("WithoutRunID", func(t *testing.T) { + activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().RespondActivityTaskCompletedById(ctx, &workflowservice.RespondActivityTaskCompletedByIdRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + Result: defaultResult, + Identity: s.tv.WorkerIdentity(), + }) + require.NoError(t, err) + + s.validateCompletion(ctx, t, activityID, runID, s.tv.WorkerIdentity()) + }) +} + +func (s *standaloneActivityTestSuite) TestActivityFailed() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue().String() + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + pollTaskResp := s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().RespondActivityTaskFailed(ctx, &workflowservice.RespondActivityTaskFailedRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + Failure: defaultFailure, + Identity: "new-worker", + }) + require.NoError(t, err) + + s.validateFailure(ctx, t, activityID, runID, nil, "new-worker") +} + +func (s *standaloneActivityTestSuite) TestActivityFailedWithLastHeartbeat() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue().String() + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + 
pollTaskResp := s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().RespondActivityTaskFailed(ctx, &workflowservice.RespondActivityTaskFailedRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + Failure: defaultFailure, + LastHeartbeatDetails: defaultHeartbeatDetails, + Identity: s.tv.WorkerIdentity(), + }) + require.NoError(t, err) + + s.validateFailure(ctx, t, activityID, runID, defaultHeartbeatDetails, s.tv.WorkerIdentity()) +} + +func (s *standaloneActivityTestSuite) TestActivityFailedByID() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + t.Run("WithRunID", func(t *testing.T) { + activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().RespondActivityTaskFailedById(ctx, &workflowservice.RespondActivityTaskFailedByIdRequest{ + Namespace: s.Namespace().String(), + RunId: runID, + ActivityId: activityID, + Failure: defaultFailure, + Identity: s.tv.WorkerIdentity(), + }) + require.NoError(t, err) + + s.validateFailure(ctx, t, activityID, runID, nil, s.tv.WorkerIdentity()) + }) + + t.Run("WithoutRunID", func(t *testing.T) { + activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().RespondActivityTaskFailedById(ctx, &workflowservice.RespondActivityTaskFailedByIdRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + Failure: defaultFailure, + Identity: s.tv.WorkerIdentity(), + }) + require.NoError(t, err) + + s.validateFailure(ctx, t, 
activityID, runID, nil, s.tv.WorkerIdentity()) + }) +} + +func (s *standaloneActivityTestSuite) TestActivityCancelled() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue().String() + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + pollTaskResp := s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().RequestCancelActivityExecution(ctx, &workflowservice.RequestCancelActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: s.tv.ActivityID(), + RunId: runID, + Identity: "cancelling-worker", + RequestId: s.tv.RequestID(), + Reason: "Test Cancellation", + }) + require.NoError(t, err) + + heartbeatResp, err := s.FrontendClient().RecordActivityTaskHeartbeat(ctx, &workflowservice.RecordActivityTaskHeartbeatRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + }) + require.NoError(t, err) + require.True(t, heartbeatResp.GetCancelRequested(), "expected CancelRequested to be true but was false") + + details := &commonpb.Payloads{ + Payloads: []*commonpb.Payload{ + payload.EncodeString("Canceled Details"), + }, + } + + _, err = s.FrontendClient().RespondActivityTaskCanceled(ctx, &workflowservice.RespondActivityTaskCanceledRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + Details: details, + Identity: "new-worker", + }) + require.NoError(t, err) + + activityResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + IncludeInput: true, + IncludeOutcome: true, + }) + require.NoError(t, err) + + info := activityResp.GetInfo() + require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_CANCELED, info.GetStatus(), + "expected Canceled but is %s", 
info.GetStatus()) + require.Equal(t, "Test Cancellation", info.GetCanceledReason()) + require.Equal(t, info.GetExecutionDuration().AsDuration(), time.Duration(0)) // Canceled doesn't set attempt completion, thus expect 0 here + require.Nil(t, info.GetCloseTime()) + protorequire.ProtoEqual(t, details, activityResp.GetOutcome().GetFailure().GetCanceledFailureInfo().GetDetails()) +} + +func (s *standaloneActivityTestSuite) TestActivityCancelledByID() { + testCases := []struct { + name string + includeRunID bool + }{ + { + name: "with run ID", + includeRunID: true, + }, + { + name: "without run ID", + includeRunID: false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + t.Cleanup(cancel) + + activityID := testcore.RandomizeStr(tc.name) + taskQueue := testcore.RandomizeStr(tc.name) + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().RequestCancelActivityExecution(ctx, &workflowservice.RequestCancelActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + Identity: "cancelling-worker", + RequestId: s.tv.RequestID(), + Reason: "Test Cancellation", + }) + require.NoError(t, err) + + heartbeatResp, err := s.FrontendClient().RecordActivityTaskHeartbeatById(ctx, &workflowservice.RecordActivityTaskHeartbeatByIdRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + }) + require.NoError(t, err) + require.True(t, heartbeatResp.GetCancelRequested(), "expected CancelRequested to be true but was false") + + details := &commonpb.Payloads{ + Payloads: []*commonpb.Payload{ + payload.EncodeString("Canceled Details"), + }, + } + + cancelReq := &workflowservice.RespondActivityTaskCanceledByIdRequest{ + Namespace: s.Namespace().String(), + 
ActivityId: activityID, + Details: details, + Identity: "new-worker", + } + + if tc.includeRunID { + cancelReq.RunId = runID + } + + _, err = s.FrontendClient().RespondActivityTaskCanceledById(ctx, cancelReq) + require.NoError(t, err) + + activityResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + IncludeInput: true, + IncludeOutcome: true, + }) + require.NoError(t, err) + + info := activityResp.GetInfo() + require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_CANCELED, info.GetStatus(), + "expected Canceled but is %s", info.GetStatus()) + require.Equal(t, "Test Cancellation", info.GetCanceledReason()) + protorequire.ProtoEqual(t, details, activityResp.GetOutcome().GetFailure().GetCanceledFailureInfo().GetDetails()) + }) + } +} + +func (s *standaloneActivityTestSuite) TestActivityCancelled_FailsIfNeverRequested() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue().String() + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + pollTaskResp := s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + details := &commonpb.Payloads{ + Payloads: []*commonpb.Payload{ + payload.EncodeString("Canceled Details"), + }, + } + + _, err := s.FrontendClient().RespondActivityTaskCanceled(ctx, &workflowservice.RespondActivityTaskCanceledRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + Details: details, + Identity: "new-worker", + }) + var failedPreconditionErr *serviceerror.FailedPrecondition + require.ErrorAs(t, err, &failedPreconditionErr) +} + +func (s *standaloneActivityTestSuite) TestActivityCancelled_DuplicateRequestIDSucceeds() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() 
+ + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue().String() + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + for i := 0; i < 2; i++ { + _, err := s.FrontendClient().RequestCancelActivityExecution(ctx, &workflowservice.RequestCancelActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: s.tv.ActivityID(), + RunId: runID, + Identity: "cancelling-worker", + RequestId: "cancel-request-id", + Reason: "Test Cancellation", + }) + require.NoError(t, err) + } + + heartbeatResp, err := s.FrontendClient().RecordActivityTaskHeartbeatById(ctx, &workflowservice.RecordActivityTaskHeartbeatByIdRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + }) + require.NoError(t, err) + require.True(t, heartbeatResp.GetCancelRequested(), "expected CancelRequested to be true but was false") + + activityResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + IncludeInput: true, + IncludeOutcome: true, + }) + require.NoError(t, err) + + info := activityResp.GetInfo() + require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_RUNNING, info.GetStatus(), + "expected Running but is %s", info.GetStatus()) + require.Equal(t, enumspb.PENDING_ACTIVITY_STATE_CANCEL_REQUESTED, info.GetRunState(), + "expected CancelRequested but is %s", info.GetRunState()) + require.Equal(t, "Test Cancellation", info.GetCanceledReason()) +} + +func (s *standaloneActivityTestSuite) TestActivityCancelled_DifferentRequestIDFails() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue().String() + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID 
:= startResp.RunId + + s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().RequestCancelActivityExecution(ctx, &workflowservice.RequestCancelActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: s.tv.ActivityID(), + RunId: runID, + Identity: "cancelling-worker", + RequestId: "cancel-request-id", + Reason: "Test Cancellation", + }) + require.NoError(t, err) + + _, err = s.FrontendClient().RequestCancelActivityExecution(ctx, &workflowservice.RequestCancelActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: s.tv.ActivityID(), + RunId: runID, + Identity: "cancelling-worker", + RequestId: "different-cancel-request-id", + Reason: "Test Cancellation", + }) + var failedPreconditionErr *serviceerror.FailedPrecondition + require.ErrorAs(t, err, &failedPreconditionErr) +} + +func (s *standaloneActivityTestSuite) TestActivityFinishes_AfterCancelRequested() { + testCases := []struct { + name string + taskCompletionFn func(context.Context, *testing.T, []byte, string, string) error + expectedStatus enumspb.ActivityExecutionStatus + }{ + { + name: "finish with completion", + taskCompletionFn: func(ctx context.Context, t *testing.T, taskToken []byte, activityID string, runID string) error { + _, err := s.FrontendClient().RespondActivityTaskCompleted(ctx, &workflowservice.RespondActivityTaskCompletedRequest{ + Namespace: s.Namespace().String(), + TaskToken: taskToken, + Result: defaultResult, + }) + + return err + }, + expectedStatus: enumspb.ACTIVITY_EXECUTION_STATUS_COMPLETED, + }, + { + name: "finish with failure", + taskCompletionFn: func(ctx context.Context, t *testing.T, taskToken []byte, activityID string, runID string) error { + _, err := s.FrontendClient().RespondActivityTaskFailed(ctx, &workflowservice.RespondActivityTaskFailedRequest{ + Namespace: s.Namespace().String(), + TaskToken: taskToken, + Failure: defaultFailure, + }) + + return err + }, + expectedStatus: 
enumspb.ACTIVITY_EXECUTION_STATUS_FAILED, + }, + { + name: "finish with termination", + taskCompletionFn: func(ctx context.Context, t *testing.T, taskToken []byte, activityID string, runID string) error { + _, err := s.FrontendClient().TerminateActivityExecution(ctx, &workflowservice.TerminateActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + Reason: "Test Termination", + }) + + return err + }, + expectedStatus: enumspb.ACTIVITY_EXECUTION_STATUS_TERMINATED, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + t := s.T() + + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + activityID := s.tv.Any().String() + taskQueue := s.tv.TaskQueue().String() + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + pollTaskResp := s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().RequestCancelActivityExecution(ctx, &workflowservice.RequestCancelActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + Identity: "cancelling-worker", + RequestId: s.tv.RequestID(), + Reason: "Test Cancellation", + }) + require.NoError(t, err) + + err = tc.taskCompletionFn(ctx, t, pollTaskResp.GetTaskToken(), activityID, runID) + require.NoError(t, err) + + activityResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + }) + require.NoError(t, err) + + info := activityResp.GetInfo() + require.Equal(t, tc.expectedStatus, info.GetStatus()) + }) + } +} + +func (s *standaloneActivityTestSuite) TestRequestCancellation_FailsValidation() { + testCases := []struct { + name string + reqID string + reason string + }{ + { + name: "request ID too long", + reqID: string(make([]byte, 1001)), // dynamic config default is 
1000 + reason: "", + }, + { + name: "reason too long", + reqID: "", + reason: string(make([]byte, 1001)), // dynamic config default is 1000 + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + t := s.T() + + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + _, err := s.FrontendClient().RequestCancelActivityExecution(ctx, &workflowservice.RequestCancelActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: s.tv.ActivityID(), + RunId: "run-id", + Identity: "cancelling-worker", + RequestId: tc.reqID, + Reason: tc.reason, + }) + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + }) + } +} + +func (s *standaloneActivityTestSuite) TestActivityImmediatelyCancelled_WhenInScheduledState() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue().String() + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + _, err := s.FrontendClient().RequestCancelActivityExecution(ctx, &workflowservice.RequestCancelActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: s.tv.ActivityID(), + RunId: runID, + Identity: "cancelling-worker", + RequestId: s.tv.RequestID(), + Reason: "Test Cancellation", + }) + require.NoError(t, err) + + activityResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + IncludeInput: true, + IncludeOutcome: true, + }) + require.NoError(t, err) + + info := activityResp.GetInfo() + require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_CANCELED, info.GetStatus()) +} + +func (s *standaloneActivityTestSuite) TestActivityTerminated() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + activityID 
:= s.tv.ActivityID() + taskQueue := s.tv.TaskQueue().String() + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().TerminateActivityExecution(ctx, &workflowservice.TerminateActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + Reason: "Test Termination", + Identity: "terminator", + }) + require.NoError(t, err) + + activityResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + IncludeInput: true, + IncludeOutcome: true, + }) + + info := activityResp.GetInfo() + + require.NoError(t, err) + s.validateBaseActivityResponse(t, activityID, runID, activityResp) + require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_TERMINATED, info.GetStatus(), + "expected Terminated but is %s", info.GetStatus()) + require.Equal(t, enumspb.PENDING_ACTIVITY_STATE_UNSPECIFIED, info.GetRunState(), + "expected Unspecified but is %s", info.GetRunState()) + require.EqualValues(t, 1, info.GetAttempt()) + require.Nil(t, info.GetCloseTime()) + require.Equal(t, info.GetExecutionDuration().AsDuration(), time.Duration(0)) // Terminated doesn't set attempt completion, thus expect 0 here + require.Equal(t, s.tv.WorkerIdentity(), info.GetLastWorkerIdentity()) + require.NotNil(t, info.GetLastStartedTime()) + require.Nil(t, info.GetLastFailure()) + + expectedFailure := &failurepb.Failure{ + Message: "Test Termination", + FailureInfo: &failurepb.Failure_TerminatedFailureInfo{}, + } + protorequire.ProtoEqual(t, expectedFailure, activityResp.GetOutcome().GetFailure()) +} + +func (s *standaloneActivityTestSuite) TestCompletedActivity_CannotTerminate() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + activityID := 
s.tv.ActivityID() + taskQueue := s.tv.TaskQueue().String() + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + pollTaskResp := s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().RespondActivityTaskCompleted(ctx, &workflowservice.RespondActivityTaskCompletedRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + Result: defaultResult, + Identity: "new-worker", + }) + require.NoError(t, err) + + _, err = s.FrontendClient().TerminateActivityExecution(ctx, &workflowservice.TerminateActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + Reason: "Test Termination", + Identity: "worker", + }) + require.Error(t, err) +} + +func (s *standaloneActivityTestSuite) TestActivityTerminated_DuplicateRequestIDSucceeds() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue().String() + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().TerminateActivityExecution(ctx, &workflowservice.TerminateActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RequestId: "test-request-id", + RunId: runID, + Reason: "Test Termination", + Identity: "terminator", + }) + require.NoError(t, err) + + _, err = s.FrontendClient().TerminateActivityExecution(ctx, &workflowservice.TerminateActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RequestId: "test-request-id", + RunId: runID, + Reason: "Test Termination", + Identity: "terminator", + }) + require.NoError(t, err) +} + +func (s *standaloneActivityTestSuite) TestActivityTerminated_DifferentRequestIDFails() { + t := s.T() + ctx, cancel 
:= context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue().String() + + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + s.pollActivityTaskAndValidate(ctx, t, activityID, taskQueue, runID) + + _, err := s.FrontendClient().TerminateActivityExecution(ctx, &workflowservice.TerminateActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RequestId: "test-request-id", + RunId: runID, + Reason: "Test Termination", + Identity: "terminator", + }) + require.NoError(t, err) + + _, err = s.FrontendClient().TerminateActivityExecution(ctx, &workflowservice.TerminateActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RequestId: "test-request-id-2", + RunId: runID, + Reason: "Test Termination", + Identity: "terminator", + }) + var failedPreconditionErr *serviceerror.FailedPrecondition + require.ErrorAs(t, err, &failedPreconditionErr) +} + +func (s *standaloneActivityTestSuite) TestRetryWithoutScheduleToCloseTimeout() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second) + defer cancel() + + activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + + // Start activity without ScheduleToCloseTimeout + _, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + StartToCloseTimeout: durationpb.New(1 * time.Minute), + RetryPolicy: &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Millisecond), + MaximumAttempts: 2, + }, + }) + require.NoError(t, err) + + // Attempt 1: fail retryably + pollResp1, err := s.FrontendClient().PollActivityTaskQueue(ctx, 
&workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + }) + require.NoError(t, err) + require.EqualValues(t, 1, pollResp1.Attempt) + _, err = s.FrontendClient().RespondActivityTaskFailed(ctx, &workflowservice.RespondActivityTaskFailedRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollResp1.TaskToken, + Failure: &failurepb.Failure{ + Message: "retryable failure", + FailureInfo: &failurepb.Failure_ApplicationFailureInfo{ + ApplicationFailureInfo: &failurepb.ApplicationFailureInfo{NonRetryable: false}, + }, + }, + }) + require.NoError(t, err) + + // Attempt 2 should be scheduled + pollResp2, err := s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + }) + require.NoError(t, err) + require.EqualValues(t, 2, pollResp2.Attempt) +} + +func (s *standaloneActivityTestSuite) Test_ScheduleToCloseTimeout_WithRetry() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second) + defer cancel() + activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + + // Start an activity + startResp, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: &commonpb.ActivityType{ + Name: "test-activity-type", + }, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue, + }, + // It's not possible to guarantee (e.g. via NextRetryDelay or RetryPolicy) that a retry + // will start with a delay <1s because of the use of TimerProcessorMaxTimeShift in the + // timer queue. Therefore we allow 1s for the ActivityDispatchTask to be executed, and + // time out the activity 1s into Attempt 2. 
+ ScheduleToCloseTimeout: durationpb.New(2 * time.Second), + }) + require.NoError(t, err) + + // Fail attempt 1, causing the attempt counter to increment. + pollTaskResp, err := s.pollActivityTaskQueue(ctx, taskQueue) + require.NoError(t, err) + _, err = s.FrontendClient().RespondActivityTaskFailed(ctx, &workflowservice.RespondActivityTaskFailedRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + Failure: &failurepb.Failure{ + Message: "Retryable failure", + FailureInfo: &failurepb.Failure_ApplicationFailureInfo{ApplicationFailureInfo: &failurepb.ApplicationFailureInfo{ + NonRetryable: false, + NextRetryDelay: durationpb.New(1 * time.Second), + }}, + }, + }) + require.NoError(t, err) + _, err = s.pollActivityTaskQueue(ctx, taskQueue) + require.NoError(t, err) + + // Wait for schedule-to-close timeout. + pollActivityResp, err := s.FrontendClient().PollActivityExecution(ctx, &workflowservice.PollActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + }) + require.NoError(t, err) + require.Equal(t, enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, pollActivityResp.GetOutcome().GetFailure().GetTimeoutFailureInfo().GetTimeoutType(), + "expected ScheduleToCloseTimeout but is %s", pollActivityResp.GetOutcome().GetFailure().GetTimeoutFailureInfo().GetTimeoutType()) + + describeResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + }) + require.NoError(t, err) + require.Greater(t, describeResp.GetInfo().GetExecutionDuration().AsDuration(), time.Duration(0)) // should have non-zero as attempts have been made + require.False(t, describeResp.GetInfo().GetCloseTime().AsTime().IsZero()) +} + +// TestStartToCloseTimeout tests that a start-to-close timeout is recorded after the activity is +// started. 
It also verifies that DescribeActivityExecution can be used to long-poll for a TimedOut +// state change caused by execution of a timer task. +func (s *standaloneActivityTestSuite) TestStartToCloseTimeout() { + t := s.T() + + ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second) + defer cancel() + + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue() + + startResp, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: &commonpb.ActivityType{ + Name: "test-activity-type", + }, + Input: payloads.EncodeString("test-activity-input"), + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue.Name, + }, + StartToCloseTimeout: durationpb.New(1 * time.Second), + // This test is expecting activity failure on start-to-close timeout. + RetryPolicy: &commonpb.RetryPolicy{ + MaximumAttempts: 1, + }, + RequestId: "test-request-id", + }) + require.NoError(t, err) + + // First poll: activity has not started yet + describeResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + }) + require.NoError(t, err) + require.NotNil(t, describeResp) + require.NotNil(t, describeResp.GetInfo()) + require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_RUNNING, describeResp.GetInfo().GetStatus(), + "expected Running but is %s", describeResp.GetInfo().GetStatus()) + require.Equal(t, enumspb.PENDING_ACTIVITY_STATE_SCHEDULED, describeResp.GetInfo().GetRunState(), + "expected Scheduled but is %s", describeResp.GetInfo().GetRunState()) + + // Worker poll to start the activity + pollTaskResp, err := s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue.Name, + }, + Identity: 
s.tv.WorkerIdentity(), + }) + require.NoError(t, err) + require.NotNil(t, pollTaskResp) + require.NotEmpty(t, pollTaskResp.TaskToken) + + // Second poll: activity has started + describeResp, err = s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + LongPollToken: describeResp.LongPollToken, + }) + require.NoError(t, err) + require.NotNil(t, describeResp) + require.NotNil(t, describeResp.GetInfo()) + require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_RUNNING, describeResp.GetInfo().GetStatus(), + "expected Running but is %s", describeResp.GetInfo().GetStatus()) + require.Equal(t, enumspb.PENDING_ACTIVITY_STATE_STARTED, describeResp.GetInfo().GetRunState(), + "expected Started but is %s", describeResp.GetInfo().GetRunState()) + + // Third poll: activity has timed out + describeResp, err = s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + IncludeOutcome: true, + LongPollToken: describeResp.LongPollToken, + }) + + require.NoError(t, err) + require.NotNil(t, describeResp) + require.NotNil(t, describeResp.GetInfo()) + + // The activity has timed out due to StartToClose. This is an attempt failure, therefore the + // failure should be in ActivityExecutionInfo.LastFailure as well as set as the outcome failure. 
+ require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT, describeResp.GetInfo().GetStatus(), + "expected TimedOut but is %s", describeResp.GetInfo().GetStatus()) + require.Greater(t, describeResp.GetInfo().GetExecutionDuration().AsDuration(), time.Duration(0)) + require.False(t, describeResp.GetInfo().GetCloseTime().AsTime().IsZero()) + failure := describeResp.GetInfo().GetLastFailure() + require.NotNil(t, failure) + timeoutFailure := failure.GetTimeoutFailureInfo() + require.NotNil(t, timeoutFailure) + require.Equal(t, enumspb.TIMEOUT_TYPE_START_TO_CLOSE, timeoutFailure.GetTimeoutType(), + "expected StartToCloseTimeout but is %s", timeoutFailure.GetTimeoutType()) + + require.NotNil(t, describeResp.GetOutcome().GetFailure()) + protorequire.ProtoEqual(t, failure, describeResp.GetOutcome().GetFailure()) + require.Equal(t, enumspb.TIMEOUT_TYPE_START_TO_CLOSE, describeResp.GetOutcome().GetFailure().GetTimeoutFailureInfo().GetTimeoutType(), + "expected StartToCloseTimeout but is %s", describeResp.GetOutcome().GetFailure().GetTimeoutFailureInfo().GetTimeoutType()) +} + +// TestScheduleToStartTimeout tests that a schedule-to-start timeout is recorded after the activity is +// created but never started. It also verifies that DescribeActivityExecution can be used to long-poll for a TimedOut +// state change caused by execution of a timer task. 
+func (s *standaloneActivityTestSuite) TestScheduleToStartTimeout() { + t := s.T() + + ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second) + defer cancel() + + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue() + + startResp, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: &commonpb.ActivityType{ + Name: "test-activity-type", + }, + Input: payloads.EncodeString("test-activity-input"), + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue.Name, + }, + ScheduleToStartTimeout: durationpb.New(100 * time.Millisecond), + StartToCloseTimeout: durationpb.New(30 * time.Second), + RequestId: "test-request-id", + }) + require.NoError(t, err) + + describeResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + }) + require.NoError(t, err) + + // Long poll to await activity timeout + describeResp, err = s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + IncludeOutcome: true, + LongPollToken: describeResp.LongPollToken, + }) + + require.NoError(t, err) + require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT, describeResp.GetInfo().GetStatus(), + "expected TimedOut but is %s", describeResp.GetInfo().GetStatus()) + // Schedule to Start timeout does not overwrite attempt fields therefore execution duration should be zero and an + // attempt was never made + require.Equal(t, describeResp.GetInfo().GetExecutionDuration().AsDuration(), time.Duration(0)) + require.Nil(t, describeResp.GetInfo().GetCloseTime()) + require.Equal(t, enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START, 
describeResp.GetOutcome().GetFailure().GetTimeoutFailureInfo().GetTimeoutType(), + "expected ScheduleToStartTimeout but is %s", describeResp.GetOutcome().GetFailure().GetTimeoutFailureInfo().GetTimeoutType()) +} + +func (s *standaloneActivityTestSuite) TestDescribeActivityExecution_NoWait() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue() + + startReq := &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: s.tv.ActivityType(), + Header: defaultHeader, + HeartbeatTimeout: durationpb.New(45 * time.Second), + Identity: s.tv.WorkerIdentity(), + Input: defaultInput, + ScheduleToStartTimeout: durationpb.New(30 * time.Second), + ScheduleToCloseTimeout: durationpb.New(3 * time.Minute), + StartToCloseTimeout: durationpb.New(1 * time.Minute), + RequestId: s.tv.RequestID(), + RetryPolicy: &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(2 * time.Second), + BackoffCoefficient: 1.5, + MaximumAttempts: 3, + MaximumInterval: durationpb.New(111 * time.Second), + }, + Priority: &commonpb.Priority{ + FairnessKey: "test-key", + }, + SearchAttributes: defaultSearchAttributes, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue.GetName(), + }, + UserMetadata: defaultUserMetadata, + } + + startResp, err := s.FrontendClient().StartActivityExecution(ctx, startReq) + require.NoError(t, err) + + t.Run("MinimalResponse", func(t *testing.T) { + describeResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + // Omit RunID to verify that latest run will be used + IncludeInput: false, + IncludeOutcome: false, + }) + require.NoError(t, err) + require.NotNil(t, describeResp.LongPollToken) + require.Equal(t, startResp.RunId, describeResp.RunId) + require.Nil(t, describeResp.Input) 
+ require.Nil(t, describeResp.GetOutcome().GetResult()) + require.Nil(t, describeResp.GetOutcome().GetFailure()) + }) + + t.Run("FullResponse", func(t *testing.T) { + describeResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + IncludeInput: true, + IncludeOutcome: true, + }) + require.NoError(t, err) + + respInfo := describeResp.GetInfo() + require.NotNil(t, describeResp.LongPollToken) + require.NotNil(t, respInfo) + + expectedExpirationTime := timestamppb.New(respInfo.GetScheduleTime().AsTime().Add( + startReq.GetScheduleToCloseTimeout().AsDuration())) + + expected := &activitypb.ActivityExecutionInfo{ + ActivityId: activityID, + ActivityType: s.tv.ActivityType(), + Attempt: 1, + ExpirationTime: expectedExpirationTime, + Header: defaultHeader, + HeartbeatTimeout: startReq.GetHeartbeatTimeout(), + RetryPolicy: startReq.GetRetryPolicy(), + RunId: startResp.RunId, + RunState: enumspb.PENDING_ACTIVITY_STATE_SCHEDULED, + Priority: startReq.GetPriority(), + ScheduleToCloseTimeout: startReq.GetScheduleToCloseTimeout(), + ScheduleToStartTimeout: startReq.GetScheduleToStartTimeout(), + StartToCloseTimeout: startReq.GetStartToCloseTimeout(), + Status: enumspb.ACTIVITY_EXECUTION_STATUS_RUNNING, + SearchAttributes: defaultSearchAttributes, + TaskQueue: taskQueue.Name, + UserMetadata: defaultUserMetadata, + } + + diff := cmp.Diff(expected, respInfo, + protocmp.Transform(), + // Ignore non-deterministic fields. Validated separately. 
+ protocmp.IgnoreFields(&activitypb.ActivityExecutionInfo{}, + "execution_duration", + "schedule_time", + "state_transition_count", + ), + ) + require.Empty(t, diff) + require.Equal(t, respInfo.GetExecutionDuration().AsDuration(), time.Duration(0)) // Never completed, so expect 0 + require.Nil(t, describeResp.GetInfo().GetCloseTime()) + require.Positive(t, respInfo.GetScheduleTime().AsTime().Unix()) + require.Positive(t, respInfo.GetStateTransitionCount()) + + protorequire.ProtoEqual(t, defaultInput, describeResp.Input) + + // Activity is scheduled but not completed, so no outcome yet + require.Nil(t, describeResp.GetOutcome().GetResult()) + require.Nil(t, describeResp.GetOutcome().GetFailure()) + }) +} + +func (s *standaloneActivityTestSuite) TestDescribeActivityExecution_WaitAnyStateChange() { + // Long poll for any state change. PollActivityTaskQueue is used to cause a state change. + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue() + + startResp, err := s.startActivity(ctx, activityID, taskQueue.Name) + require.NoError(t, err) + + // First poll lacks token and therefore responds immediately, returning a token + firstDescribeResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + // RunId: startResp.RunId, // RunID is now required by validation [?] 
+ IncludeInput: true, + }) + require.NoError(t, err) + require.NotNil(t, firstDescribeResp.LongPollToken) + require.NotNil(t, firstDescribeResp.Info) + require.Equal(t, firstDescribeResp.RunId, startResp.RunId) + + expected := &activitypb.ActivityExecutionInfo{ + ActivityId: activityID, + ActivityType: s.tv.ActivityType(), + Attempt: 1, + HeartbeatTimeout: durationpb.New(0), + RetryPolicy: defaultRetryPolicy, + RunId: startResp.RunId, + RunState: enumspb.PENDING_ACTIVITY_STATE_SCHEDULED, + SearchAttributes: &commonpb.SearchAttributes{}, + ScheduleToCloseTimeout: durationpb.New(0), + ScheduleToStartTimeout: durationpb.New(0), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: enumspb.ACTIVITY_EXECUTION_STATUS_RUNNING, + TaskQueue: taskQueue.Name, + } + diff := cmp.Diff(expected, firstDescribeResp.GetInfo(), + protocmp.Transform(), + // Ignore non-deterministic fields. Validated separately. + protocmp.IgnoreFields(&activitypb.ActivityExecutionInfo{}, + "execution_duration", + "schedule_time", + "state_transition_count", + ), + ) + require.Empty(t, diff) + + taskQueuePollErr := make(chan error, 1) + activityPollDone := make(chan struct{}) + var describeResp *workflowservice.DescribeActivityExecutionResponse + var describeErr error + + go func() { + defer close(activityPollDone) + // Second poll uses token and therefore waits for a state transition + describeResp, describeErr = s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + IncludeInput: true, + LongPollToken: firstDescribeResp.LongPollToken, + }) + }() + + // TODO(dan): race here: subscription might not be established yet + + // Worker picks up activity task, triggering transition (via RecordActivityTaskStarted) + go func() { + _, err := s.pollActivityTaskQueue(ctx, taskQueue.Name) + taskQueuePollErr <- err + }() + + select { + case 
<-activityPollDone: + require.NoError(t, describeErr) + require.NotNil(t, describeResp) + require.NotNil(t, describeResp.Info) + + expected := &activitypb.ActivityExecutionInfo{ + ActivityId: activityID, + ActivityType: s.tv.ActivityType(), + Attempt: 1, + HeartbeatTimeout: durationpb.New(0), + LastWorkerIdentity: defaultIdentity, + RetryPolicy: defaultRetryPolicy, + RunId: startResp.RunId, + RunState: enumspb.PENDING_ACTIVITY_STATE_STARTED, + ScheduleToCloseTimeout: durationpb.New(0), + ScheduleToStartTimeout: durationpb.New(0), + SearchAttributes: &commonpb.SearchAttributes{}, + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: enumspb.ACTIVITY_EXECUTION_STATUS_RUNNING, + TaskQueue: taskQueue.Name, + } + diff := cmp.Diff(expected, describeResp.GetInfo(), + protocmp.Transform(), + // Ignore non-deterministic fields. Validated separately. + protocmp.IgnoreFields(&activitypb.ActivityExecutionInfo{}, + "execution_duration", + "last_started_time", + "schedule_time", + "state_transition_count", + ), + ) + require.Empty(t, diff) + + protorequire.ProtoEqual(t, defaultInput, describeResp.Input) + + case <-ctx.Done(): + t.Fatal("DescribeActivityExecution timed out") + } + + err = <-taskQueuePollErr + require.NoError(t, err) +} + +func (s *standaloneActivityTestSuite) TestPollActivityExecution() { + testCases := []struct { + name string + expectedStatus enumspb.ActivityExecutionStatus + taskCompletionFn func(context.Context, []byte) error + completionValidationFn func(*testing.T, *workflowservice.PollActivityExecutionResponse) + }{ + { + name: "successful completion", + expectedStatus: enumspb.ACTIVITY_EXECUTION_STATUS_COMPLETED, + taskCompletionFn: func(ctx context.Context, taskToken []byte) error { + _, err := s.FrontendClient().RespondActivityTaskCompleted(ctx, &workflowservice.RespondActivityTaskCompletedRequest{ + Namespace: s.Namespace().String(), + TaskToken: taskToken, + Result: defaultResult, + }) + + return err + }, + 
completionValidationFn: func(t *testing.T, response *workflowservice.PollActivityExecutionResponse) { + protorequire.ProtoEqual(t, defaultResult, response.GetOutcome().GetResult()) + }, + }, + { + name: "failure completion", + expectedStatus: enumspb.ACTIVITY_EXECUTION_STATUS_FAILED, + taskCompletionFn: func(ctx context.Context, taskToken []byte) error { + _, err := s.FrontendClient().RespondActivityTaskFailed(ctx, &workflowservice.RespondActivityTaskFailedRequest{ + Namespace: s.Namespace().String(), + TaskToken: taskToken, + Failure: defaultFailure, + }) + + return err + }, + completionValidationFn: func(t *testing.T, response *workflowservice.PollActivityExecutionResponse) { + protorequire.ProtoEqual(t, defaultFailure, response.GetOutcome().GetFailure()) + }, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + t.Cleanup(cancel) + activityID := s.tv.Any().String() + taskQueue := s.tv.TaskQueue().String() + + startResp, err := s.startActivity(ctx, activityID, taskQueue) + require.NoError(t, err) + pollTaskResp, err := s.pollActivityTaskQueue(ctx, taskQueue) + require.NoError(t, err) + err = tc.taskCompletionFn(ctx, pollTaskResp.TaskToken) + require.NoError(t, err) + pollActivityResp, err := s.FrontendClient().PollActivityExecution(ctx, &workflowservice.PollActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + }) + require.NoError(t, err) + require.NotNil(t, pollActivityResp) + tc.completionValidationFn(t, pollActivityResp) + }) + } +} + +func (s *standaloneActivityTestSuite) TestPollActivityExecution_EmptyRunID() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + t.Cleanup(cancel) + + activityID := s.tv.Any().String() + taskQueue := s.tv.TaskQueue().String() + + startResp, err := s.startActivity(ctx, activityID, taskQueue) + require.NoError(t, err) + + pollTaskResp, err := 
s.pollActivityTaskQueue(ctx, taskQueue) + require.NoError(t, err) + + _, err = s.FrontendClient().RespondActivityTaskCompleted(ctx, &workflowservice.RespondActivityTaskCompletedRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + Result: defaultResult, + }) + require.NoError(t, err) + + pollActivityResp, err := s.FrontendClient().PollActivityExecution(ctx, &workflowservice.PollActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: "", // resolves to current run ID + }) + require.NoError(t, err) + require.NotNil(t, pollActivityResp) + require.Equal(t, startResp.RunId, pollActivityResp.GetRunId()) + protorequire.ProtoEqual(t, defaultResult, pollActivityResp.GetOutcome().GetResult()) +} + +func (s *standaloneActivityTestSuite) TestPollActivityExecution_NotFound() { + t := s.T() + ctx := testcore.NewContext() + + existingActivityID := s.tv.ActivityID() + tq := s.tv.TaskQueue() + startResp, err := s.startActivity(ctx, existingActivityID, tq.Name) + require.NoError(t, err) + existingRunID := startResp.RunId + require.NotEmpty(t, existingRunID) + existingNamespace := s.Namespace().String() + + var notFoundErr *serviceerror.NotFound + var namespaceNotFoundErr *serviceerror.NamespaceNotFound + + testCases := []struct { + name string + request *workflowservice.PollActivityExecutionRequest + expectedErr error + expectedErrMsg string + }{ + { + name: "NonExistentNamespace", + request: &workflowservice.PollActivityExecutionRequest{ + Namespace: "non-existent-namespace", + ActivityId: existingActivityID, + RunId: existingRunID, + }, + expectedErr: namespaceNotFoundErr, + expectedErrMsg: "Namespace non-existent-namespace is not found.", + }, + { + name: "NonExistentActivityID", + request: &workflowservice.PollActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: "non-existent-activity", + RunId: existingRunID, + }, + expectedErr: notFoundErr, + expectedErrMsg: "activity execution not 
found", + }, + { + name: "NonExistentRunID", + request: &workflowservice.PollActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: existingActivityID, + RunId: "11111111-2222-3333-4444-555555555555", + }, + expectedErr: notFoundErr, + expectedErrMsg: "activity execution not found", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := s.FrontendClient().PollActivityExecution(ctx, tc.request) + require.ErrorAs(t, err, &tc.expectedErr) //nolint:testifylint + require.Equal(t, tc.expectedErrMsg, tc.expectedErr.Error()) + }) + } +} + +func (s *standaloneActivityTestSuite) TestPollActivityExecution_InvalidArgument() { + t := s.T() + ctx := testcore.NewContext() + + existingNamespace := s.Namespace().String() + validRunID := "11111111-2222-3333-4444-555555555555" + + testCases := []struct { + name string + request *workflowservice.PollActivityExecutionRequest + expectedErr string + }{ + { + name: "EmptyNamespace", + request: &workflowservice.PollActivityExecutionRequest{ + Namespace: "", + ActivityId: "activity-id", + RunId: validRunID, + }, + expectedErr: "Namespace is empty", + }, + { + name: "EmptyActivityID", + request: &workflowservice.PollActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: "", + RunId: validRunID, + }, + expectedErr: "activity ID is required", + }, + { + name: "ActivityIDTooLong", + request: &workflowservice.PollActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: string(make([]byte, 2000)), + RunId: validRunID, + }, + expectedErr: "activity ID exceeds length limit", + }, + { + name: "InvalidRunID", + request: &workflowservice.PollActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: "activity-id", + RunId: "invalid-uuid", + }, + expectedErr: "invalid run id", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := s.FrontendClient().PollActivityExecution(ctx, tc.request) + var invalidArgErr 
*serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, invalidArgErr.Message, tc.expectedErr) + }) + } +} + +// TODO(dan): add tests that DescribeActivityExecution can wait for deletion, termination, cancellation etc + +func (s *standaloneActivityTestSuite) TestListActivityExecutions() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second) + defer cancel() + + activityID := s.tv.ActivityID() + activityType := s.tv.ActivityType().GetName() + taskQueue := s.tv.TaskQueue().GetName() + startResp := s.startAndValidateActivity(ctx, t, activityID, taskQueue) + runID := startResp.RunId + + verifyListQuery := func(t *testing.T, query string) { + t.Helper() + var resp *workflowservice.ListActivityExecutionsResponse + s.Eventually( + func() bool { + var err error + resp, err = s.FrontendClient().ListActivityExecutions(ctx, &workflowservice.ListActivityExecutionsRequest{ + Namespace: s.Namespace().String(), + PageSize: 10, + Query: query, + }) + return err == nil && len(resp.GetExecutions()) >= 1 + }, + testcore.WaitForESToSettle, + 100*time.Millisecond, + ) + require.Len(t, resp.GetExecutions(), 1, "expected exactly 1 result for query: %s", query) + exec := resp.GetExecutions()[0] + // Verify all ActivityExecutionListInfo fields + s.Equal(activityID, exec.GetActivityId()) + s.Equal(runID, exec.GetRunId()) + s.Equal(activityType, exec.GetActivityType().GetName()) + s.Equal(taskQueue, exec.GetTaskQueue()) + s.Equal(enumspb.ACTIVITY_EXECUTION_STATUS_RUNNING, exec.GetStatus()) + s.NotNil(exec.GetScheduleTime()) + s.Nil(exec.GetCloseTime()) // Running activity has no close time + s.Nil(exec.GetExecutionDuration()) // Running activity has no execution duration + s.GreaterOrEqual(exec.GetStateSizeBytes(), int64(0)) + s.GreaterOrEqual(exec.GetStateTransitionCount(), int64(0)) + } + + t.Run("QueryByActivityId", func(t *testing.T) { + verifyListQuery(t, fmt.Sprintf("ActivityId = '%s'", activityID)) + }) + + 
t.Run("QueryByActivityType", func(t *testing.T) { + verifyListQuery(t, fmt.Sprintf("ActivityType = '%s'", activityType)) + }) + + t.Run("QueryByActivityStatus", func(t *testing.T) { + verifyListQuery(t, fmt.Sprintf("ActivityStatus = 'Running' AND ActivityType = '%s'", activityType)) + }) + + t.Run("QueryByTaskQueue", func(t *testing.T) { + verifyListQuery(t, fmt.Sprintf("ActivityTaskQueue = '%s' AND ActivityType = '%s'", taskQueue, activityType)) + }) + + t.Run("QueryByMultipleFields", func(t *testing.T) { + verifyListQuery(t, fmt.Sprintf("ActivityId = '%s' AND ActivityType = '%s'", activityID, activityType)) + }) + + t.Run("QueryByCustomSearchAttribute", func(t *testing.T) { + customSAName := "CustomKeywordField" + customSAValue := "custom-sa-test-value" + customSAActivityID := "custom-sa-activity-id" + + _, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: customSAActivityID, + ActivityType: &commonpb.ActivityType{Name: "custom-sa-activity-type"}, + Identity: s.tv.WorkerIdentity(), + Input: defaultInput, + TaskQueue: &taskqueuepb.TaskQueue{Name: s.tv.TaskQueue().GetName()}, + StartToCloseTimeout: durationpb.New(1 * time.Minute), + RequestId: s.tv.RequestID(), + SearchAttributes: &commonpb.SearchAttributes{ + IndexedFields: map[string]*commonpb.Payload{ + customSAName: payload.EncodeString(customSAValue), + }, + }, + }) + require.NoError(t, err) + + var resp *workflowservice.ListActivityExecutionsResponse + s.Eventually( + func() bool { + var err error + resp, err = s.FrontendClient().ListActivityExecutions(ctx, &workflowservice.ListActivityExecutionsRequest{ + Namespace: s.Namespace().String(), + PageSize: 10, + Query: fmt.Sprintf("%s = '%s'", customSAName, customSAValue), + }) + return err == nil && len(resp.GetExecutions()) >= 1 + }, + testcore.WaitForESToSettle, + 100*time.Millisecond, + ) + require.Len(t, resp.GetExecutions(), 1) + exec := 
resp.GetExecutions()[0] + s.Equal(customSAActivityID, exec.GetActivityId()) + s.NotNil(exec.GetSearchAttributes()) + returnedSA := exec.GetSearchAttributes().GetIndexedFields()[customSAName] + s.NotNil(returnedSA) + var returnedValue string + s.NoError(payload.Decode(returnedSA, &returnedValue)) + s.Equal(customSAValue, returnedValue) + }) + + t.Run("InvalidQuery", func(t *testing.T) { + _, err := s.FrontendClient().ListActivityExecutions(ctx, &workflowservice.ListActivityExecutionsRequest{ + Namespace: s.Namespace().String(), + PageSize: 10, + Query: "invalid query syntax !!!", + }) + s.ErrorAs(err, new(*serviceerror.InvalidArgument)) + }) + + t.Run("InvalidSearchAttribute", func(t *testing.T) { + _, err := s.FrontendClient().ListActivityExecutions(ctx, &workflowservice.ListActivityExecutionsRequest{ + Namespace: s.Namespace().String(), + PageSize: 10, + Query: "NonExistentField = 'value'", + }) + s.ErrorAs(err, new(*serviceerror.InvalidArgument)) + }) + + t.Run("NamespaceNotFound", func(t *testing.T) { + _, err := s.FrontendClient().ListActivityExecutions(ctx, &workflowservice.ListActivityExecutionsRequest{ + Namespace: "non-existent-namespace", + PageSize: 10, + Query: "", + }) + s.ErrorAs(err, new(*serviceerror.NamespaceNotFound)) + }) +} + +func (s *standaloneActivityTestSuite) TestCountActivityExecutions() { + t := s.T() + ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second) + defer cancel() + + activityID := s.tv.ActivityID() + activityType := s.tv.ActivityType().GetName() + s.startAndValidateActivity(ctx, t, activityID, s.tv.TaskQueue().GetName()) + + verifyCountQuery := func(t *testing.T, query string, expectedCount int) { + t.Helper() + s.Eventually( + func() bool { + resp, err := s.FrontendClient().CountActivityExecutions(ctx, &workflowservice.CountActivityExecutionsRequest{ + Namespace: s.Namespace().String(), + Query: query, + }) + return err == nil && resp.GetCount() == int64(expectedCount) + }, + testcore.WaitForESToSettle, + 
100*time.Millisecond, + ) + } + + t.Run("CountByActivityId", func(t *testing.T) { + verifyCountQuery(t, fmt.Sprintf("ActivityId = '%s'", activityID), 1) + }) + + t.Run("CountByActivityType", func(t *testing.T) { + verifyCountQuery(t, fmt.Sprintf("ActivityType = '%s'", activityType), 1) + }) + + t.Run("CountByActivityStatus", func(t *testing.T) { + verifyCountQuery(t, fmt.Sprintf("ActivityStatus = 'Running' AND ActivityType = '%s'", activityType), 1) + }) + + t.Run("CountByTaskQueue", func(t *testing.T) { + verifyCountQuery(t, fmt.Sprintf("ActivityTaskQueue = '%s' AND ActivityType = '%s'", s.tv.TaskQueue().GetName(), activityType), 1) + }) + + t.Run("GroupByActivityStatus", func(t *testing.T) { + groupByType := &commonpb.ActivityType{Name: "count-groupby-test-type"} + taskQueue := s.tv.TaskQueue().GetName() + + for i := range 3 { + id := fmt.Sprintf("%s-%d", groupByType.Name, i) + resp, err := s.startActivityWithType(ctx, id, taskQueue, groupByType) + require.NoError(t, err) + require.NotEmpty(t, resp.GetRunId()) + } + + query := fmt.Sprintf("ActivityType = '%s' GROUP BY ActivityStatus", groupByType.Name) + var resp *workflowservice.CountActivityExecutionsResponse + s.Eventually( + func() bool { + var err error + resp, err = s.FrontendClient().CountActivityExecutions(ctx, &workflowservice.CountActivityExecutionsRequest{ + Namespace: s.Namespace().String(), + Query: query, + }) + return err == nil && resp.GetCount() == 3 + }, + testcore.WaitForESToSettle, + 100*time.Millisecond, + ) + + require.Len(t, resp.GetGroups(), 1) + s.Equal(int64(3), resp.GetGroups()[0].GetCount()) + var groupValue string + require.NoError(t, payload.Decode(resp.GetGroups()[0].GetGroupValues()[0], &groupValue)) + s.Equal("Running", groupValue) + }) + + t.Run("CountByCustomSearchAttribute", func(t *testing.T) { + customSAName := "ActivityCountCustomKeyword" + customSAValue := "count-custom-sa-value" + + _, err := s.OperatorClient().AddSearchAttributes(ctx, 
&operatorservice.AddSearchAttributesRequest{ + Namespace: s.Namespace().String(), + SearchAttributes: map[string]enumspb.IndexedValueType{ + customSAName: enumspb.INDEXED_VALUE_TYPE_KEYWORD, + }, + }) + require.NoError(t, err) + + s.Eventually(func() bool { + descResp, err := s.OperatorClient().ListSearchAttributes(ctx, &operatorservice.ListSearchAttributesRequest{ + Namespace: s.Namespace().String(), + }) + if err != nil { + return false + } + _, ok := descResp.CustomAttributes[customSAName] + return ok + }, 10*time.Second, 100*time.Millisecond) + + for i := range 2 { + _, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: fmt.Sprintf("count-custom-sa-%d", i), + ActivityType: &commonpb.ActivityType{Name: "count-custom-sa-type"}, + Identity: s.tv.WorkerIdentity(), + Input: defaultInput, + TaskQueue: &taskqueuepb.TaskQueue{Name: s.tv.TaskQueue().GetName()}, + StartToCloseTimeout: durationpb.New(1 * time.Minute), + RequestId: s.tv.RequestID(), + SearchAttributes: &commonpb.SearchAttributes{ + IndexedFields: map[string]*commonpb.Payload{ + customSAName: payload.EncodeString(customSAValue), + }, + }, + }) + require.NoError(t, err) + } + + s.Eventually( + func() bool { + resp, err := s.FrontendClient().CountActivityExecutions(ctx, &workflowservice.CountActivityExecutionsRequest{ + Namespace: s.Namespace().String(), + Query: fmt.Sprintf("%s = '%s'", customSAName, customSAValue), + }) + return err == nil && resp.GetCount() == 2 + }, + testcore.WaitForESToSettle, + 100*time.Millisecond, + ) + }) + + t.Run("GroupByUnsupportedField", func(t *testing.T) { + _, err := s.FrontendClient().CountActivityExecutions(ctx, &workflowservice.CountActivityExecutionsRequest{ + Namespace: s.Namespace().String(), + Query: "GROUP BY ActivityType", + }) + s.ErrorAs(err, new(*serviceerror.InvalidArgument)) + s.Contains(err.Error(), "'GROUP BY' clause is only supported for ExecutionStatus") + 
}) + + t.Run("InvalidQuery", func(t *testing.T) { + _, err := s.FrontendClient().CountActivityExecutions(ctx, &workflowservice.CountActivityExecutionsRequest{ + Namespace: s.Namespace().String(), + Query: "invalid query syntax !!!", + }) + s.ErrorAs(err, new(*serviceerror.InvalidArgument)) + }) + + t.Run("InvalidSearchAttribute", func(t *testing.T) { + _, err := s.FrontendClient().CountActivityExecutions(ctx, &workflowservice.CountActivityExecutionsRequest{ + Namespace: s.Namespace().String(), + Query: "NonExistentField = 'value'", + }) + s.ErrorAs(err, new(*serviceerror.InvalidArgument)) + }) + + t.Run("NamespaceNotFound", func(t *testing.T) { + _, err := s.FrontendClient().CountActivityExecutions(ctx, &workflowservice.CountActivityExecutionsRequest{ + Namespace: "non-existent-namespace", + Query: "", + }) + s.ErrorAs(err, new(*serviceerror.NamespaceNotFound)) + }) +} + +func (s *standaloneActivityTestSuite) TestDescribeActivityExecution_DeadlineExceeded() { + t := s.T() + ctx := testcore.NewContext() + + // Start an activity and get initial long-poll state token + activityID := s.tv.ActivityID() + taskQueue := s.tv.TaskQueue() + startResp, err := s.startActivity(ctx, activityID, taskQueue.Name) + require.NoError(t, err) + describeResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + }) + require.NoError(t, err) + + // The DescribeActivityExecution calls below use a long-poll token and will necessarily time out, + // because the activity undergoes no further state transitions. + + // The timeout imposed by the server is essentially + // Min(CallerTimeout - LongPollBuffer, LongPollTimeout) + + // Case 1: Caller sets a deadline which has room for the buffer. History returns empty success + // result with at least buffer remaining before the caller deadline. 
+ t.Run("CallerDeadlineNotExceeded", func(t *testing.T) { + // CallerTimeout - LongPollBuffer is far in the future + s.OverrideDynamicConfig(activity.LongPollBuffer, 1*time.Second) + ctx, cancel := context.WithTimeout(ctx, 9999*time.Millisecond) + defer cancel() + + // DescribeActivityExecution will return when this long poll timeout expires. + s.OverrideDynamicConfig(activity.LongPollTimeout, 10*time.Millisecond) + + describeResp, err = s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + LongPollToken: describeResp.LongPollToken, + }) + // The server uses an empty non-error response to indicate to the caller that it should resubmit + // its long-poll. + require.NoError(t, err) + require.Empty(t, describeResp.GetInfo()) + }) + + // Case 2: caller does not set a deadline. In practice this is equivalent to them setting a 30s + // deadline since that is what History receives. In this case History times out the wait at + // LongPollTimeout and the caller gets an empty response. + t.Run("NoCallerDeadline", func(t *testing.T) { + // The caller sets no deadline. However, the ctx received by the history service handler + // will have a 30s deadline that was applied by one of the upstream server layers, so we + // still must use a buffer < 30s. + ctx := context.Background() + s.OverrideDynamicConfig(activity.LongPollBuffer, 29*time.Second) + // DescribeActivityExecution will return when this long poll timeout expires. 
+ s.OverrideDynamicConfig(activity.LongPollTimeout, 10*time.Millisecond) + + describeResp, err = s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + LongPollToken: describeResp.LongPollToken, + }) + require.NoError(t, err) + require.Empty(t, describeResp.GetInfo()) + }) + + // Case 3: caller sets a deadline that is < the buffer. In this case DescribeActivityExecution + // will return an empty result immediately, and there is a race between caller receiving that + // and caller's client timing out the request. Therefore we do not test this. +} + +func (s *standaloneActivityTestSuite) TestDescribeActivityExecution_NotFound() { + t := s.T() + ctx := testcore.NewContext() + + existingActivityID := s.tv.ActivityID() + tq := s.tv.TaskQueue() + startResp, err := s.startActivity(ctx, existingActivityID, tq.Name) + require.NoError(t, err) + existingRunID := startResp.RunId + require.NotEmpty(t, existingRunID) + existingNamespace := s.Namespace().String() + + var notFoundErr *serviceerror.NotFound + var namespaceNotFoundErr *serviceerror.NamespaceNotFound + + testCases := []struct { + name string + request *workflowservice.DescribeActivityExecutionRequest + expectedErr error + expectedErrMsg string + }{ + { + name: "NonExistentNamespace", + request: &workflowservice.DescribeActivityExecutionRequest{ + Namespace: "non-existent-namespace", + ActivityId: existingActivityID, + RunId: existingRunID, + }, + expectedErr: namespaceNotFoundErr, + expectedErrMsg: "Namespace non-existent-namespace is not found.", + }, + { + name: "NonExistentActivityID", + request: &workflowservice.DescribeActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: "non-existent-activity", + RunId: existingRunID, + }, + expectedErr: notFoundErr, + expectedErrMsg: "activity execution not found", + }, + { + name: "NonExistentRunID", + request: 
&workflowservice.DescribeActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: existingActivityID, + RunId: "11111111-2222-3333-4444-555555555555", + }, + expectedErr: notFoundErr, + expectedErrMsg: "activity execution not found", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := s.FrontendClient().DescribeActivityExecution(ctx, tc.request) + require.ErrorAs(t, err, &tc.expectedErr) //nolint:testifylint + require.Equal(t, tc.expectedErrMsg, tc.expectedErr.Error()) + }) + } + + t.Run("LongPollNonExistentActivity", func(t *testing.T) { + // Poll to get a token + validPollResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: existingActivityID, + RunId: existingRunID, + }) + require.NoError(t, err) + + // Use the token with a non-existent activity + _, err = s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: "non-existent-activity", + RunId: existingRunID, + LongPollToken: validPollResp.LongPollToken, + }) + var notFoundErr *serviceerror.NotFound + require.ErrorAs(t, err, &notFoundErr) + require.Equal(t, "activity execution not found", notFoundErr.Message) + }) +} + +func (s *standaloneActivityTestSuite) TestDescribeActivityExecution_InvalidArgument() { + + t := s.T() + ctx := testcore.NewContext() + + existingActivityID := s.tv.ActivityID() + tq := s.tv.TaskQueue() + startResp, err := s.startActivity(ctx, existingActivityID, tq.Name) + require.NoError(t, err) + existingRunID := startResp.RunId + require.NotEmpty(t, existingRunID) + existingNamespace := s.Namespace().String() + + validActivityID := "activity-id" + validRunID := "11111111-2222-3333-4444-555555555555" + + testCases := []struct { + name string + request *workflowservice.DescribeActivityExecutionRequest + expectedErr string + }{ + { + name: 
"EmptyNamespace", + request: &workflowservice.DescribeActivityExecutionRequest{ + Namespace: "", + ActivityId: validActivityID, + RunId: validRunID, + }, + expectedErr: "Namespace is empty", + }, + { + name: "EmptyActivityID", + request: &workflowservice.DescribeActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: "", + RunId: validRunID, + }, + expectedErr: "activity ID is required", + }, + { + name: "ActivityIDTooLong", + request: &workflowservice.DescribeActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: string(make([]byte, 2000)), + RunId: validRunID, + }, + expectedErr: "activity ID exceeds length limit", + }, + { + name: "InvalidRunID", + request: &workflowservice.DescribeActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: validActivityID, + RunId: "invalid-uuid", + }, + expectedErr: "invalid run id", + }, + { + name: "RunIdNotRequiredWhenWaitPolicyAbsent", + request: &workflowservice.DescribeActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: existingActivityID, + RunId: "", + }, + expectedErr: "", + }, + { + name: "RunIdNotRequiredWhenLongPollTokenAbsent", + request: &workflowservice.DescribeActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: existingActivityID, + RunId: "", + }, + expectedErr: "", + }, + { + name: "RunIdRequiredWhenLongPollTokenPresent", + request: &workflowservice.DescribeActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: validActivityID, + RunId: "", + LongPollToken: []byte("doesn't-matter"), + }, + expectedErr: "run id is required", + }, + { + name: "MalformedLongPollToken", + request: &workflowservice.DescribeActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: existingActivityID, + RunId: existingRunID, + LongPollToken: []byte("invalid-token"), + }, + expectedErr: "invalid long poll token", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := 
s.FrontendClient().DescribeActivityExecution(ctx, tc.request) + if tc.expectedErr == "" { + require.NoError(t, err) + return + } + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, invalidArgErr.Message, tc.expectedErr) + }) + } + + t.Run("LongPollTokenFromWrongExecution", func(t *testing.T) { + validPollResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: existingActivityID, + RunId: existingRunID, + }) + require.NoError(t, err) + require.NotEmpty(t, validPollResp.LongPollToken) + + activityID2 := s.tv.Any().String() + startResp2, err := s.startActivity(ctx, activityID2, tq.Name) + require.NoError(t, err) + require.NotEmpty(t, startResp2.GetRunId()) + + _, err = s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: existingNamespace, + ActivityId: activityID2, + RunId: startResp2.GetRunId(), + LongPollToken: validPollResp.LongPollToken, + }) + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Equal(t, "long poll token does not match execution", invalidArgErr.Message) + }) + + // TODO(dan): add test for long poll token from non-existent execution +} + +func (s *standaloneActivityTestSuite) TestHeartbeat() { + t := s.T() + ctx := testcore.NewContext() + heartbeatDetails := payloads.EncodeString("Heartbeat Details") + + t.Run("InvalidArgument", func(t *testing.T) { + testCases := []struct { + name string + taskToken []byte + expectedErr string + }{ + { + name: "EmptyTaskToken", + taskToken: nil, + expectedErr: "Task token not set on request", + }, + { + name: "MalformedTaskToken", + taskToken: []byte("invalid-token-data"), + expectedErr: "Error deserializing task token", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := 
s.FrontendClient().RecordActivityTaskHeartbeat(ctx, &workflowservice.RecordActivityTaskHeartbeatRequest{ + Namespace: s.Namespace().String(), + TaskToken: tc.taskToken, + Details: heartbeatDetails, + }) + require.Error(t, err) + statusErr := serviceerror.ToStatus(err) + require.NotNil(t, statusErr) + require.Equal(t, codes.InvalidArgument, statusErr.Code()) + require.Contains(t, statusErr.Message(), tc.expectedErr) + }) + } + }) + + t.Run("StaleToken", func(t *testing.T) { + // Start an activity and get a valid task token, then complete it + activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + _, err := s.startActivity(ctx, activityID, taskQueue) + require.NoError(t, err) + + pollResp, err := s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + }) + require.NoError(t, err) + + _, err = s.FrontendClient().RespondActivityTaskCompleted(ctx, &workflowservice.RespondActivityTaskCompletedRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollResp.TaskToken, + Result: defaultResult, + }) + require.NoError(t, err) + + // Heartbeat with stale token (activity already completed) + _, err = s.FrontendClient().RecordActivityTaskHeartbeat(ctx, &workflowservice.RecordActivityTaskHeartbeatRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollResp.TaskToken, + Details: heartbeatDetails, + }) + require.Error(t, err) + statusErr := serviceerror.ToStatus(err) + require.Equal(t, codes.NotFound, statusErr.Code()) + require.Contains(t, statusErr.Message(), "activity task not found") + }) + + t.Run("StaleAttemptToken", func(t *testing.T) { + // Start an activity with retries, fail first attempt, then try to heartbeat with old token. + // Use NextRetryDelay=1s to ensure the retry dispatch happens within test timeout. 
+ activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + + _, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: s.tv.ActivityType(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + ScheduleToCloseTimeout: durationpb.New(1 * time.Minute), + RetryPolicy: &commonpb.RetryPolicy{ + MaximumAttempts: 3, + }, + }) + require.NoError(t, err) + + // Poll and get task token for attempt 1 + attempt1Resp, err := s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + }) + require.NoError(t, err) + require.EqualValues(t, 1, attempt1Resp.Attempt) + + // Fail the task with NextRetryDelay to control retry timing + _, err = s.FrontendClient().RespondActivityTaskFailed(ctx, &workflowservice.RespondActivityTaskFailedRequest{ + Namespace: s.Namespace().String(), + TaskToken: attempt1Resp.TaskToken, + Failure: &failurepb.Failure{ + Message: "retryable failure", + FailureInfo: &failurepb.Failure_ApplicationFailureInfo{ApplicationFailureInfo: &failurepb.ApplicationFailureInfo{ + NonRetryable: false, + NextRetryDelay: durationpb.New(1 * time.Second), + }}, + }, + }) + require.NoError(t, err) + + // Poll to get attempt 2 (ensures retry has happened) + attempt2Resp, err := s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + }) + require.NoError(t, err) + require.EqualValues(t, 2, attempt2Resp.Attempt) + + // Heartbeat with the attempt 2 token + _, err = s.FrontendClient().RecordActivityTaskHeartbeat(ctx, &workflowservice.RecordActivityTaskHeartbeatRequest{ + Namespace: 
s.Namespace().String(), + TaskToken: attempt2Resp.TaskToken, + Details: heartbeatDetails, + }) + require.NoError(t, err) + + // Try to heartbeat with the old attempt 1 token - should fail with NotFound + _, err = s.FrontendClient().RecordActivityTaskHeartbeat(ctx, &workflowservice.RecordActivityTaskHeartbeatRequest{ + Namespace: s.Namespace().String(), + TaskToken: attempt1Resp.TaskToken, + Details: heartbeatDetails, + }) + require.Error(t, err) + statusErr := serviceerror.ToStatus(err) + require.Equal(t, codes.NotFound, statusErr.Code()) + require.Contains(t, statusErr.Message(), "activity task not found") + }) + + t.Run("ResponseIncludesCancelRequested", func(t *testing.T) { + // Start activity, worker accepts task, request cancellation, worker heartbeats. + // Verify: heartbeat response has cancel_requested=true. + activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + + startResp, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: s.tv.ActivityType(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + StartToCloseTimeout: durationpb.New(1 * time.Minute), + }) + require.NoError(t, err) + runID := startResp.RunId + + // Worker accepts task + pollResp, err := s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + }) + require.NoError(t, err) + + // Heartbeat before cancellation - cancel_requested should be false + hbResp, err := s.FrontendClient().RecordActivityTaskHeartbeat(ctx, &workflowservice.RecordActivityTaskHeartbeatRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollResp.TaskToken, + Details: heartbeatDetails, + }) + require.NoError(t, err) + require.False(t, hbResp.CancelRequested) + + // Request 
cancellation + _, err = s.FrontendClient().RequestCancelActivityExecution(ctx, &workflowservice.RequestCancelActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + RequestId: s.tv.RequestID(), + Reason: "test cancellation", + }) + require.NoError(t, err) + + // Heartbeat after cancellation - cancel_requested should be true + hbResp, err = s.FrontendClient().RecordActivityTaskHeartbeat(ctx, &workflowservice.RecordActivityTaskHeartbeatRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollResp.TaskToken, + Details: heartbeatDetails, + }) + require.NoError(t, err) + require.True(t, hbResp.CancelRequested) + }) + + t.Run("HeartbeatDetailsAvailableOnRetry", func(t *testing.T) { + // Start activity (with retries), worker accepts, heartbeats with details, + // worker fails the task. Worker accepts retry attempt. + // Verify: retry task contains previous heartbeat details. + activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + + _, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: s.tv.ActivityType(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + StartToCloseTimeout: durationpb.New(1 * time.Minute), + ScheduleToCloseTimeout: durationpb.New(5 * time.Minute), + RetryPolicy: &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Millisecond), + MaximumAttempts: 3, + }, + }) + require.NoError(t, err) + + // First attempt: worker accepts task + pollResp1, err := s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + }) + require.NoError(t, err) + require.EqualValues(t, 1, pollResp1.Attempt) + require.Nil(t, pollResp1.HeartbeatDetails) // No heartbeat details on 
first attempt + + // Worker heartbeats with details + _, err = s.FrontendClient().RecordActivityTaskHeartbeat(ctx, &workflowservice.RecordActivityTaskHeartbeatRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollResp1.TaskToken, + Details: heartbeatDetails, + }) + require.NoError(t, err) + + // Worker fails with retryable error + _, err = s.FrontendClient().RespondActivityTaskFailed(ctx, &workflowservice.RespondActivityTaskFailedRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollResp1.TaskToken, + Failure: &failurepb.Failure{ + Message: "retryable failure", + FailureInfo: &failurepb.Failure_ApplicationFailureInfo{ + ApplicationFailureInfo: &failurepb.ApplicationFailureInfo{ + NonRetryable: false, + }, + }, + }, + }) + require.NoError(t, err) + + // Second attempt: worker accepts retry task + pollResp2, err := s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + }) + require.NoError(t, err) + require.EqualValues(t, 2, pollResp2.Attempt) + + // Verify: heartbeat details from first attempt are available + protorequire.ProtoEqual(t, heartbeatDetails, pollResp2.HeartbeatDetails) + }) + + t.Run("ActivityTimesOutWithoutHeartbeat", func(t *testing.T) { + // Start activity (no retries), worker accepts task, time passes beyond + // heartbeat timeout, worker never heartbeats. + // Verify: activity status is TIMED_OUT. 
+ activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + + startResp, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: s.tv.ActivityType(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + StartToCloseTimeout: durationpb.New(1 * time.Minute), + HeartbeatTimeout: durationpb.New(1 * time.Second), + RetryPolicy: &commonpb.RetryPolicy{ + MaximumAttempts: 1, // No retries + }, + }) + require.NoError(t, err) + + // Worker accepts task (starts the activity) + pollTaskResp, err := s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + }) + require.NoError(t, err) + require.NotEmpty(t, pollTaskResp.TaskToken) + + // Long poll for completion (heartbeat timeout will fire) + pollResp, err := s.FrontendClient().PollActivityExecution(ctx, &workflowservice.PollActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + }) + require.NoError(t, err) + require.Equal(t, enumspb.TIMEOUT_TYPE_HEARTBEAT, pollResp.GetOutcome().GetFailure().GetTimeoutFailureInfo().GetTimeoutType(), + "expected timeout type=Heartbeat but is %s", pollResp.GetOutcome().GetFailure().GetTimeoutFailureInfo().GetTimeoutType()) + }) + + t.Run("ActivityRetriesOnHeartbeatTimeout", func(t *testing.T) { + // Start activity (with retries), worker accepts task, time passes beyond + // heartbeat timeout, worker never heartbeats. + // Verify: activity returns to SCHEDULED (or new task available). 
+ activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + + _, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: s.tv.ActivityType(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + StartToCloseTimeout: durationpb.New(1 * time.Minute), + ScheduleToCloseTimeout: durationpb.New(5 * time.Minute), + HeartbeatTimeout: durationpb.New(1 * time.Second), + RetryPolicy: &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Millisecond), + MaximumAttempts: 2, + }, + }) + require.NoError(t, err) + + // Attempt 1: worker accepts task + pollTaskResp1, err := s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + }) + require.NoError(t, err) + require.EqualValues(t, 1, pollTaskResp1.Attempt) + + // Don't heartbeat - let it timeout and retry + // Second attempt: worker accepts retry task + pollTaskResp2, err := s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + }) + require.NoError(t, err) + require.EqualValues(t, 2, pollTaskResp2.Attempt) + }) + + t.Run("HeartbeatKeepsActivityAlive", func(t *testing.T) { + // Start activity, worker accepts, worker heartbeats within timeout, + // more time passes, worker heartbeats again, worker completes. + // Verify: activity status is COMPLETED. 
+ activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + + startResp, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: s.tv.ActivityType(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + StartToCloseTimeout: durationpb.New(1 * time.Minute), + HeartbeatTimeout: durationpb.New(1 * time.Second), + RetryPolicy: &commonpb.RetryPolicy{ + MaximumAttempts: 1, // No retries - timeout would be terminal + }, + }) + require.NoError(t, err) + + // Worker accepts task + pollTaskResp, err := s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + }) + require.NoError(t, err) + require.NotEmpty(t, pollTaskResp.TaskToken) + + // Heartbeat before timeout + time.Sleep(600 * time.Millisecond) //nolint:forbidigo + _, err = s.FrontendClient().RecordActivityTaskHeartbeat(ctx, &workflowservice.RecordActivityTaskHeartbeatRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + Details: heartbeatDetails, + }) + require.NoError(t, err) + + // Wait again, then heartbeat again + time.Sleep(600 * time.Millisecond) //nolint:forbidigo + _, err = s.FrontendClient().RecordActivityTaskHeartbeat(ctx, &workflowservice.RecordActivityTaskHeartbeatRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + Details: heartbeatDetails, + }) + require.NoError(t, err) + + // Complete the activity + _, err = s.FrontendClient().RespondActivityTaskCompleted(ctx, &workflowservice.RespondActivityTaskCompletedRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + Result: defaultResult, + }) + require.NoError(t, err) + + // Verify activity completed successfully (didn't timeout) + pollResp, 
err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + IncludeOutcome: true, + }) + require.NoError(t, err) + require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_COMPLETED, pollResp.GetInfo().GetStatus(), + "expected status=Completed but is %s", pollResp.GetInfo().GetStatus()) + protorequire.ProtoEqual(t, defaultResult, pollResp.GetOutcome().GetResult()) + }) + + t.Run("RecordHeartbeatByIDStaysAlive", func(t *testing.T) { + // Start activity, worker accepts, worker heartbeats within timeout, + // more time passes, worker heartbeats again, worker completes. + // Verify: activity status is COMPLETED. + activityID := testcore.RandomizeStr(t.Name()) + taskQueue := testcore.RandomizeStr(t.Name()) + + startResp, err := s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: s.tv.ActivityType(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue}, + StartToCloseTimeout: durationpb.New(1 * time.Minute), + HeartbeatTimeout: durationpb.New(1 * time.Second), + RetryPolicy: &commonpb.RetryPolicy{ + MaximumAttempts: 1, // No retries - timeout would be terminal + }, + }) + require.NoError(t, err) + + // Worker accepts task + pollTaskResp, err := s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{Name: taskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, + }) + require.NoError(t, err) + require.NotEmpty(t, pollTaskResp.TaskToken) + + // Heartbeat before timeout + _, err = s.FrontendClient().RecordActivityTaskHeartbeatById(ctx, &workflowservice.RecordActivityTaskHeartbeatByIdRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + Details: heartbeatDetails, + }) + 
require.NoError(t, err) + + // Complete the activity + _, err = s.FrontendClient().RespondActivityTaskCompleted(ctx, &workflowservice.RespondActivityTaskCompletedRequest{ + Namespace: s.Namespace().String(), + TaskToken: pollTaskResp.TaskToken, + Result: defaultResult, + }) + require.NoError(t, err) + + // Verify activity completed successfully (didn't timeout) + pollResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResp.RunId, + IncludeOutcome: true, + }) + require.NoError(t, err) + require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_COMPLETED, pollResp.GetInfo().GetStatus(), + "expected status=Completed but is %s", pollResp.GetInfo().GetStatus()) + protorequire.ProtoEqual(t, defaultResult, pollResp.GetOutcome().GetResult()) + }) +} + +func (s *standaloneActivityTestSuite) pollActivityTaskQueue(ctx context.Context, taskQueue string) (*workflowservice.PollActivityTaskQueueResponse, error) { + return s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + Identity: defaultIdentity, + }) +} + +func (s *standaloneActivityTestSuite) startAndValidateActivity( + ctx context.Context, + t *testing.T, + activityID string, + taskQueue string, +) *workflowservice.StartActivityExecutionResponse { + startResponse, err := s.startActivity(ctx, activityID, taskQueue) + + require.NoError(t, err) + require.NotNil(t, startResponse.GetRunId()) + require.True(t, startResponse.Started) + + activityResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: startResponse.RunId, + IncludeInput: true, + IncludeOutcome: true, + }) + + info := 
activityResp.GetInfo() + + require.NoError(t, err) + s.validateBaseActivityResponse(t, activityID, startResponse.RunId, activityResp) + require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_RUNNING, info.GetStatus(), + "expected Running but is %s", info.GetStatus()) + require.Equal(t, enumspb.PENDING_ACTIVITY_STATE_SCHEDULED, info.GetRunState(), + "expected Scheduled but is %s", info.GetRunState()) + require.EqualValues(t, 1, info.GetAttempt()) + require.Nil(t, activityResp.Outcome) + require.Nil(t, info.GetLastFailure()) + require.Nil(t, info.GetHeartbeatDetails()) + require.Nil(t, info.GetLastHeartbeatTime()) + + return startResponse +} + +func (s *standaloneActivityTestSuite) pollActivityTaskAndValidate( + ctx context.Context, + t *testing.T, + activityID string, + taskQueue string, + runID string, +) *workflowservice.PollActivityTaskQueueResponse { + pollTaskResp, err := s.FrontendClient().PollActivityTaskQueue(ctx, &workflowservice.PollActivityTaskQueueRequest{ + Namespace: s.Namespace().String(), + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue, + Kind: enumspb.TASK_QUEUE_KIND_NORMAL, + }, + Identity: s.tv.WorkerIdentity(), + }) + require.NoError(t, err) + require.Equal(t, activityID, pollTaskResp.GetActivityId()) + protorequire.ProtoEqual(t, s.tv.ActivityType(), pollTaskResp.GetActivityType()) + require.EqualValues(t, 1, pollTaskResp.Attempt) + + activityResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + IncludeInput: true, + IncludeOutcome: true, + }) + + info := activityResp.GetInfo() + + require.NoError(t, err) + s.validateBaseActivityResponse(t, activityID, runID, activityResp) + require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_RUNNING, info.GetStatus(), + "expected Running but is %s", info.GetStatus()) + require.Equal(t, enumspb.PENDING_ACTIVITY_STATE_STARTED, info.GetRunState(), + "expected Started but is 
%s", info.GetRunState()) + require.EqualValues(t, 1, info.GetAttempt()) + require.Equal(t, s.tv.WorkerIdentity(), info.GetLastWorkerIdentity()) + require.NotNil(t, info.GetLastStartedTime()) + require.Nil(t, activityResp.Outcome) + require.Nil(t, info.GetLastFailure()) + require.Nil(t, info.GetHeartbeatDetails()) + require.Nil(t, info.GetLastHeartbeatTime()) + + return pollTaskResp +} + +func (s *standaloneActivityTestSuite) validateCompletion( + ctx context.Context, + t *testing.T, + activityID string, + runID string, + workerIdentity string, +) { + activityResp, err := s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + IncludeInput: true, + IncludeOutcome: true, + }) + + info := activityResp.GetInfo() + + require.NoError(t, err) + s.validateBaseActivityResponse(t, activityID, runID, activityResp) + require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_COMPLETED, info.GetStatus(), + "expected Completed but is %s", info.GetStatus()) + require.Equal(t, enumspb.PENDING_ACTIVITY_STATE_UNSPECIFIED, info.GetRunState(), + "expected Unspecified but is %s", info.GetRunState()) + require.EqualValues(t, 1, info.GetAttempt()) + require.Greater(t, info.GetExecutionDuration().AsDuration(), time.Duration(0)) + require.False(t, info.GetCloseTime().AsTime().IsZero()) + require.Equal(t, workerIdentity, info.GetLastWorkerIdentity()) + require.NotNil(t, info.GetLastStartedTime()) + require.Nil(t, info.GetLastFailure()) + require.Nil(t, info.GetHeartbeatDetails()) + require.Nil(t, info.GetLastHeartbeatTime()) + protorequire.ProtoEqual(t, defaultResult, activityResp.GetOutcome().GetResult()) +} + +func (s *standaloneActivityTestSuite) validateFailure( + ctx context.Context, + t *testing.T, + activityID string, + runID string, + expectedHeartbeatDetails *commonpb.Payloads, + workerIdentity string, +) { + activityResp, err := 
s.FrontendClient().DescribeActivityExecution(ctx, &workflowservice.DescribeActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + RunId: runID, + IncludeInput: true, + IncludeOutcome: true, + }) + + info := activityResp.GetInfo() + + require.NoError(t, err) + s.validateBaseActivityResponse(t, activityID, runID, activityResp) + require.Equal(t, enumspb.ACTIVITY_EXECUTION_STATUS_FAILED, info.GetStatus(), + "expected Failed but is %s", info.GetStatus()) + require.Equal(t, enumspb.PENDING_ACTIVITY_STATE_UNSPECIFIED, info.GetRunState(), + "expected Unspecified but is %s", info.GetRunState()) + require.EqualValues(t, 1, info.GetAttempt()) + require.Greater(t, info.GetExecutionDuration().AsDuration(), time.Duration(0)) + require.False(t, info.GetCloseTime().AsTime().IsZero()) + require.Equal(t, workerIdentity, info.GetLastWorkerIdentity()) + require.NotNil(t, info.GetLastStartedTime()) + protorequire.ProtoEqual(t, defaultFailure, info.GetLastFailure()) + protorequire.ProtoEqual(t, defaultFailure, activityResp.GetOutcome().GetFailure()) + + if expectedHeartbeatDetails != nil { + protorequire.ProtoEqual(t, expectedHeartbeatDetails, info.GetHeartbeatDetails()) + } +} + +func (s *standaloneActivityTestSuite) validateBaseActivityResponse( + t *testing.T, + activityID string, + expectedRunID string, + response *workflowservice.DescribeActivityExecutionResponse, +) { + require.NotNil(t, response.LongPollToken) + require.Equal(t, activityID, response.GetInfo().GetActivityId()) + require.Equal(t, s.tv.ActivityType(), response.GetInfo().GetActivityType()) + require.Equal(t, expectedRunID, response.RunId) + require.NotNil(t, response.GetInfo().GetScheduleTime()) + protorequire.ProtoEqual(t, defaultInput, response.GetInput()) +} + +func (s *standaloneActivityTestSuite) startActivity(ctx context.Context, activityID string, taskQueue string) (*workflowservice.StartActivityExecutionResponse, error) { + return s.startActivityWithType(ctx, activityID, 
taskQueue, s.tv.ActivityType()) +} + +func (s *standaloneActivityTestSuite) startActivityWithType(ctx context.Context, activityID string, taskQueue string, activityType *commonpb.ActivityType) (*workflowservice.StartActivityExecutionResponse, error) { + return s.FrontendClient().StartActivityExecution(ctx, &workflowservice.StartActivityExecutionRequest{ + Namespace: s.Namespace().String(), + ActivityId: activityID, + ActivityType: activityType, + Identity: s.tv.WorkerIdentity(), + Input: defaultInput, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: taskQueue, + }, + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + RequestId: s.tv.RequestID(), + }) +}