diff --git a/README.md b/README.md index 0dbec6d2..c507ad7a 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ To build shared library for development use: Note, this is not `compile:dev` because debug-mode in Rust has [an issue](https://github.com/rust-lang/rust/issues/34283) that causes runtime stack size problems. -To build and test release: +To lint, build, and test release: bundle exec rake diff --git a/temporalio/.rubocop.yml b/temporalio/.rubocop.yml index f15ee0a5..3003a1e5 100644 --- a/temporalio/.rubocop.yml +++ b/temporalio/.rubocop.yml @@ -26,7 +26,7 @@ Layout/ClassStructure: # The default is too small and triggers simply setting lots of values on a proto Metrics/AbcSize: - Max: 75 + Max: 200 # The default is too small Metrics/BlockLength: @@ -44,6 +44,10 @@ Metrics/CyclomaticComplexity: Metrics/MethodLength: Max: 100 +# The default is too small +Metrics/ModuleLength: + Max: 1000 + # The default is too small Metrics/PerceivedComplexity: Max: 25 diff --git a/temporalio/Rakefile b/temporalio/Rakefile index c3fc7f3c..0d0f8ecb 100644 --- a/temporalio/Rakefile +++ b/temporalio/Rakefile @@ -33,7 +33,18 @@ Steep::RakeTask.new require 'yard' -YARD::Rake::YardocTask.new +module CustomizeYardWarnings # rubocop:disable Style/Documentation + def process + super + rescue YARD::Parser::UndocumentableError + # We ignore if it's an API warning + raise unless statement.last.file.start_with?('lib/temporalio/api/') + end +end + +YARD::Handlers::Ruby::ConstantHandler.prepend(CustomizeYardWarnings) + +YARD::Rake::YardocTask.new { |t| t.options = ['--fail-on-warning'] } require 'fileutils' require 'google/protobuf' @@ -321,4 +332,4 @@ Rake::Task[:build].enhance([:copy_parent_files]) do rm ['LICENSE', 'README.md'] end -task default: ['rubocop', 'compile', 'rbs:install_collection', 'steep', 'test'] +task default: ['rubocop', 'yard', 'compile', 'rbs:install_collection', 'steep', 'test'] diff --git a/temporalio/lib/temporalio/client.rb b/temporalio/lib/temporalio/client.rb 
index c3aa1a9f..2851ce87 100644 --- a/temporalio/lib/temporalio/client.rb +++ b/temporalio/lib/temporalio/client.rb @@ -2,13 +2,18 @@ require 'google/protobuf/well_known_types' require 'temporalio/api' +require 'temporalio/client/async_activity_handle' require 'temporalio/client/connection' +require 'temporalio/client/implementation' require 'temporalio/client/interceptor' +require 'temporalio/client/workflow_execution' +require 'temporalio/client/workflow_execution_count' require 'temporalio/client/workflow_handle' +require 'temporalio/client/workflow_query_reject_condition' require 'temporalio/common_enums' require 'temporalio/converters' require 'temporalio/error' -require 'temporalio/internal/proto_utils' +require 'temporalio/retry_policy' require 'temporalio/runtime' require 'temporalio/search_attributes' @@ -45,10 +50,10 @@ class Client # TLS options are present, those TLS options will be used. # @param data_converter [Converters::DataConverter] Data converter to use for all data conversions to/from payloads. # @param interceptors [Array] Set of interceptors that are chained together to allow intercepting of - # client calls. The earlier interceptors wrap the later ones. Any interceptors that also implement - # {Worker::Interceptor} will be used as worker interceptors too so they should not be given separately when - # creating a worker. - # @param default_workflow_query_reject_condition [Api::Enums::V1::QueryRejectCondition, nil] Default rejection + # client calls. The earlier interceptors wrap the later ones. Any interceptors that also implement worker + # interceptor will be used as worker interceptors too so they should not be given separately when creating a + # worker. + # @param default_workflow_query_reject_condition [WorkflowQueryRejectCondition, nil] Default rejection # condition for workflow queries if not set during query. See {WorkflowHandle.query} for details on the # rejection condition. 
# @param rpc_metadata [Hash] Headers to use for all calls to the server. Keys here can be overriden @@ -116,11 +121,10 @@ def self.connect( # @param interceptors [Array] Set of interceptors that are chained together to allow intercepting of # client calls. The earlier interceptors wrap the later ones. # - # Any interceptors that also implement {Worker::Interceptor} will be used as worker interceptors too so they - # should not be given separately when creating a worker. - # @param default_workflow_query_reject_condition [Api::Enums::V1::QueryRejectCondition, nil] Default rejection - # condition for workflow queries if not set during query. See {WorkflowHandle.query} for details on the - # rejection condition. + # Any interceptors that also implement worker interceptor will be used as worker interceptors too so they should + # not be given separately when creating a worker. + # @param default_workflow_query_reject_condition [WorkflowQueryRejectCondition, nil] Default rejection condition for + # workflow queries if not set during query. See {WorkflowHandle.query} for details on the rejection condition. # # @see connect def initialize( @@ -218,7 +222,7 @@ def start_workflow( @impl.start_workflow(Interceptor::StartWorkflowInput.new( workflow:, args:, - id:, + workflow_id: id, task_queue:, execution_timeout:, run_timeout:, @@ -264,7 +268,7 @@ def start_workflow( # # @return [Object] Successful result of the workflow. # @raise [Error::WorkflowAlreadyStartedError] Workflow already exists. - # @raise [Error::WorkflowFailureError] Workflow failed with {Error::WorkflowFailureError.cause} as cause. + # @raise [Error::WorkflowFailureError] Workflow failed with +cause+ as the cause. # @raise [Error::RPCError] RPC error from call. 
def execute_workflow( workflow, @@ -320,103 +324,72 @@ def workflow_handle( run_id: nil, first_execution_run_id: nil ) - WorkflowHandle.new(self, workflow_id, run_id:, result_run_id: run_id, first_execution_run_id:) + WorkflowHandle.new(client: self, id: workflow_id, run_id:, result_run_id: run_id, first_execution_run_id:) end - # @!visibility private - def _impl - @impl + # List workflows. + # + # @param query [String, nil] A Temporal visibility list filter. + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. + # + # @return [Enumerator] Enumerable workflow executions. + # + # @raise [Error::RPCError] RPC error from call. + # + # @see https://docs.temporal.io/visibility + def list_workflows( + query = nil, + rpc_metadata: nil, + rpc_timeout: nil + ) + @impl.list_workflows(Interceptor::ListWorkflowsInput.new( + query:, + rpc_metadata:, + rpc_timeout: + )) end - # @!visibility private - class Implementation < Interceptor::Outbound - def initialize(client) - super(nil) - @client = client - end - - # @!visibility private - def start_workflow(input) - # TODO(cretz): Signal/update with start - req = Api::WorkflowService::V1::StartWorkflowExecutionRequest.new( - request_id: SecureRandom.uuid, - namespace: @client.namespace, - workflow_type: Api::Common::V1::WorkflowType.new(name: input.workflow.to_s), - workflow_id: input.id, - task_queue: Api::TaskQueue::V1::TaskQueue.new(name: input.task_queue.to_s), - input: @client.data_converter.to_payloads(input.args), - workflow_execution_timeout: Internal::ProtoUtils.seconds_to_duration(input.execution_timeout), - workflow_run_timeout: Internal::ProtoUtils.seconds_to_duration(input.run_timeout), - workflow_task_timeout: Internal::ProtoUtils.seconds_to_duration(input.task_timeout), - identity: @client.connection.identity, - workflow_id_reuse_policy: input.id_reuse_policy, - workflow_id_conflict_policy: input.id_conflict_policy, - 
retry_policy: input.retry_policy&.to_proto, - cron_schedule: input.cron_schedule, - memo: Internal::ProtoUtils.memo_to_proto(input.memo, @client.data_converter), - search_attributes: input.search_attributes&.to_proto, - workflow_start_delay: Internal::ProtoUtils.seconds_to_duration(input.start_delay), - request_eager_execution: input.request_eager_start, - header: input.headers - ) - - # Send request - begin - resp = @client.workflow_service.start_workflow_execution( - req, - rpc_retry: true, - rpc_metadata: input.rpc_metadata, - rpc_timeout: input.rpc_timeout - ) - rescue Error::RPCError => e - # Unpack and raise already started if that's the error, otherwise default raise - if e.code == Error::RPCError::Code::ALREADY_EXISTS && e.grpc_status.details.first - details = e.grpc_status.details.first.unpack(Api::ErrorDetails::V1::WorkflowExecutionAlreadyStartedFailure) - if details - raise Error::WorkflowAlreadyStartedError.new( - workflow_id: req.workflow_id, - workflow_type: req.workflow_type.name, - run_id: details.run_id - ) - end - end - raise - end + # Count workflows. + # + # @param query [String, nil] A Temporal visibility list filter. + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. + # + # @return [WorkflowExecutionCount] Count of workflows. + # + # @raise [Error::RPCError] RPC error from call. + # + # @see https://docs.temporal.io/visibility + def count_workflows( + query = nil, + rpc_metadata: nil, + rpc_timeout: nil + ) + @impl.count_workflows(Interceptor::CountWorkflowsInput.new( + query:, + rpc_metadata:, + rpc_timeout: + )) + end - # Return handle - WorkflowHandle.new( - @client, - input.id, - result_run_id: resp.run_id, - first_execution_run_id: resp.run_id - ) + # Get an async activity handle. + # + # @param task_token_or_id_reference [String, ActivityIDReference] Task token string or activity ID reference. 
+ # @return [AsyncActivityHandle] + def async_activity_handle(task_token_or_id_reference) + if task_token_or_id_reference.is_a?(ActivityIDReference) + AsyncActivityHandle.new(client: self, task_token: nil, id_reference: task_token_or_id_reference) + elsif task_token_or_id_reference.is_a?(String) + AsyncActivityHandle.new(client: self, task_token: task_token_or_id_reference, id_reference: nil) + else + raise ArgumentError, 'Must be a string task token or an ActivityIDReference' end + end - # @!visibility private - def fetch_workflow_history_event_page(input) - req = Api::WorkflowService::V1::GetWorkflowExecutionHistoryRequest.new( - namespace: @client.namespace, - execution: Api::Common::V1::WorkflowExecution.new( - workflow_id: input.id, - run_id: input.run_id || '' - ), - maximum_page_size: input.page_size || 0, - next_page_token: input.next_page_token, - wait_new_event: input.wait_new_event, - history_event_filter_type: input.event_filter_type, - skip_archival: input.skip_archival - ) - resp = @client.workflow_service.get_workflow_execution_history( - req, - rpc_retry: true, - rpc_metadata: input.rpc_metadata, - rpc_timeout: input.rpc_timeout - ) - Interceptor::FetchWorkflowHistoryEventPage.new( - events: resp.history&.events || [], - next_page_token: resp.next_page_token.empty? ? nil : resp.next_page_token - ) - end + # @!visibility private + def _impl + @impl end end end diff --git a/temporalio/lib/temporalio/client/activity_id_reference.rb b/temporalio/lib/temporalio/client/activity_id_reference.rb new file mode 100644 index 00000000..23eaf97a --- /dev/null +++ b/temporalio/lib/temporalio/client/activity_id_reference.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true + +require 'temporalio/api' +require 'temporalio/client/interceptor' +require 'temporalio/error' + +module Temporalio + class Client + # Reference to an existing activity by its workflow ID, run ID, and activity ID. + class ActivityIDReference + # @return [String] ID for the workflow. 
+ attr_reader :workflow_id + + # @return [String, nil] Run ID for the workflow. + attr_reader :run_id + + # @return [String] ID for the activity. + attr_reader :activity_id + + # Create an activity ID reference. + # + # @param workflow_id [String] ID for the workflow. + # @param run_id [String, nil] Run ID for the workflow. + # @param activity_id [String] ID for the activity. + def initialize(workflow_id:, run_id:, activity_id:) + @workflow_id = workflow_id + @run_id = run_id + @activity_id = activity_id + end + end + end +end diff --git a/temporalio/lib/temporalio/client/async_activity_handle.rb b/temporalio/lib/temporalio/client/async_activity_handle.rb new file mode 100644 index 00000000..19406e0d --- /dev/null +++ b/temporalio/lib/temporalio/client/async_activity_handle.rb @@ -0,0 +1,82 @@ +# frozen_string_literal: true + +require 'temporalio/api' +require 'temporalio/client/activity_id_reference' +require 'temporalio/client/interceptor' +require 'temporalio/error' + +module Temporalio + class Client + # Handle representing an external activity for completion and heartbeat. This is usually created via + # {Client.async_activity_handle}. + class AsyncActivityHandle + # @return [String, nil] Task token if created with a task token. Mutually exclusive with {id_reference}. + attr_reader :task_token + + # @return [ActivityIDReference, nil] Activity ID reference if created with one. Mutually exclusive with + # {task_token}. + attr_reader :id_reference + + # @!visibility private + def initialize(client:, task_token:, id_reference:) + @client = client + @task_token = task_token + @id_reference = id_reference + end + + # Record a heartbeat for the activity. + # + # @param details [Array] Details of the heartbeat. + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. 
+ def heartbeat( + *details, + rpc_metadata: nil, + rpc_timeout: nil + ) + raise NotImplementedError + end + + # Complete the activity. + # + # @param result [Object, nil] Result of the activity. + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. + def complete( + result = nil, + rpc_metadata: nil, + rpc_timeout: nil + ) + raise NotImplementedError + end + + # Fail the activity. + # + # @param error [Exception] Error for the activity. + # @param last_heartbeat_details [Array] Last heartbeat details for the activity. + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. + def fail( + error, + last_heartbeat_details: [], + rpc_metadata: nil, + rpc_timeout: nil + ) + raise NotImplementedError + end + + # Report the activity as cancelled. + # + # @param details [Array] Cancellation details. + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. 
+ def report_cancellation( + *details, + rpc_metadata: nil, + rpc_timeout: nil + ) + raise NotImplementedError + end + end + end +end diff --git a/temporalio/lib/temporalio/client/implementation.rb b/temporalio/lib/temporalio/client/implementation.rb new file mode 100644 index 00000000..0b5964e0 --- /dev/null +++ b/temporalio/lib/temporalio/client/implementation.rb @@ -0,0 +1,389 @@ +# frozen_string_literal: true + +require 'google/protobuf/well_known_types' +require 'temporalio/api' +require 'temporalio/client/activity_id_reference' +require 'temporalio/client/async_activity_handle' +require 'temporalio/client/connection' +require 'temporalio/client/interceptor' +require 'temporalio/client/workflow_execution' +require 'temporalio/client/workflow_execution_count' +require 'temporalio/client/workflow_handle' +require 'temporalio/common_enums' +require 'temporalio/converters' +require 'temporalio/error' +require 'temporalio/error/failure' +require 'temporalio/internal/proto_utils' +require 'temporalio/runtime' +require 'temporalio/search_attributes' + +module Temporalio + class Client + # @!visibility private + class Implementation < Interceptor::Outbound + def initialize(client) + super(nil) + @client = client + end + + # @!visibility private + def start_workflow(input) + # TODO(cretz): Signal/update with start + req = Api::WorkflowService::V1::StartWorkflowExecutionRequest.new( + request_id: SecureRandom.uuid, + namespace: @client.namespace, + workflow_type: Api::Common::V1::WorkflowType.new(name: input.workflow.to_s), + workflow_id: input.workflow_id, + task_queue: Api::TaskQueue::V1::TaskQueue.new(name: input.task_queue.to_s), + input: @client.data_converter.to_payloads(input.args), + workflow_execution_timeout: Internal::ProtoUtils.seconds_to_duration(input.execution_timeout), + workflow_run_timeout: Internal::ProtoUtils.seconds_to_duration(input.run_timeout), + workflow_task_timeout: Internal::ProtoUtils.seconds_to_duration(input.task_timeout), + identity: 
@client.connection.identity, + workflow_id_reuse_policy: input.id_reuse_policy, + workflow_id_conflict_policy: input.id_conflict_policy, + retry_policy: input.retry_policy&.to_proto, + cron_schedule: input.cron_schedule, + memo: Internal::ProtoUtils.memo_to_proto(input.memo, @client.data_converter), + search_attributes: input.search_attributes&.to_proto, + workflow_start_delay: Internal::ProtoUtils.seconds_to_duration(input.start_delay), + request_eager_execution: input.request_eager_start, + header: input.headers + ) + + # Send request + begin + resp = @client.workflow_service.start_workflow_execution( + req, + rpc_retry: true, + rpc_metadata: input.rpc_metadata, + rpc_timeout: input.rpc_timeout + ) + rescue Error::RPCError => e + # Unpack and raise already started if that's the error, otherwise default raise + if e.code == Error::RPCError::Code::ALREADY_EXISTS && e.grpc_status.details.first + details = e.grpc_status.details.first.unpack(Api::ErrorDetails::V1::WorkflowExecutionAlreadyStartedFailure) + if details + raise Error::WorkflowAlreadyStartedError.new( + workflow_id: req.workflow_id, + workflow_type: req.workflow_type.name, + run_id: details.run_id + ) + end + end + raise + end + + # Return handle + WorkflowHandle.new( + client: @client, + id: input.workflow_id, + run_id: nil, + result_run_id: resp.run_id, + first_execution_run_id: resp.run_id + ) + end + + # @!visibility private + def list_workflows(input) + Enumerator.new do |yielder| + req = Api::WorkflowService::V1::ListWorkflowExecutionsRequest.new( + namespace: @client.namespace, + query: input.query || '' + ) + loop do + resp = @client.workflow_service.list_workflow_executions( + req, + rpc_retry: true, + rpc_metadata: input.rpc_metadata, + rpc_timeout: input.rpc_timeout + ) + resp.executions.each { |raw_info| yielder << WorkflowExecution.new(raw_info, @client.data_converter) } + break if resp.next_page_token.empty? 
+ + req.next_page_token = resp.next_page_token + end + end + end + + # @!visibility private + def count_workflows(input) + resp = @client.workflow_service.count_workflow_executions( + Api::WorkflowService::V1::CountWorkflowExecutionsRequest.new( + namespace: @client.namespace, + query: input.query || '' + ), + rpc_retry: true, + rpc_metadata: input.rpc_metadata, + rpc_timeout: input.rpc_timeout + ) + WorkflowExecutionCount.new( + resp.count, + resp.groups.map do |group| + WorkflowExecutionCount::AggregationGroup.new( + group.count, + group.group_values.map { |payload| SearchAttributes.value_from_payload(payload) } + ) + end + ) + end + + # @!visibility private + def describe_workflow(input) + resp = @client.workflow_service.describe_workflow_execution( + Api::WorkflowService::V1::DescribeWorkflowExecutionRequest.new( + namespace: @client.namespace, + execution: Api::Common::V1::WorkflowExecution.new( + workflow_id: input.workflow_id, + run_id: input.run_id || '' + ) + ), + rpc_retry: true, + rpc_metadata: input.rpc_metadata, + rpc_timeout: input.rpc_timeout + ) + WorkflowExecution::Description.new(resp, @client.data_converter) + end + + # @!visibility private + def fetch_workflow_history_events(input) + Enumerator.new do |yielder| + req = Api::WorkflowService::V1::GetWorkflowExecutionHistoryRequest.new( + namespace: @client.namespace, + execution: Api::Common::V1::WorkflowExecution.new( + workflow_id: input.workflow_id, + run_id: input.run_id || '' + ), + wait_new_event: input.wait_new_event, + history_event_filter_type: input.event_filter_type, + skip_archival: input.skip_archival + ) + loop do + resp = @client.workflow_service.get_workflow_execution_history( + req, + rpc_retry: true, + rpc_metadata: input.rpc_metadata, + rpc_timeout: input.rpc_timeout + ) + resp.history&.events&.each { |event| yielder << event } + break if resp.next_page_token.empty? 
+ + req.next_page_token = resp.next_page_token + end + end + end + + # @!visibility private + def signal_workflow(input) + @client.workflow_service.signal_workflow_execution( + Api::WorkflowService::V1::SignalWorkflowExecutionRequest.new( + namespace: @client.namespace, + workflow_execution: Api::Common::V1::WorkflowExecution.new( + workflow_id: input.workflow_id, + run_id: input.run_id || '' + ), + signal_name: input.signal, + input: @client.data_converter.to_payloads(input.args), + header: input.headers, + identity: @client.connection.identity, + request_id: SecureRandom.uuid + ), + rpc_retry: true, + rpc_metadata: input.rpc_metadata, + rpc_timeout: input.rpc_timeout + ) + nil + end + + # @!visibility private + def query_workflow(input) + begin + resp = @client.workflow_service.query_workflow( + Api::WorkflowService::V1::QueryWorkflowRequest.new( + namespace: @client.namespace, + execution: Api::Common::V1::WorkflowExecution.new( + workflow_id: input.workflow_id, + run_id: input.run_id || '' + ), + query: Api::Query::V1::WorkflowQuery.new( + query_type: input.query, + query_args: @client.data_converter.to_payloads(input.args), + header: input.headers + ), + query_reject_condition: input.reject_condition || 0 + ), + rpc_retry: true, + rpc_metadata: input.rpc_metadata, + rpc_timeout: input.rpc_timeout + ) + rescue Error::RPCError => e + # If the status is INVALID_ARGUMENT, we can assume it's a query failed + # error + raise Error::WorkflowQueryFailedError, e.message if e.code == Error::RPCError::Code::INVALID_ARGUMENT + + raise + end + unless resp.query_rejected.nil? 
+ raise Error::WorkflowQueryRejectedError.new(status: Internal::ProtoUtils.enum_to_int( + Api::Enums::V1::WorkflowExecutionStatus, resp.query_rejected.status + )) + end + + results = @client.data_converter.from_payloads(resp.query_result) + warn("Expected 0 or 1 query result, got #{results.size}") if results.size > 1 + results&.first + end + + # @!visibility private + def start_workflow_update(input) + if input.wait_for_stage == WorkflowUpdateWaitStage::ADMITTED + raise ArgumentError, 'ADMITTED wait stage not supported' + end + + req = Api::WorkflowService::V1::UpdateWorkflowExecutionRequest.new( + namespace: @client.namespace, + workflow_execution: Api::Common::V1::WorkflowExecution.new( + workflow_id: input.workflow_id, + run_id: input.run_id || '' + ), + request: Api::Update::V1::Request.new( + meta: Api::Update::V1::Meta.new( + update_id: input.update_id, + identity: @client.connection.identity + ), + input: Api::Update::V1::Input.new( + name: input.update, + args: @client.data_converter.to_payloads(input.args), + header: input.headers + ) + ), + wait_policy: Api::Update::V1::WaitPolicy.new( + lifecycle_stage: input.wait_for_stage + ) + ) + + # Repeatedly try to invoke start until the update reaches user-provided + # wait stage or is at least ACCEPTED (as of the time of this writing, + # the user cannot specify sooner than ACCEPTED) + # @type var resp: untyped + resp = nil + loop do + resp = @client.workflow_service.update_workflow_execution( + req, + rpc_retry: true, + rpc_metadata: input.rpc_metadata, + rpc_timeout: input.rpc_timeout + ) + + # We're only done if the response stage is after the requested stage + # or the response stage is accepted + break if resp.stage >= req.wait_policy.lifecycle_stage || resp.stage >= WorkflowUpdateWaitStage::ACCEPTED + rescue Error::RPCError => e + # Deadline exceeded or cancel is a special error type + if e.code == Error::RPCError::Code::DEADLINE_EXCEEDED || e.code == Error::RPCError::Code::CANCELLED + raise 
Error::WorkflowUpdateRPCTimeoutOrCanceledError + end + + raise + end + + # If the user wants to wait until completed, we must poll until outcome + # if not already there + if input.wait_for_stage == WorkflowUpdateWaitStage::COMPLETED && !resp.outcome + resp.outcome = @client._impl.poll_workflow_update(PollWorkflowUpdateInput.new( + workflow_id: input.workflow_id, + run_id: input.run_id, + update_id: input.update_id, + rpc_metadata: input.rpc_metadata, + rpc_timeout: input.rpc_timeout + )) + end + + WorkflowUpdateHandle.new( + client: @client, + id: input.update_id, + workflow_id: input.workflow_id, + workflow_run_id: input.run_id, + known_outcome: resp.outcome + ) + end + + # @!visibility private + def poll_workflow_update(input) + req = Api::WorkflowService::V1::PollWorkflowExecutionUpdateRequest.new( + namespace: @client.namespace, + update_ref: Api::Update::V1::UpdateRef.new( + workflow_execution: Api::Common::V1::WorkflowExecution.new( + workflow_id: input.workflow_id, + run_id: input.run_id || '' + ), + update_id: input.update_id + ), + identity: @client.connection.identity, + wait_policy: Api::Update::V1::WaitPolicy.new( + lifecycle_stage: WorkflowUpdateWaitStage::COMPLETED + ) + ) + + # Continue polling as long as we have no outcome + loop do + resp = @client.workflow_service.poll_workflow_execution_update( + req, + rpc_retry: true, + rpc_metadata: input.rpc_metadata, + rpc_timeout: input.rpc_timeout + ) + return resp.outcome if resp.outcome + rescue Error::RPCError => e + # Deadline exceeded or cancel is a special error type + if e.code == Error::RPCError::Code::DEADLINE_EXCEEDED || e.code == Error::RPCError::Code::CANCELLED + raise Error::WorkflowUpdateRPCTimeoutOrCanceledError + end + + raise + end + end + + # @!visibility private + def cancel_workflow(input) + @client.workflow_service.request_cancel_workflow_execution( + Api::WorkflowService::V1::RequestCancelWorkflowExecutionRequest.new( + namespace: @client.namespace, + workflow_execution: 
Api::Common::V1::WorkflowExecution.new( + workflow_id: input.workflow_id, + run_id: input.run_id || '' + ), + first_execution_run_id: input.first_execution_run_id, + identity: @client.connection.identity, + request_id: SecureRandom.uuid + ), + rpc_retry: true, + rpc_metadata: input.rpc_metadata, + rpc_timeout: input.rpc_timeout + ) + nil + end + + # @!visibility private + def terminate_workflow(input) + @client.workflow_service.terminate_workflow_execution( + Api::WorkflowService::V1::TerminateWorkflowExecutionRequest.new( + namespace: @client.namespace, + workflow_execution: Api::Common::V1::WorkflowExecution.new( + workflow_id: input.workflow_id, + run_id: input.run_id || '' + ), + reason: input.reason || '', + first_execution_run_id: input.first_execution_run_id, + details: @client.data_converter.to_payloads(input.details), + identity: @client.connection.identity + ), + rpc_retry: true, + rpc_metadata: input.rpc_metadata, + rpc_timeout: input.rpc_timeout + ) + nil + end + end + end +end diff --git a/temporalio/lib/temporalio/client/interceptor.rb b/temporalio/lib/temporalio/client/interceptor.rb index 30cdb816..dfec9f21 100644 --- a/temporalio/lib/temporalio/client/interceptor.rb +++ b/temporalio/lib/temporalio/client/interceptor.rb @@ -5,8 +5,8 @@ class Client # Mixin for intercepting clients. Classes that +include+ this should implement their own {intercept_client} that # returns their own instance of {Outbound}. # - # @note Input classes herein may get new requeired fields added and therefore the constructors of the Input - # classes may change in backwards incompatible ways. Users should not try to construct Input classes themselves. + # @note Input classes herein may get new required fields added and therefore the constructors of the Input classes + # may change in backwards incompatible ways. Users should not try to construct Input classes themselves. module Interceptor # Method called when intercepting a client. This is called upon client creation. 
# @@ -21,7 +21,7 @@ def intercept_client(next_interceptor) StartWorkflowInput = Struct.new( :workflow, :args, - :id, + :workflow_id, :task_queue, :execution_timeout, :run_timeout, @@ -40,12 +40,35 @@ def intercept_client(next_interceptor) keyword_init: true ) - # Input for {Outbound.fetch_workflow_history_event_page}. - FetchWorkflowHistoryEventPageInput = Struct.new( - :id, + # Input for {Outbound.list_workflows}. + ListWorkflowsInput = Struct.new( + :query, + :rpc_metadata, + :rpc_timeout, + keyword_init: true + ) + + # Input for {Outbound.count_workflows}. + CountWorkflowsInput = Struct.new( + :query, + :rpc_metadata, + :rpc_timeout, + keyword_init: true + ) + + # Input for {Outbound.describe_workflow}. + DescribeWorkflowInput = Struct.new( + :workflow_id, + :run_id, + :rpc_metadata, + :rpc_timeout, + keyword_init: true + ) + + # Input for {Outbound.fetch_workflow_history_events}. + FetchWorkflowHistoryEventsInput = Struct.new( + :workflow_id, :run_id, - :page_size, - :next_page_token, :wait_new_event, :event_filter_type, :skip_archival, @@ -54,10 +77,74 @@ def intercept_client(next_interceptor) keyword_init: true ) - # Output for {Outbound.fetch_workflow_history_event_page}. - FetchWorkflowHistoryEventPage = Struct.new( - :events, - :next_page_token, + # Input for {Outbound.signal_workflow}. + SignalWorkflowInput = Struct.new( + :workflow_id, + :run_id, + :signal, + :args, + :headers, + :rpc_metadata, + :rpc_timeout, + keyword_init: true + ) + + # Input for {Outbound.query_workflow}. + QueryWorkflowInput = Struct.new( + :workflow_id, + :run_id, + :query, + :args, + :reject_condition, + :headers, + :rpc_metadata, + :rpc_timeout, + keyword_init: true + ) + + # Input for {Outbound.start_workflow_update}. + StartWorkflowUpdateInput = Struct.new( + :workflow_id, + :run_id, + :update_id, + :update, + :args, + :wait_for_stage, + :headers, + :rpc_metadata, + :rpc_timeout, + keyword_init: true + ) + + # Input for {Outbound.poll_workflow_update}. 
+ PollWorkflowUpdateInput = Struct.new( + :workflow_id, + :run_id, + :update_id, + :rpc_metadata, + :rpc_timeout, + keyword_init: true + ) + + # Input for {Outbound.cancel_workflow}. + CancelWorkflowInput = Struct.new( + :workflow_id, + :run_id, + :first_execution_run_id, + :rpc_metadata, + :rpc_timeout, + keyword_init: true + ) + + # Input for {Outbound.terminate_workflow}. + TerminateWorkflowInput = Struct.new( + :workflow_id, + :run_id, + :first_execution_run_id, + :reason, + :details, + :rpc_metadata, + :rpc_timeout, keyword_init: true ) @@ -82,12 +169,81 @@ def start_workflow(input) next_interceptor.start_workflow(input) end - # Called everytime the client needs a page of workflow history. This includes getting the result. + # Called for every {Client.list_workflows} call. + # + # @param input [ListWorkflowsInput] Input. + # @return [Enumerator] Enumerable workflow executions. + def list_workflows(input) + next_interceptor.list_workflows(input) + end + + # Called for every {Client.count_workflows} call. + # + # @param input [CountWorkflowsInput] Input. + # @return [WorkflowExecutionCount] Workflow count. + def count_workflows(input) + next_interceptor.count_workflows(input) + end + + # Called for every {WorkflowHandle.describe} call. + # + # @param input [DescribeWorkflowInput] Input. + # @return [WorkflowExecution::Description] Workflow description. + def describe_workflow(input) + next_interceptor.describe_workflow(input) + end + + # Called everytime the client needs workflow history. This includes getting the result. + # + # @param input [FetchWorkflowHistoryEventsInput] Input. + # @return [Enumerator] Event enumerator. + def fetch_workflow_history_events(input) + next_interceptor.fetch_workflow_history_events(input) + end + + # Called for every {WorkflowHandle.signal} call. + # + # @param input [SignalWorkflowInput] Input. + def signal_workflow(input) + next_interceptor.signal_workflow(input) + end + + # Called for every {WorkflowHandle.query} call. 
+ # + # @param input [QueryWorkflowInput] Input. + # @return [Object, nil] Query result. + def query_workflow(input) + next_interceptor.query_workflow(input) + end + + # Called for every {WorkflowHandle.start_update} call. + # + # @param input [StartWorkflowUpdateInput] Input. + # @return [WorkflowUpdateHandle] Update handle. + def start_workflow_update(input) + next_interceptor.start_workflow_update(input) + end + + # Called when polling for update result. + # + # @param input [PollWorkflowUpdateInput] Input. + # @return [Api::Update::V1::Outcome] Update outcome. + def poll_workflow_update(input) + next_interceptor.poll_workflow_update(input) + end + + # Called for every {WorkflowHandle.cancel} call. + # + # @param input [CancelWorkflowInput] Input. + def cancel_workflow(input) + next_interceptor.cancel_workflow(input) + end + + # Called for every {WorkflowHandle.terminate} call. # - # @param input [FetchWorkflowHistoryEventPageInput] Input. - # @return [FetchWorkflowHistoryEventPage] Event page. - def fetch_workflow_history_event_page(input) - next_interceptor.fetch_workflow_history_event_page(input) + # @param input [TerminateWorkflowInput] Input. + def terminate_workflow(input) + next_interceptor.terminate_workflow(input) end end end diff --git a/temporalio/lib/temporalio/client/workflow_execution.rb b/temporalio/lib/temporalio/client/workflow_execution.rb new file mode 100644 index 00000000..27c01c2e --- /dev/null +++ b/temporalio/lib/temporalio/client/workflow_execution.rb @@ -0,0 +1,103 @@ +# frozen_string_literal: true + +require 'temporalio/api' +require 'temporalio/client/workflow_execution_status' +require 'temporalio/internal/proto_utils' +require 'temporalio/search_attributes' + +module Temporalio + class Client + # Info for a single workflow execution run. + class WorkflowExecution + # @return [Api::Workflow::V1::WorkflowExecutionInfo] Underlying protobuf info. 
+ attr_reader :raw_info + + # @!visibility private + def initialize(raw_info, data_converter) + @raw_info = raw_info + @data_converter = data_converter + end + + # @return [Time, nil] When the workflow was closed if closed. + def close_time + @raw_info.close_time&.to_time + end + + # @return [Time, nil] When this workflow run started or should start. + def execution_time + @raw_info.execution_time&.to_time + end + + # @return [Integer] Number of events in the history. + def history_length + @raw_info.history_length + end + + # @return [String] ID for the workflow. + def id + @raw_info.execution.workflow_id + end + + # @return [Hash, nil] Memo for the workflow. + def memo + @memo = Internal::ProtoUtils.memo_from_proto(@raw_info.memo, @data_converter) unless defined?(@memo) + @memo + end + + # @return [String, nil] ID for the parent workflow if this was started as a child. + def parent_id + @raw_info.parent_execution&.workflow_id + end + + # @return [String, nil] Run ID for the parent workflow if this was started as a child. + def parent_run_id + @raw_info.parent_execution&.run_id + end + + # @return [String] Run ID for this workflow run. + def run_id + @raw_info.execution.run_id + end + + # @return [SearchAttributes, nil] Current set of search attributes if any. + def search_attributes + unless defined?(@search_attributes) + @search_attributes = SearchAttributes.from_proto(@raw_info.search_attributes) + end + @search_attributes + end + + # @return [Time] When the workflow was created. + def start_time + @raw_info.start_time.to_time + end + + # @return [WorkflowExecutionStatus] Status for the workflow. + def status + Internal::ProtoUtils.enum_to_int(Api::Enums::V1::WorkflowExecutionStatus, @raw_info.status) + end + + # @return [String] Task queue for the workflow. + def task_queue + @raw_info.task_queue + end + + # @return [String] Type name for the workflow. + def workflow_type + @raw_info.type.name + end + + # Description for a single workflow execution run. 
+ class Description < WorkflowExecution + # @return [Api::WorkflowService::V1::DescribeWorkflowExecutionResponse] Underlying protobuf description. + attr_reader :raw_description + + # @!visibility private + def initialize(raw_description, data_converter) + super(raw_description.workflow_execution_info, data_converter) + @raw_description = raw_description + end + end + end + end +end diff --git a/temporalio/lib/temporalio/client/workflow_execution_count.rb b/temporalio/lib/temporalio/client/workflow_execution_count.rb new file mode 100644 index 00000000..a3300d72 --- /dev/null +++ b/temporalio/lib/temporalio/client/workflow_execution_count.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +module Temporalio + class Client + # Representation of a count from a count workflows call. + class WorkflowExecutionCount + # @return [Integer] Approximate number of workflows matching the original query. If the query had a group-by + # clause, this is simply the sum of all the counts in {groups}. + attr_reader :count + + # @return [Array] Groups if the query had a group-by clause, or empty if not. + attr_reader :groups + + # @!visibility private + def initialize(count, groups) + @count = count + @groups = groups + end + + # Aggregation group if the workflow count query had a group-by clause. + class AggregationGroup + # @return [Integer] Approximate number of workflows matching the original query for this group. + attr_reader :count + + # @return [Array] Search attribute values for this group. 
+ attr_reader :group_values + + # @!visibility private + def initialize(count, group_values) + @count = count + @group_values = group_values + end + end + end + end +end diff --git a/temporalio/lib/temporalio/client/workflow_execution_status.rb b/temporalio/lib/temporalio/client/workflow_execution_status.rb new file mode 100644 index 00000000..c68df02e --- /dev/null +++ b/temporalio/lib/temporalio/client/workflow_execution_status.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +require 'temporalio/api' + +module Temporalio + class Client + # Status of a workflow execution. + module WorkflowExecutionStatus + RUNNING = Api::Enums::V1::WorkflowExecutionStatus::WORKFLOW_EXECUTION_STATUS_RUNNING + COMPLETED = Api::Enums::V1::WorkflowExecutionStatus::WORKFLOW_EXECUTION_STATUS_COMPLETED + FAILED = Api::Enums::V1::WorkflowExecutionStatus::WORKFLOW_EXECUTION_STATUS_FAILED + CANCELED = Api::Enums::V1::WorkflowExecutionStatus::WORKFLOW_EXECUTION_STATUS_CANCELED + TERMINATED = Api::Enums::V1::WorkflowExecutionStatus::WORKFLOW_EXECUTION_STATUS_TERMINATED + CONTINUED_AS_NEW = Api::Enums::V1::WorkflowExecutionStatus::WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW + TIMED_OUT = Api::Enums::V1::WorkflowExecutionStatus::WORKFLOW_EXECUTION_STATUS_TIMED_OUT + end + end +end diff --git a/temporalio/lib/temporalio/client/workflow_handle.rb b/temporalio/lib/temporalio/client/workflow_handle.rb index e30292ff..22e681e1 100644 --- a/temporalio/lib/temporalio/client/workflow_handle.rb +++ b/temporalio/lib/temporalio/client/workflow_handle.rb @@ -2,7 +2,10 @@ require 'temporalio/api' require 'temporalio/client/interceptor' +require 'temporalio/client/workflow_update_handle' +require 'temporalio/client/workflow_update_wait_stage' require 'temporalio/error' +require 'temporalio/workflow_history' module Temporalio class Client @@ -12,8 +15,8 @@ class WorkflowHandle # @return [String] ID for the workflow. 
attr_reader :id - # Run ID used for {signal}, {query}, and {update} calls if present to ensure the signal/query/update happen on - # this exact run. + # Run ID used for {signal}, {query}, and {start_update}/{execute_update} calls if present to ensure the + # signal/query/update happen on this exact run. # # This is only created via {Client.workflow_handle}. {Client.start_workflow} will not set this value. # @@ -44,8 +47,8 @@ class WorkflowHandle # @return [String, nil] First execution run ID. attr_reader :first_execution_run_id - # Create a workflow handle. {Client.workflow_handle} is preferred over instantiating this directly. - def initialize(client, id, run_id: nil, result_run_id: nil, first_execution_run_id: nil) + # @!visibility private + def initialize(client:, id:, run_id:, result_run_id:, first_execution_run_id:) @client = client @id = id @run_id = run_id @@ -65,7 +68,7 @@ def initialize(client, id, run_id: nil, result_run_id: nil, first_execution_run_ # # @return [Object] Result of the workflow after being converted by the data converter. # - # @raise [Error::WorkflowFailureError] Workflow failed with {Error::WorkflowFailureError#cause} as cause. + # @raise [Error::WorkflowFailureError] Workflow failed with +cause+ as the cause. # @raise [Error::WorkflowContinuedAsNewError] Workflow continued as new and +follow_runs+ is +false+. # @raise [Error::RPCError] RPC error from call. def result( @@ -77,12 +80,11 @@ def result( hist_run_id = result_run_id loop do # Get close event - event = fetch_history_events_for_run( - hist_run_id, - page_size: nil, + event = fetch_history_events( wait_new_event: true, event_filter_type: Api::Enums::V1::HistoryEventFilterType::HISTORY_EVENT_FILTER_TYPE_CLOSE_EVENT, skip_archival: true, + specific_run_id: hist_run_id, rpc_metadata:, rpc_timeout: ).next @@ -100,33 +102,28 @@ def result( hist_run_id = attrs.new_execution_run_id next if follow_runs && hist_run_id && !hist_run_id.empty? 
- raise Error::WorkflowFailureError.new(cause: @client.data_converter.from_failure(attrs.failure)) + raise Error::WorkflowFailureError.new, cause: @client.data_converter.from_failure(attrs.failure) when :EVENT_TYPE_WORKFLOW_EXECUTION_CANCELED attrs = event.workflow_execution_canceled_event_attributes - raise Error::WorkflowFailureError.new( - cause: Error::CancelledError.new( - 'Workflow execution cancelled', - details: @client.data_converter.from_payloads(attrs&.details) - ) + raise Error::WorkflowFailureError.new, cause: Error::CanceledError.new( + 'Workflow execution canceled', + details: @client.data_converter.from_payloads(attrs&.details) ) when :EVENT_TYPE_WORKFLOW_EXECUTION_TERMINATED attrs = event.workflow_execution_terminated_event_attributes - raise Error::WorkflowFailureError.new( - cause: Error::TerminatedError.new( - attrs.reason.empty? ? 'Workflow execution cancelled' : attrs.reason, - details: @client.data_converter.from_payloads(attrs&.details) - ) + raise Error::WorkflowFailureError.new, cause: Error::TerminatedError.new( + Internal::ProtoUtils.string_or(attrs.reason, 'Workflow execution terminated'), + details: @client.data_converter.from_payloads(attrs&.details) ) when :EVENT_TYPE_WORKFLOW_EXECUTION_TIMED_OUT attrs = event.workflow_execution_timed_out_event_attributes hist_run_id = attrs.new_execution_run_id next if follow_runs && hist_run_id && !hist_run_id.empty? 
- raise Error::WorkflowFailureError.new( - cause: Error::TimeoutError.new( - 'Workflow execution timed out', - type: Api::Enums::V1::TimeoutType::TIMEOUT_TYPE_START_TO_CLOSE - ) + raise Error::WorkflowFailureError.new, cause: Error::TimeoutError.new( + 'Workflow execution timed out', + type: Api::Enums::V1::TimeoutType::TIMEOUT_TYPE_START_TO_CLOSE, + last_heartbeat_details: [] ) when :EVENT_TYPE_WORKFLOW_EXECUTION_CONTINUED_AS_NEW attrs = event.workflow_execution_continued_as_new_event_attributes @@ -141,70 +138,307 @@ def result( end end - # Fetch an enumerable of history events for this workflow. Internally this is done in paginated form, but it is - # presented as an enumerable. + # Get workflow details. This will get details for the {run_id} if present. To use a different run ID, create a new + # handle via {Client.workflow_handle}. + # + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. + # + # @return [WorkflowExecution::Description] Workflow description. + # + # @raise [Error::RPCError] RPC error from call. + # + # @note Handles created as a result of {Client.start_workflow} will describe the latest workflow with the same + # workflow ID even if it is unrelated to the started workflow. + def describe( + rpc_metadata: nil, + rpc_timeout: nil + ) + @client._impl.describe_workflow(Interceptor::DescribeWorkflowInput.new( + workflow_id: id, + run_id:, + rpc_metadata:, + rpc_timeout: + )) + end + + # Get workflow history. This is a helper on top of {fetch_history_events}. + # + # @param event_filter_type [Api::Enums::V1::HistoryEventFilterType] Types of events to fetch. + # @param skip_archival [Boolean] Whether to skip archival. + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. + # + # @return [WorkflowHistory] Workflow history. + # + # @raise [Error::RPCError] RPC error from call. 
+ def fetch_history( + event_filter_type: Api::Enums::V1::HistoryEventFilterType::HISTORY_EVENT_FILTER_TYPE_ALL_EVENT, + skip_archival: false, + rpc_metadata: nil, + rpc_timeout: nil + ) + WorkflowHistory.new( + fetch_history_events( + event_filter_type:, + skip_archival:, + rpc_metadata:, + rpc_timeout: + ).to_a + ) + end + + # Fetch an enumerator of history events for this workflow. Internally this is done in paginated form, but it is + # presented as an enumerator. # - # @param page_size [Integer, nil] Page size for each internal page fetch. Most users will not need to set this - # since the enumerable hides pagination. # @param wait_new_event [Boolean] If +true+, when the end of the current set of events is reached but the workflow # is not complete, this will wait for the next event. If +false+, the enumerable completes at the end of current # history. # @param event_filter_type [Api::Enums::V1::HistoryEventFilterType] Types of events to fetch. # @param skip_archival [Boolean] Whether to skip archival. + # @param specific_run_id [String, nil] Run ID to fetch events for. Default is the {run_id}. Most users will not + # need to set this and instead use the one on the class. # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. # @param rpc_timeout [Float, nil] Number of seconds before timeout. # - # @return [Enumerable] Enumerable events. + # @return [Enumerator] Enumerable events. + # + # @raise [Error::RPCError] RPC error from call. 
def fetch_history_events( - page_size: nil, wait_new_event: false, event_filter_type: Api::Enums::V1::HistoryEventFilterType::HISTORY_EVENT_FILTER_TYPE_ALL_EVENT, skip_archival: false, + specific_run_id: run_id, + rpc_metadata: nil, + rpc_timeout: nil + ) + @client._impl.fetch_workflow_history_events(Interceptor::FetchWorkflowHistoryEventsInput.new( + workflow_id: id, + run_id: specific_run_id, + wait_new_event:, + event_filter_type:, + skip_archival:, + rpc_metadata:, + rpc_timeout: + )) + end + + # Send a signal to the workflow. This will signal for {run_id} if present. To use a different run ID, create a new + # handle via {Client.workflow_handle}. + # + # @param signal [String] Signal name. + # @param args [Array] Signal arguments. + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. + # + # @raise [Error::RPCError] RPC error from call. + # + # @note Handles created as a result of {Client.start_workflow} will signal the latest workflow with the same + # workflow ID even if it is unrelated to the started workflow. + def signal( + signal, + *args, + rpc_metadata: nil, + rpc_timeout: nil + ) + @client._impl.signal_workflow(Interceptor::SignalWorkflowInput.new( + workflow_id: id, + run_id:, + signal:, + args:, + headers: {}, + rpc_metadata:, + rpc_timeout: + )) + end + + # Query the workflow. This will query for {run_id} if present. To use a different run ID, create a new handle via + # {Client.workflow_handle}. + # + # @param query [String] Query name. + # @param args [Array] Query arguments. + # @param reject_condition [WorkflowQueryRejectCondition, nil] Condition for rejecting the query. + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. + # + # @return [Object, nil] Query result. + # + # @raise [Error::WorkflowQueryFailedError] The query on the workflow returned a failure. 
+ # @raise [Error::WorkflowQueryRejectedError] A query reject condition was satisfied. + # @raise [Error::RPCError] RPC error from call. + # + # @note Handles created as a result of {Client.start_workflow} will query the latest workflow with the same + # workflow ID even if it is unrelated to the started workflow. + def query( + query, + *args, + reject_condition: @client.options.default_workflow_query_reject_condition, + rpc_metadata: nil, + rpc_timeout: nil + ) + @client._impl.query_workflow(Interceptor::QueryWorkflowInput.new( + workflow_id: id, + run_id:, + query:, + args:, + reject_condition:, + headers: {}, + rpc_metadata:, + rpc_timeout: + )) + end + + # Send an update request to the workflow and return a handle to it. This will target the workflow with {run_id} if + # present. To use a different run ID, create a new handle via {Client.workflow_handle}. + # + # @param update [String] Update name. + # @param args [Array] Update arguments. + # @param wait_for_stage [WorkflowUpdateWaitStage] Required stage to wait until returning. ADMITTED is not + # currently supported. See https://docs.temporal.io/workflows#update for more details. + # @param id [String] ID of the update. + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. + # + # @return [WorkflowUpdateHandle] The update handle. + # + # @raise [Error::WorkflowUpdateRPCTimeoutOrCanceledError] This update call timed out or was canceled. This doesn't + # mean the update itself was timed out or cancelled. + # @raise [Error::RPCError] RPC error from call. + # + # @note Handles created as a result of {Client.start_workflow} will send updates the latest workflow with the same + # workflow ID even if it is unrelated to the started workflow. + # @note WARNING: This API is experimental. 
+ def start_update( + update, + *args, + wait_for_stage:, + id: SecureRandom.uuid, + rpc_metadata: nil, + rpc_timeout: nil + ) + @client._impl.start_workflow_update(Interceptor::StartWorkflowUpdateInput.new( + workflow_id: self.id, + run_id:, + update_id: id, + update:, + args:, + wait_for_stage:, + headers: {}, + rpc_metadata:, + rpc_timeout: + )) + end + + # Send an update request to the workflow and wait for it to complete. This will target the workflow with {run_id} + # if present. To use a different run ID, create a new handle via {Client.workflow_handle}. + # + # @param update [String] Update name. + # @param args [Array] Update arguments. + # @param id [String] ID of the update. + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. + # + # @return [Object, nil] Update result. + # + # @raise [Error::WorkflowUpdateFailedError] If the update failed. + # @raise [Error::WorkflowUpdateRPCTimeoutOrCanceledError] This update call timed out or was canceled. This doesn't + # mean the update itself was timed out or cancelled. + # @raise [Error::RPCError] RPC error from call. + # + # @note Handles created as a result of {Client.start_workflow} will send updates the latest workflow with the same + # workflow ID even if it is unrelated to the started workflow. + # @note WARNING: This API is experimental. + def execute_update( + update, + *args, + id: SecureRandom.uuid, rpc_metadata: nil, rpc_timeout: nil ) - fetch_history_events_for_run( - run_id, - page_size:, - wait_new_event:, - event_filter_type:, - skip_archival:, + start_update( + update, + *args, + wait_for_stage: WorkflowUpdateWaitStage::COMPLETED, + id:, rpc_metadata:, rpc_timeout: - ) + ).result end - private + # Get a handle for an update. The handle can be used to wait on the update result. + # + # @param id [String] ID of the update. + # @param specific_run_id [String, nil] Workflow run ID to get update handle for. 
Default is the {run_id}. Most + # users will not need to set this and instead use the one on the class. + # + # @return [WorkflowUpdateHandle] The update handle. + # + # @note WARNING: This API is experimental. + def update_handle( + id, + specific_run_id: run_id + ) + WorkflowUpdateHandle.new( + client: @client, + id:, + workflow_id: self.id, + workflow_run_id: specific_run_id, + known_outcome: nil + ) + end - def fetch_history_events_for_run( - run_id, - page_size:, - wait_new_event:, - event_filter_type:, - skip_archival:, - rpc_metadata:, - rpc_timeout: + # Cancel the workflow. This will issue a cancellation for {run_id} if present. This call will make sure to use the + # run chain starting from {first_execution_run_id} if present. To create handles with these values, use + # {Client.workflow_handle}. + # + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. + # + # @raise [Error::RPCError] RPC error from call. + # + # @note Handles created as a result of signal with start will cancel the latest workflow with the same workflow ID + # even if it is unrelated to the started workflow. + def cancel( + rpc_metadata: nil, + rpc_timeout: nil ) - Enumerator.new do |yielder| - input = Interceptor::FetchWorkflowHistoryEventPageInput.new( - id:, - run_id:, - page_size:, - next_page_token: nil, - wait_new_event:, - event_filter_type:, - skip_archival:, - rpc_metadata:, - rpc_timeout: - ) - loop do - page = @client._impl.fetch_workflow_history_event_page(input) - page.events.each { |event| yielder << event } - break unless page.next_page_token + @client._impl.cancel_workflow(Interceptor::CancelWorkflowInput.new( + workflow_id: id, + run_id:, + first_execution_run_id:, + rpc_metadata:, + rpc_timeout: + )) + end - input.next_page_token = page.next_page_token - end - end + # Terminate the workflow. This will issue a termination for {run_id} if present. 
This call will make sure to use + # the run chain starting from {first_execution_run_id} if present. To create handles with these values, use + # {Client.workflow_handle}. + # + # @param reason [String, nil] Reason for the termination. + # @param details [Array] Details to store on the termination. + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. + # + # @raise [Error::RPCError] RPC error from call. + # + # @note Handles created as a result of signal with start will terminate the latest workflow with the same workflow + # ID even if it is unrelated to the started workflow. + def terminate( + reason = nil, + details: [], + rpc_metadata: nil, + rpc_timeout: nil + ) + @client._impl.terminate_workflow(Interceptor::TerminateWorkflowInput.new( + workflow_id: id, + run_id:, + first_execution_run_id:, + reason:, + details:, + rpc_metadata:, + rpc_timeout: + )) end end end diff --git a/temporalio/lib/temporalio/client/workflow_query_reject_condition.rb b/temporalio/lib/temporalio/client/workflow_query_reject_condition.rb new file mode 100644 index 00000000..46dfe454 --- /dev/null +++ b/temporalio/lib/temporalio/client/workflow_query_reject_condition.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require 'temporalio/api' + +module Temporalio + class Client + # Whether a query should be rejected in certain conditions. 
+ module WorkflowQueryRejectCondition + NONE = Api::Enums::V1::QueryRejectCondition::QUERY_REJECT_CONDITION_NONE + NOT_OPEN = Api::Enums::V1::QueryRejectCondition::QUERY_REJECT_CONDITION_NOT_OPEN + NOT_COMPLETED_CLEANLY = Api::Enums::V1::QueryRejectCondition::QUERY_REJECT_CONDITION_NOT_COMPLETED_CLEANLY + end + end +end diff --git a/temporalio/lib/temporalio/client/workflow_update_handle.rb b/temporalio/lib/temporalio/client/workflow_update_handle.rb new file mode 100644 index 00000000..2a954a38 --- /dev/null +++ b/temporalio/lib/temporalio/client/workflow_update_handle.rb @@ -0,0 +1,67 @@ +# frozen_string_literal: true + +require 'temporalio/api' +require 'temporalio/client/interceptor' +require 'temporalio/error' + +module Temporalio + class Client + # Handle for a workflow update execution request. This is usually created via {WorkflowHandle.start_update} or + # {WorkflowHandle.update_handle}. + class WorkflowUpdateHandle + # @return [String] ID for the workflow update. + attr_reader :id + + # @return [String] ID for the workflow. + attr_reader :workflow_id + + # @return [String, nil] Run ID for the workflow. + attr_reader :workflow_run_id + + # @!visibility private + def initialize(client:, id:, workflow_id:, workflow_run_id:, known_outcome:) + @client = client + @id = id + @workflow_id = workflow_id + @workflow_run_id = workflow_run_id + @known_outcome = known_outcome + end + + # @return [Boolean] True if the result is already known and {result} will not make a blocking call, false if + # {result} will make a blocking call because the result is not yet known. + def result_obtained? + !@known_outcome.nil? + end + + # Wait for and return the result of the update. The result may already be known in which case no network call is + # made. Otherwise the result will be polled for until it is returned. + # + # @param rpc_metadata [Hash, nil] Headers to include on the RPC call. + # @param rpc_timeout [Float, nil] Number of seconds before timeout. 
+ # + # @return [Object, nil] Update result. + # + # @raise [Error::WorkflowUpdateFailedError] If the update failed. + # @raise [Error::WorkflowUpdateRPCTimeoutOrCanceledError] This update call timed out or was canceled. This doesn't + # mean the update itself was timed out or cancelled. + # @raise [Error::RPCError] RPC error from call. + def result(rpc_metadata: nil, rpc_timeout: nil) + @known_outcome ||= @client._impl.poll_workflow_update(Interceptor::PollWorkflowUpdateInput.new( + workflow_id:, + run_id: workflow_run_id, + update_id: id, + rpc_metadata:, + rpc_timeout: + )) + + if @known_outcome.failure + raise Error::WorkflowUpdateFailedError.new, cause: @client.data_converter.from_failure(@known_outcome.failure) + end + + results = @client.data_converter.from_payloads(@known_outcome.success) + warn("Expected 0 or 1 update result, got #{results.size}") if results.size > 1 + results.first + end + end + end +end diff --git a/temporalio/lib/temporalio/client/workflow_update_wait_stage.rb b/temporalio/lib/temporalio/client/workflow_update_wait_stage.rb new file mode 100644 index 00000000..843d12c9 --- /dev/null +++ b/temporalio/lib/temporalio/client/workflow_update_wait_stage.rb @@ -0,0 +1,17 @@ +# frozen_string_literal: true + +require 'temporalio/api' + +module Temporalio + class Client + # Stage to wait for workflow update to reach before returning from {WorkflowHandle.start_update}. 
+ module WorkflowUpdateWaitStage + ADMITTED = + Api::Enums::V1::UpdateWorkflowExecutionLifecycleStage::UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ADMITTED + ACCEPTED = + Api::Enums::V1::UpdateWorkflowExecutionLifecycleStage::UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ACCEPTED + COMPLETED = + Api::Enums::V1::UpdateWorkflowExecutionLifecycleStage::UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_COMPLETED + end + end +end diff --git a/temporalio/lib/temporalio/converters/data_converter.rb b/temporalio/lib/temporalio/converters/data_converter.rb index 6bec7c4b..322f3a81 100644 --- a/temporalio/lib/temporalio/converters/data_converter.rb +++ b/temporalio/lib/temporalio/converters/data_converter.rb @@ -45,9 +45,9 @@ def initialize( # @param value [Object] Ruby value. # @return [Api::Common::V1::Payload] Converted and encoded payload. def to_payload(value) - payload_converter.to_payload(value) - # TODO(cretz): - # payload = payload_codec.encode_payload(payload) if payload_codec + payload = payload_converter.to_payload(value) + payload = payload_codec.encode([payload]).first if payload_codec + payload end # Convert multiple Ruby values to a payload set and encode it. @@ -55,10 +55,9 @@ def to_payload(value) # @param values [Object] Ruby values, converted to array via {::Array}. # @return [Api::Common::V1::Payloads] Converted and encoded payload set. def to_payloads(values) - payloads = Array(values).map { |value| payload_converter.to_payload(value) } - # TODO(cretz): - # payloads = payload_codec.encode_payloads(payloads) if payload_codec - Api::Common::V1::Payloads.new(payloads:) + payloads = payload_converter.to_payloads(values) + payloads.payloads.replace(payload_codec.encode(payloads.payloads)) if payload_codec && !payloads.payloads.empty? + payloads end # Decode and convert a payload to a Ruby value. @@ -66,19 +65,21 @@ def to_payloads(values) # @param payload [Api::Common::V1::Payload] Encoded payload. # @return [Object] Decoded and converted Ruby value. 
def from_payload(payload) - # TODO(cretz): - # payload = payload_codec.decode_payload(payload) if payload_codec + payload = payload_codec.decode([payload]).first if payload_codec payload_converter.from_payload(payload) end # Decode and convert a payload set to Ruby values. # - # @param payloads [Api::Common::V1::Payloads] Encoded payload set. + # @param payloads [Api::Common::V1::Payloads, nil] Encoded payload set. # @return [Array] Decoded and converted Ruby values. def from_payloads(payloads) - # TODO(cretz): - # payloads = payload_codec.decode_payloads(payloads) if payload_codec - payloads.payloads.map { |payload| payload_converter.from_payload(payload) } + return [] unless payloads && !payloads.payloads.empty? + + if payload_codec && !payloads.payloads.empty? + payloads = Api::Common::V1::Payloads.new(payloads: payload_codec.decode(payloads.payloads)) + end + payload_converter.from_payloads(payloads) end # Convert a Ruby error to a Temporal failure and encode it. @@ -86,9 +87,7 @@ def from_payloads(payloads) # @param error [Exception] Ruby error. # @return [Api::Failure::V1::Failure] Converted and encoded failure. def to_failure(error) - failure_converter.to_failure(error, payload_converter) - # TODO(cretz): - # failure = payload_codec.encode_failure(failure) if payload_codec + failure_converter.to_failure(error, self) end # Decode and convert a Temporal failure to a Ruby error. @@ -96,9 +95,7 @@ def to_failure(error) # @param failure [Api::Failure::V1::Failure] Encoded failure. # @return [Exception] Decoded and converted Ruby error. 
def from_failure(failure) - # TODO(cretz): - # failure = payload_codec.decode_failure(failure) if payload_codec - failure_converter.from_failure(failure, payload_converter) + failure_converter.from_failure(failure, self) end end end diff --git a/temporalio/lib/temporalio/converters/failure_converter.rb b/temporalio/lib/temporalio/converters/failure_converter.rb index 2aa4e6a5..c65ced58 100644 --- a/temporalio/lib/temporalio/converters/failure_converter.rb +++ b/temporalio/lib/temporalio/converters/failure_converter.rb @@ -1,5 +1,9 @@ # frozen_string_literal: true +require 'temporalio/api' +require 'temporalio/error' +require 'temporalio/internal/proto_utils' + module Temporalio module Converters # Base class for converting Ruby errors to/from Temporal failures. @@ -9,6 +13,10 @@ def self.default @default ||= Ractor.make_shareable(FailureConverter.new) end + # @return [Boolean] If +true+, the message and stack trace of the failure will be moved into the encoded attribute + # section of the failure which can be encoded with a codec. + attr_reader :encode_common_attributes + # Create failure converter. # # @param encode_common_attributes [Boolean] If +true+, the message and stack trace of the failure will be moved @@ -20,21 +28,172 @@ def initialize(encode_common_attributes: false) # Convert a Ruby error to a Temporal failure. # # @param error [Exception] Ruby error. - # @param payload_converter [PayloadConverter] Payload converter. + # @param converter [DataConverter, PayloadConverter] Converter for payloads. # @return [Api::Failure::V1::Failure] Converted failure. 
- def to_failure(error, payload_converter) - # TODO - raise NotImplementedError + def to_failure(error, converter) + failure = Api::Failure::V1::Failure.new( + message: error.message, + stack_trace: error.backtrace&.join("\n") + ) + cause = error.cause + failure.cause = to_failure(cause, converter) if cause + + # Convert specific error type details + case error + when Error::ApplicationError + failure.application_failure_info = Api::Failure::V1::ApplicationFailureInfo.new( + type: error.type, + non_retryable: error.non_retryable, + details: converter.to_payloads(error.details), + next_retry_delay: Internal::ProtoUtils.seconds_to_duration(error.next_retry_delay) + ) + when Error::TimeoutError + failure.timeout_failure_info = Api::Failure::V1::TimeoutFailureInfo.new( + timeout_type: error.type, + last_heartbeat_details: converter.to_payloads(error.last_heartbeat_details) + ) + when Error::CanceledError + failure.canceled_failure_info = Api::Failure::V1::CanceledFailureInfo.new( + details: converter.to_payloads(error.details) + ) + when Error::TerminatedError + failure.terminated_failure_info = Api::Failure::V1::TerminatedFailureInfo.new + when Error::ServerError + failure.server_failure_info = Api::Failure::V1::ServerFailureInfo.new( + non_retryable: error.non_retryable + ) + when Error::ActivityError + failure.activity_failure_info = Api::Failure::V1::ActivityFailureInfo.new( + scheduled_event_id: error.scheduled_event_id, + started_event_id: error.started_event_id, + identity: error.identity, + activity_type: Api::Common::V1::ActivityType.new(name: error.activity_type), + activity_id: error.activity_id, + retry_state: error.retry_state + ) + when Error::ChildWorkflowError + failure.child_workflow_execution_failure_info = Api::Failure::V1::ChildWorkflowExecutionFailureInfo.new( + namespace: error.namespace, + workflow_execution: Api::Common::V1::WorkflowExecution.new( + workflow_id: error.workflow_id, + run_id: error.run_id + ), + workflow_type: 
Api::Common::V1::WorkflowType.new(name: error.workflow_type), + initiated_event_id: error.initiated_event_id, + started_event_id: error.started_event_id, + retry_state: error.retry_state + ) + else + failure.application_failure_info = Api::Failure::V1::ApplicationFailureInfo.new( + type: error.class.name.split('::').last + ) + end + + # If encoding common attributes, move message and stack trace + if @encode_common_attributes + failure.encoded_attributes = converter.to_payload( + { message: failure.message, stack_trace: failure.stack_trace } + ) + failure.message = 'Encoded failure' + failure.stack_trace = '' + end + + failure end # Convert a Temporal failure to a Ruby error. # # @param failure [Api::Failure::V1::Failure] Failure. - # @param payload_converter [PayloadConverter] Payload converter. - # @return [Exception] Converted Ruby error. - def from_failure(failure, payload_converter) - # TODO - raise NotImplementedError + # @param converter [DataConverter, PayloadConverter] Converter for payloads. + # @return [Error::Failure] Converted Ruby error. + def from_failure(failure, converter) + # If encoded attributes have any of the fields we expect, try to decode + # but ignore any error + unless failure.encoded_attributes.nil? 
+ begin + attrs = converter.from_payload(failure.encoded_attributes) + if attrs.is_a?(Hash) + # Shallow dup failure here to avoid affecting caller + failure = failure.dup + failure.message = attrs['message'] if attrs.key?('message') + failure.stack_trace = attrs['stack_trace'] if attrs.key?('stack_trace') + end + rescue StandardError + # Ignore failures + end + end + + # Convert + error = if failure.application_failure_info + Error::ApplicationError.new( + Internal::ProtoUtils.string_or(failure.message, 'Application error'), + *converter.from_payloads(failure.application_failure_info.details), + type: Internal::ProtoUtils.string_or(failure.application_failure_info.type), + non_retryable: failure.application_failure_info.non_retryable, + next_retry_delay: failure.application_failure_info.next_retry_delay&.to_f + ) + elsif failure.timeout_failure_info + Error::TimeoutError.new( + Internal::ProtoUtils.string_or(failure.message, 'Timeout'), + type: Internal::ProtoUtils.enum_to_int(Api::Enums::V1::TimeoutType, + failure.timeout_failure_info.timeout_type), + last_heartbeat_details: converter.from_payloads( + failure.timeout_failure_info.last_heartbeat_details + ) + ) + elsif failure.canceled_failure_info + Error::CanceledError.new( + Internal::ProtoUtils.string_or(failure.message, 'Canceled'), + details: converter.from_payloads(failure.canceled_failure_info.details) + ) + elsif failure.terminated_failure_info + Error::TerminatedError.new( + Internal::ProtoUtils.string_or(failure.message, 'Terminated'), + details: [] + ) + elsif failure.server_failure_info + Error::ServerError.new( + Internal::ProtoUtils.string_or(failure.message, 'Server error'), + non_retryable: failure.server_failure_info.non_retryable + ) + elsif failure.activity_failure_info + Error::ActivityError.new( + Internal::ProtoUtils.string_or(failure.message, 'Activity error'), + scheduled_event_id: failure.activity_failure_info.scheduled_event_id, + started_event_id: 
failure.activity_failure_info.started_event_id, + identity: failure.activity_failure_info.identity, + activity_type: failure.activity_failure_info.activity_type.name, + activity_id: failure.activity_failure_info.activity_id, + retry_state: Internal::ProtoUtils.enum_to_int( + Api::Enums::V1::RetryState, + failure.activity_failure_info.retry_state, + zero_means_nil: true + ) + ) + elsif failure.child_workflow_execution_failure_info + Error::ChildWorkflowError.new( + Internal::ProtoUtils.string_or(failure.message, 'Child workflow error'), + namespace: failure.child_workflow_execution_failure_info.namespace, + workflow_id: failure.child_workflow_execution_failure_info.workflow_execution.workflow_id, + run_id: failure.child_workflow_execution_failure_info.workflow_execution.run_id, + workflow_type: failure.child_workflow_execution_failure_info.workflow_type.name, + initiated_event_id: failure.child_workflow_execution_failure_info.initiated_event_id, + started_event_id: failure.child_workflow_execution_failure_info.started_event_id, + retry_state: Internal::ProtoUtils.enum_to_int( + Api::Enums::V1::RetryState, + failure.child_workflow_execution_failure_info.retry_state, + zero_means_nil: true + ) + ) + else + Error::Failure.new(Internal::ProtoUtils.string_or(failure.message, 'Failure error')) + end + + Error._with_backtrace_and_cause( + error, + backtrace: failure.stack_trace.split("\n"), + cause: failure.cause ? from_failure(failure.cause, converter) : nil + ) end end end diff --git a/temporalio/lib/temporalio/converters/payload_codec.rb b/temporalio/lib/temporalio/converters/payload_codec.rb index 9787ba55..d970be59 100644 --- a/temporalio/lib/temporalio/converters/payload_codec.rb +++ b/temporalio/lib/temporalio/converters/payload_codec.rb @@ -2,8 +2,25 @@ module Temporalio module Converters - class PayloadCodec # rubocop:disable Lint/EmptyClass - # TODO + # Base class for encoding and decoding payloads. Commonly used for encryption. 
+ class PayloadCodec + # Encode the given payloads into a new set of payloads. + # + # @param payloads [Enumerable] Payloads to encode. This value should not be mutated. + # @return [Array] Encoded payloads. Note, this does not have to be the same number as + # payloads given, but it must be at least one and cannot be more than was given. + def encode(payloads) + raise NotImplementedError + end + + # Decode the given payloads into a new set of payloads. + # + # @param payloads [Enumerable] Payloads to decode. This value should not be mutated. + # @return [Array] Decoded payloads. Note, this does not have to be the same number as + # payloads given, but it must be at least one and cannot be more than was given. + def decode(payloads) + raise NotImplementedError + end end end end diff --git a/temporalio/lib/temporalio/converters/payload_converter.rb b/temporalio/lib/temporalio/converters/payload_converter.rb index 97b100b6..5a2ca2aa 100644 --- a/temporalio/lib/temporalio/converters/payload_converter.rb +++ b/temporalio/lib/temporalio/converters/payload_converter.rb @@ -11,7 +11,7 @@ module Temporalio module Converters # Base class for converting Ruby values to/from Temporal payloads. class PayloadConverter - # @return [PayloadConverter] Default payload converter. + # @return [PayloadConverter::Composite] Default payload converter. def self.default @default ||= new_with_defaults end @@ -20,7 +20,7 @@ def self.default # # @param json_parse_options [Hash] Options for {::JSON.parse}. # @param json_generate_options [Hash] Options for {::JSON.generate}. - # @return [PayloadConverter] Created payload converter. + # @return [PayloadConverter::Composite] Created payload converter. def self.new_with_defaults(json_parse_options: { create_additions: true }, json_generate_options: {}) Ractor.make_shareable( PayloadConverter::Composite.new( @@ -41,6 +41,16 @@ def to_payload(value) raise NotImplementedError end + # Convert multiple Ruby values to a payload set. 
+ # + # @param values [Object] Ruby values, converted to array via {::Array}. + # @return [Api::Common::V1::Payloads] Converted payload set. + def to_payloads(values) + Api::Common::V1::Payloads.new( + payloads: Array(values).map { |value| to_payload(value) } + ) + end + # Convert a payload to a Ruby value. # # @param payload [Api::Common::V1::Payload] Payload. @@ -48,6 +58,16 @@ def to_payload(value) def from_payload(payload) raise NotImplementedError end + + # Convert a payload set to Ruby values. + # + # @param payloads [Api::Common::V1::Payloads, nil] Payload set. + # @return [Array] Converted Ruby values. + def from_payloads(payloads) + return [] unless payloads + + payloads.payloads.map { |payload| from_payload(payload) } + end end end end diff --git a/temporalio/lib/temporalio/converters/payload_converter/binary_null.rb b/temporalio/lib/temporalio/converters/payload_converter/binary_null.rb index 7cda6422..304bddb4 100644 --- a/temporalio/lib/temporalio/converters/payload_converter/binary_null.rb +++ b/temporalio/lib/temporalio/converters/payload_converter/binary_null.rb @@ -25,7 +25,7 @@ def to_payload(value) end # (see Encoding.from_payload) - def from_payload(_payload) + def from_payload(payload) # rubocop:disable Lint/UnusedMethodArgument nil end end diff --git a/temporalio/lib/temporalio/converters/payload_converter/json_plain.rb b/temporalio/lib/temporalio/converters/payload_converter/json_plain.rb index 22831b62..14ed58a1 100644 --- a/temporalio/lib/temporalio/converters/payload_converter/json_plain.rb +++ b/temporalio/lib/temporalio/converters/payload_converter/json_plain.rb @@ -1,5 +1,6 @@ # frozen_string_literal: true +require 'json' require 'temporalio/api' require 'temporalio/converters/payload_converter/encoding' diff --git a/temporalio/lib/temporalio/error.rb b/temporalio/lib/temporalio/error.rb index 02307969..9209e90d 100644 --- a/temporalio/lib/temporalio/error.rb +++ b/temporalio/lib/temporalio/error.rb @@ -5,16 +5,38 @@ module Temporalio # 
Superclass for all Temporal errors class Error < StandardError + # Whether the error represents some form of cancellation from an activity or workflow. + # + # @param error [Exception] Error to check. + # @return [Boolean] True if some form of canceled, false otherwise. + def self.canceled?(error) + error.is_a?(CanceledError) || + (error.is_a?(ActivityError) && error.cause.is_a?(CanceledError)) || + (error.is_a?(ChildWorkflowError) && error.cause.is_a?(CanceledError)) + end + + # @!visibility private + def self._with_backtrace_and_cause(err, backtrace:, cause:) + if cause + # The only way to set a _real_ cause in Ruby is to use `raise`. Even if + # you try to override `def cause`, it won't be outputted in situations + # where Ruby outputs cause. + begin + raise(err, err.message, backtrace, cause:) + rescue StandardError => e + e + end + else + err.set_backtrace(backtrace) + err + end + end + # Error that is returned from when a workflow is unsuccessful. class WorkflowFailureError < Error - # @return [Exception] Cause of the failure. - attr_reader :cause - - # @param cause [Exception] Cause of the failure. - def initialize(cause:) + # @!visibility private + def initialize super('Workflow failed') - - @cause = cause end end @@ -23,29 +45,45 @@ class WorkflowContinuedAsNewError < Error # @return [String] New execution run ID the workflow continued to. attr_reader :new_run_id - # @param new_run_id [String] New execution run ID the workflow continued to. + # @!visibility private def initialize(new_run_id:) super('Workflow execution continued as new') @new_run_id = new_run_id end end - # Error raised by a client or workflow when a workflow execution has already started. - class WorkflowAlreadyStartedError < Error - # @return [String] ID of the already-started workflow. - attr_reader :workflow_id + # Error that occurs when a query fails. + class WorkflowQueryFailedError < Error + end - # @return [String] Workflow type name of the already-started workflow. 
- attr_reader :workflow_type + # Error that occurs when a query was rejected. + class WorkflowQueryRejectedError < Error + # @return [Client::WorkflowExecutionStatus] Workflow execution status causing rejection. + attr_reader :status - # @return [String] Run ID of the already-started workflow if this was raised by the client. - attr_reader :run_id + # @!visibility private + def initialize(status:) + super("Query rejected, #{status}") + @status = status + end + end - def initialize(workflow_id:, workflow_type:, run_id:) - super('Workflow execution already started') - @workflow_id = workflow_id - @workflow_type = workflow_type - @run_id = run_id + # Error that occurs when an update fails. + class WorkflowUpdateFailedError < Error + # @!visibility private + def initialize + super('Workflow update failed') + end + end + + # Error that occurs when update RPC call times out or is cancelled. + # + # @note This is not related to any general concept of timing out or cancelling a running update, this is only + # related to the client call itself. + class WorkflowUpdateRPCTimeoutOrCanceledError < Error + # @!visibility private + def initialize + super('Timeout or cancellation waiting for update') end end @@ -77,7 +115,7 @@ def create_grpc_status # Status code for RPC errors. These are gRPC status codes. module Code OK = 0 - CANCELLED = 1 + CANCELED = 1 # Intentionally one-L while gRPC is two-L UNKNOWN = 2 INVALID_ARGUMENT = 3 DEADLINE_EXCEEDED = 4 diff --git a/temporalio/lib/temporalio/error/failure.rb b/temporalio/lib/temporalio/error/failure.rb index adac59d3..1cf35f31 100644 --- a/temporalio/lib/temporalio/error/failure.rb +++ b/temporalio/lib/temporalio/error/failure.rb @@ -1,29 +1,218 @@ # frozen_string_literal: true +require 'temporalio/api' require 'temporalio/error' module Temporalio class Error # Base class for all Temporal serializable failures. class Failure < Error - # @return [Api::Failure::V1::Failure, nil] Raw gRPC failure if this was converted from one. 
- attr_reader :raw + end + + # Error raised by a client or workflow when a workflow execution has already started. + class WorkflowAlreadyStartedError < Failure + # @return [String] ID of the already-started workflow. + attr_reader :workflow_id + + # @return [String] Workflow type name of the already-started workflow. + attr_reader :workflow_type + + # @return [String] Run ID of the already-started workflow if this was raised by the client. + attr_reader :run_id + + # @!visibility private + def initialize(workflow_id:, workflow_type:, run_id:) + super('Workflow execution already started') + @workflow_id = workflow_id + @workflow_type = workflow_type + @run_id = run_id + end + end + + # Error raised during workflow/activity execution. + class ApplicationError < Failure + # @return [Array] User-defined details on the error. + attr_reader :details + + # @return [String, nil] General error type. + attr_reader :type + + # @return [Boolean] Whether the error was set as non-retryable when created. + # + # @note This is not whether the error is non-retryable via other means such as retry policy. This is just + # whether the error was marked non-retryable upon creation by the user. + attr_reader :non_retryable + + # @return [Float, nil] Delay in seconds before the next activity retry attempt. + attr_reader :next_retry_delay - # Create failure. + # Create an application error. # - # @param message [String] Message string. - # @param cause [Exception, nil] Cause of this exception. - # @param raw [Api::Failure::V1::Failure, nil] Raw gRPC value if any. - def initialize(message, cause: nil, raw: nil) + # @param message [String] Error message. + # @param details [Array] Error details. + # @param type [String, nil] Error type. + # @param non_retryable [Boolean] Whether this error should be considered non-retryable. + # @param next_retry_delay [Float, nil] Specific amount of time to delay before next retry. 
+ def initialize(message, *details, type: nil, non_retryable: false, next_retry_delay: nil) + super(message) + @details = details + @type = type + @non_retryable = non_retryable + @next_retry_delay = next_retry_delay + end + + # @return [Boolean] Inverse of {non_retryable}. + def retryable? + !@non_retryable + end + end + + # Error raised on workflow/activity cancellation. + class CanceledError < Failure + attr_reader :details + + # @!visibility private + def initialize(message, details:) + super(message) + @details = details + end + end + + # Error raised on workflow termination. + class TerminatedError < Failure + # @return [Array] User-defined details on the error. + attr_reader :details + + # @!visibility private + def initialize(message, details:) super(message) + @details = details + end + end + + # Error raised on workflow/activity timeout. + class TimeoutError < Failure + # @return [TimeoutType] Type of timeout error. + attr_reader :type + + # @return [Array] Last heartbeat details if this is for an activity heartbeat. + attr_reader :last_heartbeat_details + + # @!visibility private + def initialize(message, type:, last_heartbeat_details:) + super(message) + @type = type + @last_heartbeat_details = last_heartbeat_details + end + + # Type of timeout error. + module TimeoutType + START_TO_CLOSE = Api::Enums::V1::TimeoutType::TIMEOUT_TYPE_START_TO_CLOSE + SCHEDULE_TO_START = Api::Enums::V1::TimeoutType::TIMEOUT_TYPE_SCHEDULE_TO_START + SCHEDULE_TO_CLOSE = Api::Enums::V1::TimeoutType::TIMEOUT_TYPE_SCHEDULE_TO_CLOSE + HEARTBEAT = Api::Enums::V1::TimeoutType::TIMEOUT_TYPE_HEARTBEAT + end + end + + # Error originating in the Temporal server. + class ServerError < Failure + # @return [Boolean] Whether this error is non-retryable. + attr_reader :non_retryable + + # @!visibility private + def initialize(message, non_retryable:) + super(message) + @non_retryable = non_retryable + end + + # @return [Boolean] Inverse of {non_retryable}. + def retryable? 
+ !@non_retryable + end + end + + # Current retry state of the workflow/activity during error. + module RetryState + IN_PROGRESS = Api::Enums::V1::RetryState::RETRY_STATE_IN_PROGRESS + NON_RETRYABLE_FAILURE = Api::Enums::V1::RetryState::RETRY_STATE_NON_RETRYABLE_FAILURE + TIMEOUT = Api::Enums::V1::RetryState::RETRY_STATE_TIMEOUT + MAXIMUM_ATTEMPTS_REACHED = Api::Enums::V1::RetryState::RETRY_STATE_MAXIMUM_ATTEMPTS_REACHED + RETRY_POLICY_NOT_SET = Api::Enums::V1::RetryState::RETRY_STATE_RETRY_POLICY_NOT_SET + INTERNAL_SERVER_ERROR = Api::Enums::V1::RetryState::RETRY_STATE_INTERNAL_SERVER_ERROR + CANCEL_REQUESTED = Api::Enums::V1::RetryState::RETRY_STATE_CANCEL_REQUESTED + end + + # Error raised on activity failure. + class ActivityError < Failure + # @return [Integer] Scheduled event ID for this activity. + attr_reader :scheduled_event_id + # @return [Integer] Started event ID for this activity. + attr_reader :started_event_id + # @return [String] Client/worker identity. + attr_reader :identity + # @return [String] Activity type name. + attr_reader :activity_type + # @return [String] Activity ID. + attr_reader :activity_id + # @return [RetryState, nil] Retry state. + attr_reader :retry_state - @cause = cause - @raw = raw + # @!visibility private + def initialize( + message, + scheduled_event_id:, + started_event_id:, + identity:, + activity_type:, + activity_id:, + retry_state: + ) + super(message) + @scheduled_event_id = scheduled_event_id + @started_event_id = started_event_id + @identity = identity + @activity_type = activity_type + @activity_id = activity_id + @retry_state = retry_state end + end + + # Error raised on child workflow failure. + class ChildWorkflowError < Failure + # @return [String] Child workflow namespace. + attr_reader :namespace + # @return [String] Child workflow ID. + attr_reader :workflow_id + # @return [String] Child workflow run ID. + attr_reader :run_id + # @return [String] Child workflow type name. 
+ attr_reader :workflow_type + # @return [Integer] Child workflow initiated event ID. + attr_reader :initiated_event_id + # @return [Integer] Child workflow started event ID. + attr_reader :started_event_id + # @return [RetryState, nil] Retry state. + attr_reader :retry_state - # @return [Exception, nil] Cause of the failure. - def cause - @cause || super + # @!visibility private + def initialize( + message, + namespace:, + workflow_id:, + run_id:, + workflow_type:, + initiated_event_id:, + started_event_id:, + retry_state: + ) + super(message) + @namespace = namespace + @workflow_id = workflow_id + @run_id = run_id + @workflow_type = workflow_type + @initiated_event_id = initiated_event_id + @started_event_id = started_event_id + @retry_state = retry_state end end end diff --git a/temporalio/lib/temporalio/internal/proto_utils.rb b/temporalio/lib/temporalio/internal/proto_utils.rb index 8b1046a3..1e84eff4 100644 --- a/temporalio/lib/temporalio/internal/proto_utils.rb +++ b/temporalio/lib/temporalio/internal/proto_utils.rb @@ -16,10 +16,32 @@ def self.seconds_to_duration(seconds_float) end # @!visibility private - def self.memo_to_proto(hash, data_converter) + def self.memo_to_proto(hash, converter) return nil if hash.nil? - Api::Common::V1::Memo.new(fields: hash.transform_values { |val| data_converter.to_payload(val) }) + Api::Common::V1::Memo.new(fields: hash.transform_values { |val| converter.to_payload(val) }) + end + + # @!visibility private + def self.memo_from_proto(memo, converter) + return nil if memo.nil? + + memo.fields.each_with_object({}) { |(key, val), h| h[key] = converter.from_payload(val) } # rubocop:disable Style/HashTransformValues + end + + # @!visibility private + def self.string_or(str, default = nil) + str && !str.empty? ? 
str : default + end + + # @!visibility private + def self.enum_to_int(enum_mod, enum_val, zero_means_nil: false) + # Per https://protobuf.dev/reference/ruby/ruby-generated/#enum when + # enums are read back, they are symbols if they are known or number + # otherwise + enum_val = enum_mod.resolve(enum_val) || raise('Unexpected missing symbol') if enum_val.is_a?(Symbol) + enum_val = nil if zero_means_nil && enum_val.zero? + enum_val end end end diff --git a/temporalio/lib/temporalio/retry_policy.rb b/temporalio/lib/temporalio/retry_policy.rb index 8df98ca4..383a91ef 100644 --- a/temporalio/lib/temporalio/retry_policy.rb +++ b/temporalio/lib/temporalio/retry_policy.rb @@ -32,18 +32,19 @@ def initialize(*, **kwargs) # @!visibility private def to_proto - raise 'Initial interval cannot be negative' if @initial_interval.negative? - raise 'Backoff coefficient cannot be less than 1' if @backoff_coefficient < 1 - raise 'Max interval cannot be negative' if @max_interval&.negative? - raise 'Max interval cannot be less than initial interval' if @max_interval && @max_interval < @initial_interval - raise 'Max attempts cannot be negative' if @max_attempts.negative? + # @type self: RetryPolicy + raise 'Initial interval cannot be negative' if initial_interval.negative? + raise 'Backoff coefficient cannot be less than 1' if backoff_coefficient < 1 + raise 'Max interval cannot be negative' if max_interval&.negative? + raise 'Max interval cannot be less than initial interval' if max_interval && max_interval < initial_interval + raise 'Max attempts cannot be negative' if max_attempts.negative? 
Api::Common::V1::RetryPolicy.new( - initial_interval: Internal::ProtoUtils.seconds_to_duration(@initial_interval), - backoff_coefficient: @backoff_coefficient, - maximum_interval: Internal::ProtoUtils.seconds_to_duration(@max_interval), - maximum_attempts: @max_attempts, - non_retryable_error_types: @non_retryable_error_types + initial_interval: Internal::ProtoUtils.seconds_to_duration(initial_interval), + backoff_coefficient:, + maximum_interval: Internal::ProtoUtils.seconds_to_duration(max_interval), + maximum_attempts: max_attempts, + non_retryable_error_types: ) end end diff --git a/temporalio/lib/temporalio/search_attributes.rb b/temporalio/lib/temporalio/search_attributes.rb index 1872ed9a..db7c5b43 100644 --- a/temporalio/lib/temporalio/search_attributes.rb +++ b/temporalio/lib/temporalio/search_attributes.rb @@ -108,17 +108,24 @@ def initialize(key, value) # @!visibility private def self.from_proto(proto) + return nil unless proto raise ArgumentError, 'Expected proto search attribute' unless proto.is_a?(Api::Common::V1::SearchAttributes) SearchAttributes.new(proto.indexed_fields.map do |key_name, payload| # rubocop:disable Style/MapToHash key = Key.new(key_name, IndexedValueType::PROTO_VALUES[payload.metadata['type']]) - value = Converters::PayloadConverter.default.from_payload(payload) - # Time needs to be converted - value = Time.iso8601(value) if key.type == IndexedValueType::TIME && value.is_a?(String) + value = value_from_payload(payload) [key, value] end.to_h) end + # @!visibility private + def self.value_from_payload(payload) + value = Converters::PayloadConverter.default.from_payload(payload) + # Time needs to be converted + value = Time.iso8601(value) if payload.metadata['type'] == 'DateTime' && value.is_a?(String) + value + end + # Create a search attribute collection. # # @param existing [SearchAttributes, Hash, nil] Existing collection. 
This can be another @@ -201,6 +208,18 @@ def dup SearchAttributes.new(self) end + # @return [Boolean] Whether the set of attributes is empty. + def empty? + length.zero? + end + + # @return [Integer] Number of attributes. + def length + @raw_hash.length + end + + alias size length + # Return a new search attributes collection with updates applied. # # @param updates [Update] Updates created via {Key#value_set} or {Key#value_unset}. diff --git a/temporalio/lib/temporalio/testing/workflow_environment.rb b/temporalio/lib/temporalio/testing/workflow_environment.rb index f30bbff9..fa7d81b4 100644 --- a/temporalio/lib/temporalio/testing/workflow_environment.rb +++ b/temporalio/lib/temporalio/testing/workflow_environment.rb @@ -63,7 +63,7 @@ def self.start_local( ) server_options = Internal::Bridge::Testing::EphemeralServer::StartDevServerOptions.new( existing_path: dev_server_existing_path, - sdk_name: 'sdk-python', + sdk_name: 'sdk-ruby', sdk_version: VERSION, download_version: dev_server_download_version, download_dest_dir: dev_server_download_dest_dir, diff --git a/temporalio/lib/temporalio/workflow_history.rb b/temporalio/lib/temporalio/workflow_history.rb new file mode 100644 index 00000000..c60dc20c --- /dev/null +++ b/temporalio/lib/temporalio/workflow_history.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +module Temporalio + # Representation of a workflow's history. + class WorkflowHistory + # History events for the workflow. 
+ attr_reader :events + + # @!visibility private + def initialize(events) + @events = events + end + end +end diff --git a/temporalio/rbs_collection.lock.yaml b/temporalio/rbs_collection.lock.yaml index 274e8d62..99b3a2d0 100644 --- a/temporalio/rbs_collection.lock.yaml +++ b/temporalio/rbs_collection.lock.yaml @@ -1,6 +1,22 @@ --- path: ".gem_rbs_collection" gems: +- name: activemodel + version: '7.0' + source: + type: git + name: ruby/gem_rbs_collection + revision: 2275d263564ec90f749ea9ff8fce8160b9ed58e9 + remote: https://github.com/ruby/gem_rbs_collection.git + repo_dir: gems +- name: activerecord + version: '7.1' + source: + type: git + name: ruby/gem_rbs_collection + revision: 2275d263564ec90f749ea9ff8fce8160b9ed58e9 + remote: https://github.com/ruby/gem_rbs_collection.git + repo_dir: gems - name: activesupport version: '7.0' source: @@ -133,6 +149,10 @@ gems: version: '0' source: type: stdlib +- name: timeout + version: '0' + source: + type: stdlib - name: tsort version: '0' source: diff --git a/temporalio/sig/temporalio/client.rbs b/temporalio/sig/temporalio/client.rbs index c623b3a8..5e2c07ce 100644 --- a/temporalio/sig/temporalio/client.rbs +++ b/temporalio/sig/temporalio/client.rbs @@ -5,14 +5,14 @@ module Temporalio attr_accessor namespace: String attr_accessor data_converter: Converters::DataConverter attr_accessor interceptors: Array[Interceptor] - attr_accessor default_workflow_query_reject_condition: Integer + attr_accessor default_workflow_query_reject_condition: WorkflowQueryRejectCondition::enum? def initialize: ( connection: Connection, namespace: String, data_converter: Converters::DataConverter, interceptors: Array[Interceptor], - default_workflow_query_reject_condition: Integer + default_workflow_query_reject_condition: WorkflowQueryRejectCondition::enum? 
) -> void end @@ -23,7 +23,7 @@ module Temporalio ?tls: bool | Connection::TLSOptions, ?data_converter: Converters::DataConverter, ?interceptors: Array[Interceptor], - ?default_workflow_query_reject_condition: Integer, + ?default_workflow_query_reject_condition: WorkflowQueryRejectCondition::enum?, ?rpc_metadata: Hash[String, String], ?rpc_retry: Connection::RPCRetryOptions, ?identity: String, @@ -40,22 +40,18 @@ module Temporalio namespace: String, ?data_converter: Converters::DataConverter, ?interceptors: Array[Interceptor], - ?default_workflow_query_reject_condition: Integer + ?default_workflow_query_reject_condition: WorkflowQueryRejectCondition::enum? ) -> void def connection: -> Connection def namespace: -> String def data_converter: -> Converters::DataConverter - - # TODO(cretz): Update when generated - # def workflow_service: -> Connection::WorkflowService - # def operator_service: -> Connection::OperatorService - def workflow_service: -> untyped - def operator_service: -> untyped + def workflow_service: -> Connection::WorkflowService + def operator_service: -> Connection::OperatorService def start_workflow: ( String workflow, - *Object args, + *Object? args, id: String, task_queue: String, ?execution_timeout: Float?, @@ -75,7 +71,7 @@ module Temporalio def execute_workflow: ( String workflow, - *Object args, + *Object? args, id: String, task_queue: String, ?execution_timeout: Float?, @@ -98,5 +94,21 @@ module Temporalio ?run_id: String?, ?first_execution_run_id: String? ) -> WorkflowHandle + + def list_workflows: ( + ?String query, + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? + ) -> Enumerator[WorkflowExecution, WorkflowExecution] + + def count_workflows: ( + ?String query, + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? 
+ ) -> WorkflowExecutionCount + + def async_activity_handle: ( + String | ActivityIDReference task_token_or_id_reference + ) -> AsyncActivityHandle end end \ No newline at end of file diff --git a/temporalio/sig/temporalio/client/activity_id_reference.rbs b/temporalio/sig/temporalio/client/activity_id_reference.rbs new file mode 100644 index 00000000..1c55861e --- /dev/null +++ b/temporalio/sig/temporalio/client/activity_id_reference.rbs @@ -0,0 +1,11 @@ +module Temporalio + class Client + class ActivityIDReference + attr_reader workflow_id: String + attr_reader run_id: String? + attr_reader activity_id: String + + def initialize: (workflow_id: String, run_id: String?, activity_id: String) -> void + end + end +end \ No newline at end of file diff --git a/temporalio/sig/temporalio/client/async_activity_handle.rbs b/temporalio/sig/temporalio/client/async_activity_handle.rbs new file mode 100644 index 00000000..db6881ac --- /dev/null +++ b/temporalio/sig/temporalio/client/async_activity_handle.rbs @@ -0,0 +1,39 @@ +module Temporalio + class Client + class AsyncActivityHandle + attr_reader task_token: String? + attr_reader id_reference: ActivityIDReference? + + def initialize: ( + client: Client, + task_token: String?, + id_reference: ActivityIDReference? + ) -> void + + def heartbeat: ( + *Object details, + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? + ) -> void + + def complete: ( + ?Object? result, + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? + ) -> void + + def fail: ( + Exception error, + ?last_heartbeat_details: Array[Object], + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? + ) -> void + + def report_cancellation: ( + *Object details, + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? 
+ ) -> void + end + end +end \ No newline at end of file diff --git a/temporalio/sig/temporalio/client/interceptor.rbs b/temporalio/sig/temporalio/client/interceptor.rbs index 2ff5a35f..22050ed7 100644 --- a/temporalio/sig/temporalio/client/interceptor.rbs +++ b/temporalio/sig/temporalio/client/interceptor.rbs @@ -5,8 +5,8 @@ module Temporalio class StartWorkflowInput attr_accessor workflow: String - attr_accessor args: Array[Object] - attr_accessor id: String + attr_accessor args: Array[Object?] + attr_accessor workflow_id: String attr_accessor task_queue: String attr_accessor execution_timeout: Float? attr_accessor run_timeout: Float? @@ -25,8 +25,8 @@ module Temporalio def initialize: ( workflow: String, - args: Array[Object], - id: String, + args: Array[Object?], + workflow_id: String, task_queue: String, execution_timeout: Float?, run_timeout: Float?, @@ -45,11 +45,47 @@ module Temporalio ) -> void end - class FetchWorkflowHistoryEventPageInput - attr_accessor id: String + class ListWorkflowsInput + attr_accessor query: String? + attr_accessor rpc_metadata: Hash[String, String]? + attr_accessor rpc_timeout: Float? + + def initialize: ( + query: String?, + rpc_metadata: Hash[String, String]?, + rpc_timeout: Float? + ) -> void + end + + class CountWorkflowsInput + attr_accessor query: String? + attr_accessor rpc_metadata: Hash[String, String]? + attr_accessor rpc_timeout: Float? + + def initialize: ( + query: String?, + rpc_metadata: Hash[String, String]?, + rpc_timeout: Float? + ) -> void + end + + class DescribeWorkflowInput + attr_accessor workflow_id: String + attr_accessor run_id: String? + attr_accessor rpc_metadata: Hash[String, String]? + attr_accessor rpc_timeout: Float? + + def initialize: ( + workflow_id: String, + run_id: String?, + rpc_metadata: Hash[String, String]?, + rpc_timeout: Float? + ) -> void + end + + class FetchWorkflowHistoryEventsInput + attr_accessor workflow_id: String attr_accessor run_id: String? - attr_accessor page_size: Integer? 
- attr_accessor next_page_token: String? attr_accessor wait_new_event: bool attr_accessor event_filter_type: Integer attr_accessor skip_archival: bool @@ -57,10 +93,8 @@ module Temporalio attr_accessor rpc_timeout: Float? def initialize: ( - id: String, + workflow_id: String, run_id: String?, - page_size: Integer?, - next_page_token: String?, wait_new_event: bool, event_filter_type: Integer, skip_archival: bool, @@ -69,13 +103,121 @@ module Temporalio ) -> void end - class FetchWorkflowHistoryEventPage - attr_accessor events: Enumerable[untyped] - attr_accessor next_page_token: String? + class SignalWorkflowInput + attr_accessor workflow_id: String + attr_accessor run_id: String? + attr_accessor signal: String + attr_accessor args: Array[Object?] + attr_accessor headers: Hash[String, String] + attr_accessor rpc_metadata: Hash[String, String]? + attr_accessor rpc_timeout: Float? + + def initialize: ( + workflow_id: String, + run_id: String?, + signal: String, + args: Array[Object?], + headers: Hash[String, String], + rpc_metadata: Hash[String, String]?, + rpc_timeout: Float? + ) -> void + end + + class QueryWorkflowInput + attr_accessor workflow_id: String + attr_accessor run_id: String? + attr_accessor query: String + attr_accessor args: Array[Object?] + attr_accessor reject_condition: WorkflowQueryRejectCondition::enum? + attr_accessor headers: Hash[String, String] + attr_accessor rpc_metadata: Hash[String, String]? + attr_accessor rpc_timeout: Float? def initialize: ( - events: Enumerable[untyped]?, - next_page_token: String? + workflow_id: String, + run_id: String?, + query: String, + args: Array[Object?], + reject_condition: WorkflowQueryRejectCondition::enum?, + headers: Hash[String, String], + rpc_metadata: Hash[String, String]?, + rpc_timeout: Float? + ) -> void + end + + class StartWorkflowUpdateInput + attr_accessor workflow_id: String + attr_accessor run_id: String? 
+ attr_accessor update_id: String + attr_accessor update: String + attr_accessor args: Array[Object?] + attr_accessor wait_for_stage: WorkflowUpdateWaitStage::enum + attr_accessor headers: Hash[String, String] + attr_accessor rpc_metadata: Hash[String, String]? + attr_accessor rpc_timeout: Float? + + def initialize: ( + workflow_id: String, + run_id: String?, + update_id: String, + update: String, + args: Array[Object?], + wait_for_stage: WorkflowUpdateWaitStage::enum, + headers: Hash[String, String], + rpc_metadata: Hash[String, String]?, + rpc_timeout: Float? + ) -> void + end + + class PollWorkflowUpdateInput + attr_accessor workflow_id: String + attr_accessor run_id: String? + attr_accessor update_id: String + attr_accessor rpc_metadata: Hash[String, String]? + attr_accessor rpc_timeout: Float? + + def initialize: ( + workflow_id: String, + run_id: String?, + update_id: String, + rpc_metadata: Hash[String, String]?, + rpc_timeout: Float? + ) -> void + end + + class CancelWorkflowInput + attr_accessor workflow_id: String + attr_accessor run_id: String? + attr_accessor first_execution_run_id: String? + attr_accessor rpc_metadata: Hash[String, String]? + attr_accessor rpc_timeout: Float? + + def initialize: ( + workflow_id: String, + run_id: String?, + first_execution_run_id: String?, + rpc_metadata: Hash[String, String]?, + rpc_timeout: Float? + ) -> void + end + + class TerminateWorkflowInput + attr_accessor workflow_id: String + attr_accessor run_id: String? + attr_accessor first_execution_run_id: String? + attr_accessor reason: String? + attr_accessor details: Array[Object] + attr_accessor rpc_metadata: Hash[String, String]? + attr_accessor rpc_timeout: Float? + + def initialize: ( + workflow_id: String, + run_id: String?, + first_execution_run_id: String?, + reason: String?, + details: Array[Object?], + rpc_metadata: Hash[String, String]?, + rpc_timeout: Float? 
) -> void end @@ -86,9 +228,25 @@ module Temporalio def start_workflow: (StartWorkflowInput input) -> WorkflowHandle - def fetch_workflow_history_event_page: ( - FetchWorkflowHistoryEventPageInput input - ) -> FetchWorkflowHistoryEventPage + def list_workflows: (ListWorkflowsInput input) -> Enumerator[WorkflowExecution, WorkflowExecution] + + def count_workflows: (CountWorkflowsInput input) -> WorkflowExecutionCount + + def describe_workflow: (DescribeWorkflowInput input) -> WorkflowExecution::Description + + def fetch_workflow_history_events: (FetchWorkflowHistoryEventsInput input) -> Enumerator[untyped, untyped] + + def signal_workflow: (SignalWorkflowInput input) -> void + + def query_workflow: (QueryWorkflowInput input) -> Object? + + def start_workflow_update: (StartWorkflowUpdateInput input) -> WorkflowUpdateHandle + + def poll_workflow_update: (PollWorkflowUpdateInput input) -> untyped + + def cancel_workflow: (CancelWorkflowInput input) -> void + + def terminate_workflow: (TerminateWorkflowInput input) -> void end end end diff --git a/temporalio/sig/temporalio/client/workflow_execution.rbs b/temporalio/sig/temporalio/client/workflow_execution.rbs new file mode 100644 index 00000000..b1669424 --- /dev/null +++ b/temporalio/sig/temporalio/client/workflow_execution.rbs @@ -0,0 +1,29 @@ +module Temporalio + class Client + class WorkflowExecution + attr_reader raw_info: untyped + + def initialize: (untyped raw_info, Converters::DataConverter data_converter) -> void + + def close_time: -> Time? + def execution_time: -> Time? + def history_length: -> Integer + def id: -> String + def memo: -> Hash[String, Object] + def parent_id: -> String? + def parent_run_id: -> String? + def run_id: -> String + def search_attributes: -> SearchAttributes? 
+ def start_time: -> Time + def status: -> WorkflowExecutionStatus::enum + def task_queue: -> String + def workflow_type: -> String + + class Description < WorkflowExecution + attr_reader raw_description: untyped + + def initialize: (untyped raw_description, Converters::DataConverter data_converter) -> void + end + end + end +end \ No newline at end of file diff --git a/temporalio/sig/temporalio/client/workflow_execution_count.rbs b/temporalio/sig/temporalio/client/workflow_execution_count.rbs new file mode 100644 index 00000000..74ddf429 --- /dev/null +++ b/temporalio/sig/temporalio/client/workflow_execution_count.rbs @@ -0,0 +1,17 @@ +module Temporalio + class Client + class WorkflowExecutionCount + attr_reader count: Integer + attr_reader groups: Array[AggregationGroup] + + def initialize: (Integer count, Array[AggregationGroup] groups) -> void + + class AggregationGroup + attr_reader count: Integer + attr_reader group_values: Array[Object] + + def initialize: (Integer count, Array[Object] group_values) -> void + end + end + end +end \ No newline at end of file diff --git a/temporalio/sig/temporalio/client/workflow_execution_status.rbs b/temporalio/sig/temporalio/client/workflow_execution_status.rbs new file mode 100644 index 00000000..c490c940 --- /dev/null +++ b/temporalio/sig/temporalio/client/workflow_execution_status.rbs @@ -0,0 +1,16 @@ + +module Temporalio + class Client + module WorkflowExecutionStatus + type enum = Integer + + RUNNING: enum + COMPLETED: enum + FAILED: enum + CANCELED: enum + TERMINATED: enum + CONTINUED_AS_NEW: enum + TIMED_OUT: enum + end + end +end \ No newline at end of file diff --git a/temporalio/sig/temporalio/client/workflow_handle.rbs b/temporalio/sig/temporalio/client/workflow_handle.rbs index 1a4591f0..76277ab9 100644 --- a/temporalio/sig/temporalio/client/workflow_handle.rbs +++ b/temporalio/sig/temporalio/client/workflow_handle.rbs @@ -7,11 +7,11 @@ module Temporalio attr_reader first_execution_run_id: String? 
def initialize: ( - Client client, - String id, - ?run_id: String?, - ?result_run_id: String?, - ?first_execution_run_id: String? + client: Client, + id: String, + run_id: String?, + result_run_id: String?, + first_execution_run_id: String? ) -> void def result: ( @@ -20,24 +20,74 @@ module Temporalio ?rpc_timeout: Float? ) -> Object + def describe: ( + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? + ) -> WorkflowExecution::Description + + def fetch_history: ( + ?event_filter_type: Integer, + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? + ) -> WorkflowHistory + def fetch_history_events: ( - ?page_size: Integer?, ?wait_new_event: bool, ?event_filter_type: Integer, ?skip_archival: bool, + ?specific_run_id: String?, ?rpc_metadata: Hash[String, String]?, ?rpc_timeout: Float? - ) -> Enumerable[untyped] - - private def fetch_history_events_for_run: ( - String? run_id, - page_size: Integer?, - wait_new_event: bool, - event_filter_type: Integer, - skip_archival: bool, - rpc_metadata: Hash[String, String]?, - rpc_timeout: Float? ) -> Enumerator[untyped, untyped] + + def signal: ( + String signal, + *Object? args, + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? + ) -> void + + def query: ( + String query, + *Object? args, + ?reject_condition: WorkflowQueryRejectCondition::enum?, + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? + ) -> Object? + + def start_update: ( + String update, + *Object? args, + wait_for_stage: WorkflowUpdateWaitStage::enum, + ?id: String, + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? + ) -> WorkflowUpdateHandle + + def execute_update: ( + String update, + *Object? args, + ?id: String, + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? + ) -> Object? + + def update_handle: ( + String id, + ?specific_run_id: String? + ) -> WorkflowUpdateHandle + + def cancel: ( + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? + ) -> void + + def terminate: ( + ?String? 
reason, + ?details: Array[Object?], + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? + ) -> void end end end \ No newline at end of file diff --git a/temporalio/sig/temporalio/client/workflow_query_reject_condition.rbs b/temporalio/sig/temporalio/client/workflow_query_reject_condition.rbs new file mode 100644 index 00000000..1205fd1b --- /dev/null +++ b/temporalio/sig/temporalio/client/workflow_query_reject_condition.rbs @@ -0,0 +1,12 @@ + +module Temporalio + class Client + module WorkflowQueryRejectCondition + type enum = Integer + + NONE: enum + NOT_OPEN: enum + NOT_COMPLETED_CLEANLY: enum + end + end +end \ No newline at end of file diff --git a/temporalio/sig/temporalio/client/workflow_update_handle.rbs b/temporalio/sig/temporalio/client/workflow_update_handle.rbs new file mode 100644 index 00000000..c5e2af9b --- /dev/null +++ b/temporalio/sig/temporalio/client/workflow_update_handle.rbs @@ -0,0 +1,24 @@ +module Temporalio + class Client + class WorkflowUpdateHandle + attr_reader id: String + attr_reader workflow_id: String + attr_reader workflow_run_id: String? + + def initialize: ( + client: Client, + id: String, + workflow_id: String, + workflow_run_id: String?, + known_outcome: untyped? + ) -> void + + def result_obtained?: -> bool + + def result: ( + ?rpc_metadata: Hash[String, String]?, + ?rpc_timeout: Float? + ) -> Object? 
+ end + end +end \ No newline at end of file diff --git a/temporalio/sig/temporalio/client/workflow_update_wait_stage.rbs b/temporalio/sig/temporalio/client/workflow_update_wait_stage.rbs new file mode 100644 index 00000000..52ded533 --- /dev/null +++ b/temporalio/sig/temporalio/client/workflow_update_wait_stage.rbs @@ -0,0 +1,12 @@ + +module Temporalio + class Client + module WorkflowUpdateWaitStage + type enum = Integer + + ADMITTED: enum + ACCEPTED: enum + COMPLETED: enum + end + end +end \ No newline at end of file diff --git a/temporalio/sig/temporalio/converters/failure_converter.rbs b/temporalio/sig/temporalio/converters/failure_converter.rbs index 4e85e3d9..24bca016 100644 --- a/temporalio/sig/temporalio/converters/failure_converter.rbs +++ b/temporalio/sig/temporalio/converters/failure_converter.rbs @@ -5,8 +5,8 @@ module Temporalio def initialize: (?encode_common_attributes: bool) -> void - def to_failure: (Exception error, PayloadConverter payload_converter) -> untyped - def from_failure: (untyped failure, PayloadConverter payload_converter) -> Exception + def to_failure: (Exception error, DataConverter | PayloadConverter converter) -> untyped + def from_failure: (untyped failure, DataConverter | PayloadConverter converter) -> Exception end end end \ No newline at end of file diff --git a/temporalio/sig/temporalio/converters/payload_codec.rbs b/temporalio/sig/temporalio/converters/payload_codec.rbs index 3d174b45..3208c151 100644 --- a/temporalio/sig/temporalio/converters/payload_codec.rbs +++ b/temporalio/sig/temporalio/converters/payload_codec.rbs @@ -1,7 +1,8 @@ module Temporalio module Converters class PayloadCodec - # TODO + def encode: (Enumerable[untyped] payloads) -> Array[untyped] + def decode: (Enumerable[untyped] payloads) -> Array[untyped] end end end \ No newline at end of file diff --git a/temporalio/sig/temporalio/converters/payload_converter.rbs b/temporalio/sig/temporalio/converters/payload_converter.rbs index 1c191ce5..f0f6d9a9 100644 
--- a/temporalio/sig/temporalio/converters/payload_converter.rbs +++ b/temporalio/sig/temporalio/converters/payload_converter.rbs @@ -9,7 +9,10 @@ module Temporalio ) -> PayloadConverter def to_payload: (Object? value) -> untyped + def to_payloads: (Array[Object?] values) -> untyped + def from_payload: (untyped payload) -> Object? + def from_payloads: (untyped payloads) -> Array[Object?] end end end \ No newline at end of file diff --git a/temporalio/sig/temporalio/error.rbs b/temporalio/sig/temporalio/error.rbs index 70b350ce..cb0effa6 100644 --- a/temporalio/sig/temporalio/error.rbs +++ b/temporalio/sig/temporalio/error.rbs @@ -1,9 +1,15 @@ module Temporalio class Error < StandardError - class WorkflowFailureError < Error - attr_reader cause: Exception + def self.canceled?: (Exception error) -> bool + + def self._with_backtrace_and_cause: ( + Exception err, + backtrace: Array[String], + cause: Exception? + ) -> Exception - def initialize: (cause: Exception) -> void + class WorkflowFailureError < Error + def initialize: -> void end class WorkflowContinuedAsNewError < Error @@ -12,16 +18,21 @@ module Temporalio def initialize: (new_run_id: String) -> void end - class WorkflowAlreadyStartedError < Error - attr_reader workflow_id: String - attr_reader workflow_type: String - attr_reader run_id: String + class WorkflowQueryFailedError < Error + end - def initialize: ( - workflow_id: String, - workflow_type: String, - run_id: String - ) -> void + class WorkflowQueryRejectedError < Error + attr_reader status: Client::WorkflowExecutionStatus::enum + + def initialize: (status: Client::WorkflowExecutionStatus::enum) -> void + end + + class WorkflowUpdateFailedError < Error + def initialize: -> void + end + + class WorkflowUpdateRPCTimeoutOrCanceledError < Error + def initialize: -> void end class RPCError < Error diff --git a/temporalio/sig/temporalio/error/failure.rbs b/temporalio/sig/temporalio/error/failure.rbs new file mode 100644 index 00000000..035530aa --- /dev/null 
+++ b/temporalio/sig/temporalio/error/failure.rbs @@ -0,0 +1,119 @@ +module Temporalio + class Error + class Failure < Error + end + + class WorkflowAlreadyStartedError < Failure + attr_reader workflow_id: String + attr_reader workflow_type: String + attr_reader run_id: String + + def initialize: (workflow_id: String, workflow_type: String, run_id: String) -> void + end + + class ApplicationError < Failure + attr_reader details: Array[Object?] + attr_reader type: String? + attr_reader non_retryable: bool + attr_reader next_retry_delay: Float? + + def initialize: ( + String message, + *Object? details, + ?type: String?, + ?non_retryable: bool, + ?next_retry_delay: Float? + ) -> void + + def retryable?: -> bool + end + + class CanceledError < Failure + attr_reader details: Array[Object?] + + def initialize: (String message, details: Array[Object?]) -> void + end + + class TerminatedError < Failure + attr_reader details: Array[Object?] + + def initialize: (String message, details: Array[Object?]) -> void + end + + class TimeoutError < Failure + attr_reader type: TimeoutType::enum + attr_reader last_heartbeat_details: Array[Object?] 
+ + def initialize: (String message, type: TimeoutType::enum, last_heartbeat_details: Array[Object?]) -> void + + module TimeoutType + type enum = Integer + + START_TO_CLOSE: enum + SCHEDULE_TO_START: enum + SCHEDULE_TO_CLOSE: enum + HEARTBEAT: enum + end + end + + class ServerError < Failure + attr_reader non_retryable: bool + + def initialize: (String message, non_retryable: bool) -> void + + def retryable?: -> bool + end + + module RetryState + type enum = Integer + + IN_PROGRESS: enum + NON_RETRYABLE_FAILURE: enum + TIMEOUT: enum + MAXIMUM_ATTEMPTS_REACHED: enum + RETRY_POLICY_NOT_SET: enum + INTERNAL_SERVER_ERROR: enum + CANCEL_REQUESTED: enum + end + + class ActivityError < Failure + attr_reader scheduled_event_id: Integer + attr_reader started_event_id: Integer + attr_reader identity: String + attr_reader activity_type: String + attr_reader activity_id: String + attr_reader retry_state: RetryState::enum? + + def initialize: ( + String message, + scheduled_event_id: Integer, + started_event_id: Integer, + identity: String, + activity_type: String, + activity_id: String, + retry_state: RetryState::enum? + ) -> void + end + + class ChildWorkflowError < Failure + attr_reader namespace: String + attr_reader workflow_id: String + attr_reader run_id: String + attr_reader workflow_type: String + attr_reader initiated_event_id: Integer + attr_reader started_event_id: Integer + attr_reader retry_state: RetryState::enum? + + def initialize: ( + String message, + namespace: String, + workflow_id: String, + run_id: String, + workflow_type: String, + initiated_event_id: Integer, + started_event_id: Integer, + retry_state: RetryState::enum? 
+ ) -> void + end + end +end \ No newline at end of file diff --git a/temporalio/sig/temporalio/internal/proto_utils.rbs b/temporalio/sig/temporalio/internal/proto_utils.rbs index 55ed0adf..a66e6aae 100644 --- a/temporalio/sig/temporalio/internal/proto_utils.rbs +++ b/temporalio/sig/temporalio/internal/proto_utils.rbs @@ -4,9 +4,19 @@ module Temporalio def self.seconds_to_duration: (Float? seconds_float) -> untyped? def self.memo_to_proto: ( - Hash[String, untyped?]? hash, - Converters::DataConverter data_converter + Hash[String, Object?]? hash, + Converters::DataConverter | Converters::PayloadConverter converter ) -> untyped? + + def self.memo_from_proto: ( + untyped? memo, + Converters::DataConverter | Converters::PayloadConverter converter + ) -> Hash[String, Object?]? + + def self.string_or: (String? str, ?nil default) -> String? + | (String? str, String default) -> String + + def self.enum_to_int: (untyped enum_mod, untyped enum_val, ?zero_means_nil: bool) -> Integer end end end \ No newline at end of file diff --git a/temporalio/sig/temporalio/search_attributes.rbs b/temporalio/sig/temporalio/search_attributes.rbs index 8905d0d9..5e409de0 100644 --- a/temporalio/sig/temporalio/search_attributes.rbs +++ b/temporalio/sig/temporalio/search_attributes.rbs @@ -19,7 +19,9 @@ module Temporalio def initialize: (Key key, Object? value) -> void end - def self.from_proto: (untyped proto) -> SearchAttributes + def self.from_proto: (untyped proto) -> SearchAttributes? + + def self.value_from_payload: (untyped payload) -> Object? 
def initialize: (SearchAttributes existing) -> void | (Hash[Key, Object] existing) -> void @@ -37,6 +39,12 @@ module Temporalio def dup: -> SearchAttributes + def empty?: -> bool + + def length: -> Integer + + alias size length + def update: (*Update updates) -> SearchAttributes def update!: (*Update updates) -> void diff --git a/temporalio/sig/temporalio/workflow_history.rbs b/temporalio/sig/temporalio/workflow_history.rbs new file mode 100644 index 00000000..43605ed4 --- /dev/null +++ b/temporalio/sig/temporalio/workflow_history.rbs @@ -0,0 +1,7 @@ +module Temporalio + class WorkflowHistory + attr_reader events: Array[untyped] + + def initialize: (Array[untyped] events) -> void + end +end \ No newline at end of file diff --git a/temporalio/temporalio.gemspec b/temporalio/temporalio.gemspec index 56cc74fa..db09160c 100644 --- a/temporalio/temporalio.gemspec +++ b/temporalio/temporalio.gemspec @@ -28,7 +28,10 @@ Gem::Specification.new do |spec| spec.add_dependency 'google-protobuf', '>= 3.27.0' + spec.add_development_dependency 'activemodel' + spec.add_development_dependency 'activerecord' spec.add_development_dependency 'async' + spec.add_development_dependency 'base64' spec.add_development_dependency 'grpc', '>= 1.65.0.pre2' spec.add_development_dependency 'grpc-tools' spec.add_development_dependency 'minitest' @@ -37,6 +40,7 @@ Gem::Specification.new do |spec| spec.add_development_dependency 'rbs', '~> 3.5.3' spec.add_development_dependency 'rb_sys', '~> 0.9.63' spec.add_development_dependency 'rubocop' + spec.add_development_dependency 'sqlite3', '~> 1.4' spec.add_development_dependency 'steep', '~> 1.7.1' spec.add_development_dependency 'yard' end diff --git a/temporalio/test/client_test.rb b/temporalio/test/client_test.rb index 5f4b4813..4d3f7585 100644 --- a/temporalio/test/client_test.rb +++ b/temporalio/test/client_test.rb @@ -10,34 +10,6 @@ def test_version_number assert !Temporalio::VERSION.nil? 
end - def start_simple_workflows - # Create ephemeral test server - env.with_kitchen_sink_worker do |task_queue| - # Start 5 workflows - handles = 5.times.map do |i| - env.client.start_workflow( - 'kitchen_sink', - { actions: [{ result: { value: "result-#{i}" } }] }, - id: "wf-#{SecureRandom.uuid}", - task_queue: - ) - end - # Check all results - results = handles.map(&:result) - assert_equal %w[result-0 result-1 result-2 result-3 result-4], results - end - end - - def test_start_simple_workflows_threaded - start_simple_workflows - end - - def test_start_simple_workflows_async - Sync do - start_simple_workflows - end - end - def test_lazy_connection assert env.client.connection.connected? client = Temporalio::Client.connect(env.client.connection.target_host, env.client.namespace, lazy_connect: true) @@ -54,139 +26,123 @@ def test_lazy_connection assert client.connection.connected? end - def test_workflow_exists - env.with_kitchen_sink_worker do |task_queue| - # Create a workflow that hangs - handle = env.client.start_workflow( - 'kitchen_sink', - { actions: [{ action_signal: 'complete' }] }, - id: "wf-#{SecureRandom.uuid}", - task_queue: - ) + class TrackCallsInterceptor + include Temporalio::Client::Interceptor + + attr_accessor :calls + + def initialize + @calls = [] + end + + def intercept_client(next_interceptor) + Outbound.new(self, next_interceptor) + end - # Confirm next one fails as already started - err = assert_raises(Temporalio::Error::WorkflowAlreadyStartedError) do - env.client.start_workflow( - 'kitchen_sink', - { actions: [{ action_signal: 'complete' }] }, - id: handle.id, - task_queue: - ) + class Outbound < Temporalio::Client::Interceptor::Outbound + def initialize(root, next_interceptor) + super(next_interceptor) + @root = root end - assert_equal handle.id, err.workflow_id - assert_equal 'kitchen_sink', err.workflow_type - assert_equal handle.result_run_id, err.run_id - # But that we can start another with an ID conflict policy that terminates - # 
it - new_handle = env.client.start_workflow( - 'kitchen_sink', - { actions: [{ result: { value: 'done' } }] }, - id: handle.id, - task_queue:, - id_conflict_policy: Temporalio::WorkflowIDConflictPolicy::TERMINATE_EXISTING - ) - assert_equal handle.id, new_handle.id - refute_equal handle.result_run_id, new_handle.result_run_id - - # Now confirm complete and another fails w/ on duplicate failed only - assert_equal 'done', new_handle.result - assert_raises(Temporalio::Error::WorkflowAlreadyStartedError) do - env.client.start_workflow( - 'kitchen_sink', - { actions: [{ result: { value: 'done' } }] }, - id: handle.id, - task_queue:, - id_reuse_policy: Temporalio::WorkflowIDReusePolicy::ALLOW_DUPLICATE_FAILED_ONLY - ) + def start_workflow(input) + @root.calls.push(['start_workflow', input]) + super + end + + def list_workflows(input) + @root.calls.push(['list_workflows', input]) + super + end + + def count_workflows(input) + @root.calls.push(['count_workflows', input]) + super + end + + def describe_workflow(input) + @root.calls.push(['describe_workflow', input]) + super + end + + def fetch_workflow_history_events(input) + @root.calls.push(['fetch_workflow_history_events', input]) + super + end + + def signal_workflow(input) + @root.calls.push(['signal_workflow', input]) + super + end + + def query_workflow(input) + @root.calls.push(['query_workflow', input]) + super + end + + def start_workflow_update(input) + @root.calls.push(['start_workflow_update', input]) + super + end + + def poll_workflow_update(input) + @root.calls.push(['poll_workflow_update', input]) + super + end + + def cancel_workflow(input) + @root.calls.push(['cancel_workflow', input]) + super + end + + def terminate_workflow(input) + @root.calls.push(['terminate_workflow', input]) + super end end end - def test_search_attributes_and_memo - # Make sure all keys on server - key_text = Temporalio::SearchAttributes::Key.new('ruby-key-text', - Temporalio::SearchAttributes::IndexedValueType::TEXT) - 
key_keyword = Temporalio::SearchAttributes::Key.new('ruby-key-keyword', - Temporalio::SearchAttributes::IndexedValueType::KEYWORD) - key_integer = Temporalio::SearchAttributes::Key.new('ruby-key-integer', - Temporalio::SearchAttributes::IndexedValueType::INTEGER) - key_float = Temporalio::SearchAttributes::Key.new('ruby-key-float', - Temporalio::SearchAttributes::IndexedValueType::FLOAT) - key_boolean = Temporalio::SearchAttributes::Key.new('ruby-key-boolean', - Temporalio::SearchAttributes::IndexedValueType::BOOLEAN) - key_time = Temporalio::SearchAttributes::Key.new('ruby-key-time', - Temporalio::SearchAttributes::IndexedValueType::TIME) - key_keyword_list = Temporalio::SearchAttributes::Key.new( - 'ruby-key-keyword-list', - Temporalio::SearchAttributes::IndexedValueType::KEYWORD_LIST - ) - env.ensure_search_attribute_keys(key_text, key_keyword, key_integer, key_float, key_boolean, key_time, - key_keyword_list) + def test_interceptor + # Create client with interceptor + track = TrackCallsInterceptor.new + new_options = env.client.options.dup + new_options.interceptors = [track] + client = Temporalio::Client.new(**new_options.to_h) # steep:ignore + # Run a bunch of calls env.with_kitchen_sink_worker do |task_queue| - # Remove precision because server doesn't store sub-second - now = Time.at(Time.now.to_i) - # Start a workflow with the different keys - handle = env.client.start_workflow( + handle = client.start_workflow( 'kitchen_sink', - { actions: [{ result: { value: 'done' } }] }, + { + actions: [ + { query_handler: { name: 'some-query' } }, + { update_handler: { name: 'some-update' } } + ], + action_signal: 'some-signal' + }, id: "wf-#{SecureRandom.uuid}", - task_queue:, - search_attributes: Temporalio::SearchAttributes.new( - { - key_text => 'some text', - key_keyword => 'some keyword', - key_integer => 123, - key_float => 45.67, - key_boolean => true, - key_time => now, - key_keyword_list => ['some keyword list 1', 'some keyword list 2'] - } - ), - memo: { 
'foo' => 'bar', 'baz' => %w[qux1 qux2] } + task_queue: ) + # Query, update, signal, result, describe, cancel, terminate + assert_equal 'query-done', handle.query('some-query', 'query-done') + assert_equal 'update-done', handle.execute_update('some-update', 'update-done') + handle.signal('some-signal', { result: { value: 'done' } }) + assert_equal 'done', handle.result + assert_equal Temporalio::Client::WorkflowExecutionStatus::COMPLETED, handle.describe.status + handle.cancel + assert_raises(Temporalio::Error::RPCError) { handle.terminate } - # Describe and check - # TODO(cretz): Switch to high-level describe when written - describe_resp = env.client.workflow_service.describe_workflow_execution( - Temporalio::Api::WorkflowService::V1::DescribeWorkflowExecutionRequest.new( - namespace: env.client.namespace, - execution: Temporalio::Api::Common::V1::WorkflowExecution.new(workflow_id: handle.id) - ) - ) - assert_equal 'bar', env.client.data_converter.from_payload( - describe_resp.workflow_execution_info.memo.fields['foo'] - ) - assert_equal %w[qux1 qux2], env.client.data_converter.from_payload( - describe_resp.workflow_execution_info.memo.fields['baz'] - ) - attrs = Temporalio::SearchAttributes.from_proto(describe_resp.workflow_execution_info.search_attributes) - assert_equal 'some text', attrs[key_text] - assert_equal 'some keyword', attrs[key_keyword] - assert_equal 123, attrs[key_integer] - assert_equal 45.67, attrs[key_float] - assert_equal true, attrs[key_boolean] - assert_equal now, attrs[key_time] - assert_equal ['some keyword list 1', 'some keyword list 2'], attrs[key_keyword_list] + # Confirm those calls present + assert_equal(%w[start_workflow query_workflow start_workflow_update signal_workflow + fetch_workflow_history_events describe_workflow cancel_workflow terminate_workflow], + track.calls.map(&:first)) + assert(track.calls.all? 
{ |v| v.last.workflow_id == handle.id }) + + # Clear it out and do non-id-specific calls + track.calls.clear + assert_empty client.list_workflows("WorkflowType = 'test-interceptor-does-not-exist'").to_a + assert_equal 0, client.count_workflows("WorkflowType = 'test-interceptor-does-not-exist'").count + assert_equal(%w[list_workflows count_workflows], track.calls.map(&:first)) end end - - # TODO(cretz): Tests to write: - # * Simple workflow with basic param and return type - # * Workflow start delay works (just put a start delay and check its value, don't have to run) - # * Workflow retry policy - # * Workflow client interceptors all called properly - # * Workflow search attributes and memo - # * Workflow list (specific ID) - # * Workflow counting - # * Workflow cloud test - # * Workflow recreate client with splatted options - # * Start workflow other options - # * CAN for get result following - # * Workflow failure w/ details - # * Get result not found - # * Cancelling RPC of get result - # * Query/Signal/Update obvious stuff - # * Basic describe support end diff --git a/temporalio/test/client_workflow_test.rb b/temporalio/test/client_workflow_test.rb new file mode 100644 index 00000000..6d8f14f3 --- /dev/null +++ b/temporalio/test/client_workflow_test.rb @@ -0,0 +1,472 @@ +# frozen_string_literal: true + +require 'async' +require 'temporalio/client' +require 'temporalio/testing' +require 'test' + +class ClientWorkflowTest < Test + def start_simple + # Create ephemeral test server + env.with_kitchen_sink_worker do |task_queue| + # Start 5 workflows + handles = 5.times.map do |i| + env.client.start_workflow( + 'kitchen_sink', + { actions: [{ result: { value: "result-#{i}" } }] }, + id: "wf-#{SecureRandom.uuid}", + task_queue: + ) + end + # Check all results + results = handles.map(&:result) + assert_equal %w[result-0 result-1 result-2 result-3 result-4], results + end + end + + def test_start_simple_threaded + start_simple + end + + def test_start_simple_async + 
Sync do + start_simple + end + end + + def test_workflow_exists + env.with_kitchen_sink_worker do |task_queue| + # Create a workflow that hangs + handle = env.client.start_workflow( + 'kitchen_sink', + { actions: [{ action_signal: 'complete' }] }, + id: "wf-#{SecureRandom.uuid}", + task_queue: + ) + + # Confirm next one fails as already started + err = assert_raises(Temporalio::Error::WorkflowAlreadyStartedError) do + env.client.start_workflow( + 'kitchen_sink', + { actions: [{ action_signal: 'complete' }] }, + id: handle.id, + task_queue: + ) + end + assert_equal handle.id, err.workflow_id + assert_equal 'kitchen_sink', err.workflow_type + assert_equal handle.result_run_id, err.run_id + + # But that we can start another with an ID conflict policy that terminates + # it + new_handle = env.client.start_workflow( + 'kitchen_sink', + { actions: [{ result: { value: 'done' } }] }, + id: handle.id, + task_queue:, + id_conflict_policy: Temporalio::WorkflowIDConflictPolicy::TERMINATE_EXISTING + ) + assert_equal handle.id, new_handle.id + refute_equal handle.result_run_id, new_handle.result_run_id + + # Now confirm complete and another fails w/ on duplicate failed only + assert_equal 'done', new_handle.result + assert_raises(Temporalio::Error::WorkflowAlreadyStartedError) do + env.client.start_workflow( + 'kitchen_sink', + { actions: [{ result: { value: 'done' } }] }, + id: handle.id, + task_queue:, + id_reuse_policy: Temporalio::WorkflowIDReusePolicy::ALLOW_DUPLICATE_FAILED_ONLY + ) + end + end + end + + def test_lazy_connect + client = Temporalio::Client.connect( + env.client.connection.target_host, + env.client.namespace, + lazy_connect: true + ) + # Not connected until we do something + refute client.connection.connected? + client.start_workflow( + 'does-not-exist', + id: "wf-#{SecureRandom.uuid}", + task_queue: "tq-#{SecureRandom.uuid}" + ) + assert client.connection.connected? 
+ end + + def test_describe + # Make sure all keys on server + env.ensure_common_search_attribute_keys + + env.with_kitchen_sink_worker do |task_queue| + # Remove precision because server doesn't store sub-second + now = Time.at(Time.now.to_i) + # Start a workflow with the different SA/memo + handle = env.client.start_workflow( + 'kitchen_sink', + { actions: [{ result: { value: 'done' } }] }, + id: "wf-#{SecureRandom.uuid}", + task_queue:, + search_attributes: Temporalio::SearchAttributes.new( + { + ATTR_KEY_TEXT => 'some text', + ATTR_KEY_KEYWORD => 'some keyword', + ATTR_KEY_INTEGER => 123, + ATTR_KEY_FLOAT => 45.67, + ATTR_KEY_BOOLEAN => true, + ATTR_KEY_TIME => now, + ATTR_KEY_KEYWORD_LIST => ['some keyword list 1', 'some keyword list 2'] + } + ), + memo: { 'foo' => 'bar', 'baz' => %w[qux1 qux2] } + ) + # Wait until done + handle.result + + # Describe and check + desc = handle.describe + assert_instance_of Time, desc.close_time + assert_instance_of Time, desc.execution_time + assert_instance_of Integer, desc.history_length + assert_equal handle.id, desc.id + assert_equal({ 'foo' => 'bar', 'baz' => %w[qux1 qux2] }, desc.memo) + assert_nil desc.parent_id + assert_nil desc.parent_run_id + assert_equal handle.result_run_id, desc.run_id + assert_instance_of Time, desc.start_time + assert_equal Temporalio::Client::WorkflowExecutionStatus::COMPLETED, desc.status + # @type var attrs: Temporalio::SearchAttributes + attrs = desc.search_attributes + assert_equal 'some text', attrs[ATTR_KEY_TEXT] + assert_equal 'some keyword', attrs[ATTR_KEY_KEYWORD] + assert_equal 123, attrs[ATTR_KEY_INTEGER] + assert_equal 45.67, attrs[ATTR_KEY_FLOAT] + assert_equal true, attrs[ATTR_KEY_BOOLEAN] + assert_equal now, attrs[ATTR_KEY_TIME] + assert_equal ['some keyword list 1', 'some keyword list 2'], attrs[ATTR_KEY_KEYWORD_LIST] + assert_equal task_queue, desc.task_queue + assert_equal 'kitchen_sink', desc.workflow_type + end + end + + def test_start_delay + env.with_kitchen_sink_worker do 
|task_queue| + handle = env.client.start_workflow( + 'kitchen_sink', + { actions: [{ result: { value: { donekey: 'doneval' } } }] }, + id: "wf-#{SecureRandom.uuid}", + task_queue:, + start_delay: 0.01 + ) + assert_equal({ 'donekey' => 'doneval' }, handle.result) + assert_equal 0.01, + handle + .fetch_history_events + .first + .workflow_execution_started_event_attributes + .first_workflow_task_backoff + .to_f + end + end + + def test_failure + env.with_kitchen_sink_worker do |task_queue| + # Simple error + err = assert_raises(Temporalio::Error::WorkflowFailureError) do + env.client.execute_workflow( + 'kitchen_sink', + { actions: [{ error: { message: 'some error', type: 'error-type', details: { foo: 'bar', baz: 123.45 } } }] }, + id: "wf-#{SecureRandom.uuid}", + task_queue: + ) + end + assert_instance_of Temporalio::Error::ApplicationError, err.cause + assert_equal 'some error', err.cause.message + assert_equal 'error-type', err.cause.type + refute err.cause.non_retryable + assert_equal [{ 'foo' => 'bar', 'baz' => 123.45 }], err.cause.details + + # Activity does not exist, for checking causes + err = assert_raises(Temporalio::Error::WorkflowFailureError) do + env.client.execute_workflow( + 'kitchen_sink', + { actions: [{ execute_activity: { name: 'does-not-exist' } }] }, + id: "wf-#{SecureRandom.uuid}", + task_queue: + ) + end + assert_instance_of Temporalio::Error::ActivityError, err.cause + assert_instance_of Temporalio::Error::ApplicationError, err.cause.cause + assert_includes err.cause.cause.message, 'does-not-exist' + end + end + + def test_retry_policy + env.with_kitchen_sink_worker do |task_queue| + err = assert_raises(Temporalio::Error::WorkflowFailureError) do + env.client.execute_workflow( + 'kitchen_sink', + { actions: [{ error: { attempt: true } }] }, + id: "wf-#{SecureRandom.uuid}", + task_queue:, + retry_policy: Temporalio::RetryPolicy.new( + initial_interval: 0.01, + max_attempts: 2 + ) + ) + end + assert_instance_of 
Temporalio::Error::ApplicationError, err.cause + assert_equal 'attempt 2', err.cause.message + end + end + + def test_list_and_count + # Make sure all keys on server + env.ensure_common_search_attribute_keys + + # Start 5 workflows, 3 that complete and 2 that don't, and with different + # SAs for odd ones vs even + env.with_kitchen_sink_worker do |task_queue| + handles = 5.times.map do |i| + env.client.start_workflow( + 'kitchen_sink', + if i <= 2 + { actions: [{ result: { value: 'done' } }] } + else + { action_signal: 'wait' } + end, + id: "wf-#{SecureRandom.uuid}-#{i}", + task_queue:, + search_attributes: Temporalio::SearchAttributes.new( + { + ATTR_KEY_TEXT => 'test-list', + ATTR_KEY_KEYWORD => i.even? ? 'even' : 'odd' + } + ) + ) + end + + # Make sure all 5 come back in list + assert_eventually do + wfs = env.client.list_workflows("`#{ATTR_KEY_TEXT.name}` = 'test-list'").to_a + assert_equal 5, wfs.size + # Check each item is present too + assert_equal handles.map(&:id).sort, wfs.map(&:id).sort + # Check the first has search attr + assert_equal 'test-list', wfs.first&.search_attributes&.[](ATTR_KEY_TEXT) + end + + # Query for just the odd ones and make sure it's two + assert_eventually do + wfs = env.client.list_workflows("`#{ATTR_KEY_TEXT.name}` = 'test-list' AND " \ + "`#{ATTR_KEY_KEYWORD.name}` = 'odd'").to_a + assert_equal 2, wfs.size + end + + # Normal count + assert_eventually do + count = env.client.count_workflows("`#{ATTR_KEY_TEXT.name}` = 'test-list'") + assert_equal 5, count.count + assert_empty count.groups + end + + # Count with group by making sure eventually first 3 are complete + assert_eventually do + count = env.client.count_workflows("`#{ATTR_KEY_TEXT.name}` = 'test-list' GROUP BY ExecutionStatus") + assert_equal 5, count.count + groups = count.groups.sort_by(&:count) + # 2 running, 3 completed + assert_equal 2, groups[0].count + assert_equal ['Running'], groups[0].group_values + assert_equal 3, groups[1].count + assert_equal ['Completed'], 
groups[1].group_values + end + end + end + + def test_continue_as_new + env.with_kitchen_sink_worker do |task_queue| + handle = env.client.start_workflow( + 'kitchen_sink', + { actions: [{ continue_as_new: { while_above_zero: 1, result: 'done' } }] }, + id: "wf-#{SecureRandom.uuid}", + task_queue: + ) + assert_equal 'done', handle.result + + # Confirm what happens if we do not follow runs on the handle + assert_raises(Temporalio::Error::WorkflowContinuedAsNewError) do + handle.result(follow_runs: false) + end + end + end + + def test_not_found + handle = env.client.workflow_handle('does-not-exist') + err = assert_raises(Temporalio::Error::RPCError) do + handle.describe + end + assert_equal Temporalio::Error::RPCError::Code::NOT_FOUND, err.code + err = assert_raises(Temporalio::Error::RPCError) do + handle.result + end + assert_equal Temporalio::Error::RPCError::Code::NOT_FOUND, err.code + end + + def test_config_change + env.with_kitchen_sink_worker do |task_queue| + # Regular query works + handle = env.client.start_workflow( + 'kitchen_sink', + { actions: [{ query_handler: { name: 'some query' } }] }, + id: "wf-#{SecureRandom.uuid}", + task_queue: + ) + handle.result + assert_equal 'some query arg', handle.query('some query', 'some query arg') + + # Now demonstrate simple configuration change w/ default reject condition + new_options = env.client.options.dup + new_options.default_workflow_query_reject_condition = Temporalio::Client::WorkflowQueryRejectCondition::NOT_OPEN + new_client = Temporalio::Client.new(**new_options.to_h) # steep:ignore + err = assert_raises(Temporalio::Error::WorkflowQueryRejectedError) do + new_client.workflow_handle(handle.id).query('some query', 'some query arg') + end + assert_equal Temporalio::Client::WorkflowExecutionStatus::COMPLETED, err.status + end + end + + def test_signal + env.with_kitchen_sink_worker do |task_queue| + handle = env.client.start_workflow( + 'kitchen_sink', + { action_signal: 'some signal' }, + id: 
"wf-#{SecureRandom.uuid}", + task_queue: + ) + handle.signal('some signal', { result: { value: 'some signal arg' } }) + assert_equal 'some signal arg', handle.result + end + end + + def test_query + env.with_kitchen_sink_worker do |task_queue| + handle = env.client.start_workflow( + 'kitchen_sink', + { actions: [{ query_handler: { name: 'some query' } }] }, + id: "wf-#{SecureRandom.uuid}", + task_queue: + ) + handle.result + assert_equal 'some query arg', handle.query('some query', 'some query arg') + + # Check query not present + err = assert_raises(Temporalio::Error::WorkflowQueryFailedError) do + handle.query('unknown query') + end + assert_includes err.message, 'unknown query' + + # Query reject condition + err = assert_raises(Temporalio::Error::WorkflowQueryRejectedError) do + handle.query('some query', 'some query arg', + reject_condition: Temporalio::Client::WorkflowQueryRejectCondition::NOT_OPEN) + end + assert_equal Temporalio::Client::WorkflowExecutionStatus::COMPLETED, err.status + end + end + + def test_update + env.with_kitchen_sink_worker do |task_queue| + handle = env.client.start_workflow( + 'kitchen_sink', + { + actions: [ + { update_handler: { name: 'update-success' } }, + { update_handler: { name: 'update-fail', error: 'update failed' } }, + { update_handler: { name: 'update-wait', wait_for_signal: 'finish-update' } } + ], + action_signal: 'wait' + }, + id: "wf-#{SecureRandom.uuid}", + task_queue: + ) + + # Simple update success + assert_equal 'update-result', handle.execute_update('update-success', 'update-result') + + # Simple update failure + err = assert_raises(Temporalio::Error::WorkflowUpdateFailedError) do + handle.execute_update('update-fail', 'update-result') + end + assert_instance_of Temporalio::Error::ApplicationError, err.cause + assert_equal 'update failed', err.cause.message + + # Immediate complete update success via start+result + update_handle = handle.start_update( + 'update-success', + 'update-result', + # TODO(cretz): Can 
make this ACCEPTED once https://github.com/temporalio/temporal/pull/6477 released + wait_for_stage: Temporalio::Client::WorkflowUpdateWaitStage::COMPLETED + ) + assert update_handle.result_obtained? + assert_equal 'update-result', update_handle.result + + # Async complete + update_handle = handle.start_update( + 'update-wait', + 'update-result', + wait_for_stage: Temporalio::Client::WorkflowUpdateWaitStage::ACCEPTED + ) + refute update_handle.result_obtained? + handle.signal('finish-update', 'update-result-from-signal') + assert_equal 'update-result-from-signal', update_handle.result + assert update_handle.result_obtained? + end + end + + def test_cancel + env.with_kitchen_sink_worker do |task_queue| + handle = env.client.start_workflow( + 'kitchen_sink', + { actions: [{ sleep: { millis: 50_000 } }] }, + id: "wf-#{SecureRandom.uuid}", + task_queue: + ) + handle.cancel + err = assert_raises(Temporalio::Error::WorkflowFailureError) do + handle.result + end + assert_instance_of Temporalio::Error::CanceledError, err.cause + end + end + + def test_terminate + env.with_kitchen_sink_worker do |task_queue| + handle = env.client.start_workflow( + 'kitchen_sink', + { actions: [{ sleep: { millis: 50_000 } }] }, + id: "wf-#{SecureRandom.uuid}", + task_queue: + ) + handle.terminate('some reason', details: ['some details']) + err = assert_raises(Temporalio::Error::WorkflowFailureError) do + handle.result + end + assert_instance_of Temporalio::Error::TerminatedError, err.cause + assert_equal 'some reason', err.cause.message + assert_equal ['some details'], err.cause.details + end + end + + # TODO(cretz): Tests to write: + # * Cancelling RPC of get result (and figuring out cancel token and Ruby async lib compat and thread/fiber raise, etc) + # * Also cancelling waiting for update + # * Workflow cloud test + # * Signal/update with start + # * Async activity +end diff --git a/temporalio/test/converters/data_converter_test.rb b/temporalio/test/converters/data_converter_test.rb new 
file mode 100644 index 00000000..5f46e45b --- /dev/null +++ b/temporalio/test/converters/data_converter_test.rb @@ -0,0 +1,73 @@ +# frozen_string_literal: true + +require 'temporalio/api' +require 'temporalio/converters/data_converter' +require 'temporalio/converters/payload_codec' +require 'temporalio/testing' +require 'test' + +module Converters + class DataConverterTest < Test + class Base64Codec + def encode(payloads) + payloads.map do |p| + Temporalio::Api::Common::V1::Payload.new( + metadata: { 'encoding' => 'test/base64' }, + data: Base64.strict_encode64(p.to_proto) + ) + end + end + + def decode(payloads) + payloads.map do |p| + Temporalio::Api::Common::V1::Payload.decode( + Base64.strict_decode64(p.data) + ) + end + end + end + + def test_with_codec + converter = Temporalio::Converters::DataConverter.new( + failure_converter: Ractor.make_shareable( + Temporalio::Converters::FailureConverter.new(encode_common_attributes: true) + ), + payload_codec: Base64Codec.new + ) + + # Single payload + payload = converter.to_payload('abc') + assert_equal 'test/base64', payload.metadata['encoding'] + assert_equal 'abc', converter.from_payload(payload) + + # Multi-payload + payloads = converter.to_payloads(['abc', 123]) + assert_equal(['test/base64', 'test/base64'], payloads.payloads.map { |p| p.metadata['encoding'] }) + assert_equal ['abc', 123], converter.from_payloads(payloads) + + # Failure + failure = converter.to_failure(Temporalio::Error._with_backtrace_and_cause( + Temporalio::Error::ApplicationError.new('outer', { foo: 'bar' }), + backtrace: %w[stack-val-1 stack-val-2], + cause: Temporalio::Error::ApplicationError.new('inner', { baz: 123 }) + )) + assert_equal 'Encoded failure', failure.message + assert_equal '', failure.stack_trace + assert_equal 'test/base64', failure.encoded_attributes.metadata['encoding'] + assert_equal 'test/base64', failure.application_failure_info.details.payloads.first.metadata['encoding'] + assert_equal 'Encoded failure', 
failure.cause.message + assert_equal '', failure.cause.stack_trace + assert_equal 'test/base64', failure.cause.encoded_attributes.metadata['encoding'] + assert_equal 'test/base64', failure.cause.application_failure_info.details.payloads.first.metadata['encoding'] + error = converter.from_failure(failure) + assert_instance_of Temporalio::Error::ApplicationError, error + assert_equal 'outer', error.message + assert_equal %w[stack-val-1 stack-val-2], error.backtrace + assert_equal 'bar', error.details.first['foo'] # steep:ignore + assert_instance_of Temporalio::Error::ApplicationError, error.cause + assert_equal 'inner', error.cause&.message + assert_equal [], error.cause&.backtrace + assert_equal 123, error.cause.details.first['baz'] # steep:ignore + end + end +end diff --git a/temporalio/test/converters/failure_converter_test.rb b/temporalio/test/converters/failure_converter_test.rb new file mode 100644 index 00000000..660ee5c1 --- /dev/null +++ b/temporalio/test/converters/failure_converter_test.rb @@ -0,0 +1,93 @@ +# frozen_string_literal: true + +require 'temporalio/api' +require 'temporalio/converters/failure_converter' +require 'temporalio/testing' +require 'test' + +module Converters + class FailureConverterTest < Test + def test_failure_with_causes + # Make multiple nested errors + orig_err = assert_raises do + begin + begin + raise 'Unset error class' + rescue StandardError + raise Temporalio::Error::ApplicationError, 'Application error no details' + end + rescue StandardError + raise Temporalio::Error::ApplicationError.new('Application error with details', { foo: 'bar' }) + end + rescue StandardError + raise Temporalio::Error::ChildWorkflowError.new( + 'Child error', + namespace: 'ns', + workflow_id: 'wfid', + run_id: 'runid', + workflow_type: 'wftype', + initiated_event_id: 123, + started_event_id: 456, + retry_state: Temporalio::Error::RetryState::RETRY_POLICY_NOT_SET + ) + end + + # Confirm multiple nested + assert_instance_of 
Temporalio::Error::ChildWorkflowError, orig_err + refute_empty orig_err.backtrace + assert_equal 'Child error', orig_err.message + assert_equal Temporalio::Error::RetryState::RETRY_POLICY_NOT_SET, orig_err.retry_state + assert_instance_of Temporalio::Error::ApplicationError, orig_err.cause + refute_empty orig_err.cause.backtrace + assert_equal 'Application error with details', orig_err.cause.message + assert_equal [{ foo: 'bar' }], orig_err.cause.details + assert_nil orig_err.cause.type + assert_instance_of Temporalio::Error::ApplicationError, orig_err.cause.cause + assert_equal 'Application error no details', orig_err.cause.cause.message + assert_empty orig_err.cause.cause.details + assert_instance_of RuntimeError, orig_err.cause.cause.cause + assert_equal 'Unset error class', orig_err.cause.cause.cause.message + + # Confirm serialized as expected + failure = Temporalio::Converters::DataConverter.default.to_failure(orig_err) + assert_equal 'Child error', failure.message + refute_empty failure.stack_trace + assert_equal 'wfid', failure.child_workflow_execution_failure_info.workflow_execution.workflow_id + assert_equal Temporalio::Error::RetryState::RETRY_POLICY_NOT_SET, + Temporalio::Internal::ProtoUtils.enum_to_int( + Temporalio::Api::Enums::V1::RetryState, + failure.child_workflow_execution_failure_info.retry_state + ) + assert_equal 'Application error with details', failure.cause.message + assert_empty failure.cause.application_failure_info.type + refute_nil failure.cause.application_failure_info.details + assert_equal 'Application error no details', failure.cause.cause.message + assert_empty failure.cause.cause.application_failure_info.details.payloads + assert_equal 'Unset error class', failure.cause.cause.cause.message + assert_equal 'RuntimeError', failure.cause.cause.cause.application_failure_info.type + + # Confirm deserialized as expected + # @type var new_err: untyped + new_err = Temporalio::Converters::DataConverter.default.from_failure(failure) + 
assert_instance_of Temporalio::Error::ChildWorkflowError, new_err + assert_equal orig_err.backtrace, new_err.backtrace + assert_equal 'Child error', new_err.message + assert_equal Temporalio::Error::RetryState::RETRY_POLICY_NOT_SET, new_err.retry_state + assert_instance_of Temporalio::Error::ApplicationError, new_err.cause + assert_equal orig_err.cause.backtrace, new_err.cause.backtrace + assert_equal 'Application error with details', new_err.cause.message + assert_equal [{ 'foo' => 'bar' }], new_err.cause.details + assert_nil new_err.cause.type + assert_instance_of Temporalio::Error::ApplicationError, new_err.cause.cause + assert_equal orig_err.cause.cause.backtrace, new_err.cause.cause.backtrace + assert_equal 'Application error no details', new_err.cause.cause.message + assert_empty new_err.cause.cause.details + assert_instance_of Temporalio::Error::ApplicationError, new_err.cause.cause.cause + assert_equal orig_err.cause.cause.cause.backtrace, new_err.cause.cause.cause.backtrace + assert_equal 'Unset error class', new_err.cause.cause.cause.message + assert_equal 'RuntimeError', new_err.cause.cause.cause.type + end + + # TODO(cretz): Test with encoded + end +end diff --git a/temporalio/test/converters/payload_converter_test.rb b/temporalio/test/converters/payload_converter_test.rb new file mode 100644 index 00000000..b28baf60 --- /dev/null +++ b/temporalio/test/converters/payload_converter_test.rb @@ -0,0 +1,179 @@ +# frozen_string_literal: true + +require 'active_model' +require 'active_record' +require 'temporalio/api' +require 'temporalio/converters/payload_converter' +require 'test' + +module Converters + class PayloadConverterTest < Test + # @type method assert_payload: ( + # untyped input, + # String expected_encoding, + # String expected_data, + # ?expected_decoded_input: untyped, + # ?converter: Temporalio::Converters::PayloadConverter + # ) -> untyped + def assert_payload( + input, + expected_encoding, + expected_data, + expected_decoded_input: nil, + 
converter: Temporalio::Converters::PayloadConverter.default + ) + # Convert and check contents + payload = converter.to_payload(input) + assert_equal expected_encoding, payload.metadata['encoding'] + assert_equal expected_data, payload.data + + # Convert back and check + new_input = converter.from_payload(payload) + expected_decoded_input ||= input + if expected_decoded_input.nil? + assert_nil new_input + else + assert_equal expected_decoded_input, new_input + end + payload + end + + def test_default_converter + # Basic types + assert_payload nil, 'binary/null', '' + assert_payload 'test str'.encode(Encoding::ASCII_8BIT), 'binary/plain', 'test str' + payload = assert_payload( + Temporalio::Api::Common::V1::WorkflowExecution.new(workflow_id: 'id1'), + 'json/protobuf', + '{"workflowId":"id1"}' + ) + assert_equal 'temporal.api.common.v1.WorkflowExecution', payload.metadata['messageType'] + assert_payload( + { foo: 'bar', 'baz' => 'qux' }, 'json/plain', '{"foo":"bar","baz":"qux"}', + expected_decoded_input: { 'foo' => 'bar', 'baz' => 'qux' } + ) + assert_payload 1234, 'json/plain', '1234' + assert_payload 12.34, 'json/plain', '12.34' + assert_payload true, 'json/plain', 'true' + assert_payload false, 'json/plain', 'false' + assert_payload ['str', nil, { 'a' => false }, 1234], 'json/plain', '["str",null,{"a":false},1234]' + + # Circular ref + some_arr = [] + some_arr << some_arr + assert_raises(JSON::NestingError) do + assert_payload some_arr, 'json/plain', 'whatever' + end + + # Time without addition is a time string (not ISO-8601) + time = Time.now + assert_payload time, 'json/plain', "\"#{time}\"", expected_decoded_input: time.to_s + # Time with addition comes back as the same object (but not very useful + # outside of Ruby) + require 'json/add/time' + assert_payload time, 'json/plain', time.to_json + end + + def test_binary_proto + # Make a new converter with all default converters except json proto so + # that binary proto takes precedent + converter = 
Temporalio::Converters::PayloadConverter::Composite.new( + *Temporalio::Converters::PayloadConverter.default.converters.values.reject do |conv| + conv.is_a?(Temporalio::Converters::PayloadConverter::JSONProtobuf) + end + ) + + proto = Temporalio::Api::Common::V1::WorkflowExecution.new(workflow_id: 'id1') + payload = assert_payload(proto, 'binary/protobuf', proto.to_proto, converter:) + assert_equal 'temporal.api.common.v1.WorkflowExecution', payload.metadata['messageType'] + end + + # Need this support library for active model to work + module ActiveRecordJSONSupport + extend ActiveSupport::Concern + include ActiveModel::Serializers::JSON + + included do + def to_json(*args) + hash = as_json + hash[::JSON.create_id] = self.class.name + hash.to_json(*args) + end + + def self.json_create(object) + object.delete(::JSON.create_id) + ret = new + ret.attributes = object + ret + end + end + end + + module ActiveModelJSONSupport + extend ActiveSupport::Concern + include ActiveRecordJSONSupport + + included do + def attributes=(hash) + hash.each do |key, value| + send("#{key}=", value) + end + end + + def attributes + instance_values + end + end + end + + class MyActiveRecordObject < ActiveRecord::Base + include ActiveRecordJSONSupport + end + + class MyActiveModelObject + include ActiveModel::API + include ActiveModelJSONSupport + + attr_accessor :foo, :bar + end + + def test_active_record_and_model + # Make conn and schema + ActiveRecord::Base.establish_connection( + adapter: 'sqlite3', + database: ':memory:' + ) + ActiveRecord::Schema.define do + create_table :my_active_record_objects, force: true do |t| + t.string :foo + t.integer :baz + end + end + + # Make obj + obj = MyActiveRecordObject.new(foo: 'bar', baz: 1234) + + # Convert and check contents + converter = Temporalio::Converters::PayloadConverter.default + payload = converter.to_payload(obj) + assert_equal 'json/plain', payload.metadata['encoding'] + assert_equal obj.to_json, payload.data + # Convert back and 
check + new_obj = converter.from_payload(payload) + assert_instance_of MyActiveRecordObject, new_obj + assert_equal obj.attributes, new_obj.attributes + + # Do the same for active model + obj = MyActiveModelObject.new(foo: 1234, bar: 'baz') + # Convert and check contents + converter = Temporalio::Converters::PayloadConverter.default + payload = converter.to_payload(obj) + assert_equal 'json/plain', payload.metadata['encoding'] + assert_equal obj.to_json, payload.data + # Convert back and check + new_obj = converter.from_payload(payload) + assert_instance_of MyActiveModelObject, new_obj + assert_equal obj.attributes, new_obj.attributes + end + end +end diff --git a/temporalio/test/extra_assertions.rb b/temporalio/test/extra_assertions.rb new file mode 100644 index 00000000..81ef5341 --- /dev/null +++ b/temporalio/test/extra_assertions.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +module ExtraAssertions + def assert_eventually(timeout: 10, interval: 0.2) + start_time = Time.now + loop do + begin + return yield + rescue Minitest::Assertion => e + raise e if Time.now - start_time > timeout + end + sleep(interval) + end + end +end diff --git a/temporalio/test/golangworker/main.go b/temporalio/test/golangworker/main.go index 8ada1a84..ec105d7d 100644 --- a/temporalio/test/golangworker/main.go +++ b/temporalio/test/golangworker/main.go @@ -5,15 +5,21 @@ import ( "errors" "fmt" "log" + "log/slog" "os" "time" "go.temporal.io/sdk/client" + sdklog "go.temporal.io/sdk/log" "go.temporal.io/sdk/temporal" "go.temporal.io/sdk/worker" "go.temporal.io/sdk/workflow" ) +func init() { + slog.SetLogLoggerLevel(slog.LevelWarn) +} + func main() { if len(os.Args) != 4 { log.Fatalf("expected endpoint, namespace, and task queue arg, found %v args", len(os.Args)-1) @@ -24,17 +30,21 @@ func main() { } func run(endpoint, namespace, taskQueue string) error { - log.Printf("Creating client to %v", endpoint) - cl, err := client.NewClient(client.Options{HostPort: endpoint, Namespace: 
namespace}) + slog.Info("Creating client") + cl, err := client.Dial(client.Options{ + HostPort: endpoint, + Namespace: namespace, + Logger: sdklog.NewStructuredLogger(slog.Default()), + }) if err != nil { return fmt.Errorf("failed to create client: %w", err) } defer cl.Close() - log.Printf("Creating worker") + slog.Info("Creating worker") w := worker.New(cl, taskQueue, worker.Options{}) w.RegisterWorkflowWithOptions(KitchenSinkWorkflow, workflow.RegisterOptions{Name: "kitchen_sink"}) - defer log.Printf("Stopping worker") + defer slog.Info("Stopping worker") return w.Run(worker.InterruptCh()) } @@ -49,6 +59,7 @@ type KitchenSinkAction struct { ContinueAsNew *ContinueAsNewAction `json:"continue_as_new"` Sleep *SleepAction `json:"sleep"` QueryHandler *QueryHandlerAction `json:"query_handler"` + UpdateHandler *UpdateHandlerAction `json:"update_handler"` Signal *SignalAction `json:"signal"` ExecuteActivity *ExecuteActivityAction `json:"execute_activity"` } @@ -60,6 +71,7 @@ type ResultAction struct { type ErrorAction struct { Message string `json:"message"` + Type string `json:"type"` Details interface{} `json:"details"` Attempt bool `json:"attempt"` } @@ -78,6 +90,12 @@ type QueryHandlerAction struct { Error string `json:"error"` } +type UpdateHandlerAction struct { + Name string `json:"name"` + Error string `json:"error"` + WaitForSignal string `json:"wait_for_signal"` +} + type SignalAction struct { Name string `json:"name"` } @@ -145,7 +163,7 @@ func handleAction( if action.Error.Details != nil { details = append(details, action.Error.Details) } - return true, nil, temporal.NewApplicationError(action.Error.Message, "", details...) + return true, nil, temporal.NewApplicationError(action.Error.Message, action.Error.Type, details...) 
case action.ContinueAsNew != nil: if action.ContinueAsNew.WhileAboveZero > 0 { @@ -171,6 +189,25 @@ func handleAction( return true, nil, err } + case action.UpdateHandler != nil: + err := workflow.SetUpdateHandler( + ctx, + action.UpdateHandler.Name, + func(ctx workflow.Context, arg string) (string, error) { + if action.UpdateHandler.Error != "" { + return "", errors.New(action.UpdateHandler.Error) + } else if action.UpdateHandler.WaitForSignal != "" { + var sigVal string + workflow.GetSignalChannel(ctx, action.UpdateHandler.WaitForSignal).Receive(ctx, &sigVal) + return sigVal, nil + } else { + return arg, nil + } + }) + if err != nil { + return true, nil, err + } + case action.Signal != nil: workflow.GetSignalChannel(ctx, action.Signal.Name).Receive(ctx, nil) diff --git a/temporalio/test/sig/client_test.rbs b/temporalio/test/sig/client_test.rbs deleted file mode 100644 index e7011028..00000000 --- a/temporalio/test/sig/client_test.rbs +++ /dev/null @@ -1,3 +0,0 @@ -class ClientTest < Test - def start_simple_workflows: -> void -end \ No newline at end of file diff --git a/temporalio/test/sig/client_workflow_test.rbs b/temporalio/test/sig/client_workflow_test.rbs new file mode 100644 index 00000000..00ba03cf --- /dev/null +++ b/temporalio/test/sig/client_workflow_test.rbs @@ -0,0 +1,3 @@ +class ClientWorkflowTest < Test + def start_simple: -> void +end \ No newline at end of file diff --git a/temporalio/test/sig/extra_assertions.rbs b/temporalio/test/sig/extra_assertions.rbs new file mode 100644 index 00000000..2336a1ac --- /dev/null +++ b/temporalio/test/sig/extra_assertions.rbs @@ -0,0 +1,3 @@ +module ExtraAssertions + def assert_eventually: [T] (?timeout: Float, ?interval: Float) { -> T } -> T +end \ No newline at end of file diff --git a/temporalio/test/sig/test.rbs b/temporalio/test/sig/test.rbs index 5a325f71..54a09f27 100644 --- a/temporalio/test/sig/test.rbs +++ b/temporalio/test/sig/test.rbs @@ -1,4 +1,14 @@ class Test < Minitest::Test + include 
ExtraAssertions + + ATTR_KEY_TEXT: Temporalio::SearchAttributes::Key + ATTR_KEY_KEYWORD: Temporalio::SearchAttributes::Key + ATTR_KEY_INTEGER: Temporalio::SearchAttributes::Key + ATTR_KEY_FLOAT: Temporalio::SearchAttributes::Key + ATTR_KEY_BOOLEAN: Temporalio::SearchAttributes::Key + ATTR_KEY_TIME: Temporalio::SearchAttributes::Key + ATTR_KEY_KEYWORD_LIST: Temporalio::SearchAttributes::Key + def env: -> TestEnvironment class TestEnvironment @@ -17,6 +27,8 @@ class Test < Minitest::Test def kitchen_sink_exe: -> String + def ensure_common_search_attribute_keys: -> void + def ensure_search_attribute_keys: (*Temporalio::SearchAttributes::Key keys) -> void end end \ No newline at end of file diff --git a/temporalio/test/test.rb b/temporalio/test/test.rb index a87f409b..b9eb4396 100644 --- a/temporalio/test/test.rb +++ b/temporalio/test/test.rb @@ -1,9 +1,32 @@ # frozen_string_literal: true +require 'extra_assertions' require 'minitest/autorun' +require 'securerandom' require 'singleton' +require 'temporalio/testing' +require 'timeout' class Test < Minitest::Test + include ExtraAssertions + + ATTR_KEY_TEXT = Temporalio::SearchAttributes::Key.new('ruby-key-text', + Temporalio::SearchAttributes::IndexedValueType::TEXT) + ATTR_KEY_KEYWORD = Temporalio::SearchAttributes::Key.new('ruby-key-keyword', + Temporalio::SearchAttributes::IndexedValueType::KEYWORD) + ATTR_KEY_INTEGER = Temporalio::SearchAttributes::Key.new('ruby-key-integer', + Temporalio::SearchAttributes::IndexedValueType::INTEGER) + ATTR_KEY_FLOAT = Temporalio::SearchAttributes::Key.new('ruby-key-float', + Temporalio::SearchAttributes::IndexedValueType::FLOAT) + ATTR_KEY_BOOLEAN = Temporalio::SearchAttributes::Key.new('ruby-key-boolean', + Temporalio::SearchAttributes::IndexedValueType::BOOLEAN) + ATTR_KEY_TIME = Temporalio::SearchAttributes::Key.new('ruby-key-time', + Temporalio::SearchAttributes::IndexedValueType::TIME) + ATTR_KEY_KEYWORD_LIST = Temporalio::SearchAttributes::Key.new( + 'ruby-key-keyword-list', + 
Temporalio::SearchAttributes::IndexedValueType::KEYWORD_LIST + ) + def env TestEnvironment.instance end @@ -63,6 +86,11 @@ def kitchen_sink_exe end end + def ensure_common_search_attribute_keys + ensure_search_attribute_keys(ATTR_KEY_TEXT, ATTR_KEY_KEYWORD, ATTR_KEY_INTEGER, ATTR_KEY_FLOAT, ATTR_KEY_BOOLEAN, + ATTR_KEY_TIME, ATTR_KEY_KEYWORD_LIST) + end + def ensure_search_attribute_keys(*keys) # Do a list and collect ones not present list_resp = client.operator_service.list_search_attributes(