diff --git a/crates/client/src/lib.rs b/crates/client/src/lib.rs index c4999908f..970cab024 100644 --- a/crates/client/src/lib.rs +++ b/crates/client/src/lib.rs @@ -170,6 +170,8 @@ pub struct ConnectionOptions { #[builder(default = "temporal-rust".to_owned())] #[cfg_attr(feature = "core-based-sdk", builder(setters(vis = "pub")))] client_name: String, + // TODO [rust-sdk-branch]: SDK should set this to its version. Doing that probably easiest + // after adding proper client interceptors. /// The version of the SDK being implemented on top of core. Is set as `client-version` header /// in all RPC calls. The server decides if the client is supported based on this. #[builder(default = VERSION.to_owned())] diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index ad7d86f1e..22825c79a 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -26,6 +26,7 @@ anyhow = "1.0" async-trait = "0.1" base64 = "0.22" bon = { workspace = true } +crc32fast = "1" dirs = { version = "6.0", optional = true } derive_more = { workspace = true } erased-serde = "0.4" diff --git a/crates/common/src/activity_definition.rs b/crates/common/src/activity_definition.rs index d50b328c1..5e6b40c1c 100644 --- a/crates/common/src/activity_definition.rs +++ b/crates/common/src/activity_definition.rs @@ -9,7 +9,7 @@ use crate::data_converters::{TemporalDeserializable, TemporalSerializable}; /// Implement on a marker struct to define an activity. 
pub trait ActivityDefinition { /// Type of the input argument to the workflow - type Input: TemporalDeserializable + 'static; + type Input: TemporalDeserializable + TemporalSerializable + 'static; /// Type of the output of the workflow type Output: TemporalSerializable + 'static; diff --git a/crates/common/src/data_converters.rs b/crates/common/src/data_converters.rs index 2f1e47c78..4fddf5e1b 100644 --- a/crates/common/src/data_converters.rs +++ b/crates/common/src/data_converters.rs @@ -47,9 +47,28 @@ impl PayloadConverter { // TODO [rust-sdk-branch]: Proto binary, other standard built-ins } +#[derive(Debug)] pub enum PayloadConversionError { WrongEncoding, - EncodingError(Box), + EncodingError(Box), +} + +impl std::fmt::Display for PayloadConversionError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PayloadConversionError::WrongEncoding => write!(f, "Wrong encoding"), + PayloadConversionError::EncodingError(err) => write!(f, "Encoding error: {}", err), + } + } +} + +impl std::error::Error for PayloadConversionError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + PayloadConversionError::WrongEncoding => None, + PayloadConversionError::EncodingError(err) => Some(err.as_ref()), + } + } } pub trait FailureConverter { @@ -71,13 +90,13 @@ pub struct DefaultFailureConverter; pub trait PayloadCodec { fn encode( &self, - payloads: Vec, context: &SerializationContext, + payloads: Vec, ) -> BoxFuture<'static, Vec>; fn decode( &self, - payloads: Vec, context: &SerializationContext, + payloads: Vec, ) -> BoxFuture<'static, Vec>; } pub struct DefaultPayloadCodec; @@ -94,7 +113,7 @@ pub trait TemporalSerializable { None } } -/// + /// Indicates some type can be deserialized for use with Temporal. 
/// /// You don't need to implement this unless you are using a non-serde-compatible custom converter, @@ -102,8 +121,8 @@ pub trait TemporalSerializable { pub trait TemporalDeserializable: Sized { fn from_serde( _: &dyn ErasedSerdePayloadConverter, - _: Payload, _: &SerializationContext, + _: Payload, ) -> Option { None } @@ -122,26 +141,26 @@ pub struct RawValue { pub trait GenericPayloadConverter { fn to_payload( &self, - val: &T, context: &SerializationContext, + val: &T, ) -> Result; #[allow(clippy::wrong_self_convention)] fn from_payload( &self, - payload: Payload, context: &SerializationContext, + payload: Payload, ) -> Result; } impl GenericPayloadConverter for PayloadConverter { fn to_payload( &self, - val: &T, context: &SerializationContext, + val: &T, ) -> Result { match self { PayloadConverter::Serde(pc) => { - Ok(pc.to_payload(val.as_serde().ok_or_else(|| todo!())?, context)?) + Ok(pc.to_payload(context, val.as_serde().ok_or_else(|| todo!())?)?) } PayloadConverter::UseWrappers => { Ok(T::to_payload(val, context).ok_or_else(|| todo!())?) @@ -153,12 +172,12 @@ impl GenericPayloadConverter for PayloadConverter { fn from_payload( &self, - payload: Payload, context: &SerializationContext, + payload: Payload, ) -> Result { match self { PayloadConverter::Serde(pc) => { - Ok(T::from_serde(pc.as_ref(), payload, context).ok_or_else(|| todo!())?) + Ok(T::from_serde(pc.as_ref(), context, payload).ok_or_else(|| todo!())?) } PayloadConverter::UseWrappers => { Ok(T::from_payload(payload, context).ok_or_else(|| todo!())?) 
@@ -183,13 +202,13 @@ where { fn from_serde( pc: &dyn ErasedSerdePayloadConverter, - payload: Payload, context: &SerializationContext, + payload: Payload, ) -> Option where Self: Sized, { - erased_serde::deserialize(&mut pc.from_payload(payload, context).ok()?).ok() + erased_serde::deserialize(&mut pc.from_payload(context, payload).ok()?).ok() } } @@ -197,8 +216,8 @@ struct SerdeJsonPayloadConverter; impl ErasedSerdePayloadConverter for SerdeJsonPayloadConverter { fn to_payload( &self, + _: &SerializationContext, value: &dyn erased_serde::Serialize, - _context: &SerializationContext, ) -> Result { let as_json = serde_json::to_vec(value).map_err(|_| todo!())?; Ok(Payload { @@ -213,8 +232,8 @@ impl ErasedSerdePayloadConverter for SerdeJsonPayloadConverter { fn from_payload( &self, + _: &SerializationContext, payload: Payload, - _context: &SerializationContext, ) -> Result>, PayloadConversionError> { // TODO: Would check metadata let json_v: serde_json::Value = @@ -225,14 +244,14 @@ impl ErasedSerdePayloadConverter for SerdeJsonPayloadConverter { pub trait ErasedSerdePayloadConverter: Send + Sync { fn to_payload( &self, - value: &dyn erased_serde::Serialize, context: &SerializationContext, + value: &dyn erased_serde::Serialize, ) -> Result; #[allow(clippy::wrong_self_convention)] fn from_payload( &self, - payload: Payload, context: &SerializationContext, + payload: Payload, ) -> Result>, PayloadConversionError>; } @@ -299,15 +318,15 @@ impl FailureConverter for DefaultFailureConverter { impl PayloadCodec for DefaultPayloadCodec { fn encode( &self, - payloads: Vec, _: &SerializationContext, + payloads: Vec, ) -> BoxFuture<'static, Vec> { async move { payloads }.boxed() } fn decode( &self, - payloads: Vec, _: &SerializationContext, + payloads: Vec, ) -> BoxFuture<'static, Vec> { async move { payloads }.boxed() } diff --git a/crates/common/src/worker.rs b/crates/common/src/worker.rs index 9370f0b2d..796abcecf 100644 --- a/crates/common/src/worker.rs +++ 
b/crates/common/src/worker.rs @@ -2,7 +2,12 @@ //! with workers. use crate::protos::{coresdk, temporal, temporal::api::enums::v1::VersioningBehavior}; -use std::str::FromStr; +use std::{ + fs::File, + io::{self, BufReader, Read}, + str::FromStr, + sync::OnceLock, +}; /// Specifies which task types a worker will poll for. /// @@ -84,6 +89,19 @@ pub struct WorkerDeploymentOptions { pub default_versioning_behavior: Option, } +impl WorkerDeploymentOptions { + pub fn from_build_id(build_id: String) -> Self { + Self { + version: WorkerDeploymentVersion { + deployment_name: "".to_owned(), + build_id, + }, + use_worker_versioning: false, + default_versioning_behavior: None, + } + } +} + #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct WorkerDeploymentVersion { /// Name of the deployment @@ -138,3 +156,33 @@ impl From for WorkerDepl } } } + +static CACHED_BUILD_ID: OnceLock = OnceLock::new(); + +/// Build ID derived from hashing the on-disk bytes of the current executable. +/// Deterministic across machines for the same binary. Cached per-process. 
+pub fn build_id_from_current_exe() -> &'static str { + CACHED_BUILD_ID + .get_or_init(|| compute_crc32_exe_id().unwrap_or_else(|_| "undetermined".to_owned())) +} + +fn compute_crc32_exe_id() -> io::Result { + let exe_path = std::env::current_exe()?; + let file = File::open(exe_path)?; + let mut reader = BufReader::new(file); + + let mut hasher = crc32fast::Hasher::new(); + let mut buf = [0u8; 128 * 1024]; + + loop { + let n = reader.read(&mut buf)?; + if n == 0 { + break; + } + hasher.update(&buf[..n]); + } + + let crc = hasher.finalize(); + + Ok(format!("{:08x}", crc)) +} diff --git a/crates/macros/src/definitions.rs b/crates/macros/src/definitions.rs index 330bb6351..82fb0e25a 100644 --- a/crates/macros/src/definitions.rs +++ b/crates/macros/src/definitions.rs @@ -1,8 +1,8 @@ use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; -use quote::{format_ident, quote}; +use quote::{format_ident, quote, quote_spanned}; use syn::{ - FnArg, ImplItem, ItemImpl, ReturnType, Type, TypePath, + Attribute, FnArg, ImplItem, ItemImpl, ReturnType, Type, TypePath, parse::{Parse, ParseStream}, spanned::Spanned, }; @@ -12,8 +12,14 @@ pub(crate) struct ActivitiesDefinition { activities: Vec, } +#[derive(Default)] +struct ActivityAttributes { + name_override: Option, +} + struct ActivityMethod { method: syn::ImplItemFn, + attributes: ActivityAttributes, is_async: bool, is_static: bool, input_type: Option, @@ -48,6 +54,7 @@ impl Parse for ActivitiesDefinition { } fn parse_activity_method(method: &syn::ImplItemFn) -> syn::Result { + let attributes = extract_activity_attributes(method.attrs.as_slice())?; let is_async = method.sig.asyncness.is_some(); // Determine if static (no self receiver) or instance (Arc) @@ -72,6 +79,7 @@ fn parse_activity_method(method: &syn::ImplItemFn) -> syn::Result syn::Result syn::Result { + let mut activity_attributes = ActivityAttributes::default(); + + for attr in attrs { + if attr.path().is_ident("activity") && 
attr.meta.require_list().is_ok() { + attr.parse_nested_meta(|meta| { + if meta.path.is_ident("name") { + let value = meta.value()?; + let expr: syn::Expr = value.parse()?; + activity_attributes.name_override = Some(expr); + Ok(()) + } else { + Err(meta.error("unsupported activity attribute")) + } + })?; + } + } + + Ok(activity_attributes) +} + fn validate_arc_self_type(ty: &Type) -> syn::Result<()> { let expected: Type = syn::parse_quote!(Arc); @@ -150,40 +179,99 @@ fn extract_output_type(sig: &syn::Signature) -> Option { impl ActivitiesDefinition { pub(crate) fn codegen(&self) -> TokenStream { let impl_type = &self.impl_block.self_ty; + let impl_type_name = type_name_string(impl_type); let module_name = type_to_snake_case(impl_type); let module_ident = format_ident!("{}", module_name); - // Generate the original impl block with #[activity] attributes stripped. We need that since - // it's what's actually going to get called by the worker to run them. + // Generate the original impl block with: + // - #[activity] attributes stripped + // - Activity methods renamed with __ prefix let mut cleaned_impl = self.impl_block.clone(); for item in &mut cleaned_impl.items { if let ImplItem::Fn(method) = item { + let is_activity = method + .attrs + .iter() + .any(|attr| attr.path().is_ident("activity")); + method .attrs .retain(|attr| !attr.path().is_ident("activity")); + + // Rename activity methods with __ prefix + if is_activity { + let new_name = format_ident!("__{}", method.sig.ident); + method.sig.ident = new_name; + } } } + // Generate marker structs (inside module, no external references) let activity_structs: Vec<_> = self .activities .iter() .map(|act| { - let visibility = &act.method.vis; + let visibility = match &act.method.vis { + syn::Visibility::Inherited => &syn::parse_quote!(pub(super)), + o => o, + }; + let struct_name = method_name_to_pascal_case(&act.method.sig.ident); let struct_ident = format_ident!("{}", struct_name); - quote! 
{ + let span = act.method.span(); + quote_spanned! { span=> #visibility struct #struct_ident; } }) .collect(); + // Generate consts in impl block pointing to marker structs + let activity_consts: Vec<_> = self + .activities + .iter() + .map(|act| { + let visibility = &act.method.vis; + let method_ident = &act.method.sig.ident; + let struct_name = method_name_to_pascal_case(&act.method.sig.ident); + let struct_ident = format_ident!("{}", struct_name); + let span = act.method.span(); + // Copy #[allow(...)] attributes from the method to the const + let allow_attrs: Vec<_> = act + .method + .attrs + .iter() + .filter(|attr| attr.path().is_ident("allow")) + .collect(); + quote_spanned! { span=> + #[allow(non_upper_case_globals)] + #(#allow_attrs)* + #visibility const #method_ident: #module_ident::#struct_ident = #module_ident::#struct_ident; + } + }) + .collect(); + + // Generate run methods on marker structs (outside module to reference impl_type) + let run_impls: Vec<_> = self + .activities + .iter() + .map(|act| self.generate_run_impl(act, impl_type, &module_ident)) + .collect(); + + // Generate ActivityDefinition and ExecutableActivity impls (outside module) let activity_impls: Vec<_> = self .activities .iter() - .map(|act| self.generate_activity_definition_impl(act, impl_type, &module_name)) + .map(|act| { + self.generate_activity_definition_impl( + act, + impl_type, + &impl_type_name, + &module_ident, + ) + }) .collect(); - let implementer_impl = self.generate_activity_implementer_impl(impl_type); + let implementer_impl = self.generate_activity_implementer_impl(impl_type, &module_ident); let has_only_static = if self.activities.iter().all(|a| a.is_static) { quote! { @@ -193,34 +281,135 @@ impl ActivitiesDefinition { quote! {} }; + // Generate impl block with consts + let const_impl = quote! { + impl #impl_type { + #(#activity_consts)* + } + }; + let output = quote! 
{ #cleaned_impl - pub mod #module_ident { - use super::*; + #const_impl + // Module contains only the marker structs (no use super::*) + mod #module_ident { #(#activity_structs)* + } - #(#activity_impls)* + // Run methods, trait impls are outside the module + #(#run_impls)* - #implementer_impl + #(#activity_impls)* - #has_only_static - } + #implementer_impl + + #has_only_static }; output.into() } + fn generate_run_impl( + &self, + activity: &ActivityMethod, + impl_type: &Type, + module_ident: &syn::Ident, + ) -> TokenStream2 { + let struct_name = method_name_to_pascal_case(&activity.method.sig.ident); + let struct_ident = format_ident!("{}", struct_name); + let prefixed_method = format_ident!("__{}", activity.method.sig.ident); + + let input_type = activity + .input_type + .as_ref() + .map(|t| quote! { #t }) + .unwrap_or(quote! { () }); + let output_type = activity + .output_type + .as_ref() + .map(|t| quote! { #t }) + .unwrap_or(quote! { () }); + + // Build the parameters and call based on static vs instance and input + let (params, method_call) = if activity.is_static { + let params = if activity.input_type.is_some() { + quote! { self, ctx: ::temporalio_sdk::activities::ActivityContext, input: #input_type } + } else { + quote! { self, ctx: ::temporalio_sdk::activities::ActivityContext } + }; + let call = if activity.input_type.is_some() { + quote! { #impl_type::#prefixed_method(ctx, input) } + } else { + quote! { #impl_type::#prefixed_method(ctx) } + }; + (params, call) + } else { + let params = if activity.input_type.is_some() { + quote! { self, instance: ::std::sync::Arc<#impl_type>, ctx: ::temporalio_sdk::activities::ActivityContext, input: #input_type } + } else { + quote! { self, instance: ::std::sync::Arc<#impl_type>, ctx: ::temporalio_sdk::activities::ActivityContext } + }; + let call = if activity.input_type.is_some() { + quote! { #impl_type::#prefixed_method(instance, ctx, input) } + } else { + quote! 
{ #impl_type::#prefixed_method(instance, ctx) } + }; + (params, call) + }; + + let return_type = + quote! { Result<#output_type, ::temporalio_sdk::activities::ActivityError> }; + + // If the method returns void (no return type), wrap with Ok(()) + let result_wrapper = if activity.output_type.is_none() { + quote! { ; Ok(()) } + } else { + quote! {} + }; + + // Common methods for all marker structs + let common_methods = quote! { + /// Returns the activity name (delegates to ActivityDefinition::name()) + pub fn name(&self) -> &'static str { + ::name() + } + }; + + if activity.is_async { + quote! { + impl #module_ident::#struct_ident { + #common_methods + + pub async fn run(#params) -> #return_type { + #method_call.await #result_wrapper + } + } + } + } else { + quote! { + impl #module_ident::#struct_ident { + #common_methods + + pub fn run(#params) -> #return_type { + #method_call #result_wrapper + } + } + } + } + } + fn generate_activity_definition_impl( &self, activity: &ActivityMethod, impl_type: &Type, - module_name: &str, + impl_type_name: &str, + module_ident: &syn::Ident, ) -> TokenStream2 { let struct_name = method_name_to_pascal_case(&activity.method.sig.ident); let struct_ident = format_ident!("{}", struct_name); - let method_ident = &activity.method.sig.ident; + let prefixed_method = format_ident!("__{}", activity.method.sig.ident); let input_type = activity .input_type @@ -233,7 +422,12 @@ impl ActivitiesDefinition { .map(|t| quote! { #t }) .unwrap_or(quote! { () }); - let activity_name = format!("{}::{}", module_name, struct_name); + let activity_name = if let Some(ref name_expr) = activity.attributes.name_override { + quote! { #name_expr } + } else { + let default_name = format!("{}::{}", impl_type_name, activity.method.sig.ident); + quote! { #default_name } + }; let receiver_pattern = if activity.is_static { quote! 
{ _receiver } @@ -243,14 +437,14 @@ impl ActivitiesDefinition { let method_call = if activity.input_type.is_some() { if activity.is_static { - quote! { #impl_type::#method_ident(ctx, input) } + quote! { #impl_type::#prefixed_method(ctx, input) } } else { - quote! { #impl_type::#method_ident(receiver.unwrap(), ctx, input) } + quote! { #impl_type::#prefixed_method(receiver.unwrap(), ctx, input) } } } else if activity.is_static { - quote! { #impl_type::#method_ident(ctx) } + quote! { #impl_type::#prefixed_method(ctx) } } else { - quote! { #impl_type::#method_ident(receiver.unwrap(), ctx) } + quote! { #impl_type::#prefixed_method(receiver.unwrap(), ctx) } }; // Add input parameter to execute signature only if needed @@ -281,7 +475,7 @@ impl ActivitiesDefinition { }; quote! { - impl ::temporalio_common::ActivityDefinition for #struct_ident { + impl ::temporalio_common::ActivityDefinition for #module_ident::#struct_ident { type Input = #input_type; type Output = #output_type; @@ -293,7 +487,7 @@ impl ActivitiesDefinition { } } - impl ::temporalio_sdk::activities::ExecutableActivity for #struct_ident { + impl ::temporalio_sdk::activities::ExecutableActivity for #module_ident::#struct_ident { type Implementer = #impl_type; fn execute( @@ -311,29 +505,19 @@ impl ActivitiesDefinition { } } - fn generate_activity_implementer_impl(&self, impl_type: &Type) -> TokenStream2 { - let static_activities: Vec<_> = self - .activities - .iter() - .filter(|a| a.is_static) - .map(|a| { - let struct_name = method_name_to_pascal_case(&a.method.sig.ident); - let struct_ident = format_ident!("{}", struct_name); - quote! 
{ - worker_options.register_activity::<#struct_ident>(); - } - }) - .collect(); - + fn generate_activity_implementer_impl( + &self, + impl_type: &Type, + module_ident: &syn::Ident, + ) -> TokenStream2 { let instance_activities: Vec<_> = self .activities .iter() - .filter(|a| !a.is_static) .map(|a| { let struct_name = method_name_to_pascal_case(&a.method.sig.ident); let struct_ident = format_ident!("{}", struct_name); quote! { - worker_options.register_activity_with_instance::<#struct_ident>(self.clone()); + defs.register_activity::<#module_ident::#struct_ident>(self.clone()); } }) .collect(); @@ -346,15 +530,9 @@ impl ActivitiesDefinition { quote! { impl ::temporalio_sdk::activities::ActivityImplementer for #impl_type { - fn register_all_static( - worker_options: &mut ::temporalio_sdk::WorkerOptionsBuilder, - ) { - #(#static_activities)* - } - - fn register_all_instance( + fn register_all( self: ::std::sync::Arc, - worker_options: &mut ::temporalio_sdk::WorkerOptionsBuilder, + defs: &mut ::temporalio_sdk::activities::ActivityDefinitions, ) { #register_instance_body } @@ -404,3 +582,12 @@ fn method_name_to_pascal_case(ident: &syn::Ident) -> String { } result } + +fn type_name_string(ty: &Type) -> String { + if let Type::Path(type_path) = ty + && let Some(segment) = type_path.path.segments.last() + { + return segment.ident.to_string(); + } + panic!("Cannot extract type name from impl block - expected a simple type path"); +} diff --git a/crates/macros/src/lib.rs b/crates/macros/src/lib.rs index 5f9ad1730..79fe035e4 100644 --- a/crates/macros/src/lib.rs +++ b/crates/macros/src/lib.rs @@ -4,6 +4,10 @@ use syn::parse_macro_input; mod definitions; mod fsm_impl; +/// Can be used to define Activities for invocation and execution. Using this macro requires that +/// you also depend on the `temporalio_sdk` crate. +/// +/// For a usage example, see that crate's documentation. 
#[proc_macro_attribute] pub fn activities(_attr: TokenStream, item: TokenStream) -> TokenStream { let def: definitions::ActivitiesDefinition = @@ -23,9 +27,9 @@ pub fn activity(_attr: TokenStream, item: TokenStream) -> TokenStream { /// /// An example state machine definition of a card reader for unlocking a door: /// ``` -/// use temporalio_macros::fsm; /// use std::convert::Infallible; /// use temporalio_common::fsm_trait::{StateMachine, TransitionResult}; +/// use temporalio_macros::fsm; /// /// fsm! { /// name CardReader; command Commands; error Infallible; shared_state SharedState; @@ -39,7 +43,7 @@ pub fn activity(_attr: TokenStream, item: TokenStream) -> TokenStream { /// /// #[derive(Clone)] /// pub struct SharedState { -/// last_id: Option +/// last_id: Option, /// } /// /// #[derive(Debug, Clone, Eq, PartialEq, Hash)] @@ -71,8 +75,11 @@ pub fn activity(_attr: TokenStream, item: TokenStream) -> TokenStream { /// } /// /// impl Locked { -/// fn on_card_readable(&self, shared_dat: &mut SharedState, data: CardData) -/// -> CardReaderTransition { +/// fn on_card_readable( +/// &self, +/// shared_dat: &mut SharedState, +/// data: CardData, +/// ) -> CardReaderTransition { /// match &shared_dat.last_id { /// // Arbitrarily deny the same person entering twice in a row /// Some(d) if d == &data => TransitionResult::ok(vec![], Locked {}.into()), diff --git a/crates/sdk-core/machine_coverage/ActivityMachine_Coverage.puml b/crates/sdk-core/machine_coverage/ActivityMachine_Coverage.puml index 67f326270..a5b5ed085 100644 --- a/crates/sdk-core/machine_coverage/ActivityMachine_Coverage.puml +++ b/crates/sdk-core/machine_coverage/ActivityMachine_Coverage.puml @@ -26,7 +26,7 @@ StartedActivityCancelEventRecorded -[#blue]-> TimedOut: ActivityTaskTimedOut StartedActivityCancelEventRecorded -[#blue]-> Canceled: ActivityTaskCanceled Canceled -[#blue]-> Canceled: ActivityTaskStarted Canceled -[#blue]-> Canceled: ActivityTaskCompleted -TimedOut --> [*] -Failed --> [*] Completed 
--> [*] +Failed --> [*] +TimedOut --> [*] @enduml \ No newline at end of file diff --git a/crates/sdk-core/machine_coverage/UpdateMachine_Coverage.puml b/crates/sdk-core/machine_coverage/UpdateMachine_Coverage.puml index dfd4e1bd2..2d9f28548 100644 --- a/crates/sdk-core/machine_coverage/UpdateMachine_Coverage.puml +++ b/crates/sdk-core/machine_coverage/UpdateMachine_Coverage.puml @@ -14,6 +14,6 @@ CompletedCommandCreated -[#blue]-> CompletedCommandRecorded: WorkflowExecutionUp CompletedImmediately -[#blue]-> CompletedImmediatelyAcceptCreated: CommandProtocolMessage CompletedImmediatelyAcceptCreated -[#blue]-> CompletedImmediatelyCompleteCreated: CommandProtocolMessage CompletedImmediatelyCompleteCreated -[#blue]-> CompletedCommandCreated: WorkflowExecutionUpdateAccepted -Rejected --> [*] CompletedCommandRecorded --> [*] +Rejected --> [*] @enduml \ No newline at end of file diff --git a/crates/sdk-core/machine_coverage/WorkflowTaskMachine_Coverage.puml b/crates/sdk-core/machine_coverage/WorkflowTaskMachine_Coverage.puml index e65467351..d6fd502d6 100644 --- a/crates/sdk-core/machine_coverage/WorkflowTaskMachine_Coverage.puml +++ b/crates/sdk-core/machine_coverage/WorkflowTaskMachine_Coverage.puml @@ -5,7 +5,7 @@ Scheduled --> TimedOut: WorkflowTaskTimedOut Started -[#blue]-> Completed: WorkflowTaskCompleted Started -[#blue]-> Failed: WorkflowTaskFailed Started -[#blue]-> TimedOut: WorkflowTaskTimedOut -TimedOut --> [*] Completed --> [*] Failed --> [*] +TimedOut --> [*] @enduml \ No newline at end of file diff --git a/crates/sdk-core/src/worker/mod.rs b/crates/sdk-core/src/worker/mod.rs index 6d1732ec5..3951d4480 100644 --- a/crates/sdk-core/src/worker/mod.rs +++ b/crates/sdk-core/src/worker/mod.rs @@ -130,7 +130,7 @@ pub struct WorkerConfig { pub client_identity_override: Option, /// If set nonzero, workflows will be cached and sticky task queues will be used, meaning that /// history updates are applied incrementally to suspended instances of workflow execution. 
- /// Workflows are evicted according to a least-recently-used policy one the cache maximum is + /// Workflows are evicted according to a least-recently-used policy once the cache maximum is /// reached. Workflows may also be explicitly evicted at any time, or as a result of errors /// or failures. #[builder(default = 0)] diff --git a/crates/sdk-core/src/worker/tuner.rs b/crates/sdk-core/src/worker/tuner.rs index a721976ac..dbe4122b8 100644 --- a/crates/sdk-core/src/worker/tuner.rs +++ b/crates/sdk-core/src/worker/tuner.rs @@ -26,6 +26,23 @@ pub struct TunerHolder { nexus_supplier: Arc + Send + Sync>, } +impl TunerHolder { + /// Create a tuner with fixed size slot suppliers for all slot kinds. + pub fn fixed_size( + workflow_slots: usize, + activity_slots: usize, + local_activity_slots: usize, + nexus_slots: usize, + ) -> Self { + Self { + wft_supplier: Arc::new(FixedSizeSlotSupplier::new(workflow_slots)), + act_supplier: Arc::new(FixedSizeSlotSupplier::new(activity_slots)), + la_supplier: Arc::new(FixedSizeSlotSupplier::new(local_activity_slots)), + nexus_supplier: Arc::new(FixedSizeSlotSupplier::new(nexus_slots)), + } + } +} + /// Can be used to construct a [TunerHolder] without needing to manually construct each /// [SlotSupplier]. Useful for lang bridges to allow more easily passing through user options. 
#[derive(Clone, Debug, bon::Builder)] diff --git a/crates/sdk-core/tests/common/activity_functions.rs b/crates/sdk-core/tests/common/activity_functions.rs new file mode 100644 index 000000000..92209cae7 --- /dev/null +++ b/crates/sdk-core/tests/common/activity_functions.rs @@ -0,0 +1,45 @@ +use std::time::Duration; +use temporalio_common::protos::DEFAULT_ACTIVITY_TYPE; +use temporalio_macros::activities; +use temporalio_sdk::activities::{ActivityContext, ActivityError}; +use tokio::time::sleep; + +pub(crate) struct StdActivities; + +#[activities] +impl StdActivities { + #[activity] + pub(crate) async fn echo(_ctx: ActivityContext, e: String) -> Result { + Ok(e) + } + + /// Activity that does nothing and returns success + #[activity] + pub(crate) async fn no_op(_ctx: ActivityContext, _: ()) -> Result<(), ActivityError> { + Ok(()) + } + + /// Also a no-op, but uses the default name from history construction to work with + /// canned histories + #[activity(name = DEFAULT_ACTIVITY_TYPE)] + pub(crate) async fn default(_ctx: ActivityContext, _: ()) -> Result<(), ActivityError> { + Ok(()) + } + + /// Activity that sleeps for provided duration. Name is overridden to provide compatibility with + /// some canned histories. + #[activity(name = "delay")] + pub(crate) async fn delay( + _ctx: ActivityContext, + duration: Duration, + ) -> Result<(), ActivityError> { + sleep(duration).await; + Ok(()) + } + + /// Always fails + #[activity] + pub(crate) async fn always_fail(_ctx: ActivityContext) -> Result<(), ActivityError> { + Err(anyhow::anyhow!("Oh no I failed!").into()) + } +} diff --git a/crates/sdk-core/tests/common/mod.rs b/crates/sdk-core/tests/common/mod.rs index 8a6b59e87..463db4675 100644 --- a/crates/sdk-core/tests/common/mod.rs +++ b/crates/sdk-core/tests/common/mod.rs @@ -1,6 +1,7 @@ //! Common integration testing utilities //! These utilities are specific to integration tests and depend on the full temporal-client stack. 
+pub(crate) mod activity_functions; pub(crate) mod fake_grpc_server; pub(crate) mod http_proxy; pub(crate) mod workflows; @@ -48,10 +49,11 @@ use temporalio_common::{ Logger, OtelCollectorOptions, PrometheusExporterOptions, TelemetryOptions, build_otlp_metric_exporter, metrics::CoreMeter, start_prometheus_metric_exporter, }, - worker::WorkerTaskTypes, + worker::{WorkerDeploymentOptions, WorkerDeploymentVersion, WorkerTaskTypes}, }; use temporalio_sdk::{ - IntoActivityFunc, Worker, WorkflowFunction, + Worker, WorkerOptions, WorkflowFunction, + activities::ActivityImplementer, interceptors::{ FailOnNondeterminismInterceptor, InterceptorWithNext, ReturnWorkflowExitValueInterceptor, WorkerInterceptor, @@ -98,9 +100,13 @@ pub(crate) async fn init_core_and_create_wf(test_name: &str) -> CoreWfStarter { starter } +pub(crate) fn integ_namespace() -> String { + env::var(INTEG_NAMESPACE_ENV_VAR).unwrap_or(NAMESPACE.to_string()) +} + pub(crate) fn integ_worker_config(tq: &str) -> WorkerConfig { WorkerConfig::builder() - .namespace(env::var(INTEG_NAMESPACE_ENV_VAR).unwrap_or(NAMESPACE.to_string())) + .namespace(integ_namespace()) .task_queue(tq) .max_outstanding_activities(100_usize) .max_outstanding_local_activities(100_usize) @@ -114,6 +120,20 @@ pub(crate) fn integ_worker_config(tq: &str) -> WorkerConfig { .expect("Configuration options construct properly") } +pub(crate) fn integ_sdk_config(tq: &str) -> WorkerOptions { + WorkerOptions::new(tq) + .deployment_options(WorkerDeploymentOptions { + version: WorkerDeploymentVersion { + deployment_name: "".to_owned(), + build_id: "test_build_id".to_owned(), + }, + use_worker_versioning: false, + default_versioning_behavior: None, + }) + .task_types(WorkerTaskTypes::all()) + .build() +} + /// Create a worker replay instance preloaded with provided histories. Returns the worker impl. 
pub(crate) fn init_core_replay_preloaded(test_name: &str, histories: I) -> CoreWorker where @@ -143,7 +163,7 @@ where I: Stream + Send + 'static, { let core = init_core_replay_stream("replay_worker_test", histories); - let mut worker = Worker::new_from_core(Arc::new(core), "replay_q".to_string()); + let mut worker = Worker::new_from_core(Arc::new(core)); worker.set_worker_interceptor(FailOnNondeterminismInterceptor {}); worker } @@ -216,13 +236,16 @@ pub(crate) async fn get_cloud_client() -> Client { pub(crate) struct CoreWfStarter { /// Used for both the task queue and workflow id task_queue_name: String, - pub worker_config: WorkerConfig, + pub sdk_config: WorkerOptions, /// Options to use when starting workflow(s) pub workflow_options: WorkflowOptions, initted_worker: OnceCell, runtime_override: Option>, client_override: Option, min_local_server_version: Option, + /// Run when initializing, allows for altering the config used to init the core worker + #[allow(clippy::type_complexity)] // It's not tho + core_config_mutator: Option>, } struct InitializedWorker { worker: Arc, @@ -289,16 +312,16 @@ impl CoreWfStarter { ) -> Self { let task_q_salt = rand_6_chars(); let task_queue = format!("{test_name}_{task_q_salt}"); - let mut worker_config = integ_worker_config(&task_queue); - worker_config.max_cached_workflows = 1000_usize; + let sdk_config = integ_sdk_config(&task_queue); Self { task_queue_name: task_queue, - worker_config, + sdk_config, initted_worker: OnceCell::new(), workflow_options: Default::default(), runtime_override: runtime_override.map(Arc::new), client_override, min_local_server_version: None, + core_config_mutator: None, } } @@ -307,23 +330,29 @@ impl CoreWfStarter { pub(crate) fn clone_no_worker(&self) -> Self { Self { task_queue_name: self.task_queue_name.clone(), - worker_config: self.worker_config.clone(), + sdk_config: self.sdk_config.clone(), workflow_options: self.workflow_options.clone(), runtime_override: self.runtime_override.clone(), 
client_override: self.client_override.clone(), min_local_server_version: self.min_local_server_version.clone(), initted_worker: Default::default(), + core_config_mutator: self.core_config_mutator.clone(), } } pub(crate) async fn worker(&mut self) -> TestWorker { - let w = self.get_worker().await; - let mut w = TestWorker::new(w); + let worker = self.get_worker().await; + let sdk = Worker::new_from_core_activities(worker, self.sdk_config.activities()); + let mut w = TestWorker::new(sdk); w.client = Some(self.get_client().await); w } + pub(crate) fn set_core_cfg_mutator(&mut self, mutator: impl Fn(&mut WorkerConfig) + 'static) { + self.core_config_mutator = Some(Arc::new(mutator)) + } + pub(crate) async fn shutdown(&mut self) { self.get_worker().await.shutdown().await; } @@ -441,7 +470,6 @@ impl CoreWfStarter { } else { init_integ_telem().unwrap() }; - let cfg = self.worker_config.clone(); let (connection, client) = if let Some(client) = self.client_override.take() { // Extract the connection from the client to pass to init_worker let connection = client.connection().clone(); @@ -452,11 +480,19 @@ impl CoreWfStarter { opts.metrics_meter = rt.telemetry().get_temporal_metric_meter(); let connection = Connection::connect(opts).await.expect("Must connect"); let client_opts = - temporalio_client::ClientOptions::new(cfg.namespace.clone()).build(); + temporalio_client::ClientOptions::new(integ_namespace()).build(); let client = Client::new(connection.clone(), client_opts); (connection, client) }; - let worker = init_worker(rt, cfg, connection).expect("Worker inits cleanly"); + let mut core_config = self + .sdk_config + .to_core_options(client.namespace()) + .expect("sdk config converts to core config"); + if let Some(ref ccm) = self.core_config_mutator { + ccm(&mut core_config); + } + let worker = + init_worker(rt, core_config, connection).expect("Worker inits cleanly"); InitializedWorker { worker: Arc::new(worker), client, @@ -469,7 +505,6 @@ impl CoreWfStarter { /// 
Provides conveniences for running integ tests with the SDK (against real server or mocks) pub(crate) struct TestWorker { inner: Worker, - pub core_worker: Arc, client: Option, pub started_workflows: Arc>>, /// If set true (default), and a client is available, we will fetch workflow results to @@ -478,14 +513,9 @@ pub(crate) struct TestWorker { } impl TestWorker { /// Create a new test worker - pub(crate) fn new(core_worker: Arc) -> Self { - let inner = Worker::new_from_core( - core_worker.clone(), - core_worker.get_config().task_queue.clone(), - ); + pub(crate) fn new(sdk: Worker) -> Self { Self { - inner, - core_worker, + inner: sdk, client: None, started_workflows: Arc::new(Mutex::new(vec![])), fetch_results: true, @@ -497,10 +527,9 @@ impl TestWorker { } pub(crate) fn worker_instance_key(&self) -> Uuid { - self.core_worker.worker_instance_key() + self.inner.worker_instance_key() } - // TODO: Maybe trait-ify? pub(crate) fn register_wf>( &mut self, workflow_type: impl Into, @@ -509,12 +538,12 @@ impl TestWorker { self.inner.register_wf(workflow_type, wf_function) } - pub(crate) fn register_activity( + pub(crate) fn register_activities( &mut self, - activity_type: impl Into, - act_function: impl IntoActivityFunc, - ) { - self.inner.register_activity(activity_type, act_function) + instance: AI, + ) -> &mut Self { + self.inner.register_activities::(instance); + self } /// Create a handle that can be used to submit workflows. 
Useful when workflows need to be @@ -632,6 +661,10 @@ impl TestWorker { tokio::try_join!(self.inner.run(), get_results_waiter)?; Ok(()) } + + pub(crate) fn core_worker(&self) -> Arc { + self.inner.core_worker() + } } pub(crate) struct TestWorkerSubmitterHandle { @@ -947,7 +980,7 @@ pub(crate) fn build_fake_sdk(mock_cfg: MockPollCfg) -> temporalio_sdk::Worker { c.ignore_evicts_on_shutdown = false; }); let core = mock_worker(mock); - let mut worker = temporalio_sdk::Worker::new_from_core(Arc::new(core), "replay_q".to_string()); + let mut worker = temporalio_sdk::Worker::new_from_core(Arc::new(core)); worker.set_worker_interceptor(FailOnNondeterminismInterceptor {}); worker } @@ -964,7 +997,7 @@ pub(crate) fn mock_sdk_cfg( let mut mock = build_mock_pollers(poll_cfg); mock.worker_cfg(mutator); let core = mock_worker(mock); - TestWorker::new(Arc::new(core)) + TestWorker::new(temporalio_sdk::Worker::new_from_core(Arc::new(core))) } #[derive(Default)] diff --git a/crates/sdk-core/tests/common/workflows.rs b/crates/sdk-core/tests/common/workflows.rs index f1c42408a..14eb74d21 100644 --- a/crates/sdk-core/tests/common/workflows.rs +++ b/crates/sdk-core/tests/common/workflows.rs @@ -1,31 +1,33 @@ +use crate::common::activity_functions::StdActivities; use std::time::Duration; -use temporalio_common::{ - prost_dur, - protos::{coresdk::AsJsonPayloadExt, temporal::api::common::v1::RetryPolicy}, -}; +use temporalio_common::{prost_dur, protos::temporal::api::common::v1::RetryPolicy}; use temporalio_sdk::{ActivityOptions, LocalActivityOptions, WfContext, WorkflowResult}; pub(crate) async fn la_problem_workflow(ctx: WfContext) -> WorkflowResult<()> { - ctx.local_activity(LocalActivityOptions { - activity_type: "delay".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - initial_interval: Some(prost_dur!(from_micros(15))), - backoff_coefficient: 1_000., - maximum_interval: Some(prost_dur!(from_millis(1500))), - maximum_attempts: 4, - 
non_retryable_error_types: vec![], + ctx.start_local_activity( + StdActivities::delay, + Duration::from_secs(15), + LocalActivityOptions { + retry_policy: RetryPolicy { + initial_interval: Some(prost_dur!(from_micros(15))), + backoff_coefficient: 1_000., + maximum_interval: Some(prost_dur!(from_millis(1500))), + maximum_attempts: 4, + non_retryable_error_types: vec![], + }, + timer_backoff_threshold: Some(Duration::from_secs(1)), + ..Default::default() }, - timer_backoff_threshold: Some(Duration::from_secs(1)), - ..Default::default() - }) + )? .await; - ctx.activity(ActivityOptions { - activity_type: "delay".to_string(), - start_to_close_timeout: Some(Duration::from_secs(20)), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_activity( + StdActivities::delay, + Duration::from_secs(15), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(20)), + ..Default::default() + }, + )? .await; Ok(().into()) } diff --git a/crates/sdk-core/tests/heavy_tests.rs b/crates/sdk-core/tests/heavy_tests.rs index 0ed12be14..1bbe49ec5 100644 --- a/crates/sdk-core/tests/heavy_tests.rs +++ b/crates/sdk-core/tests/heavy_tests.rs @@ -7,7 +7,8 @@ mod fuzzy_workflow; use crate::common::get_integ_runtime_options; use common::{ - CoreWfStarter, init_integ_telem, prom_metrics, rand_6_chars, workflows::la_problem_workflow, + CoreWfStarter, activity_functions::StdActivities, init_integ_telem, prom_metrics, rand_6_chars, + workflows::la_problem_workflow, }; use futures_util::{ StreamExt, @@ -24,6 +25,7 @@ use std::{ use temporalio_client::{ GetWorkflowResultOptions, WfClientExt, WorkflowClientTrait, WorkflowOptions, }; +use temporalio_macros::activities; use temporalio_common::{ protos::{ @@ -32,18 +34,24 @@ use temporalio_common::{ }, worker::WorkerTaskTypes, }; -use temporalio_sdk::{ActivityOptions, WfContext, WorkflowResult, activities::ActivityContext}; -use temporalio_sdk_core::{CoreRuntime, PollerBehavior, ResourceBasedTuner, 
ResourceSlotOptions}; +use temporalio_sdk::{ + ActivityOptions, WfContext, WorkflowResult, + activities::{ActivityContext, ActivityError}, +}; +use temporalio_sdk_core::{ + CoreRuntime, PollerBehavior, ResourceBasedTuner, ResourceSlotOptions, TunerHolder, +}; #[tokio::test] async fn activity_load() { const CONCURRENCY: usize = 512; let mut starter = CoreWfStarter::new("activity_load"); - starter.worker_config.max_outstanding_workflow_tasks = Some(CONCURRENCY); - starter.worker_config.max_cached_workflows = CONCURRENCY; - starter.worker_config.activity_task_poller_behavior = PollerBehavior::SimpleMaximum(10); - starter.worker_config.max_outstanding_activities = Some(CONCURRENCY); + starter.sdk_config.max_cached_workflows = CONCURRENCY; + starter.sdk_config.activity_task_poller_behavior = PollerBehavior::SimpleMaximum(10); + starter.sdk_config.tuner = + Arc::new(TunerHolder::fixed_size(CONCURRENCY, CONCURRENCY, 100, 100)); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; let activity_id = "act-1"; @@ -52,21 +60,27 @@ async fn activity_load() { let wf_fn = move |ctx: WfContext| { let task_queue = task_queue.clone(); - let payload = "yo".as_json_payload().unwrap(); + let input_str = "yo".to_string(); async move { - let activity = ActivityOptions { - activity_id: Some(activity_id.to_string()), - activity_type: "test_activity".to_string(), - input: payload.clone(), - task_queue, - schedule_to_start_timeout: Some(activity_timeout), - start_to_close_timeout: Some(activity_timeout), - schedule_to_close_timeout: Some(activity_timeout), - heartbeat_timeout: Some(activity_timeout), - cancellation_type: ActivityCancellationType::TryCancel, - ..Default::default() - }; - let res = ctx.activity(activity).await.unwrap_ok_payload(); + let res = ctx + .start_activity( + StdActivities::echo, + input_str.clone(), + ActivityOptions { + activity_id: Some(activity_id.to_string()), + task_queue, + schedule_to_start_timeout: 
Some(activity_timeout), + start_to_close_timeout: Some(activity_timeout), + schedule_to_close_timeout: Some(activity_timeout), + heartbeat_timeout: Some(activity_timeout), + cancellation_type: ActivityCancellationType::TryCancel, + ..Default::default() + }, + ) + .unwrap() + .await + .unwrap_ok_payload(); + let payload = input_str.as_json_payload().unwrap(); assert_eq!(res.data, payload.data); Ok(().into()) } @@ -75,10 +89,6 @@ async fn activity_load() { let starting = Instant::now(); let wf_type = "activity_load"; worker.register_wf(wf_type.to_owned(), wf_fn); - worker.register_activity( - "test_activity", - |_ctx: ActivityContext, echo: String| async move { Ok(echo) }, - ); join_all((0..CONCURRENCY).map(|i| { let worker = &worker; let wf_id = format!("activity_load_{i}"); @@ -108,12 +118,8 @@ async fn chunky_activities_resource_based() { const WORKFLOWS: usize = 100; let mut starter = CoreWfStarter::new("chunky_activities_resource_based"); - starter.worker_config.max_outstanding_workflow_tasks = None; - starter.worker_config.max_outstanding_local_activities = None; - starter.worker_config.max_outstanding_activities = None; - starter.worker_config.max_outstanding_nexus_tasks = None; - starter.worker_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(10_usize); - starter.worker_config.activity_task_poller_behavior = PollerBehavior::SimpleMaximum(10_usize); + starter.sdk_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(10_usize); + starter.sdk_config.activity_task_poller_behavior = PollerBehavior::SimpleMaximum(10_usize); let mut tuner = ResourceBasedTuner::new(0.7, 0.7); tuner .with_workflow_slots_options(ResourceSlotOptions::new( @@ -122,23 +128,50 @@ async fn chunky_activities_resource_based() { Duration::from_millis(0), )) .with_activity_slots_options(ResourceSlotOptions::new(5, 1000, Duration::from_millis(50))); - starter.worker_config.tuner = Some(Arc::new(tuner)); + starter.sdk_config.tuner = Arc::new(tuner); + + struct 
ChunkyActivities; + #[activities] + impl ChunkyActivities { + #[activity] + async fn chunky_echo(_ctx: ActivityContext, echo: String) -> Result { + tokio::task::spawn_blocking(move || { + // Allocate a gig and then do some CPU stuff on it + let mut mem = vec![0_u8; 1000 * 1024 * 1024]; + for _ in 1..10 { + for i in 0..mem.len() { + mem[i] &= mem[mem.len() - 1 - i] + } + } + Ok(echo) + }) + .await? + } + } + + starter.sdk_config.register_activities(ChunkyActivities); let mut worker = starter.worker().await; let activity_id = "act-1"; let activity_timeout = Duration::from_secs(30); let wf_fn = move |ctx: WfContext| { - let payload = "yo".as_json_payload().unwrap(); + let input_str = "yo".to_string(); async move { - let activity = ActivityOptions { - activity_id: Some(activity_id.to_string()), - activity_type: "test_activity".to_string(), - input: payload.clone(), - start_to_close_timeout: Some(activity_timeout), - ..Default::default() - }; - let res = ctx.activity(activity).await.unwrap_ok_payload(); + let res = ctx + .start_activity( + ChunkyActivities::chunky_echo, + input_str.clone(), + ActivityOptions { + activity_id: Some(activity_id.to_string()), + start_to_close_timeout: Some(activity_timeout), + ..Default::default() + }, + ) + .unwrap() + .await + .unwrap_ok_payload(); + let payload = input_str.as_json_payload().unwrap(); assert_eq!(res.data, payload.data); Ok(().into()) } @@ -147,22 +180,6 @@ async fn chunky_activities_resource_based() { let starting = Instant::now(); let wf_type = "chunky_activity_wf"; worker.register_wf(wf_type.to_owned(), wf_fn); - worker.register_activity( - "test_activity", - |_ctx: ActivityContext, echo: String| async move { - tokio::task::spawn_blocking(move || { - // Allocate a gig and then do some CPU stuff on it - let mut mem = vec![0_u8; 1000 * 1024 * 1024]; - for _ in 1..10 { - for i in 0..mem.len() { - mem[i] &= mem[mem.len() - 1 - i] - } - } - Ok(echo) - }) - .await? 
- }, - ); join_all((0..WORKFLOWS).map(|i| { let worker = &worker; let wf_id = format!("chunk_activity_{i}"); @@ -202,10 +219,10 @@ async fn workflow_load() { init_integ_telem(); let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let mut starter = CoreWfStarter::new_with_runtime("workflow_load", rt); - starter.worker_config.max_outstanding_workflow_tasks = Some(5); - starter.worker_config.max_cached_workflows = 200; - starter.worker_config.activity_task_poller_behavior = PollerBehavior::SimpleMaximum(10); - starter.worker_config.max_outstanding_activities = Some(100); + starter.sdk_config.max_cached_workflows = 200; + starter.sdk_config.activity_task_poller_behavior = PollerBehavior::SimpleMaximum(10); + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(5, 100, 100, 100)); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { let sigchan = ctx.make_signal_channel(SIGNAME).map(Ok); @@ -213,12 +230,15 @@ async fn workflow_load() { let real_stuff = async move { for _ in 0..5 { - ctx.activity(ActivityOptions { - activity_type: "echo_activity".to_string(), - start_to_close_timeout: Some(Duration::from_secs(5)), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_activity( + StdActivities::echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + ) + .unwrap() .await; ctx.timer(Duration::from_secs(1)).await; } @@ -230,10 +250,6 @@ async fn workflow_load() { Ok(().into()) }); - worker.register_activity( - "echo_activity", - |_ctx: ActivityContext, echo_me: String| async move { Ok(echo_me) }, - ); let client = starter.get_client().await; let mut workflow_handles = vec![]; @@ -279,18 +295,15 @@ async fn workflow_load() { async fn evict_while_la_running_no_interference() { let wf_name = 
"evict_while_la_running_no_interference"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.max_outstanding_local_activities = Some(20); - starter.worker_config.max_cached_workflows = 20; + starter.sdk_config.max_cached_workflows = 20; // Though it doesn't make sense to set wft higher than cached workflows, leaving this commented // introduces more instability that can be useful in the test. // starter.max_wft(20); + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(100, 10, 20, 1)); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), la_problem_workflow); - worker.register_activity("delay", |_: ActivityContext, _: String| async { - tokio::time::sleep(Duration::from_secs(15)).await; - Ok(()) - }); let client = starter.get_client().await; let subfs = FuturesUnordered::new(); @@ -305,7 +318,7 @@ async fn evict_while_la_running_no_interference() { ) .await .unwrap(); - let cw = worker.core_worker.clone(); + let cw = worker.core_worker(); let client = client.clone(); subfs.push(async move { // Evict the workflow @@ -345,9 +358,9 @@ pub async fn many_parallel_timers_longhist(ctx: WfContext) -> WorkflowResult<()> async fn can_paginate_long_history() { let wf_name = "can_paginate_long_history"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); // Do not use sticky queues so we are forced to paginate once history gets long - starter.worker_config.max_cached_workflows = 0; + starter.sdk_config.max_cached_workflows = 0; let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), many_parallel_timers_longhist); @@ -387,19 +400,35 @@ async fn poller_autoscaling_basic_loadtest() { let num_workflows = 100; let wf_name = "poller_load"; let mut starter = CoreWfStarter::new("poller_load"); - 
starter.worker_config.max_cached_workflows = 5000; - starter.worker_config.max_outstanding_workflow_tasks = Some(1000); - starter.worker_config.max_outstanding_activities = Some(1000); - starter.worker_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { + starter.sdk_config.max_cached_workflows = 5000; + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(1000, 1000, 100, 1)); + starter.sdk_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { minimum: 1, maximum: 200, initial: 5, }; - starter.worker_config.activity_task_poller_behavior = PollerBehavior::Autoscaling { + starter.sdk_config.activity_task_poller_behavior = PollerBehavior::Autoscaling { minimum: 1, maximum: 200, initial: 5, }; + + struct JitteryActivities; + #[activities] + impl JitteryActivities { + #[activity] + async fn jittery_echo( + _ctx: ActivityContext, + echo: String, + ) -> Result { + // Add some jitter to completions + let rand_millis = rand::rng().random_range(0..500); + tokio::time::sleep(Duration::from_millis(rand_millis)).await; + Ok(echo) + } + } + + starter.sdk_config.register_activities(JitteryActivities); let mut worker = starter.worker().await; let shutdown_handle = worker.inner_mut().shutdown_handle(); worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { @@ -408,12 +437,15 @@ async fn poller_autoscaling_basic_loadtest() { let real_stuff = async move { for _ in 0..5 { - ctx.activity(ActivityOptions { - activity_type: "echo".to_string(), - start_to_close_timeout: Some(Duration::from_secs(5)), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_activity( + JitteryActivities::jittery_echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + ) + .unwrap() .await; } }; @@ -424,12 +456,6 @@ async fn poller_autoscaling_basic_loadtest() { Ok(().into()) }); - worker.register_activity("echo", |_: ActivityContext, 
echo: String| async move { - // Add some jitter to completions - let rand_millis = rand::rng().random_range(0..500); - tokio::time::sleep(Duration::from_millis(rand_millis)).await; - Ok(echo) - }); let client = starter.get_client().await; let mut workflow_handles = vec![]; diff --git a/crates/sdk-core/tests/heavy_tests/fuzzy_workflow.rs b/crates/sdk-core/tests/heavy_tests/fuzzy_workflow.rs index 61b916536..7faee7b1f 100644 --- a/crates/sdk-core/tests/heavy_tests/fuzzy_workflow.rs +++ b/crates/sdk-core/tests/heavy_tests/fuzzy_workflow.rs @@ -1,13 +1,11 @@ -use crate::common::CoreWfStarter; +use crate::common::{CoreWfStarter, activity_functions::StdActivities}; use futures_util::{FutureExt, StreamExt, sink, stream::FuturesUnordered}; use rand::{Rng, SeedableRng, prelude::Distribution, rngs::SmallRng}; -use std::{future, time::Duration}; +use std::{future, sync::Arc, time::Duration}; use temporalio_client::{WfClientExt, WorkflowClientTrait, WorkflowOptions}; use temporalio_common::protos::coresdk::{AsJsonPayloadExt, FromJsonPayloadExt, IntoPayloadsExt}; -use temporalio_sdk::{ - ActivityOptions, LocalActivityOptions, WfContext, WorkflowResult, - activities::{ActivityContext, ActivityError}, -}; +use temporalio_sdk::{ActivityOptions, LocalActivityOptions, WfContext, WorkflowResult}; +use temporalio_sdk_core::TunerHolder; use tokio_util::sync::CancellationToken; const FUZZY_SIG: &str = "fuzzy_sig"; @@ -31,10 +29,6 @@ impl Distribution for FuzzyWfActionSampler { } } -async fn echo(_ctx: ActivityContext, echo_me: String) -> Result { - Ok(echo_me) -} - async fn fuzzy_wf_def(ctx: WfContext) -> WorkflowResult<()> { let sigchan = ctx .make_signal_channel(FUZZY_SIG) @@ -46,21 +40,27 @@ async fn fuzzy_wf_def(ctx: WfContext) -> WorkflowResult<()> { .take_until(done.cancelled()) .for_each_concurrent(None, |action| match action { FuzzyWfAction::DoAct => ctx - .activity(ActivityOptions { - activity_type: "echo_activity".to_string(), - start_to_close_timeout: 
Some(Duration::from_secs(5)), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + .start_activity( + StdActivities::echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + ) + .unwrap() .map(|_| ()) .boxed(), FuzzyWfAction::DoLocalAct => ctx - .local_activity(LocalActivityOptions { - activity_type: "echo_activity".to_string(), - start_to_close_timeout: Some(Duration::from_secs(5)), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + .start_local_activity( + StdActivities::echo, + "hi!".to_string(), + LocalActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + ) + .unwrap() .map(|_| ()) .boxed(), FuzzyWfAction::Shutdown => { @@ -78,12 +78,12 @@ async fn fuzzy_workflow() { let num_workflows = 200; let wf_name = "fuzzy_wf"; let mut starter = CoreWfStarter::new("fuzzy_workflow"); - starter.worker_config.max_outstanding_workflow_tasks = Some(25); - starter.worker_config.max_cached_workflows = 25; - starter.worker_config.max_outstanding_activities = Some(25); + starter.sdk_config.max_cached_workflows = 25; + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(25, 25, 100, 100)); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), fuzzy_wf_def); - worker.register_activity("echo_activity", echo); + worker.register_activities(StdActivities); + let client = starter.get_client().await; let mut workflow_handles = vec![]; diff --git a/crates/sdk-core/tests/integ_tests/activity_functions.rs b/crates/sdk-core/tests/integ_tests/activity_functions.rs deleted file mode 100644 index 165099d77..000000000 --- a/crates/sdk-core/tests/integ_tests/activity_functions.rs +++ /dev/null @@ -1,5 +0,0 @@ -use temporalio_sdk::activities::{ActivityContext, ActivityError}; - -pub(crate) async fn echo(_ctx: ActivityContext, e: String) -> Result { - Ok(e) -} diff 
--git a/crates/sdk-core/tests/integ_tests/heartbeat_tests.rs b/crates/sdk-core/tests/integ_tests/heartbeat_tests.rs index 9785cfda0..4f2e0a077 100644 --- a/crates/sdk-core/tests/integ_tests/heartbeat_tests.rs +++ b/crates/sdk-core/tests/integ_tests/heartbeat_tests.rs @@ -1,4 +1,4 @@ -use crate::common::{CoreWfStarter, init_core_and_create_wf}; +use crate::common::{CoreWfStarter, activity_functions::StdActivities, init_core_and_create_wf}; use assert_matches::assert_matches; use std::time::Duration; use temporalio_client::{WfClientExt, WorkflowOptions}; @@ -7,7 +7,7 @@ use temporalio_common::{ protos::{ DEFAULT_ACTIVITY_TYPE, coresdk::{ - ActivityHeartbeat, ActivityTaskCompletion, AsJsonPayloadExt, IntoCompletion, + ActivityHeartbeat, ActivityTaskCompletion, IntoCompletion, activity_result::{ self, ActivityExecutionResult, ActivityResolution, activity_resolution as act_res, }, @@ -25,7 +25,7 @@ use temporalio_common::{ test_utils::schedule_activity_cmd, }, }; -use temporalio_sdk::{ActivityOptions, WfContext, activities::ActivityContext}; +use temporalio_sdk::{ActivityOptions, WfContext}; use temporalio_sdk_core::test_help::{WorkerTestHelpers, drain_pollers_and_shutdown}; use tokio::time::sleep; @@ -183,28 +183,26 @@ async fn many_act_fails_with_heartbeats() { async fn activity_doesnt_heartbeat_hits_timeout_then_completes() { let wf_name = "activity_doesnt_heartbeat_hits_timeout_then_completes"; let mut starter = CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; let client = starter.get_client().await; - worker.register_activity( - "echo_activity", - |_ctx: ActivityContext, echo_me: String| async move { - sleep(Duration::from_secs(4)).await; - Ok(echo_me) - }, - ); + worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { let res = ctx - .activity(ActivityOptions { - activity_type: "echo_activity".to_string(), - input: "hi!".as_json_payload().expect("serializes fine"), - 
start_to_close_timeout: Some(Duration::from_secs(10)), - heartbeat_timeout: Some(Duration::from_secs(2)), - retry_policy: Some(RetryPolicy { - maximum_attempts: 1, + .start_activity( + StdActivities::delay, + Duration::from_secs(4), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(10)), + heartbeat_timeout: Some(Duration::from_secs(2)), + retry_policy: Some(RetryPolicy { + maximum_attempts: 1, + ..Default::default() + }), ..Default::default() - }), - ..Default::default() - }) + }, + ) + .unwrap() .await; assert_eq!(res.timed_out(), Some(TimeoutType::Heartbeat)); Ok(().into()) diff --git a/crates/sdk-core/tests/integ_tests/metrics_tests.rs b/crates/sdk-core/tests/integ_tests/metrics_tests.rs index 1874e2d38..5eae3040f 100644 --- a/crates/sdk-core/tests/integ_tests/metrics_tests.rs +++ b/crates/sdk-core/tests/integ_tests/metrics_tests.rs @@ -23,7 +23,7 @@ use temporalio_common::{ prost_dur, protos::{ coresdk::{ - ActivityTaskCompletion, AsJsonPayloadExt, + ActivityTaskCompletion, activity_result::ActivityExecutionResult, nexus::{NexusTaskCompletion, nexus_task, nexus_task_completion}, workflow_activation::{WorkflowActivationJob, workflow_activation_job}, @@ -57,6 +57,7 @@ use temporalio_common::{ }, worker::WorkerTaskTypes, }; +use temporalio_macros::activities; use temporalio_sdk::{ ActivityOptions, CancellableFuture, LocalActivityOptions, NexusOperationOptions, WfContext, activities::{ActivityContext, ActivityError}, @@ -401,7 +402,7 @@ async fn query_of_closed_workflow_doesnt_tick_terminal_metric( let mut starter = CoreWfStarter::new_with_runtime("query_of_closed_workflow_doesnt_tick_terminal_metric", rt); // Disable cache to ensure replay happens completely - starter.worker_config.max_cached_workflows = 0_usize; + starter.sdk_config.max_cached_workflows = 0_usize; let worker = starter.get_worker().await; let run_id = starter.start_wf().await; let task = worker.poll_workflow_activation().await.unwrap(); @@ -770,70 +771,87 @@ async fn 
activity_metrics() { let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let wf_name = "activity_metrics"; let mut starter = CoreWfStarter::new_with_runtime(wf_name, rt); - starter.worker_config.graceful_shutdown_period = Some(Duration::from_secs(1)); + starter.sdk_config.graceful_shutdown_period = Some(Duration::from_secs(1)); + + struct PassFailActivities; + #[activities] + impl PassFailActivities { + #[activity(name = "pass_fail_act")] + async fn pass_fail_act(ctx: ActivityContext, i: String) -> Result { + match i.as_str() { + "pass" => Ok("pass".to_string()), + "cancel" => { + ctx.cancelled().await; + Err(ActivityError::cancelled()) + } + _ => Err(anyhow!("fail").into()), + } + } + } + + starter.sdk_config.register_activities(PassFailActivities); let task_queue = starter.get_task_queue().to_owned(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_string(), |ctx: WfContext| async move { - let normal_act_pass = ctx.activity(ActivityOptions { - activity_type: "pass_fail_act".to_string(), - input: "pass".as_json_payload().expect("serializes fine"), - start_to_close_timeout: Some(Duration::from_secs(1)), - ..Default::default() - }); - let normal_act_fail = ctx.activity(ActivityOptions { - activity_type: "pass_fail_act".to_string(), - input: "fail".as_json_payload().expect("serializes fine"), - start_to_close_timeout: Some(Duration::from_secs(1)), - retry_policy: Some(RetryPolicy { - maximum_attempts: 1, - ..Default::default() - }), - ..Default::default() - }); + let normal_act_pass = ctx + .start_activity( + PassFailActivities::pass_fail_act, + "pass".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(1)), + ..Default::default() + }, + ) + .unwrap(); + let normal_act_fail = ctx + .start_activity( + PassFailActivities::pass_fail_act, + "fail".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(1)), + retry_policy: Some(RetryPolicy { + 
maximum_attempts: 1, + ..Default::default() + }), + ..Default::default() + }, + ) + .unwrap(); join!(normal_act_pass, normal_act_fail); - let local_act_pass = ctx.local_activity(LocalActivityOptions { - activity_type: "pass_fail_act".to_string(), - input: "pass".as_json_payload().expect("serializes fine"), - ..Default::default() - }); - let local_act_fail = ctx.local_activity(LocalActivityOptions { - activity_type: "pass_fail_act".to_string(), - input: "fail".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - maximum_attempts: 1, + let local_act_pass = ctx.start_local_activity( + PassFailActivities::pass_fail_act, + "pass".to_string(), + LocalActivityOptions::default(), + )?; + let local_act_fail = ctx.start_local_activity( + PassFailActivities::pass_fail_act, + "fail".to_string(), + LocalActivityOptions { + retry_policy: RetryPolicy { + maximum_attempts: 1, + ..Default::default() + }, ..Default::default() }, - ..Default::default() - }); - let local_act_cancel = ctx.local_activity(LocalActivityOptions { - activity_type: "pass_fail_act".to_string(), - input: "cancel".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - maximum_attempts: 1, + )?; + let local_act_cancel = ctx.start_local_activity( + PassFailActivities::pass_fail_act, + "cancel".to_string(), + LocalActivityOptions { + retry_policy: RetryPolicy { + maximum_attempts: 1, + ..Default::default() + }, ..Default::default() }, - ..Default::default() - }); + )?; join!(local_act_pass, local_act_fail); // TODO: Currently takes a WFT b/c of https://github.com/temporalio/sdk-core/issues/856 local_act_cancel.cancel(&ctx); local_act_cancel.await; Ok(().into()) }); - worker.register_activity( - "pass_fail_act", - |ctx: ActivityContext, i: String| async move { - match i.as_str() { - "pass" => Ok("pass"), - "cancel" => { - ctx.cancelled().await; - Err(ActivityError::cancelled()) - } - _ => Err(anyhow!("fail").into()), - } - }, - ); worker .submit_wf( @@ -905,7 +923,7 @@ 
async fn nexus_metrics() { let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let wf_name = "nexus_metrics"; let mut starter = CoreWfStarter::new_with_runtime(wf_name, rt); - starter.worker_config.task_types = WorkerTaskTypes { + starter.sdk_config.task_types = WorkerTaskTypes { enable_workflows: true, enable_local_activities: false, enable_remote_activities: false, @@ -1087,7 +1105,7 @@ async fn evict_on_complete_does_not_count_as_forced_eviction() { let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let wf_name = "evict_on_complete_does_not_count_as_forced_eviction"; let mut starter = CoreWfStarter::new_with_runtime(wf_name, rt); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf( @@ -1170,17 +1188,13 @@ async fn metrics_available_from_custom_slot_supplier() { let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let mut starter = CoreWfStarter::new_with_runtime("metrics_available_from_custom_slot_supplier", rt); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); - starter.worker_config.max_outstanding_workflow_tasks = None; - starter.worker_config.max_outstanding_local_activities = None; - starter.worker_config.max_outstanding_activities = None; - starter.worker_config.max_outstanding_nexus_tasks = None; + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut tb = TunerBuilder::default(); tb.workflow_slot_supplier(Arc::new(MetricRecordingSlotSupplier:: { inner: FixedSizeSlotSupplier::new(5), metrics: OnceLock::new(), })); - starter.worker_config.tuner = Some(Arc::new(tb.build())); + starter.sdk_config.tuner = Arc::new(tb.build()); let mut worker = starter.worker().await; worker.register_wf( @@ -1334,8 +1348,8 @@ async fn sticky_queue_label_strategy( let wf_name = 
format!("sticky_queue_label_strategy_{strategy:?}"); let mut starter = CoreWfStarter::new_with_runtime(&wf_name, rt); // Enable sticky queues by setting a reasonable cache size - starter.worker_config.max_cached_workflows = 10_usize; - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.max_cached_workflows = 10_usize; + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let task_queue = starter.get_task_queue().to_owned(); let mut worker = starter.worker().await; @@ -1411,15 +1425,10 @@ async fn resource_based_tuner_metrics() { let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let wf_name = "resource_based_tuner_metrics"; let mut starter = CoreWfStarter::new_with_runtime(wf_name, rt); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); - starter.worker_config.max_outstanding_workflow_tasks = None; - starter.worker_config.max_outstanding_local_activities = None; - starter.worker_config.max_outstanding_activities = None; - starter.worker_config.max_outstanding_nexus_tasks = None; - + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); // Create a resource-based tuner with reasonable thresholds let tuner = ResourceBasedTuner::new(0.8, 0.8); - starter.worker_config.tuner = Some(Arc::new(tuner)); + starter.sdk_config.tuner = Arc::new(tuner); let mut worker = starter.worker().await; diff --git a/crates/sdk-core/tests/integ_tests/polling_tests.rs b/crates/sdk-core/tests/integ_tests/polling_tests.rs index 9c0b8d701..5fb63e04a 100644 --- a/crates/sdk-core/tests/integ_tests/polling_tests.rs +++ b/crates/sdk-core/tests/integ_tests/polling_tests.rs @@ -1,9 +1,7 @@ -use crate::{ - common::{ - CoreWfStarter, INTEG_CLIENT_NAME, INTEG_CLIENT_VERSION, get_integ_client, - init_core_and_create_wf, init_integ_telem, integ_dev_server_config, integ_worker_config, - }, - integ_tests::activity_functions::echo, +use crate::common::{ + CoreWfStarter, INTEG_CLIENT_NAME, 
INTEG_CLIENT_VERSION, activity_functions::StdActivities, + get_integ_client, init_core_and_create_wf, init_integ_telem, integ_dev_server_config, + integ_worker_config, }; use assert_matches::assert_matches; use futures_util::{FutureExt, StreamExt, future::join_all}; @@ -22,7 +20,7 @@ use temporalio_common::{ prost_dur, protos::{ coresdk::{ - AsJsonPayloadExt, IntoCompletion, + IntoCompletion, activity_task::activity_task as act_task, workflow_activation::{FireTimer, WorkflowActivationJob, workflow_activation_job}, workflow_commands::{ActivityCancellationType, RequestCancelActivity, StartTimer}, @@ -35,7 +33,7 @@ use temporalio_common::{ }; use temporalio_sdk::{ActivityOptions, WfContext}; use temporalio_sdk_core::{ - CoreRuntime, PollerBehavior, RuntimeOptions, + CoreRuntime, PollerBehavior, RuntimeOptions, TunerHolder, ephemeral_server::{TemporalDevServerConfig, default_cached_download}, init_worker, test_help::{NAMESPACE, WorkerTestHelpers, drain_pollers_and_shutdown}, @@ -249,32 +247,34 @@ async fn small_workflow_slots_and_pollers(#[values(false, true)] use_autoscaling let wf_name = "only_one_workflow_slot_and_two_pollers"; let mut starter = CoreWfStarter::new(wf_name); if use_autoscaling { - starter.worker_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { + starter.sdk_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { minimum: 1, maximum: 5, initial: 1, }; } else { - starter.worker_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(2); + starter.sdk_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(2); } - starter.worker_config.max_outstanding_workflow_tasks = Some(2_usize); - starter.worker_config.max_outstanding_local_activities = Some(1_usize); - starter.worker_config.activity_task_poller_behavior = PollerBehavior::SimpleMaximum(1); - starter.worker_config.max_outstanding_activities = Some(1_usize); + starter.sdk_config.activity_task_poller_behavior = PollerBehavior::SimpleMaximum(1); 
+ starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(2, 1, 1, 1)); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; + worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { for _ in 0..3 { - ctx.activity(ActivityOptions { - activity_type: "echo_activity".to_string(), - start_to_close_timeout: Some(Duration::from_secs(5)), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_activity( + StdActivities::echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + ) + .unwrap() .await; } Ok(().into()) }); - worker.register_activity("echo_activity", echo); worker .submit_wf( starter.get_task_queue(), diff --git a/crates/sdk-core/tests/integ_tests/update_tests.rs b/crates/sdk-core/tests/integ_tests/update_tests.rs index 6621602e7..b22e2f3ff 100644 --- a/crates/sdk-core/tests/integ_tests/update_tests.rs +++ b/crates/sdk-core/tests/integ_tests/update_tests.rs @@ -1,5 +1,6 @@ use crate::common::{ - CoreWfStarter, WorkflowHandleExt, init_core_and_create_wf, init_core_replay_preloaded, + CoreWfStarter, WorkflowHandleExt, activity_functions::StdActivities, init_core_and_create_wf, + init_core_replay_preloaded, }; use anyhow::anyhow; use assert_matches::assert_matches; @@ -36,8 +37,10 @@ use temporalio_common::{ }, worker::WorkerTaskTypes, }; +use temporalio_macros::activities; use temporalio_sdk::{ - ActivityOptions, LocalActivityOptions, UpdateContext, WfContext, activities::ActivityContext, + ActivityOptions, LocalActivityOptions, UpdateContext, WfContext, + activities::{ActivityContext, ActivityError}, }; use temporalio_sdk_core::{ Worker, @@ -638,19 +641,21 @@ async fn update_with_local_acts() { let mut starter = CoreWfStarter::new(wf_name); // Short task timeout to get activities to heartbeat without taking ages starter.workflow_options.task_timeout = Some(Duration::from_secs(1)); + 
starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; let client = starter.get_client().await; + worker.register_wf(wf_name.to_owned(), move |ctx: WfContext| async move { ctx.update_handler( "update", |_: &_, _: ()| Ok(()), move |ctx: UpdateContext, _: ()| async move { ctx.wf_ctx - .local_activity(LocalActivityOptions { - activity_type: "echo_activity".to_string(), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + .start_local_activity( + StdActivities::delay, + Duration::from_secs(3), + LocalActivityOptions::default(), + )? .await; Ok("hi") }, @@ -659,14 +664,6 @@ async fn update_with_local_acts() { sig.next().await; Ok(().into()) }); - worker.register_activity( - "echo_activity", - |_ctx: ActivityContext, echo_me: String| async move { - // Sleep so we'll heartbeat - tokio::time::sleep(Duration::from_secs(3)).await; - Ok(echo_me) - }, - ); let handle = starter.start_with_worker(wf_name, &mut worker).await; let wf_id = starter.get_task_queue().to_string(); @@ -713,7 +710,7 @@ async fn update_with_local_acts() { async fn update_rejection_sdk() { let wf_name = "update_rejection_sdk"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let client = starter.get_client().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { @@ -757,7 +754,7 @@ async fn update_rejection_sdk() { async fn update_fail_sdk() { let wf_name = "update_fail_sdk"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let client = starter.get_client().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { @@ -801,7 +798,7 @@ async fn 
update_fail_sdk() { async fn update_timer_sequence() { let wf_name = "update_timer_sequence"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let client = starter.get_client().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { @@ -849,7 +846,7 @@ async fn update_timer_sequence() { async fn task_failure_during_validation() { let wf_name = "task_failure_during_validation"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); starter.workflow_options.task_timeout = Some(Duration::from_secs(1)); let mut worker = starter.worker().await; let client = starter.get_client().await; @@ -910,7 +907,7 @@ async fn task_failure_during_validation() { async fn task_failure_after_update() { let wf_name = "task_failure_after_update"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); starter.workflow_options.task_timeout = Some(Duration::from_secs(1)); let mut worker = starter.worker().await; let client = starter.get_client().await; @@ -955,27 +952,46 @@ async fn task_failure_after_update() { .unwrap(); } +static BARR: LazyLock = LazyLock::new(|| Barrier::new(2)); +static ACT_RAN: AtomicBool = AtomicBool::new(false); #[tokio::test] async fn worker_restarted_in_middle_of_update() { let wf_name = "worker_restarted_in_middle_of_update"; let mut starter = CoreWfStarter::new(wf_name); + + struct BlockingActivities; + #[activities] + impl BlockingActivities { + #[activity] + async fn blocks(_ctx: ActivityContext, echo_me: String) -> Result { + BARR.wait().await; + if !ACT_RAN.fetch_or(true, Ordering::Relaxed) { + // On first 
run fail the task so we'll get retried on the new worker + return Err(anyhow!("Fail first time").into()); + } + Ok(echo_me) + } + } + + starter.sdk_config.register_activities(BlockingActivities); let mut worker = starter.worker().await; let client = starter.get_client().await; - static BARR: LazyLock = LazyLock::new(|| Barrier::new(2)); - static ACT_RAN: AtomicBool = AtomicBool::new(false); worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { ctx.update_handler( "update", |_: &_, _: ()| Ok(()), move |ctx: UpdateContext, _: ()| async move { ctx.wf_ctx - .activity(ActivityOptions { - activity_type: "blocks".to_string(), - input: "hi!".as_json_payload().expect("serializes fine"), - start_to_close_timeout: Some(Duration::from_secs(2)), - ..Default::default() - }) + .start_activity( + BlockingActivities::blocks, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(2)), + ..Default::default() + }, + ) + .unwrap() .await; Ok(()) }, @@ -984,17 +1000,6 @@ async fn worker_restarted_in_middle_of_update() { sig.next().await; Ok(().into()) }); - worker.register_activity( - "blocks", - |_ctx: ActivityContext, echo_me: String| async move { - BARR.wait().await; - if !ACT_RAN.fetch_or(true, Ordering::Relaxed) { - // On first run fail the task so we'll get retried on the new worker - return Err(anyhow!("Fail first time").into()); - } - Ok(echo_me) - }, - ); let handle = starter.start_with_worker(wf_name, &mut worker).await; @@ -1058,6 +1063,7 @@ async fn worker_restarted_in_middle_of_update() { async fn update_after_empty_wft() { let wf_name = "update_after_empty_wft"; let mut starter = CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; let client = starter.get_client().await; @@ -1071,12 +1077,15 @@ async fn update_after_empty_wft() { return Ok(()); } ctx.wf_ctx - .activity(ActivityOptions { - activity_type: "echo".to_string(), - input: 
"hi!".as_json_payload().expect("serializes fine"), - start_to_close_timeout: Some(Duration::from_secs(2)), - ..Default::default() - }) + .start_activity( + StdActivities::echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(2)), + ..Default::default() + }, + ) + .unwrap() .await; Ok(()) }, @@ -1085,12 +1094,15 @@ async fn update_after_empty_wft() { let sig_handle = async { sig.next().await; ACT_STARTED.store(true, Ordering::Release); - ctx.activity(ActivityOptions { - activity_type: "echo".to_string(), - input: "hi!".as_json_payload().expect("serializes fine"), - start_to_close_timeout: Some(Duration::from_secs(2)), - ..Default::default() - }) + ctx.start_activity( + StdActivities::echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(2)), + ..Default::default() + }, + ) + .unwrap() .await; ACT_STARTED.store(false, Ordering::Release); }; @@ -1099,10 +1111,6 @@ async fn update_after_empty_wft() { }); Ok(().into()) }); - worker.register_activity( - "echo", - |_ctx: ActivityContext, echo_me: String| async move { Ok(echo_me) }, - ); let handle = starter.start_with_worker(wf_name, &mut worker).await; @@ -1147,6 +1155,7 @@ async fn update_after_empty_wft() { async fn update_lost_on_activity_mismatch() { let wf_name = "update_lost_on_activity_mismatch"; let mut starter = CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; let client = starter.get_client().await; @@ -1167,23 +1176,22 @@ async fn update_lost_on_activity_mismatch() { for _ in 1..=3 { let cr = can_run.clone(); ctx.wait_condition(|| cr.load(Ordering::Relaxed) > 0).await; - ctx.activity(ActivityOptions { - activity_type: "echo".to_string(), - input: "hi!".as_json_payload().expect("serializes fine"), - start_to_close_timeout: Some(Duration::from_secs(2)), - ..Default::default() - }) + ctx.start_activity( + StdActivities::echo, + "hi!".to_string(), + 
ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(2)), + ..Default::default() + }, + ) + .unwrap() .await; can_run.fetch_sub(1, Ordering::Release); } Ok(().into()) }); - worker.register_activity( - "echo", - |_ctx: ActivityContext, echo_me: String| async move { Ok(echo_me) }, - ); - let core_worker = worker.core_worker.clone(); + let core_worker = worker.core_worker(); let handle = starter.start_with_worker(wf_name, &mut worker).await; let wf_id = starter.get_task_queue().to_string(); diff --git a/crates/sdk-core/tests/integ_tests/worker_heartbeat_tests.rs b/crates/sdk-core/tests/integ_tests/worker_heartbeat_tests.rs index b53fd1477..89312c132 100644 --- a/crates/sdk-core/tests/integ_tests/worker_heartbeat_tests.rs +++ b/crates/sdk-core/tests/integ_tests/worker_heartbeat_tests.rs @@ -1,4 +1,6 @@ -use crate::common::{ANY_PORT, CoreWfStarter, eventually, get_integ_telem_options}; +use crate::common::{ + ANY_PORT, CoreWfStarter, activity_functions::StdActivities, eventually, get_integ_telem_options, +}; use anyhow::anyhow; use crossbeam_utils::atomic::AtomicCell; use futures_util::StreamExt; @@ -31,9 +33,14 @@ use temporalio_common::{ build_otlp_metric_exporter, start_prometheus_metric_exporter, }, }; -use temporalio_sdk::{ActivityOptions, WfContext, activities::ActivityContext}; +use temporalio_macros::activities; +use temporalio_sdk::{ + ActivityOptions, WfContext, + activities::{ActivityContext, ActivityError}, +}; use temporalio_sdk_core::{ CoreRuntime, PollerBehavior, ResourceBasedTuner, ResourceSlotOptions, RuntimeOptions, + TunerHolder, }; use tokio::{sync::Notify, time::sleep}; use tonic::IntoRequest; @@ -127,48 +134,62 @@ async fn docker_worker_heartbeat_basic(#[values("otel", "prom", "no_metrics")] b } let wf_name = format!("worker_heartbeat_basic_{backing}"); let mut starter = CoreWfStarter::new_with_runtime(&wf_name, rt); - starter.worker_config.max_outstanding_workflow_tasks = Some(5_usize); - starter.worker_config.max_cached_workflows 
= 5_usize; - starter.worker_config.max_outstanding_activities = Some(5_usize); - starter.worker_config.plugins = vec![ - PluginInfo { - name: "plugin1".to_string(), - version: "1".to_string(), - }, - PluginInfo { - name: "plugin2".to_string(), - version: "2".to_string(), - }, - ] - .into_iter() - .collect(); - let mut worker = starter.worker().await; - let worker_instance_key = worker.worker_instance_key(); - - worker.register_wf(wf_name.to_string(), |ctx: WfContext| async move { - ctx.activity(ActivityOptions { - activity_type: "pass_fail_act".to_string(), - input: "pass".as_json_payload().expect("serializes fine"), - start_to_close_timeout: Some(Duration::from_secs(5)), - ..Default::default() - }) - .await; - Ok(().into()) + starter.sdk_config.max_cached_workflows = 5_usize; + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(5, 5, 100, 0)); + starter.set_core_cfg_mutator(|c| { + c.plugins = vec![ + PluginInfo { + name: "plugin1".to_string(), + version: "1".to_string(), + }, + PluginInfo { + name: "plugin2".to_string(), + version: "2".to_string(), + }, + ] + .into_iter() + .collect(); }); - let acts_started = Arc::new(Notify::new()); let acts_done = Arc::new(Notify::new()); - let acts_started_act = acts_started.clone(); - let acts_done_act = acts_done.clone(); - worker.register_activity("pass_fail_act", move |_ctx: ActivityContext, i: String| { - let acts_started = acts_started_act.clone(); - let acts_done = acts_done_act.clone(); - async move { - acts_started.notify_one(); - acts_done.notified().await; + struct NotifyActivities { + acts_started: Arc, + acts_done: Arc, + } + #[activities] + impl NotifyActivities { + #[activity] + async fn pass_fail_act( + self: Arc, + _ctx: ActivityContext, + i: String, + ) -> Result { + self.acts_started.notify_one(); + self.acts_done.notified().await; Ok(i) } + } + + starter.sdk_config.register_activities(NotifyActivities { + acts_started: acts_started.clone(), + acts_done: acts_done.clone(), + }); + let mut worker = 
starter.worker().await; + let worker_instance_key = worker.worker_instance_key(); + + worker.register_wf(wf_name.to_string(), |ctx: WfContext| async move { + ctx.start_activity( + NotifyActivities::pass_fail_act, + "pass".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + ) + .unwrap() + .await; + Ok(().into()) }); starter @@ -282,39 +303,35 @@ async fn docker_worker_heartbeat_tuner() { tuner .with_workflow_slots_options(ResourceSlotOptions::new(2, 10, Duration::from_millis(0))) .with_activity_slots_options(ResourceSlotOptions::new(5, 10, Duration::from_millis(50))); - starter.worker_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { + starter.sdk_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { minimum: 1, maximum: 200, initial: 5, }; - starter.worker_config.nexus_task_poller_behavior = PollerBehavior::Autoscaling { + starter.sdk_config.nexus_task_poller_behavior = PollerBehavior::Autoscaling { minimum: 1, maximum: 200, initial: 5, }; - starter.worker_config.max_outstanding_workflow_tasks = None; - starter.worker_config.max_outstanding_local_activities = None; - starter.worker_config.max_outstanding_activities = None; - starter.worker_config.max_outstanding_nexus_tasks = None; - starter.worker_config.tuner = Some(Arc::new(tuner)); + starter.sdk_config.tuner = Arc::new(tuner); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; let worker_instance_key = worker.worker_instance_key(); // Run a workflow worker.register_wf(wf_name.to_string(), |ctx: WfContext| async move { - ctx.activity(ActivityOptions { - activity_type: "pass_fail_act".to_string(), - input: "pass".as_json_payload().expect("serializes fine"), - start_to_close_timeout: Some(Duration::from_secs(1)), - ..Default::default() - }) + ctx.start_activity( + StdActivities::echo, + "pass".to_string(), + ActivityOptions { + start_to_close_timeout: 
Some(Duration::from_secs(1)), + ..Default::default() + }, + ) + .unwrap() .await; Ok(().into()) }); - worker.register_activity( - "pass_fail_act", - |_ctx: ActivityContext, i: String| async move { Ok(i) }, - ); starter.start_with_worker(wf_name, &mut worker).await; worker.run_until_done().await.unwrap(); @@ -535,27 +552,53 @@ fn after_shutdown_checks( ); } +static HISTORY_WF1_ACTIVITY_STARTED: Notify = Notify::const_new(); +static HISTORY_WF1_ACTIVITY_FINISH: Notify = Notify::const_new(); +static HISTORY_WF2_ACTIVITY_STARTED: Notify = Notify::const_new(); +static HISTORY_WF2_ACTIVITY_FINISH: Notify = Notify::const_new(); #[tokio::test] async fn worker_heartbeat_sticky_cache_miss() { let wf_name = "worker_heartbeat_cache_miss"; let mut starter = new_no_metrics_starter(wf_name); - starter.worker_config.max_cached_workflows = 1_usize; - starter.worker_config.max_outstanding_workflow_tasks = Some(2_usize); + starter.sdk_config.max_cached_workflows = 1_usize; + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(2, 10, 10, 10)); + + struct StickyCacheActivities; + #[activities] + impl StickyCacheActivities { + #[activity] + async fn sticky_cache_history_act( + _ctx: ActivityContext, + marker: String, + ) -> Result { + match marker.as_str() { + "wf1" => { + HISTORY_WF1_ACTIVITY_STARTED.notify_one(); + HISTORY_WF1_ACTIVITY_FINISH.notified().await; + } + "wf2" => { + HISTORY_WF2_ACTIVITY_STARTED.notify_one(); + HISTORY_WF2_ACTIVITY_FINISH.notified().await; + } + _ => {} + } + Ok(marker) + } + } + + starter + .sdk_config + .register_activities(StickyCacheActivities); let mut worker = starter.worker().await; worker.fetch_results = false; let worker_key = worker.worker_instance_key().to_string(); - let worker_core = worker.core_worker.clone(); + let worker_core = worker.core_worker(); let submitter = worker.get_submitter_handle(); let wf_opts = starter.workflow_options.clone(); let client = starter.get_client().await; let client_for_orchestrator = client.clone(); - 
static HISTORY_WF1_ACTIVITY_STARTED: Notify = Notify::const_new(); - static HISTORY_WF1_ACTIVITY_FINISH: Notify = Notify::const_new(); - static HISTORY_WF2_ACTIVITY_STARTED: Notify = Notify::const_new(); - static HISTORY_WF2_ACTIVITY_FINISH: Notify = Notify::const_new(); - worker.register_wf(wf_name.to_string(), |ctx: WfContext| async move { let wf_marker = ctx .get_args() @@ -563,33 +606,19 @@ async fn worker_heartbeat_sticky_cache_miss() { .and_then(|p| String::from_json_payload(p).ok()) .unwrap_or_else(|| "wf1".to_string()); - ctx.activity(ActivityOptions { - activity_type: "sticky_cache_history_act".to_string(), - input: wf_marker.clone().as_json_payload().expect("serialize"), - start_to_close_timeout: Some(Duration::from_secs(5)), - ..Default::default() - }) + ctx.start_activity( + StickyCacheActivities::sticky_cache_history_act, + wf_marker.clone(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + ) + .unwrap() .await; Ok(().into()) }); - worker.register_activity( - "sticky_cache_history_act", - |_ctx: ActivityContext, marker: String| async move { - match marker.as_str() { - "wf1" => { - HISTORY_WF1_ACTIVITY_STARTED.notify_one(); - HISTORY_WF1_ACTIVITY_FINISH.notified().await; - } - "wf2" => { - HISTORY_WF2_ACTIVITY_STARTED.notify_one(); - HISTORY_WF2_ACTIVITY_FINISH.notified().await; - } - _ => {} - } - Ok(marker) - }, - ); let wf1_id = format!("{wf_name}_wf1"); let wf2_id = format!("{wf_name}_wf2"); @@ -657,8 +686,9 @@ async fn worker_heartbeat_sticky_cache_miss() { async fn worker_heartbeat_multiple_workers() { let wf_name = "worker_heartbeat_multi_workers"; let mut starter = new_no_metrics_starter(wf_name); - starter.worker_config.max_outstanding_workflow_tasks = Some(5_usize); - starter.worker_config.max_cached_workflows = 5_usize; + starter.sdk_config.max_cached_workflows = 5_usize; + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(5, 10, 10, 10)); + 
starter.sdk_config.register_activities(StdActivities); let client = starter.get_client().await; let starting_hb_len = list_worker_heartbeats(&client, String::new()).await.len(); @@ -667,20 +697,12 @@ async fn worker_heartbeat_multiple_workers() { worker_a.register_wf(wf_name.to_string(), |_ctx: WfContext| async move { Ok(().into()) }); - worker_a.register_activity( - "failing_act", - |_ctx: ActivityContext, _: String| async move { Ok(()) }, - ); let mut starter_b = starter.clone_no_worker(); let mut worker_b = starter_b.worker().await; worker_b.register_wf(wf_name.to_string(), |_ctx: WfContext| async move { Ok(().into()) }); - worker_b.register_activity( - "failing_act", - |_ctx: ActivityContext, _: String| async move { Ok(()) }, - ); let worker_a_key = worker_a.worker_instance_key().to_string(); let worker_b_key = worker_b.worker_instance_key().to_string(); @@ -753,34 +775,54 @@ async fn worker_heartbeat_multiple_workers() { assert_eq!(describe_worker_b, filtered_b[0]); } +static ACT_COUNT: AtomicU64 = AtomicU64::new(0); +static WF_COUNT: AtomicU64 = AtomicU64::new(0); +static ACT_FAIL: Notify = Notify::const_new(); +static WF_FAIL: Notify = Notify::const_new(); #[tokio::test] async fn worker_heartbeat_failure_metrics() { const WORKFLOW_CONTINUE_SIGNAL: &str = "workflow-continue"; let wf_name = "worker_heartbeat_failure_metrics"; let mut starter = new_no_metrics_starter(wf_name); - starter.worker_config.max_outstanding_activities = Some(5_usize); + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(10, 5, 10, 10)); + + struct FailingActivities; + #[activities] + impl FailingActivities { + #[activity] + async fn failing_act(_ctx: ActivityContext, _: String) -> Result<(), ActivityError> { + if ACT_COUNT.load(Ordering::Relaxed) == 3 { + return Ok(()); + } + ACT_COUNT.fetch_add(1, Ordering::Relaxed); + ACT_FAIL.notify_one(); + Err(anyhow!("Expected error").into()) + } + } + + starter.sdk_config.register_activities(FailingActivities); let mut worker = 
starter.worker().await; let worker_instance_key = worker.worker_instance_key(); - static ACT_COUNT: AtomicU64 = AtomicU64::new(0); - static WF_COUNT: AtomicU64 = AtomicU64::new(0); - static ACT_FAIL: Notify = Notify::const_new(); - static WF_FAIL: Notify = Notify::const_new(); + worker.register_wf(wf_name.to_string(), |ctx: WfContext| async move { let _ = ctx - .activity(ActivityOptions { - activity_type: "failing_act".to_string(), - input: "boom".as_json_payload().expect("serialize"), - start_to_close_timeout: Some(Duration::from_secs(5)), - retry_policy: Some(RetryPolicy { - initial_interval: Some(prost_dur!(from_millis(10))), - backoff_coefficient: 1.0, - maximum_attempts: 4, + .start_activity( + FailingActivities::failing_act, + "boom".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + retry_policy: Some(RetryPolicy { + initial_interval: Some(prost_dur!(from_millis(10))), + backoff_coefficient: 1.0, + maximum_attempts: 4, + ..Default::default() + }), ..Default::default() - }), - ..Default::default() - }) + }, + ) + .unwrap() .await; if WF_COUNT.load(Ordering::Relaxed) == 0 { @@ -796,18 +838,6 @@ async fn worker_heartbeat_failure_metrics() { Ok(().into()) }); - worker.register_activity( - "failing_act", - |_ctx: ActivityContext, _: String| async move { - if ACT_COUNT.load(Ordering::Relaxed) == 3 { - return Ok(()); - } - ACT_COUNT.fetch_add(1, Ordering::Relaxed); - ACT_FAIL.notify_one(); - Err(anyhow!("Expected error").into()) - }, - ); - let worker_key = worker_instance_key.to_string(); starter.workflow_options.retry_policy = Some(RetryPolicy { maximum_attempts: 2, @@ -944,25 +974,24 @@ async fn worker_heartbeat_no_runtime_heartbeat() { .unwrap(); let rt = CoreRuntime::new_assume_tokio(runtimeopts).unwrap(); let mut starter = CoreWfStarter::new_with_runtime(wf_name, rt); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; let worker_instance_key = worker.worker_instance_key(); 
worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { - ctx.activity(ActivityOptions { - activity_type: "pass_fail_act".to_string(), - input: "pass".as_json_payload().expect("serializes fine"), - start_to_close_timeout: Some(Duration::from_secs(1)), - ..Default::default() - }) + ctx.start_activity( + StdActivities::echo, + "pass".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(1)), + ..Default::default() + }, + ) + .unwrap() .await; Ok(().into()) }); - worker.register_activity( - "pass_fail_act", - |_ctx: ActivityContext, i: String| async move { Ok(i) }, - ); - starter .start_with_worker(wf_name.to_owned(), &mut worker) .await; @@ -1005,26 +1034,25 @@ async fn worker_heartbeat_skip_client_worker_set_check() { .unwrap(); let rt = CoreRuntime::new_assume_tokio(runtimeopts).unwrap(); let mut starter = CoreWfStarter::new_with_runtime(wf_name, rt); - starter.worker_config.skip_client_worker_set_check = true; + starter.set_core_cfg_mutator(|m| m.skip_client_worker_set_check = true); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; let worker_instance_key = worker.worker_instance_key(); worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { - ctx.activity(ActivityOptions { - activity_type: "pass_fail_act".to_string(), - input: "pass".as_json_payload().expect("serializes fine"), - start_to_close_timeout: Some(Duration::from_secs(1)), - ..Default::default() - }) + ctx.start_activity( + StdActivities::echo, + "pass".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(1)), + ..Default::default() + }, + ) + .unwrap() .await; Ok(().into()) }); - worker.register_activity( - "pass_fail_act", - |_ctx: ActivityContext, i: String| async move { Ok(i) }, - ); - starter .start_with_worker(wf_name.to_owned(), &mut worker) .await; diff --git a/crates/sdk-core/tests/integ_tests/worker_tests.rs b/crates/sdk-core/tests/integ_tests/worker_tests.rs 
index 41bd4b018..a4fa11468 100644 --- a/crates/sdk-core/tests/integ_tests/worker_tests.rs +++ b/crates/sdk-core/tests/integ_tests/worker_tests.rs @@ -1,7 +1,7 @@ use crate::{ common::{ - CoreWfStarter, fake_grpc_server::fake_server, get_integ_runtime_options, - get_integ_server_options, get_integ_telem_options, mock_sdk_cfg, + CoreWfStarter, activity_functions::StdActivities, fake_grpc_server::fake_server, + get_integ_runtime_options, get_integ_server_options, get_integ_telem_options, mock_sdk_cfg, }, shared_tests, }; @@ -33,13 +33,15 @@ use temporalio_common::{ command::v1::command::Attributes, common::v1::WorkerVersionStamp, enums::v1::{ - EventType, WorkflowTaskFailedCause, WorkflowTaskFailedCause::GrpcMessageTooLarge, + EventType, + WorkflowTaskFailedCause::{self, GrpcMessageTooLarge}, }, failure::v1::Failure as InnerFailure, history::v1::{ - ActivityTaskScheduledEventAttributes, history_event, - history_event::Attributes::{ - self as EventAttributes, WorkflowTaskFailedEventAttributes, + ActivityTaskScheduledEventAttributes, + history_event::{ + self, + Attributes::{self as EventAttributes, WorkflowTaskFailedEventAttributes}, }, }, workflowservice::v1::{ @@ -50,8 +52,11 @@ use temporalio_common::{ }, worker::WorkerTaskTypes, }; +use temporalio_macros::activities; use temporalio_sdk::{ - ActivityOptions, LocalActivityOptions, WfContext, interceptors::WorkerInterceptor, + ActivityOptions, LocalActivityOptions, WfContext, WorkerOptions, + activities::{ActivityContext, ActivityError}, + interceptors::WorkerInterceptor, }; use temporalio_sdk_core::{ ActivitySlotKind, CoreRuntime, LocalActivitySlotKind, PollError, PollerBehavior, @@ -170,17 +175,13 @@ async fn worker_handles_unknown_workflow_types_gracefully() { async fn resource_based_few_pollers_guarantees_non_sticky_poll() { let wf_name = "resource_based_few_pollers_guarantees_non_sticky_poll"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.max_outstanding_workflow_tasks = None; - 
starter.worker_config.max_outstanding_local_activities = None; - starter.worker_config.max_outstanding_activities = None; - starter.worker_config.max_outstanding_nexus_tasks = None; - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); // 3 pollers so the minimum slots of 2 can both be handed out to a sticky poller - starter.worker_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(3_usize); + starter.sdk_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(3_usize); // Set the limits to zero so it's essentially unwilling to hand out slots let mut tuner = ResourceBasedTuner::new(0.0, 0.0); tuner.with_workflow_slots_options(ResourceSlotOptions::new(2, 10, Duration::from_millis(0))); - starter.worker_config.tuner = Some(Arc::new(tuner)); + starter.sdk_config.tuner = Arc::new(tuner); let mut worker = starter.worker().await; // Workflow doesn't actually need to do anything. 
We just need to see that we don't get stuck @@ -211,7 +212,7 @@ async fn oversize_grpc_message() { let (telemopts, addr, _aborter) = prom_metrics(None); let runtime = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let mut starter = CoreWfStarter::new_with_runtime(wf_name, runtime); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut core = starter.worker().await; static OVERSIZE_GRPC_MESSAGE_RUN: AtomicBool = AtomicBool::new(false); @@ -351,7 +352,8 @@ async fn activity_tasks_from_completion_reserve_slots() { cfg.max_outstanding_activities = Some(2); }); let core = Arc::new(mock_worker(mock)); - let mut worker = crate::common::TestWorker::new(core.clone()); + let mut worker = + crate::common::TestWorker::new(temporalio_sdk::Worker::new_from_core(core.clone())); // First poll for activities twice, occupying both slots let at1 = core.poll_activity_task().await.unwrap(); @@ -359,19 +361,27 @@ async fn activity_tasks_from_completion_reserve_slots() { let workflow_complete_token = CancellationToken::new(); let workflow_complete_token_clone = workflow_complete_token.clone(); + struct FakeAct; + #[activities] + impl FakeAct { + #[activity(name = "act1")] + fn act1(_: ActivityContext) -> Result<(), ActivityError> { + unimplemented!() + } + + #[activity(name = "act2")] + fn act2(_: ActivityContext) -> Result<(), ActivityError> { + unimplemented!() + } + } + worker.register_wf(DEFAULT_WORKFLOW_TYPE, move |ctx: WfContext| { let complete_token = workflow_complete_token.clone(); async move { - ctx.activity(ActivityOptions { - activity_type: "act1".to_string(), - ..Default::default() - }) - .await; - ctx.activity(ActivityOptions { - activity_type: "act2".to_string(), - ..Default::default() - }) - .await; + ctx.start_activity(FakeAct::act1, (), ActivityOptions::default())? + .await; + ctx.start_activity(FakeAct::act2, (), ActivityOptions::default())? 
+ .await; complete_token.cancel(); Ok(().into()) } @@ -705,39 +715,38 @@ async fn test_custom_slot_supplier_simple() { )); let mut starter = CoreWfStarter::new("test_custom_slot_supplier_simple"); - starter.worker_config.max_outstanding_workflow_tasks = None; - starter.worker_config.max_outstanding_local_activities = None; - starter.worker_config.max_outstanding_activities = None; - starter.worker_config.max_outstanding_nexus_tasks = None; + starter.sdk_config.register_activities(StdActivities); let mut tb = TunerBuilder::default(); tb.workflow_slot_supplier(wf_supplier.clone()); tb.activity_slot_supplier(activity_supplier.clone()); tb.local_activity_slot_supplier(local_activity_supplier.clone()); - starter.worker_config.tuner = Some(Arc::new(tb.build())); + starter.sdk_config.tuner = Arc::new(tb.build()); let mut worker = starter.worker().await; - worker.register_activity( - "SlotSupplierActivity", - |_: temporalio_sdk::activities::ActivityContext, _: ()| async move { Ok(()) }, - ); worker.register_wf( "SlotSupplierWorkflow".to_owned(), |ctx: WfContext| async move { let _result = ctx - .activity(ActivityOptions { - activity_type: "SlotSupplierActivity".to_string(), - start_to_close_timeout: Some(Duration::from_secs(10)), - ..Default::default() - }) + .start_activity( + StdActivities::no_op, + (), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(10)), + ..Default::default() + }, + )? .await; let _result = ctx - .local_activity(LocalActivityOptions { - activity_type: "SlotSupplierActivity".to_string(), - start_to_close_timeout: Some(Duration::from_secs(10)), - ..Default::default() - }) + .start_local_activity( + StdActivities::no_op, + (), + LocalActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(10)), + ..Default::default() + }, + )? .await; Ok(().into()) }, @@ -819,7 +828,7 @@ async fn test_custom_slot_supplier_simple() { slot_type: "activity", activity_type: Some(act_type), .. 
- } if act_type == "SlotSupplierActivity")) + } if act_type.contains("no_op"))) ); assert!( local_activity_events @@ -828,7 +837,7 @@ async fn test_custom_slot_supplier_simple() { slot_type: "local_activity", activity_type: Some(act_type), .. - } if act_type == "SlotSupplierActivity")) + } if act_type.contains("no_op"))) ); assert!(wf_events.iter().any(|e| matches!( e, @@ -915,3 +924,10 @@ async fn shutdown_worker_not_retried() { drain_pollers_and_shutdown(&worker).await; assert_eq!(shutdown_call_count.load(Ordering::Relaxed), 1); } + +#[test] +fn test_default_build_id() { + let o = WorkerOptions::new("task_queue").build(); + assert!(!o.deployment_options.version.build_id.is_empty()); + assert_ne!(o.deployment_options.version.build_id, "undetermined"); +} diff --git a/crates/sdk-core/tests/integ_tests/worker_versioning_tests.rs b/crates/sdk-core/tests/integ_tests/worker_versioning_tests.rs index 8d33dc564..827d079de 100644 --- a/crates/sdk-core/tests/integ_tests/worker_versioning_tests.rs +++ b/crates/sdk-core/tests/integ_tests/worker_versioning_tests.rs @@ -1,13 +1,10 @@ -use crate::{ - common::{CoreWfStarter, eventually}, - integ_tests::activity_functions::echo, -}; +use crate::common::{CoreWfStarter, activity_functions::StdActivities, eventually}; use std::time::Duration; use temporalio_client::{NamespacedClient, WorkflowOptions, WorkflowService}; use temporalio_common::{ protos::{ coresdk::{ - AsJsonPayloadExt, workflow_commands::CompleteWorkflowExecution, workflow_completion, + workflow_commands::CompleteWorkflowExecution, workflow_completion, workflow_completion::WorkflowActivationCompletion, }, temporal::api::{ @@ -21,7 +18,7 @@ use temporalio_common::{ worker::{WorkerDeploymentOptions, WorkerDeploymentVersion, WorkerTaskTypes}, }; use temporalio_sdk::{ActivityOptions, WfContext}; -use temporalio_sdk_core::{WorkerVersioningStrategy, test_help::WorkerTestHelpers}; +use temporalio_sdk_core::test_help::WorkerTestHelpers; use tokio::join; use tonic::IntoRequest; 
@@ -35,13 +32,12 @@ async fn sets_deployment_info_on_task_responses(#[values(true, false)] use_defau deployment_name: deploy_name.clone(), build_id: "1.0".to_string(), }; - starter.worker_config.versioning_strategy = - WorkerVersioningStrategy::WorkerDeploymentBased(WorkerDeploymentOptions { - version: version.clone(), - use_worker_versioning: true, - default_versioning_behavior: VersioningBehavior::AutoUpgrade.into(), - }); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.deployment_options = WorkerDeploymentOptions { + version: version.clone(), + use_worker_versioning: true, + default_versioning_behavior: VersioningBehavior::AutoUpgrade.into(), + }; + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let core = starter.get_worker().await; let client = starter.get_client().await; @@ -148,28 +144,31 @@ async fn activity_has_deployment_stamp() { let wf_name = "activity_has_deployment_stamp"; let mut starter = CoreWfStarter::new(wf_name); let deploy_name = format!("deployment-{}", starter.get_task_queue()); - starter.worker_config.versioning_strategy = - WorkerVersioningStrategy::WorkerDeploymentBased(WorkerDeploymentOptions { - version: WorkerDeploymentVersion { - deployment_name: deploy_name.clone(), - build_id: "1.0".to_string(), - }, - use_worker_versioning: true, - default_versioning_behavior: VersioningBehavior::AutoUpgrade.into(), - }); + starter.sdk_config.deployment_options = WorkerDeploymentOptions { + version: WorkerDeploymentVersion { + deployment_name: deploy_name.clone(), + build_id: "1.0".to_string(), + }, + use_worker_versioning: true, + default_versioning_behavior: VersioningBehavior::AutoUpgrade.into(), + }; + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; let client = starter.get_client().await; + worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { - ctx.activity(ActivityOptions { - activity_type: "echo_activity".to_string(), 
- start_to_close_timeout: Some(Duration::from_secs(5)), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_activity( + StdActivities::echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + ) + .unwrap() .await; Ok(().into()) }); - worker.register_activity("echo_activity", echo); let submitter = worker.get_submitter_handle(); let shutdown_handle = worker.inner_mut().shutdown_handle(); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests.rs b/crates/sdk-core/tests/integ_tests/workflow_tests.rs index 73f217f27..9e2f52f36 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests.rs @@ -20,10 +20,11 @@ mod upsert_search_attrs; use crate::{ common::{ - CoreWfStarter, get_integ_runtime_options, history_from_proto_binary, - init_core_and_create_wf, init_core_replay_preloaded, mock_sdk_cfg, prom_metrics, + CoreWfStarter, activity_functions::StdActivities, get_integ_runtime_options, + history_from_proto_binary, init_core_and_create_wf, init_core_replay_preloaded, + mock_sdk_cfg, prom_metrics, }, - integ_tests::{activity_functions::echo, metrics_tests}, + integ_tests::metrics_tests, }; use assert_matches::assert_matches; use std::{ @@ -39,7 +40,7 @@ use temporalio_common::{ protos::{ DEFAULT_WORKFLOW_TYPE, canned_histories, coresdk::{ - ActivityTaskCompletion, AsJsonPayloadExt, IntoCompletion, + ActivityTaskCompletion, IntoCompletion, activity_result::ActivityExecutionResult, workflow_activation::{WorkflowActivationJob, workflow_activation_job}, workflow_commands::{ @@ -63,7 +64,7 @@ use temporalio_sdk::{ ActivityOptions, LocalActivityOptions, TimerOptions, WfContext, interceptors::WorkerInterceptor, }; use temporalio_sdk_core::{ - CoreRuntime, PollError, PollerBehavior, WorkerVersioningStrategy, WorkflowErrorType, + CoreRuntime, PollError, PollerBehavior, TunerHolder, 
WorkflowErrorType, replay::HistoryForReplay, test_help::{MockPollCfg, WorkerTestHelpers, drain_pollers_and_shutdown}, }; @@ -75,7 +76,7 @@ use tokio::{join, sync::Notify, time::sleep}; async fn parallel_workflows_same_queue() { let wf_name = "parallel_workflows_same_queue"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut core = starter.worker().await; core.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { @@ -362,9 +363,9 @@ async fn wft_timeout_doesnt_create_unsolvable_autocomplete() { let signal_at_complete = "at-complete"; let mut wf_starter = CoreWfStarter::new("wft_timeout_doesnt_create_unsolvable_autocomplete"); // Test needs eviction on and a short timeout - wf_starter.worker_config.max_cached_workflows = 0_usize; - wf_starter.worker_config.max_outstanding_workflow_tasks = Some(1_usize); - wf_starter.worker_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(1_usize); + wf_starter.sdk_config.max_cached_workflows = 0_usize; + wf_starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(1, 1, 1, 1)); + wf_starter.sdk_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(1_usize); wf_starter.workflow_options.task_timeout = Some(Duration::from_secs(1)); let core = wf_starter.get_worker().await; let client = wf_starter.get_client().await; @@ -470,23 +471,28 @@ async fn wft_timeout_doesnt_create_unsolvable_autocomplete() { async fn slow_completes_with_small_cache() { let wf_name = "slow_completes_with_small_cache"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.max_outstanding_workflow_tasks = Some(5_usize); - starter.worker_config.max_cached_workflows = 5_usize; + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(5, 10, 1, 1)); + starter.sdk_config.max_cached_workflows = 5_usize; let mut worker = starter.worker().await; + + 
worker.register_activities(StdActivities); + worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { for _ in 0..3 { - ctx.activity(ActivityOptions { - activity_type: "echo_activity".to_string(), - start_to_close_timeout: Some(Duration::from_secs(5)), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_activity( + StdActivities::echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + ) + .unwrap() .await; ctx.timer(Duration::from_secs(1)).await; } Ok(().into()) }); - worker.register_activity("echo_activity", echo); for i in 0..20 { worker .submit_wf( @@ -519,22 +525,26 @@ async fn slow_completes_with_small_cache() { async fn deployment_version_correct_in_wf_info(#[values(true, false)] use_only_build_id: bool) { let wf_type = "deployment_version_correct_in_wf_info"; let mut starter = CoreWfStarter::new(wf_type); - let version_strat = if use_only_build_id { - WorkerVersioningStrategy::None { - build_id: "1.0".to_owned(), + starter.sdk_config.deployment_options = if use_only_build_id { + WorkerDeploymentOptions { + version: WorkerDeploymentVersion { + deployment_name: "".to_string(), + build_id: "1.0".to_string(), + }, + use_worker_versioning: false, + default_versioning_behavior: None, } } else { - WorkerVersioningStrategy::WorkerDeploymentBased(WorkerDeploymentOptions { + WorkerDeploymentOptions { version: WorkerDeploymentVersion { deployment_name: "deployment-1".to_string(), build_id: "1.0".to_string(), }, use_worker_versioning: false, default_versioning_behavior: None, - }) + } }; - starter.worker_config.versioning_strategy = version_strat; - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let core = starter.get_worker().await; starter.start_wf().await; let client = starter.get_client().await; @@ -624,21 +634,25 @@ async fn 
deployment_version_correct_in_wf_info(#[values(true, false)] use_only_b .unwrap(); let mut starter = starter.clone_no_worker(); - let version_strat = if use_only_build_id { - WorkerVersioningStrategy::None { - build_id: "2.0".to_owned(), + starter.sdk_config.deployment_options = if use_only_build_id { + WorkerDeploymentOptions { + version: WorkerDeploymentVersion { + deployment_name: "".to_string(), + build_id: "2.0".to_string(), + }, + use_worker_versioning: false, + default_versioning_behavior: None, } } else { - WorkerVersioningStrategy::WorkerDeploymentBased(WorkerDeploymentOptions { + WorkerDeploymentOptions { version: WorkerDeploymentVersion { deployment_name: "deployment-1".to_string(), build_id: "2.0".to_string(), }, use_worker_versioning: false, default_versioning_behavior: None, - }) + } }; - starter.worker_config.versioning_strategy = version_strat; let core = starter.get_worker().await; @@ -757,12 +771,12 @@ async fn nondeterminism_errors_fail_workflow_when_configured_to( let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let wf_name = "nondeterminism_errors_fail_workflow_when_configured_to"; let mut starter = CoreWfStarter::new_with_runtime(wf_name, rt); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let typeset = HashSet::from([WorkflowErrorType::Nondeterminism]); if whole_worker { - starter.worker_config.workflow_failure_errors = typeset; + starter.sdk_config.workflow_failure_errors = typeset; } else { - starter.worker_config.workflow_types_to_failure_errors = + starter.sdk_config.workflow_types_to_failure_errors = HashMap::from([(wf_name.to_owned(), typeset)]); } let wf_id = starter.get_task_queue().to_owned(); @@ -774,7 +788,7 @@ async fn nondeterminism_errors_fail_workflow_when_configured_to( Ok(().into()) }); let client = starter.get_client().await; - let core_worker = worker.core_worker.clone(); + let core_worker = 
worker.core_worker(); starter.start_with_worker(wf_name, &mut worker).await; let stopper = async { @@ -799,13 +813,17 @@ async fn nondeterminism_errors_fail_workflow_when_configured_to( // Restart the worker with a new, incompatible wf definition which will cause nondeterminism let mut starter = starter.clone_no_worker(); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), move |ctx: WfContext| async move { - ctx.activity(ActivityOptions { - activity_type: "echo_activity".to_string(), - start_to_close_timeout: Some(Duration::from_secs(5)), - ..Default::default() - }) + ctx.start_activity( + StdActivities::echo, + "hi".to_owned(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + )? .await; Ok(().into()) }); @@ -835,57 +853,64 @@ async fn nondeterminism_errors_fail_workflow_when_configured_to( async fn history_out_of_order_on_restart() { let wf_name = "history_out_of_order_on_restart"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.workflow_failure_errors = - HashSet::from([WorkflowErrorType::Nondeterminism]); + starter.sdk_config.workflow_failure_errors = HashSet::from([WorkflowErrorType::Nondeterminism]); let mut worker = starter.worker().await; let mut starter2 = starter.clone_no_worker(); let mut worker2 = starter2.worker().await; static HIT_SLEEP: Notify = Notify::const_new(); + worker.register_activities(StdActivities); + worker2.register_activities(StdActivities); worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { - ctx.local_activity(LocalActivityOptions { - activity_type: "echo".to_owned(), - input: "hi".as_json_payload().unwrap(), - start_to_close_timeout: Some(Duration::from_secs(5)), - ..Default::default() - }) + ctx.start_local_activity( + StdActivities::echo, + "hi".to_string(), + LocalActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + 
..Default::default() + }, + )? .await; - ctx.activity(ActivityOptions { - activity_type: "echo".to_owned(), - input: "hi".as_json_payload().unwrap(), - start_to_close_timeout: Some(Duration::from_secs(5)), - ..Default::default() - }) + ctx.start_activity( + StdActivities::echo, + "hi".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + )? .await; // Interrupt this sleep on first go HIT_SLEEP.notify_one(); ctx.timer(Duration::from_secs(5)).await; Ok(().into()) }); - worker.register_activity("echo", echo); worker2.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { - ctx.local_activity(LocalActivityOptions { - activity_type: "echo".to_owned(), - input: "hi".as_json_payload().unwrap(), - start_to_close_timeout: Some(Duration::from_secs(5)), - ..Default::default() - }) + ctx.start_local_activity( + StdActivities::echo, + "hi".to_string(), + LocalActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + )? .await; // Timer is added after restarting workflow ctx.timer(Duration::from_secs(1)).await; - ctx.activity(ActivityOptions { - activity_type: "echo".to_owned(), - input: "hi".as_json_payload().unwrap(), - start_to_close_timeout: Some(Duration::from_secs(5)), - ..Default::default() - }) + ctx.start_activity( + StdActivities::echo, + "hi".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + )? 
.await; ctx.timer(Duration::from_secs(2)).await; Ok(().into()) }); - worker2.register_activity("echo", echo); worker .submit_wf( wf_name.to_owned(), diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/activities.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/activities.rs index 5afb44641..9e3948e19 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/activities.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/activities.rs @@ -1,15 +1,16 @@ -use crate::{ - common::{ - ActivationAssertionsInterceptor, CoreWfStarter, INTEG_CLIENT_IDENTITY, build_fake_sdk, - eventually, init_core_and_create_wf, mock_sdk, mock_sdk_cfg, - }, - integ_tests::activity_functions::echo, +use crate::common::{ + ActivationAssertionsInterceptor, CoreWfStarter, INTEG_CLIENT_IDENTITY, + activity_functions::StdActivities, build_fake_sdk, eventually, init_core_and_create_wf, + mock_sdk, mock_sdk_cfg, }; use anyhow::anyhow; use assert_matches::assert_matches; use futures_util::future::join_all; use std::{ - sync::atomic::{AtomicBool, Ordering}, + sync::{ + Arc, + atomic::{AtomicBool, Ordering}, + }, time::Duration, }; use temporalio_client::{ @@ -44,9 +45,9 @@ use temporalio_common::{ test_utils::schedule_activity_cmd, }, }; +use temporalio_macros::activities; use temporalio_sdk::{ - ActExitValue, ActivityOptions, CancellableFuture, WfContext, WfExitValue, WorkflowFunction, - WorkflowResult, + ActivityOptions, CancellableFuture, WfContext, WfExitValue, WorkflowFunction, WorkflowResult, activities::{ActivityContext, ActivityError}, }; use temporalio_sdk_core::{ @@ -58,25 +59,30 @@ use temporalio_sdk_core::{ }; use tokio::{join, sync::Semaphore, time::sleep}; -pub(crate) async fn one_activity_wf(ctx: WfContext) -> WorkflowResult<()> { - ctx.activity(ActivityOptions { - activity_type: "echo_activity".to_string(), - start_to_close_timeout: Some(Duration::from_secs(5)), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) - .await; - 
Ok(().into()) +async fn one_activity_wf(ctx: WfContext) -> WorkflowResult { + // TODO [rust-sdk-branch]: activities need to return deserialzied results + let r = ctx + .start_activity( + StdActivities::echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + )? + .await + .unwrap_ok_payload(); + Ok(r.into()) } #[tokio::test] async fn one_activity_only() { let wf_name = "one_activity"; let mut starter = CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; let client = starter.get_client().await; worker.register_wf(wf_name.to_owned(), one_activity_wf); - worker.register_activity("echo_activity", echo); let run_id = worker .submit_wf( @@ -93,7 +99,9 @@ async fn one_activity_only() { .get_workflow_result(Default::default()) .await .unwrap(); - assert_matches!(res, WorkflowExecutionResult::Succeeded(_)); + let r = assert_matches!(res, WorkflowExecutionResult::Succeeded(r) => r); + let p = Payload::from_json_payload(&r[0]).unwrap(); + assert_eq!(String::from_json_payload(&p).unwrap(), "hi!"); } #[tokio::test] @@ -891,27 +899,25 @@ async fn activity_heartbeat_not_flushed_on_success() { async fn one_activity_abandon_cancelled_before_started() { let wf_name = "one_activity_abandon_cancelled_before_started"; let mut starter = CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; let client = starter.get_client().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { - let act_fut = ctx.activity(ActivityOptions { - activity_type: "echo_activity".to_string(), - start_to_close_timeout: Some(Duration::from_secs(5)), - input: "hi!".as_json_payload().expect("serializes fine"), - cancellation_type: ActivityCancellationType::Abandon, - ..Default::default() - }); + let act_fut = ctx + .start_activity( + StdActivities::delay, + 
Duration::from_secs(2), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + cancellation_type: ActivityCancellationType::Abandon, + ..Default::default() + }, + ) + .unwrap(); act_fut.cancel(&ctx); act_fut.await; Ok(().into()) }); - worker.register_activity( - "echo_activity", - |_ctx: ActivityContext, echo_me: String| async move { - sleep(Duration::from_secs(2)).await; - Ok(echo_me) - }, - ); let run_id = worker .submit_wf( @@ -935,29 +941,27 @@ async fn one_activity_abandon_cancelled_before_started() { async fn one_activity_abandon_cancelled_after_complete() { let wf_name = "one_activity_abandon_cancelled_after_complete"; let mut starter = CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; let client = starter.get_client().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { - let act_fut = ctx.activity(ActivityOptions { - activity_type: "echo_activity".to_string(), - start_to_close_timeout: Some(Duration::from_secs(5)), - input: "hi!".as_json_payload().expect("serializes fine"), - cancellation_type: ActivityCancellationType::Abandon, - ..Default::default() - }); + let act_fut = ctx + .start_activity( + StdActivities::delay, + Duration::from_secs(2), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + cancellation_type: ActivityCancellationType::Abandon, + ..Default::default() + }, + ) + .unwrap(); ctx.timer(Duration::from_secs(1)).await; act_fut.cancel(&ctx); ctx.timer(Duration::from_secs(3)).await; act_fut.await; Ok(().into()) }); - worker.register_activity( - "echo_activity", - |_ctx: ActivityContext, echo_me: String| async move { - sleep(Duration::from_secs(2)).await; - Ok(echo_me) - }, - ); let run_id = worker .submit_wf( @@ -979,23 +983,49 @@ async fn one_activity_abandon_cancelled_after_complete() { #[tokio::test] async fn it_can_complete_async() { - use std::sync::Arc; - use tokio::sync::Mutex; - let wf_name 
= "it_can_complete_async".to_owned(); let mut starter = CoreWfStarter::new(&wf_name); + let async_response = "agence"; + let shared_token = Arc::new(tokio::sync::Mutex::new(None)); + + struct AsyncActivities { + shared_token: Arc>>>, + } + #[activities] + impl AsyncActivities { + #[activity] + async fn complete_async_activity( + self: Arc, + ctx: ActivityContext, + _: String, + ) -> Result<(), ActivityError> { + // set the `activity_task_token` + let activity_info = ctx.get_info(); + let task_token = &activity_info.task_token; + let mut shared = self.shared_token.lock().await; + *shared = Some(task_token.clone()); + Err(ActivityError::WillCompleteAsync) + } + } + + starter.sdk_config.register_activities(AsyncActivities { + shared_token: shared_token.clone(), + }); + let mut worker = starter.worker().await; let client = starter.get_client().await; - let async_response = "agence"; - let shared_token: Arc>>> = Arc::new(Mutex::new(None)); + worker.register_wf(wf_name.clone(), move |ctx: WfContext| async move { let activity_resolution = ctx - .activity(ActivityOptions { - activity_type: "complete_async_activity".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - start_to_close_timeout: Some(Duration::from_secs(30)), - ..Default::default() - }) + .start_activity( + AsyncActivities::complete_async_activity, + "hi".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(30)), + ..Default::default() + }, + ) + .unwrap() .await; let res = match activity_resolution.status { @@ -1009,22 +1039,6 @@ async fn it_can_complete_async() { Ok(().into()) }); - let shared_token_ref = shared_token.clone(); - worker.register_activity( - "complete_async_activity", - move |ctx: ActivityContext, _: String| { - let shared_token_ref = shared_token_ref.clone(); - async move { - // set the `activity_task_token` - let activity_info = ctx.get_info(); - let task_token = &activity_info.task_token; - let mut shared = shared_token_ref.lock().await; - 
*shared = Some(task_token.clone()); - Ok::, _>(ActExitValue::WillCompleteAsync) - } - }, - ); - let shared_token_ref2 = shared_token.clone(); tokio::spawn(async move { loop { @@ -1057,39 +1071,50 @@ async fn it_can_complete_async() { worker.run_until_done().await.unwrap(); } +static ACTS_STARTED: Semaphore = Semaphore::const_new(0); +static ACTS_DONE: Semaphore = Semaphore::const_new(0); #[tokio::test] async fn graceful_shutdown() { let wf_name = "graceful_shutdown"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.graceful_shutdown_period = Some(Duration::from_millis(500)); + starter.sdk_config.graceful_shutdown_period = Some(Duration::from_millis(500)); + + struct SleeperActivities; + #[activities] + impl SleeperActivities { + #[activity] + async fn sleeper(ctx: ActivityContext, _: String) -> Result<(), ActivityError> { + ACTS_STARTED.add_permits(1); + // just wait to be cancelled + ctx.cancelled().await; + ACTS_DONE.add_permits(1); + Err(ActivityError::cancelled()) + } + } + + starter.sdk_config.register_activities(SleeperActivities); let mut worker = starter.worker().await; let client = starter.get_client().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { let act_futs = (1..=10).map(|_| { - ctx.activity(ActivityOptions { - activity_type: "sleeper".to_string(), - start_to_close_timeout: Some(Duration::from_secs(5)), - retry_policy: Some(RetryPolicy { - maximum_attempts: 1, + ctx.start_activity( + SleeperActivities::sleeper, + "hi".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + retry_policy: Some(RetryPolicy { + maximum_attempts: 1, + ..Default::default() + }), + cancellation_type: ActivityCancellationType::WaitCancellationCompleted, ..Default::default() - }), - cancellation_type: ActivityCancellationType::WaitCancellationCompleted, - input: "hi".as_json_payload().unwrap(), - ..Default::default() - }) + }, + ) + .unwrap() }); join_all(act_futs).await; Ok(().into()) }); - 
static ACTS_STARTED: Semaphore = Semaphore::const_new(0); - static ACTS_DONE: Semaphore = Semaphore::const_new(0); - worker.register_activity("sleeper", |ctx: ActivityContext, _: String| async move { - ACTS_STARTED.add_permits(1); - // just wait to be cancelled - ctx.cancelled().await; - ACTS_DONE.add_permits(1); - Result::<(), _>::Err(ActivityError::cancelled()) - }); worker .submit_wf( @@ -1120,38 +1145,52 @@ async fn graceful_shutdown() { join!(shutdowner, runner); } +static WAS_CANCELLED: AtomicBool = AtomicBool::new(false); #[tokio::test] async fn activity_can_be_cancelled_by_local_timeout() { let wf_name = "activity_can_be_cancelled_by_local_timeout"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.local_timeout_buffer_for_activities = Duration::from_secs(0); + starter + .set_core_cfg_mutator(|m| m.local_timeout_buffer_for_activities = Duration::from_secs(0)); + + struct CancellableEchoActivities; + #[activities] + impl CancellableEchoActivities { + #[activity] + async fn cancellable_echo( + ctx: ActivityContext, + echo_me: String, + ) -> Result { + // Doesn't heartbeat + ctx.cancelled().await; + WAS_CANCELLED.store(true, Ordering::Relaxed); + Ok(echo_me) + } + } + + starter + .sdk_config + .register_activities(CancellableEchoActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { let res = ctx - .activity(ActivityOptions { - activity_type: "echo_activity".to_string(), - start_to_close_timeout: Some(Duration::from_secs(1)), - input: "hi!".as_json_payload().expect("serializes fine"), - retry_policy: Some(RetryPolicy { - maximum_attempts: 1, + .start_activity( + CancellableEchoActivities::cancellable_echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(1)), + retry_policy: Some(RetryPolicy { + maximum_attempts: 1, + ..Default::default() + }), ..Default::default() - }), - ..Default::default() - }) + }, + ) + .unwrap() .await; 
assert!(res.timed_out().is_some()); Ok(().into()) }); - static WAS_CANCELLED: AtomicBool = AtomicBool::new(false); - worker.register_activity( - "echo_activity", - |ctx: ActivityContext, echo_me: String| async move { - // Doesn't heartbeat - ctx.cancelled().await; - WAS_CANCELLED.store(true, Ordering::Relaxed); - Ok(echo_me) - }, - ); starter.start_with_worker(wf_name, &mut worker).await; worker.run_until_done().await.unwrap(); @@ -1165,32 +1204,37 @@ async fn activity_can_be_cancelled_by_local_timeout() { async fn long_activity_timeout_repro() { let wf_name = "long_activity_timeout_repro"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { + starter.sdk_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { minimum: 1, maximum: 10, initial: 5, }; - starter.worker_config.activity_task_poller_behavior = PollerBehavior::Autoscaling { + starter.sdk_config.activity_task_poller_behavior = PollerBehavior::Autoscaling { minimum: 1, maximum: 10, initial: 5, }; - starter.worker_config.local_timeout_buffer_for_activities = Duration::from_secs(0); + starter + .set_core_cfg_mutator(|m| m.local_timeout_buffer_for_activities = Duration::from_secs(0)); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { let mut iter = 1; loop { let res = ctx - .activity(ActivityOptions { - activity_type: "echo_activity".to_string(), - start_to_close_timeout: Some(Duration::from_secs(1)), - input: "hi!".as_json_payload().expect("serializes fine"), - retry_policy: Some(RetryPolicy { - maximum_attempts: 1, + .start_activity( + StdActivities::echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(1)), + retry_policy: Some(RetryPolicy { + maximum_attempts: 1, + ..Default::default() + }), ..Default::default() - }), - ..Default::default() - }) + }, + ) + 
.unwrap() .await; assert!(res.completed_ok()); ctx.timer(Duration::from_secs(60 * 3)).await; @@ -1200,7 +1244,6 @@ async fn long_activity_timeout_repro() { } } }); - worker.register_activity("echo_activity", echo); starter.start_with_worker(wf_name, &mut worker).await; worker.run_until_done().await.unwrap(); @@ -1237,11 +1280,14 @@ async fn pass_activity_summary_to_metadata() { let mut worker = mock_sdk_cfg(mock_cfg, |_| {}); worker.register_wf(wf_type, |ctx: WfContext| async move { - ctx.activity(ActivityOptions { - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), - summary: Some("activity summary".to_string()), - ..Default::default() - }) + ctx.start_activity( + StdActivities::default, + (), + ActivityOptions { + summary: Some("activity summary".to_string()), + ..Default::default() + }, + )? .await; Ok(().into()) }); @@ -1286,12 +1332,15 @@ async fn abandoned_activities_ignore_start_and_complete(hist_batches: &'static [ let mut worker = mock_sdk(MockPollCfg::from_resp_batches(wfid, t, hist_batches, mock)); worker.register_wf(wf_type.to_owned(), |ctx: WfContext| async move { - let act_fut = ctx.activity(ActivityOptions { - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), - start_to_close_timeout: Some(Duration::from_secs(5)), - cancellation_type: ActivityCancellationType::Abandon, - ..Default::default() - }); + let act_fut = ctx.start_activity( + StdActivities::default, + (), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + cancellation_type: ActivityCancellationType::Abandon, + ..Default::default() + }, + )?; ctx.timer(Duration::from_secs(1)).await; act_fut.cancel(&ctx); ctx.timer(Duration::from_secs(3)).await; @@ -1308,7 +1357,8 @@ async fn abandoned_activities_ignore_start_and_complete(hist_batches: &'static [ #[tokio::test] async fn immediate_activity_cancelation() { let func = WorkflowFunction::new(|ctx: WfContext| async move { - let cancel_activity_future = ctx.activity(ActivityOptions::default()); + let cancel_activity_future 
= + ctx.start_activity(StdActivities::default, (), ActivityOptions::default())?; // Immediately cancel the activity cancel_activity_future.cancel(&ctx); cancel_activity_future.await; diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/appdata_propagation.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/appdata_propagation.rs index aef3b0016..3d983f4d1 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/appdata_propagation.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/appdata_propagation.rs @@ -2,8 +2,11 @@ use crate::common::CoreWfStarter; use assert_matches::assert_matches; use std::time::Duration; use temporalio_client::{WfClientExt, WorkflowExecutionResult, WorkflowOptions}; -use temporalio_common::protos::coresdk::AsJsonPayloadExt; -use temporalio_sdk::{ActivityOptions, WfContext, WorkflowResult, activities::ActivityContext}; +use temporalio_macros::activities; +use temporalio_sdk::{ + ActivityOptions, WfContext, WorkflowResult, + activities::{ActivityContext, ActivityError}, +}; const TEST_APPDATA_MESSAGE: &str = "custom app data, yay"; @@ -12,20 +15,35 @@ struct Data { } pub(crate) async fn appdata_activity_wf(ctx: WfContext) -> WorkflowResult<()> { - ctx.activity(ActivityOptions { - activity_type: "echo_activity".to_string(), - start_to_close_timeout: Some(Duration::from_secs(5)), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_activity( + AppdataActivities::echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + ) + .unwrap() .await; Ok(().into()) } +struct AppdataActivities; +#[activities] +impl AppdataActivities { + #[activity] + async fn echo(ctx: ActivityContext, echo_me: String) -> Result { + let data = ctx.app_data::().expect("appdata exists. 
qed"); + assert_eq!(data.message, TEST_APPDATA_MESSAGE.to_owned()); + Ok(echo_me) + } +} + #[tokio::test] async fn appdata_access_in_activities_and_workflows() { let wf_name = "appdata_activity"; let mut starter = CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(AppdataActivities); let mut worker = starter.worker().await; worker.inner_mut().insert_app_data(Data { message: TEST_APPDATA_MESSAGE.to_owned(), @@ -33,14 +51,6 @@ async fn appdata_access_in_activities_and_workflows() { let client = starter.get_client().await; worker.register_wf(wf_name.to_owned(), appdata_activity_wf); - worker.register_activity( - "echo_activity", - |ctx: ActivityContext, echo_me: String| async move { - let data = ctx.app_data::().expect("appdata exists. qed"); - assert_eq!(data.message, TEST_APPDATA_MESSAGE.to_owned()); - Ok(echo_me) - }, - ); let run_id = worker .submit_wf( diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs index d695bbecb..5f7b0dbee 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs @@ -42,7 +42,7 @@ async fn cancel_receiver(ctx: WfContext) -> WorkflowResult { #[tokio::test] async fn sends_cancel_to_other_wf() { let mut starter = CoreWfStarter::new("sends_cancel_to_other_wf"); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf("sender", cancel_sender); worker.register_wf("receiver", cancel_receiver); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs index 580d80995..bf5f5a73a 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs 
@@ -35,7 +35,7 @@ async fn cancelled_wf(ctx: WfContext) -> WorkflowResult<()> { async fn cancel_during_timer() { let wf_name = "cancel_during_timer"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let client = starter.get_client().await; worker.register_wf(wf_name.to_string(), cancelled_wf); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs index 6078fc4c7..d82f5a5a4 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs @@ -85,7 +85,7 @@ async fn happy_parent(ctx: WfContext) -> WorkflowResult<()> { #[tokio::test] async fn child_workflow_happy_path() { let mut starter = CoreWfStarter::new("child-workflows"); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(PARENT_WF_TYPE.to_string(), happy_parent); @@ -106,7 +106,7 @@ async fn child_workflow_happy_path() { #[tokio::test] async fn abandoned_child_bug_repro() { let mut starter = CoreWfStarter::new("child-workflow-abandon-bug"); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let barr: &'static Barrier = Box::leak(Box::new(Barrier::new(2))); @@ -177,7 +177,7 @@ async fn abandoned_child_bug_repro() { #[tokio::test] async fn abandoned_child_resolves_post_cancel() { let mut starter = CoreWfStarter::new("child-workflow-resolves-post-cancel"); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = 
WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let barr: &'static Barrier = Box::leak(Box::new(Barrier::new(2))); @@ -244,7 +244,7 @@ async fn abandoned_child_resolves_post_cancel() { async fn cancelled_child_gets_reason() { let wf_name = "cancelled-child-gets-reason"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_string(), move |ctx: WfContext| async move { diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs index fef4bb2c5..c9ea06b17 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs @@ -1,5 +1,5 @@ use crate::common::{CoreWfStarter, build_fake_sdk}; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use temporalio_client::WorkflowOptions; use temporalio_common::{ protos::{ @@ -10,7 +10,7 @@ use temporalio_common::{ worker::WorkerTaskTypes, }; use temporalio_sdk::{WfContext, WfExitValue, WorkflowResult}; -use temporalio_sdk_core::test_help::MockPollCfg; +use temporalio_sdk_core::{TunerHolder, test_help::MockPollCfg}; async fn continue_as_new_wf(ctx: WfContext) -> WorkflowResult<()> { let run_ct = ctx.get_args()[0].data[0]; @@ -29,7 +29,7 @@ async fn continue_as_new_wf(ctx: WfContext) -> WorkflowResult<()> { async fn continue_as_new_happy_path() { let wf_name = "continue_as_new_happy_path"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_string(), continue_as_new_wf); @@ -49,9 +49,9 @@ async fn 
continue_as_new_happy_path() { async fn continue_as_new_multiple_concurrent() { let wf_name = "continue_as_new_multiple_concurrent"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); - starter.worker_config.max_cached_workflows = 5_usize; - starter.worker_config.max_outstanding_workflow_tasks = Some(5_usize); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.max_cached_workflows = 5_usize; + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(5, 1, 1, 1)); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_string(), continue_as_new_wf); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/determinism.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/determinism.rs index 2d6676c7c..517bb7218 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/determinism.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/determinism.rs @@ -1,4 +1,6 @@ -use crate::common::{CoreWfStarter, WorkflowHandleExt, mock_sdk, mock_sdk_cfg}; +use crate::common::{ + CoreWfStarter, WorkflowHandleExt, activity_functions::StdActivities, mock_sdk, mock_sdk_cfg, +}; use std::{ sync::atomic::{AtomicBool, AtomicUsize, Ordering}, time::Duration, @@ -6,8 +8,7 @@ use std::{ use temporalio_client::WorkflowOptions; use temporalio_common::{ protos::{ - DEFAULT_ACTIVITY_TYPE, TestHistoryBuilder, canned_histories, - coresdk::AsJsonPayloadExt, + TestHistoryBuilder, canned_histories, temporal::api::{ enums::v1::{EventType, WorkflowTaskFailedCause}, failure::v1::Failure, @@ -17,7 +18,6 @@ use temporalio_common::{ }; use temporalio_sdk::{ ActivityOptions, ChildWorkflowOptions, LocalActivityOptions, WfContext, WorkflowResult, - activities::ActivityContext, }; use temporalio_sdk_core::{ replay::DEFAULT_WORKFLOW_TYPE, @@ -40,11 +40,8 @@ pub(crate) async fn timer_wf_nondeterministic(ctx: WfContext) -> WorkflowResult< } 2 => { // On the second attempt we 
should cause a nondeterminism error - ctx.activity(ActivityOptions { - activity_type: "whatever".to_string(), - ..Default::default() - }) - .await; + ctx.start_activity(StdActivities::default, (), ActivityOptions::default())? + .await; } _ => panic!("Ran too many times"), } @@ -55,7 +52,7 @@ pub(crate) async fn timer_wf_nondeterministic(ctx: WfContext) -> WorkflowResult< async fn test_determinism_error_then_recovers() { let wf_name = "test_determinism_error_then_recovers"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), timer_wf_nondeterministic); @@ -67,8 +64,11 @@ async fn test_determinism_error_then_recovers() { #[tokio::test] async fn task_fail_causes_replay_unset_too_soon() { + use crate::common::activity_functions::StdActivities; + let wf_name = "task_fail_causes_replay_unset_too_soon"; let mut starter = CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; static DID_FAIL: AtomicBool = AtomicBool::new(false); @@ -76,12 +76,15 @@ async fn task_fail_causes_replay_unset_too_soon() { if DID_FAIL.load(Ordering::Relaxed) { assert!(ctx.is_replaying()); } - ctx.activity(ActivityOptions { - activity_type: "echo".to_string(), - input: "hi!".as_json_payload().expect("serializes fine"), - start_to_close_timeout: Some(Duration::from_secs(2)), - ..Default::default() - }) + ctx.start_activity( + StdActivities::echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(2)), + ..Default::default() + }, + ) + .unwrap() .await; if !DID_FAIL.load(Ordering::Relaxed) { DID_FAIL.store(true, Ordering::Relaxed); @@ -89,10 +92,6 @@ async fn task_fail_causes_replay_unset_too_soon() { } Ok(().into()) }); - worker.register_activity( - "echo", - |_ctx: 
ActivityContext, echo_me: String| async move { Ok(echo_me) }, - ); let handle = starter.start_with_worker(wf_name, &mut worker).await; @@ -248,32 +247,42 @@ async fn activity_id_or_type_change_is_nondeterministic( worker.register_wf(wf_type.to_owned(), move |ctx: WfContext| async move { if local_act { - ctx.local_activity(if id_change { - LocalActivityOptions { - activity_id: Some("I'm bad and wrong!".to_string()), - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), - ..Default::default() - } + if id_change { + ctx.start_local_activity( + StdActivities::default, + (), + LocalActivityOptions { + activity_id: Some("I'm bad and wrong!".to_string()), + ..Default::default() + }, + )? + .await; } else { - LocalActivityOptions { - activity_type: "not the default act type".to_string(), - ..Default::default() - } - }) - .await; - } else { - ctx.activity(if id_change { + ctx.start_local_activity( + // Different type causes nondeterminism + StdActivities::no_op, + (), + Default::default(), + )? + .await; + } + } else if id_change { + ctx.start_activity( + StdActivities::default, + (), ActivityOptions { activity_id: Some("I'm bad and wrong!".to_string()), - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), ..Default::default() - } - } else { - ActivityOptions { - activity_type: "not the default act type".to_string(), - ..Default::default() - } - }) + }, + )? + .await; + } else { + ctx.start_activity( + // Different type causes nondeterminism + StdActivities::no_op, + (), + ActivityOptions::default(), + )? 
.await; } Ok(().into()) @@ -333,7 +342,7 @@ async fn child_wf_id_or_type_change_is_nondeterministic( ctx.child_workflow(if id_change { ChildWorkflowOptions { workflow_id: "I'm bad and wrong!".to_string(), - workflow_type: DEFAULT_ACTIVITY_TYPE.to_string(), + workflow_type: DEFAULT_WORKFLOW_TYPE.to_string(), ..Default::default() } } else { diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/eager.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/eager.rs index f89730a12..4082f6605 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/eager.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/eager.rs @@ -15,7 +15,7 @@ async fn eager_wf_start() { starter.workflow_options.enable_eager_workflow_start = true; // hang the test if eager task dispatch failed starter.workflow_options.task_timeout = Some(Duration::from_secs(1500)); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), eager_wf); starter.eager_start_with_worker(wf_name, &mut worker).await; @@ -29,7 +29,7 @@ async fn eager_wf_start_different_clients() { starter.workflow_options.enable_eager_workflow_start = true; // hang the test if wf task needs retry starter.workflow_options.task_timeout = Some(Duration::from_secs(1500)); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), eager_wf); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs index 07e66eda0..8b451cbae 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs @@ -1,7 +1,8 @@ use crate::common::{ - 
ActivationAssertionsInterceptor, CoreWfStarter, WorkflowHandleExt, build_fake_sdk, - history_from_proto_binary, init_core_replay_preloaded, mock_sdk, mock_sdk_cfg, - replay_sdk_worker, workflows::la_problem_workflow, + ActivationAssertionsInterceptor, CoreWfStarter, WorkflowHandleExt, + activity_functions::StdActivities, build_fake_sdk, history_from_proto_binary, + init_core_replay_preloaded, mock_sdk, mock_sdk_cfg, replay_sdk_worker, + workflows::la_problem_workflow, }; use anyhow::anyhow; use crossbeam_queue::SegQueue; @@ -26,8 +27,7 @@ use temporalio_common::protos::{ workflow_commands::{ ActivityCancellationType, ScheduleLocalActivity, workflow_command::Variant, }, - workflow_completion, - workflow_completion::{WorkflowActivationCompletion, workflow_activation_completion}, + workflow_completion::{self, WorkflowActivationCompletion, workflow_activation_completion}, }, temporal::api::{ command::v1::{RecordMarkerCommandAttributes, command}, @@ -43,6 +43,7 @@ use temporalio_common::protos::{ }, test_utils::{query_ok, schedule_local_activity_cmd, start_timer_cmd}, }; +use temporalio_macros::activities; use temporalio_sdk::{ ActivityOptions, CancellableFuture, LocalActivityOptions, UpdateContext, WfContext, WorkflowFunction, WorkflowResult, @@ -50,7 +51,7 @@ use temporalio_sdk::{ interceptors::{FailOnNondeterminismInterceptor, WorkerInterceptor}, }; use temporalio_sdk_core::{ - PollError, prost_dur, + PollError, TunerHolder, prost_dur, replay::{DEFAULT_WORKFLOW_TYPE, HistoryForReplay, TestHistoryBuilder, default_wes_attribs}, test_help::{ LEGACY_QUERY_ID, MockPollCfg, ResponseType, WorkerExt, WorkerTestHelpers, @@ -63,11 +64,11 @@ use tokio_util::sync::CancellationToken; pub(crate) async fn one_local_activity_wf(ctx: WfContext) -> WorkflowResult<()> { let initial_workflow_time = ctx.workflow_time().expect("Workflow time should be set"); - ctx.local_activity(LocalActivityOptions { - activity_type: "echo_activity".to_string(), - input: 
"hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_local_activity( + StdActivities::echo, + "hi!".to_string(), + LocalActivityOptions::default(), + )? .await; // Verify LA execution advances the clock assert!(initial_workflow_time < ctx.workflow_time().unwrap()); @@ -78,9 +79,9 @@ pub(crate) async fn one_local_activity_wf(ctx: WfContext) -> WorkflowResult<()> async fn one_local_activity() { let wf_name = "one_local_activity"; let mut starter = CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), one_local_activity_wf); - worker.register_activity("echo_activity", echo); let handle = starter.start_with_worker(wf_name, &mut worker).await; worker.run_until_done().await.unwrap(); @@ -91,11 +92,11 @@ async fn one_local_activity() { } pub(crate) async fn local_act_concurrent_with_timer_wf(ctx: WfContext) -> WorkflowResult<()> { - let la = ctx.local_activity(LocalActivityOptions { - activity_type: "echo_activity".to_string(), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }); + let la = ctx.start_local_activity( + StdActivities::echo, + "hi!".to_string(), + LocalActivityOptions::default(), + )?; let timer = ctx.timer(Duration::from_secs(1)); tokio::join!(la, timer); Ok(().into()) @@ -105,52 +106,56 @@ pub(crate) async fn local_act_concurrent_with_timer_wf(ctx: WfContext) -> Workfl async fn local_act_concurrent_with_timer() { let wf_name = "local_act_concurrent_with_timer"; let mut starter = CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), local_act_concurrent_with_timer_wf); - worker.register_activity("echo_activity", echo); starter.start_with_worker(wf_name, &mut worker).await; worker.run_until_done().await.unwrap(); } -pub(crate) async fn 
local_act_then_timer_then_wait(ctx: WfContext) -> WorkflowResult<()> { - let la = ctx.local_activity(LocalActivityOptions { - activity_type: "echo_activity".to_string(), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }); - ctx.timer(Duration::from_secs(1)).await; - let res = la.await; - assert!(res.completed_ok()); - Ok(().into()) -} - #[tokio::test] async fn local_act_then_timer_then_wait_result() { let wf_name = "local_act_then_timer_then_wait_result"; let mut starter = CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; - worker.register_wf(wf_name.to_owned(), local_act_then_timer_then_wait); - worker.register_activity("echo_activity", echo); + worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { + let la = ctx.start_local_activity( + StdActivities::echo, + "hi!".to_string(), + LocalActivityOptions::default(), + )?; + ctx.timer(Duration::from_secs(1)).await; + let res = la.await; + assert!(res.completed_ok()); + Ok(().into()) + }); starter.start_with_worker(wf_name, &mut worker).await; worker.run_until_done().await.unwrap(); } +pub(crate) async fn local_act_then_timer_then_wait(ctx: WfContext) -> WorkflowResult<()> { + let la = ctx.start_local_activity( + StdActivities::delay, + Duration::from_secs(4), + LocalActivityOptions::default(), + )?; + ctx.timer(Duration::from_secs(1)).await; + let res = la.await; + assert!(res.completed_ok()); + Ok(().into()) +} + #[tokio::test] async fn long_running_local_act_with_timer() { let wf_name = "long_running_local_act_with_timer"; let mut starter = CoreWfStarter::new(wf_name); starter.workflow_options.task_timeout = Some(Duration::from_secs(1)); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), local_act_then_timer_then_wait); - worker.register_activity( - "echo_activity", - |_ctx: ActivityContext, str: String| 
async { - tokio::time::sleep(Duration::from_secs(4)).await; - Ok(str) - }, - ); starter.start_with_worker(wf_name, &mut worker).await; worker.run_until_done().await.unwrap(); @@ -159,13 +164,8 @@ async fn long_running_local_act_with_timer() { pub(crate) async fn local_act_fanout_wf(ctx: WfContext) -> WorkflowResult<()> { let las: Vec<_> = (1..=50) .map(|i| { - ctx.local_activity(LocalActivityOptions { - activity_type: "echo_activity".to_string(), - input: format!("Hi {i}") - .as_json_payload() - .expect("serializes fine"), - ..Default::default() - }) + ctx.start_local_activity(StdActivities::echo, format!("Hi {i}"), Default::default()) + .expect("serializes fine") }) .collect(); ctx.timer(Duration::from_secs(1)).await; @@ -177,10 +177,10 @@ pub(crate) async fn local_act_fanout_wf(ctx: WfContext) -> WorkflowResult<()> { async fn local_act_fanout() { let wf_name = "local_act_fanout"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.max_outstanding_local_activities = Some(1_usize); + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(5, 1, 1, 1)); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), local_act_fanout_wf); - worker.register_activity("echo_activity", echo); starter.start_with_worker(wf_name, &mut worker).await; worker.run_until_done().await.unwrap(); @@ -190,30 +190,30 @@ async fn local_act_fanout() { async fn local_act_retry_timer_backoff() { let wf_name = "local_act_retry_timer_backoff"; let mut starter = CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { let res = ctx - .local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - initial_interval: Some(prost_dur!(from_micros(15))), - // We 
want two local backoffs that are short. Third backoff will use timer - backoff_coefficient: 1_000., - maximum_interval: Some(prost_dur!(from_millis(1500))), - maximum_attempts: 4, - non_retryable_error_types: vec![], + .start_local_activity( + StdActivities::always_fail, + (), + LocalActivityOptions { + retry_policy: RetryPolicy { + initial_interval: Some(prost_dur!(from_micros(15))), + // We want two local backoffs that are short. Third backoff will use timer + backoff_coefficient: 1_000., + maximum_interval: Some(prost_dur!(from_millis(1500))), + maximum_attempts: 4, + non_retryable_error_types: vec![], + }, + timer_backoff_threshold: Some(Duration::from_secs(1)), + ..Default::default() }, - timer_backoff_threshold: Some(Duration::from_secs(1)), - ..Default::default() - }) + )? .await; assert!(res.failed()); Ok(().into()) }); - worker.register_activity("echo", |_: ActivityContext, _: String| async { - Result::<(), _>::Err(anyhow!("Oh no I failed!").into()) - }); let run_id = worker .submit_wf( @@ -240,37 +240,51 @@ async fn local_act_retry_timer_backoff() { #[tokio::test] async fn cancel_immediate(#[case] cancel_type: ActivityCancellationType) { let wf_name = format!("cancel_immediate_{cancel_type:?}"); - let mut starter = CoreWfStarter::new(&wf_name); - let mut worker = starter.worker().await; - worker.register_wf(&wf_name, move |ctx: WfContext| async move { - let la = ctx.local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - cancel_type, - ..Default::default() - }); - la.cancel(&ctx); - let resolution = la.await; - assert!(resolution.cancelled()); - Ok(().into()) - }); - // If we don't use this, we'd hang on shutdown for abandon cancel modes. 
let manual_cancel = CancellationToken::new(); - let manual_cancel_act = manual_cancel.clone(); + let mut starter = CoreWfStarter::new(&wf_name); - worker.register_activity("echo", move |ctx: ActivityContext, _: String| { - let manual_cancel_act = manual_cancel_act.clone(); - async move { + struct EchoWithManualCancel { + manual_cancel: CancellationToken, + } + #[activities] + impl EchoWithManualCancel { + #[activity] + async fn echo( + self: Arc, + ctx: ActivityContext, + _: String, + ) -> Result<(), ActivityError> { tokio::select! { - _ = tokio::time::sleep(Duration::from_secs(10)) => {}, + _ = tokio::time::sleep(Duration::from_secs(10)) => {} _ = ctx.cancelled() => { return Err(ActivityError::cancelled()) } - _ = manual_cancel_act.cancelled() => {} + _ = self.manual_cancel.cancelled() => {} } Ok(()) } + } + + starter + .sdk_config + .register_activities(EchoWithManualCancel { + manual_cancel: manual_cancel.clone(), + }); + let mut worker = starter.worker().await; + worker.register_wf(&wf_name, move |ctx: WfContext| async move { + let la = ctx.start_local_activity( + EchoWithManualCancel::echo, + "hi".to_string(), + LocalActivityOptions { + cancel_type, + ..Default::default() + }, + )?; + la.cancel(&ctx); + let resolution = la.await; + assert!(resolution.cancelled()); + Ok(().into()) }); starter.start_with_worker(wf_name, &mut worker).await; @@ -324,47 +338,24 @@ async fn cancel_after_act_starts( cancel_type: ActivityCancellationType, ) { let wf_name = format!("cancel_after_act_starts_{cancel_on_backoff:?}_{cancel_type:?}"); - let mut starter = CoreWfStarter::new(&wf_name); - starter.workflow_options.task_timeout = Some(Duration::from_secs(1)); - let mut worker = starter.worker().await; - let bo_dur = cancel_on_backoff.unwrap_or_else(|| Duration::from_secs(1)); - worker.register_wf(&wf_name, move |ctx: WfContext| async move { - let la = ctx.local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: "hi".as_json_payload().expect("serializes 
fine"), - retry_policy: RetryPolicy { - initial_interval: Some(bo_dur.try_into().unwrap()), - backoff_coefficient: 1., - maximum_interval: Some(bo_dur.try_into().unwrap()), - // Retry forever until cancelled - ..Default::default() - }, - timer_backoff_threshold: Some(Duration::from_secs(1)), - cancel_type, - ..Default::default() - }); - ctx.timer(Duration::from_secs(1)).await; - // Note that this cancel can't go through for *two* WF tasks, because we do a full heartbeat - // before the timer (LA hasn't resolved), and then the timer fired event won't appear in - // history until *after* the next WFT because we force generated it when we sent the timer - // command. - la.cancel(&ctx); - // This extra timer is here to ensure the presence of another WF task doesn't mess up - // resolving the LA with cancel on replay - ctx.timer(Duration::from_secs(1)).await; - let resolution = la.await; - assert!(resolution.cancelled()); - Ok(().into()) - }); - // If we don't use this, we'd hang on shutdown for abandon cancel modes. let manual_cancel = CancellationToken::new(); - let manual_cancel_act = manual_cancel.clone(); + let mut starter = CoreWfStarter::new(&wf_name); + starter.workflow_options.task_timeout = Some(Duration::from_secs(1)); - worker.register_activity("echo", move |ctx: ActivityContext, _: String| { - let manual_cancel_act = manual_cancel_act.clone(); - async move { - if cancel_on_backoff.is_some() { + struct EchoWithManualCancelAndBackoff { + manual_cancel: CancellationToken, + cancel_on_backoff: Option, + } + #[activities] + impl EchoWithManualCancelAndBackoff { + #[activity] + async fn echo( + self: Arc, + ctx: ActivityContext, + _: String, + ) -> Result<(), ActivityError> { + if self.cancel_on_backoff.is_some() { if ctx.is_cancelled() { return Err(ActivityError::cancelled()); } @@ -372,17 +363,60 @@ async fn cancel_after_act_starts( return Err(anyhow!("Oh no I failed!").into()); } else { tokio::select! 
{ - _ = tokio::time::sleep(Duration::from_secs(100)) => {}, + _ = tokio::time::sleep(Duration::from_secs(100)) => {} _ = ctx.cancelled() => { return Err(ActivityError::cancelled()) } - _ = manual_cancel_act.cancelled() => { + _ = self.manual_cancel.cancelled() => { return Ok(()) } } } Err(anyhow!("Oh no I failed!").into()) } + } + + starter + .sdk_config + .register_activities(EchoWithManualCancelAndBackoff { + manual_cancel: manual_cancel.clone(), + cancel_on_backoff: if cancel_on_backoff.is_some() { + Some(CancellationToken::new()) + } else { + None + }, + }); + let mut worker = starter.worker().await; + let bo_dur = cancel_on_backoff.unwrap_or_else(|| Duration::from_secs(1)); + worker.register_wf(&wf_name, move |ctx: WfContext| async move { + let la = ctx.start_local_activity( + EchoWithManualCancelAndBackoff::echo, + "hi".to_string(), + LocalActivityOptions { + retry_policy: RetryPolicy { + initial_interval: Some(bo_dur.try_into().unwrap()), + backoff_coefficient: 1., + maximum_interval: Some(bo_dur.try_into().unwrap()), + // Retry forever until cancelled + ..Default::default() + }, + timer_backoff_threshold: Some(Duration::from_secs(1)), + cancel_type, + ..Default::default() + }, + )?; + ctx.timer(Duration::from_secs(1)).await; + // Note that this cancel can't go through for *two* WF tasks, because we do a full heartbeat + // before the timer (LA hasn't resolved), and then the timer fired event won't appear in + // history until *after* the next WFT because we force generated it when we sent the timer + // command. 
+ la.cancel(&ctx); + // This extra timer is here to ensure the presence of another WF task doesn't mess up + // resolving the LA with cancel on replay + ctx.timer(Duration::from_secs(1)).await; + let resolution = la.await; + assert!(resolution.cancelled()); + Ok(().into()) }); starter.start_with_worker(&wf_name, &mut worker).await; @@ -408,6 +442,25 @@ async fn x_to_close_timeout(#[case] is_schedule: bool) { if is_schedule { "schedule" } else { "start" } ); let mut starter = CoreWfStarter::new(&wf_name); + + struct LongRunningWithCancellation; + #[activities] + impl LongRunningWithCancellation { + #[activity] + async fn go(ctx: ActivityContext) -> Result<(), ActivityError> { + tokio::select! { + _ = tokio::time::sleep(Duration::from_secs(100)) => {} + _ = ctx.cancelled() => { + return Err(ActivityError::cancelled()) + } + } + Ok(()) + } + } + + starter + .sdk_config + .register_activities(LongRunningWithCancellation); let mut worker = starter.worker().await; let (sched, start) = if is_schedule { (Some(Duration::from_secs(2)), None) @@ -422,34 +475,27 @@ async fn x_to_close_timeout(#[case] is_schedule: bool) { worker.register_wf(wf_name.to_owned(), move |ctx: WfContext| async move { let res = ctx - .local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - initial_interval: Some(prost_dur!(from_micros(15))), - backoff_coefficient: 1_000., - maximum_interval: Some(prost_dur!(from_millis(1500))), - maximum_attempts: 4, - non_retryable_error_types: vec![], + .start_local_activity( + LongRunningWithCancellation::go, + (), + LocalActivityOptions { + retry_policy: RetryPolicy { + initial_interval: Some(prost_dur!(from_micros(15))), + backoff_coefficient: 1_000., + maximum_interval: Some(prost_dur!(from_millis(1500))), + maximum_attempts: 4, + non_retryable_error_types: vec![], + }, + timer_backoff_threshold: Some(Duration::from_secs(1)), + schedule_to_close_timeout: 
sched, + start_to_close_timeout: start, + ..Default::default() }, - timer_backoff_threshold: Some(Duration::from_secs(1)), - schedule_to_close_timeout: sched, - start_to_close_timeout: start, - ..Default::default() - }) + )? .await; assert_eq!(res.timed_out(), Some(timeout_type)); Ok(().into()) }); - worker.register_activity("echo", |ctx: ActivityContext, _: String| async move { - tokio::select! { - _ = tokio::time::sleep(Duration::from_secs(100)) => {}, - _ = ctx.cancelled() => { - return Err(ActivityError::cancelled()) - } - }; - Ok(()) - }); starter.start_with_worker(wf_name, &mut worker).await; worker.run_until_done().await.unwrap(); @@ -466,33 +512,47 @@ async fn schedule_to_close_timeout_across_timer_backoff(#[case] cached: bool) { ); let mut starter = CoreWfStarter::new(&wf_name); if !cached { - starter.worker_config.max_cached_workflows = 0_usize; + starter.sdk_config.max_cached_workflows = 0_usize; } let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { let res = ctx - .local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - initial_interval: Some(prost_dur!(from_millis(15))), - backoff_coefficient: 1_000., - maximum_interval: Some(prost_dur!(from_millis(1000))), - maximum_attempts: 40, - non_retryable_error_types: vec![], + .start_local_activity( + FailWithAtomicCounter::go, + "hi".to_string(), + LocalActivityOptions { + retry_policy: RetryPolicy { + initial_interval: Some(prost_dur!(from_millis(15))), + backoff_coefficient: 1_000., + maximum_interval: Some(prost_dur!(from_millis(1000))), + maximum_attempts: 40, + non_retryable_error_types: vec![], + }, + timer_backoff_threshold: Some(Duration::from_millis(500)), + schedule_to_close_timeout: Some(Duration::from_secs(2)), + ..Default::default() }, - timer_backoff_threshold: Some(Duration::from_millis(500)), - schedule_to_close_timeout: 
Some(Duration::from_secs(2)), - ..Default::default() - }) + )? .await; assert_eq!(res.timed_out(), Some(TimeoutType::ScheduleToClose)); Ok(().into()) }); - let num_attempts: &'static _ = Box::leak(Box::new(AtomicU8::new(0))); - worker.register_activity("echo", move |_: ActivityContext, _: String| async { - num_attempts.fetch_add(1, Ordering::Relaxed); - Result::<(), _>::Err(anyhow!("Oh no I failed!").into()) + let num_attempts = Arc::new(AtomicU8::new(0)); + + struct FailWithAtomicCounter { + counter: Arc, + } + #[activities] + impl FailWithAtomicCounter { + #[activity] + async fn go(self: Arc, _: ActivityContext, _: String) -> Result<(), ActivityError> { + self.counter.fetch_add(1, Ordering::Relaxed); + Err(anyhow!("Oh no I failed!").into()) + } + } + + worker.register_activities(FailWithAtomicCounter { + counter: num_attempts.clone(), }); starter.start_with_worker(wf_name, &mut worker).await; @@ -507,16 +567,10 @@ async fn schedule_to_close_timeout_across_timer_backoff(#[case] cached: bool) { async fn eviction_wont_make_local_act_get_dropped(#[values(true, false)] short_wft_timeout: bool) { let wf_name = format!("eviction_wont_make_local_act_get_dropped_{short_wft_timeout}"); let mut starter = CoreWfStarter::new(&wf_name); - starter.worker_config.max_cached_workflows = 0_usize; + starter.sdk_config.max_cached_workflows = 0_usize; + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), local_act_then_timer_then_wait); - worker.register_activity( - "echo_activity", - |_ctx: ActivityContext, str: String| async { - tokio::time::sleep(Duration::from_secs(4)).await; - Ok(str) - }, - ); let opts = if short_wft_timeout { WorkflowOptions { @@ -537,42 +591,44 @@ async fn eviction_wont_make_local_act_get_dropped(#[values(true, false)] short_w async fn timer_backoff_concurrent_with_non_timer_backoff() { let wf_name = "timer_backoff_concurrent_with_non_timer_backoff"; let mut starter = 
CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { - let r1 = ctx.local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - initial_interval: Some(prost_dur!(from_micros(15))), - backoff_coefficient: 1_000., - maximum_interval: Some(prost_dur!(from_millis(1500))), - maximum_attempts: 4, - non_retryable_error_types: vec![], + let r1 = ctx.start_local_activity( + StdActivities::always_fail, + (), + LocalActivityOptions { + retry_policy: RetryPolicy { + initial_interval: Some(prost_dur!(from_micros(15))), + backoff_coefficient: 1_000., + maximum_interval: Some(prost_dur!(from_millis(1500))), + maximum_attempts: 4, + non_retryable_error_types: vec![], + }, + timer_backoff_threshold: Some(Duration::from_secs(1)), + ..Default::default() }, - timer_backoff_threshold: Some(Duration::from_secs(1)), - ..Default::default() - }); - let r2 = ctx.local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - initial_interval: Some(prost_dur!(from_millis(15))), - backoff_coefficient: 10., - maximum_interval: Some(prost_dur!(from_millis(1500))), - maximum_attempts: 4, - non_retryable_error_types: vec![], + )?; + let r2 = ctx.start_local_activity( + StdActivities::always_fail, + (), + LocalActivityOptions { + retry_policy: RetryPolicy { + initial_interval: Some(prost_dur!(from_millis(15))), + backoff_coefficient: 10., + maximum_interval: Some(prost_dur!(from_millis(1500))), + maximum_attempts: 4, + non_retryable_error_types: vec![], + }, + timer_backoff_threshold: Some(Duration::from_secs(10)), + ..Default::default() }, - timer_backoff_threshold: Some(Duration::from_secs(10)), - ..Default::default() - }); + )?; let (r1, r2) = 
tokio::join!(r1, r2); assert!(r1.failed()); assert!(r2.failed()); Ok(().into()) }); - worker.register_activity("echo", |_: ActivityContext, _: String| async { - Result::<(), _>::Err(anyhow!("Oh no I failed!").into()) - }); starter.start_with_worker(wf_name, &mut worker).await; worker.run_until_done().await.unwrap(); @@ -582,23 +638,26 @@ async fn timer_backoff_concurrent_with_non_timer_backoff() { async fn repro_nondeterminism_with_timer_bug() { let wf_name = "repro_nondeterminism_with_timer_bug"; let mut starter = CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { let t1 = ctx.timer(Duration::from_secs(30)); - let r1 = ctx.local_activity(LocalActivityOptions { - activity_type: "delay".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - initial_interval: Some(prost_dur!(from_micros(15))), - backoff_coefficient: 1_000., - maximum_interval: Some(prost_dur!(from_millis(1500))), - maximum_attempts: 4, - non_retryable_error_types: vec![], + let r1 = ctx.start_local_activity( + StdActivities::delay, + Duration::from_secs(2), + LocalActivityOptions { + retry_policy: RetryPolicy { + initial_interval: Some(prost_dur!(from_micros(15))), + backoff_coefficient: 1_000., + maximum_interval: Some(prost_dur!(from_millis(1500))), + maximum_attempts: 4, + non_retryable_error_types: vec![], + }, + timer_backoff_threshold: Some(Duration::from_secs(1)), + ..Default::default() }, - timer_backoff_threshold: Some(Duration::from_secs(1)), - ..Default::default() - }); + )?; tokio::pin!(t1); tokio::select! 
{ _ = &mut t1 => {}, @@ -609,10 +668,6 @@ async fn repro_nondeterminism_with_timer_bug() { ctx.timer(Duration::from_secs(1)).await; Ok(().into()) }); - worker.register_activity("delay", |_: ActivityContext, _: String| async { - tokio::time::sleep(Duration::from_secs(2)).await; - Ok(()) - }); let run_id = worker .submit_wf( @@ -653,10 +708,7 @@ async fn weird_la_nondeterminism_repro(#[values(true, false)] fix_hist: bool) { "evict_while_la_running_no_interference", la_problem_workflow, ); - worker.register_activity("delay", |_: ActivityContext, _: String| async { - tokio::time::sleep(Duration::from_secs(15)).await; - Ok(()) - }); + worker.register_activities(StdActivities); worker.run().await.unwrap(); } @@ -677,10 +729,7 @@ async fn second_weird_la_nondeterminism_repro() { "evict_while_la_running_no_interference", la_problem_workflow, ); - worker.register_activity("delay", |_: ActivityContext, _: String| async { - tokio::time::sleep(Duration::from_secs(15)).await; - Ok(()) - }); + worker.register_activities(StdActivities); worker.run().await.unwrap(); } @@ -699,10 +748,7 @@ async fn third_weird_la_nondeterminism_repro() { "evict_while_la_running_no_interference", la_problem_workflow, ); - worker.register_activity("delay", |_: ActivityContext, _: String| async { - tokio::time::sleep(Duration::from_secs(15)).await; - Ok(()) - }); + worker.register_activities(StdActivities); worker.run().await.unwrap(); } @@ -722,27 +768,50 @@ async fn third_weird_la_nondeterminism_repro() { async fn la_resolve_same_time_as_other_cancel() { let wf_name = "la_resolve_same_time_as_other_cancel"; let mut starter = CoreWfStarter::new(wf_name); + + struct DelayWithCancellation; + #[activities] + impl DelayWithCancellation { + #[activity] + async fn delay(ctx: ActivityContext, dur: Duration) -> Result<(), ActivityError> { + tokio::select! 
{ + _ = tokio::time::sleep(dur) => {} + _ = ctx.cancelled() => {} + } + Ok(()) + } + } + + starter + .sdk_config + .register_activities(DelayWithCancellation); // The activity won't get a chance to receive the cancel so make sure we still exit fast - starter.worker_config.graceful_shutdown_period = Some(Duration::from_millis(100)); + starter.sdk_config.graceful_shutdown_period = Some(Duration::from_millis(100)); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { - let normal_act = ctx.activity(ActivityOptions { - activity_type: "delay".to_string(), - input: 9000.as_json_payload().expect("serializes fine"), - cancellation_type: ActivityCancellationType::TryCancel, - start_to_close_timeout: Some(Duration::from_secs(9000)), - ..Default::default() - }); + let normal_act = ctx + .start_activity( + DelayWithCancellation::delay, + Duration::from_secs(9), + ActivityOptions { + cancellation_type: ActivityCancellationType::TryCancel, + start_to_close_timeout: Some(Duration::from_secs(9000)), + ..Default::default() + }, + ) + .unwrap(); // Make new task ctx.timer(Duration::from_millis(1)).await; // Start LA and cancel the activity at the same time - let local_act = ctx.local_activity(LocalActivityOptions { - activity_type: "delay".to_string(), - input: 100.as_json_payload().expect("serializes fine"), - ..Default::default() - }); + let local_act = ctx.start_local_activity( + DelayWithCancellation::delay, + Duration::from_millis(100), + LocalActivityOptions { + ..Default::default() + }, + )?; normal_act.cancel(&ctx); // Race them, starting a timer if LA completes first tokio::select! { @@ -754,13 +823,6 @@ async fn la_resolve_same_time_as_other_cancel() { } Ok(().into()) }); - worker.register_activity("delay", |ctx: ActivityContext, wait_time: u64| async move { - tokio::select! 
{ - _ = tokio::time::sleep(Duration::from_millis(wait_time)) => {} - _ = ctx.cancelled() => {} - } - Ok(()) - }); let run_id = worker .submit_wf( @@ -794,6 +856,7 @@ async fn long_local_activity_with_update( let wf_name = format!("{}-{}", ctx.name, ctx.case.unwrap()); let mut starter = CoreWfStarter::new(&wf_name); starter.workflow_options.task_timeout = Some(Duration::from_secs(1)); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; let client = starter.get_client().await; @@ -816,19 +879,15 @@ async fn long_local_activity_with_update( } }, ); - ctx.local_activity(LocalActivityOptions { - activity_type: "delay".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_local_activity( + StdActivities::delay, + Duration::from_secs(6), + LocalActivityOptions::default(), + )? .await; update_counter.load(Ordering::Relaxed); Ok(().into()) }); - worker.register_activity("delay", |_: ActivityContext, _: String| async { - tokio::time::sleep(Duration::from_secs(6)).await; - Ok(()) - }); let handle = starter .start_with_worker(wf_name.clone(), &mut worker) @@ -887,6 +946,7 @@ async fn local_activity_with_heartbeat_only_causes_one_wakeup() { let wf_name = "local_activity_with_heartbeat_only_causes_one_wakeup"; let mut starter = CoreWfStarter::new(wf_name); starter.workflow_options.task_timeout = Some(Duration::from_secs(1)); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), move |ctx: WfContext| async move { @@ -894,11 +954,12 @@ async fn local_activity_with_heartbeat_only_causes_one_wakeup() { let la_resolved = AtomicBool::new(false); tokio::join!( async { - ctx.local_activity(LocalActivityOptions { - activity_type: "delay".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_local_activity( + StdActivities::delay, + 
Duration::from_secs(6), + LocalActivityOptions::default(), + ) + .unwrap() .await; la_resolved.store(true, Ordering::Relaxed); }, @@ -912,10 +973,6 @@ async fn local_activity_with_heartbeat_only_causes_one_wakeup() { ); Ok(().into()) }); - worker.register_activity("delay", |_: ActivityContext, _: String| async { - tokio::time::sleep(Duration::from_secs(6)).await; - Ok(()) - }); let handle = starter.start_with_worker(wf_name, &mut worker).await; worker.run_until_done().await.unwrap(); @@ -932,12 +989,14 @@ async fn local_activity_with_heartbeat_only_causes_one_wakeup() { } pub(crate) async fn local_activity_with_summary_wf(ctx: WfContext) -> WorkflowResult<()> { - ctx.local_activity(LocalActivityOptions { - activity_type: "echo_activity".to_string(), - input: "hi!".as_json_payload().expect("serializes fine"), - summary: Some("Echo summary".to_string()), - ..Default::default() - }) + ctx.start_local_activity( + StdActivities::echo, + "hi".to_string(), + LocalActivityOptions { + summary: Some("Echo summary".to_string()), + ..Default::default() + }, + )? 
.await; Ok(().into()) } @@ -946,9 +1005,9 @@ pub(crate) async fn local_activity_with_summary_wf(ctx: WfContext) -> WorkflowRe async fn local_activity_with_summary() { let wf_name = "local_activity_with_summary"; let mut starter = CoreWfStarter::new(wf_name); + starter.sdk_config.register_activities(StdActivities); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), local_activity_with_summary_wf); - worker.register_activity("echo_activity", echo); let handle = starter.start_with_worker(wf_name, &mut worker).await; worker.run_until_done().await.unwrap(); @@ -981,10 +1040,6 @@ async fn local_activity_with_summary() { ); } -async fn echo(_ctx: ActivityContext, e: String) -> Result { - Ok(e) -} - /// This test verifies that when replaying we are able to resolve local activities whose data we /// don't see until after the workflow issues the command #[rstest::rstest] @@ -1021,17 +1076,13 @@ async fn local_act_two_wfts_before_marker(#[case] replay: bool, #[case] cached: worker.register_wf( DEFAULT_WORKFLOW_TYPE.to_owned(), |ctx: WfContext| async move { - let la = ctx.local_activity(LocalActivityOptions { - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - ..Default::default() - }); + let la = ctx.start_local_activity(StdActivities::default, (), Default::default())?; ctx.timer(Duration::from_secs(1)).await; la.await; Ok(().into()) }, ); - worker.register_activity(DEFAULT_ACTIVITY_TYPE, echo); + worker.register_activities(StdActivities); worker .submit_wf( wf_id.to_owned(), @@ -1064,7 +1115,7 @@ async fn local_act_many_concurrent() { let mut worker = mock_sdk(mh); worker.register_wf(DEFAULT_WORKFLOW_TYPE.to_owned(), local_act_fanout_wf); - worker.register_activity("echo_activity", echo); + worker.register_activities(StdActivities); worker .submit_wf( wf_id.to_owned(), @@ -1104,33 +1155,50 @@ async fn local_act_heartbeat(#[case] shutdown_middle: bool) { wc.max_cached_workflows = 1; 
wc.max_outstanding_workflow_tasks = Some(1); }); - let core = worker.core_worker.clone(); + let core = worker.core_worker(); let shutdown_barr: &'static Barrier = Box::leak(Box::new(Barrier::new(2))); worker.register_wf( DEFAULT_WORKFLOW_TYPE.to_owned(), |ctx: WfContext| async move { - ctx.local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_local_activity( + EchoWithConditionalBarrier::echo, + "hi".to_string(), + LocalActivityOptions::default(), + )? .await; Ok(().into()) }, ); - worker.register_activity( - "echo", - move |_ctx: ActivityContext, str: String| async move { - if shutdown_middle { - shutdown_barr.wait().await; + + struct EchoWithConditionalBarrier { + shutdown_middle: bool, + shutdown_barr: &'static Barrier, + wft_timeout: Duration, + } + #[activities] + impl EchoWithConditionalBarrier { + #[activity] + async fn echo( + self: Arc, + _: ActivityContext, + str: String, + ) -> Result { + if self.shutdown_middle { + self.shutdown_barr.wait().await; } // Take slightly more than two workflow tasks - tokio::time::sleep(wft_timeout.mul_f32(2.2)).await; + tokio::time::sleep(self.wft_timeout.mul_f32(2.2)).await; Ok(str) - }, - ); + } + } + + worker.register_activities(EchoWithConditionalBarrier { + shutdown_middle, + shutdown_barr, + wft_timeout, + }); worker .submit_wf( wf_id.to_owned(), @@ -1170,18 +1238,20 @@ async fn local_act_fail_and_retry(#[case] eventually_pass: bool) { DEFAULT_WORKFLOW_TYPE.to_owned(), move |ctx: WfContext| async move { let la_res = ctx - .local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - initial_interval: Some(prost_dur!(from_millis(50))), - backoff_coefficient: 1.2, - maximum_interval: None, - maximum_attempts: 5, - non_retryable_error_types: vec![], + .start_local_activity( + 
EventuallyPassingActivity::echo, + "hi".to_string(), + LocalActivityOptions { + retry_policy: RetryPolicy { + initial_interval: Some(prost_dur!(from_millis(50))), + backoff_coefficient: 1.2, + maximum_interval: None, + maximum_attempts: 5, + non_retryable_error_types: vec![], + }, + ..Default::default() }, - ..Default::default() - }) + )? .await; if eventually_pass { assert!(la_res.completed_ok()) @@ -1191,14 +1261,28 @@ async fn local_act_fail_and_retry(#[case] eventually_pass: bool) { Ok(().into()) }, ); - let attempts: &'static _ = Box::leak(Box::new(AtomicUsize::new(0))); - worker.register_activity("echo", move |_ctx: ActivityContext, _: String| async move { - // Succeed on 3rd attempt (which is ==2 since fetch_add returns prev val) - if 2 == attempts.fetch_add(1, Ordering::Relaxed) && eventually_pass { - Ok(()) - } else { - Err(anyhow!("Oh no I failed!").into()) + let attempts = Arc::new(AtomicUsize::new(0)); + + struct EventuallyPassingActivity { + attempts: Arc, + eventually_pass: bool, + } + #[activities] + impl EventuallyPassingActivity { + #[activity] + async fn echo(self: Arc, _: ActivityContext, _: String) -> Result<(), ActivityError> { + // Succeed on 3rd attempt (which is ==2 since fetch_add returns prev val) + if 2 == self.attempts.fetch_add(1, Ordering::Relaxed) && self.eventually_pass { + Ok(()) + } else { + Err(anyhow!("Oh no I failed!").into()) + } } + } + + worker.register_activities(EventuallyPassingActivity { + attempts: attempts.clone(), + eventually_pass, }); worker .submit_wf( @@ -1219,18 +1303,22 @@ async fn local_act_retry_long_backoff_uses_timer() { let mut t = TestHistoryBuilder::default(); t.add_by_type(EventType::WorkflowExecutionStarted); t.add_full_wf_task(); - t.add_local_activity_fail_marker( + t.add_local_activity_marker( 1, "1", - Failure::application_failure("la failed".to_string(), false), + None, + Some(Failure::application_failure("la failed".to_string(), false)), + |m| m.activity_type = 
StdActivities::always_fail.name().to_owned(), ); let timer_started_event_id = t.add_by_type(EventType::TimerStarted); t.add_timer_fired(timer_started_event_id, "1".to_string()); t.add_full_wf_task(); - t.add_local_activity_fail_marker( + t.add_local_activity_marker( 2, "2", - Failure::application_failure("la failed".to_string(), false), + None, + Some(Failure::application_failure("la failed".to_string(), false)), + |m| m.activity_type = StdActivities::always_fail.name().to_owned(), ); let timer_started_event_id = t.add_by_type(EventType::TimerStarted); t.add_timer_fired(timer_started_event_id, "2".to_string()); @@ -1251,19 +1339,21 @@ async fn local_act_retry_long_backoff_uses_timer() { DEFAULT_WORKFLOW_TYPE.to_owned(), |ctx: WfContext| async move { let la_res = ctx - .local_activity(LocalActivityOptions { - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - initial_interval: Some(prost_dur!(from_millis(65))), - // This will make the second backoff 65 seconds, plenty to use timer - backoff_coefficient: 1_000., - maximum_interval: Some(prost_dur!(from_secs(600))), - maximum_attempts: 3, - non_retryable_error_types: vec![], + .start_local_activity( + StdActivities::always_fail, + (), + LocalActivityOptions { + retry_policy: RetryPolicy { + initial_interval: Some(prost_dur!(from_millis(65))), + // This will make the second backoff 65 seconds, plenty to use timer + backoff_coefficient: 1_000., + maximum_interval: Some(prost_dur!(from_secs(600))), + maximum_attempts: 3, + non_retryable_error_types: vec![], + }, + ..Default::default() }, - ..Default::default() - }) + )? 
.await; assert!(la_res.failed()); // Extra timer just to have an extra workflow task which we can return full history for @@ -1271,12 +1361,7 @@ async fn local_act_retry_long_backoff_uses_timer() { Ok(().into()) }, ); - worker.register_activity( - DEFAULT_ACTIVITY_TYPE, - move |_ctx: ActivityContext, _: String| async move { - Result::<(), _>::Err(anyhow!("Oh no I failed!").into()) - }, - ); + worker.register_activities(StdActivities); worker .submit_wf( wf_id.to_owned(), @@ -1294,7 +1379,9 @@ async fn local_act_null_result() { let mut t = TestHistoryBuilder::default(); t.add_by_type(EventType::WorkflowExecutionStarted); t.add_full_wf_task(); - t.add_local_activity_marker(1, "1", None, None, |_| {}); + t.add_local_activity_marker(1, "1", None, None, |m| { + m.activity_type = StdActivities::no_op.name().to_owned() + }); t.add_workflow_execution_completed(); let wf_id = "fakeid"; @@ -1305,18 +1392,12 @@ async fn local_act_null_result() { worker.register_wf( DEFAULT_WORKFLOW_TYPE.to_owned(), |ctx: WfContext| async move { - ctx.local_activity(LocalActivityOptions { - activity_type: "nullres".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - ..Default::default() - }) - .await; + ctx.start_local_activity(StdActivities::no_op, (), LocalActivityOptions::default())? + .await; Ok(().into()) }, ); - worker.register_activity("nullres", |_ctx: ActivityContext, _: String| async { - Ok(()) - }); + worker.register_activities(StdActivities); worker .submit_wf( wf_id.to_owned(), @@ -1350,19 +1431,13 @@ async fn local_act_command_immediately_follows_la_marker() { worker.register_wf( DEFAULT_WORKFLOW_TYPE.to_owned(), |ctx: WfContext| async move { - ctx.local_activity(LocalActivityOptions { - activity_type: "nullres".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - ..Default::default() - }) - .await; + ctx.start_local_activity(StdActivities::no_op, (), LocalActivityOptions::default())? 
+ .await; ctx.timer(Duration::from_secs(1)).await; Ok(().into()) }, ); - worker.register_activity("nullres", |_ctx: ActivityContext, _: String| async { - Ok(()) - }); + worker.register_activities(StdActivities); worker .submit_wf( wf_id.to_owned(), @@ -1646,13 +1721,15 @@ async fn test_schedule_to_start_timeout() { DEFAULT_WORKFLOW_TYPE.to_owned(), |ctx: WfContext| async move { let la_res = ctx - .local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - // Impossibly small timeout so we timeout in the queue - schedule_to_start_timeout: prost_dur!(from_nanos(1)), - ..Default::default() - }) + .start_local_activity( + StdActivities::echo, + "hi".to_string(), + LocalActivityOptions { + // Impossibly small timeout so we timeout in the queue + schedule_to_start_timeout: prost_dur!(from_nanos(1)), + ..Default::default() + }, + )? .await; assert_eq!(la_res.timed_out(), Some(TimeoutType::ScheduleToStart)); let rfail = la_res.unwrap_failure(); @@ -1667,9 +1744,7 @@ async fn test_schedule_to_start_timeout() { Ok(().into()) }, ); - worker.register_activity("echo", move |_ctx: ActivityContext, _: String| async move { - Ok(()) - }); + worker.register_activities(StdActivities); worker .submit_wf( wf_id.to_owned(), @@ -1733,20 +1808,22 @@ async fn test_schedule_to_start_timeout_not_based_on_original_time( DEFAULT_WORKFLOW_TYPE.to_owned(), move |ctx: WfContext| async move { let la_res = ctx - .local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - initial_interval: Some(prost_dur!(from_millis(50))), - backoff_coefficient: 1.2, - maximum_interval: None, - maximum_attempts: 5, - non_retryable_error_types: vec![], + .start_local_activity( + StdActivities::echo, + "hi".to_string(), + LocalActivityOptions { + retry_policy: RetryPolicy { + initial_interval: Some(prost_dur!(from_millis(50))), + 
backoff_coefficient: 1.2, + maximum_interval: None, + maximum_attempts: 5, + non_retryable_error_types: vec![], + }, + schedule_to_start_timeout: Some(Duration::from_secs(60)), + schedule_to_close_timeout, + ..Default::default() }, - schedule_to_start_timeout: Some(Duration::from_secs(60)), - schedule_to_close_timeout, - ..Default::default() - }) + )? .await; if is_sched_to_start { assert!(la_res.completed_ok()); @@ -1756,9 +1833,7 @@ async fn test_schedule_to_start_timeout_not_based_on_original_time( Ok(().into()) }, ); - worker.register_activity("echo", move |_ctx: ActivityContext, _: String| async move { - Ok(()) - }); + worker.register_activities(StdActivities); worker .submit_wf( wf_id.to_owned(), @@ -1805,19 +1880,21 @@ async fn start_to_close_timeout_allows_retries(#[values(true, false)] la_complet DEFAULT_WORKFLOW_TYPE.to_owned(), move |ctx: WfContext| async move { let la_res = ctx - .local_activity(LocalActivityOptions { - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - initial_interval: Some(prost_dur!(from_millis(20))), - backoff_coefficient: 1.0, - maximum_interval: None, - maximum_attempts: 5, - non_retryable_error_types: vec![], + .start_local_activity( + ActivityWithRetriesAndCancellation::go, + (), + LocalActivityOptions { + retry_policy: RetryPolicy { + initial_interval: Some(prost_dur!(from_millis(20))), + backoff_coefficient: 1.0, + maximum_interval: None, + maximum_attempts: 5, + non_retryable_error_types: vec![], + }, + start_to_close_timeout: Some(prost_dur!(from_millis(25))), + ..Default::default() }, - start_to_close_timeout: Some(prost_dur!(from_millis(25))), - ..Default::default() - }) + )? 
.await; if la_completes { assert!(la_res.completed_ok()); @@ -1827,24 +1904,37 @@ async fn start_to_close_timeout_allows_retries(#[values(true, false)] la_complet Ok(().into()) }, ); - let attempts: &'static _ = Box::leak(Box::new(AtomicUsize::new(0))); - let cancels: &'static _ = Box::leak(Box::new(AtomicUsize::new(0))); - worker.register_activity( - DEFAULT_ACTIVITY_TYPE, - move |ctx: ActivityContext, _: String| async move { + let attempts = Arc::new(AtomicUsize::new(0)); + let cancels = Arc::new(AtomicUsize::new(0)); + + struct ActivityWithRetriesAndCancellation { + attempts: Arc, + cancels: Arc, + la_completes: bool, + } + #[activities] + impl ActivityWithRetriesAndCancellation { + #[activity(name = DEFAULT_ACTIVITY_TYPE)] + async fn go(self: Arc, ctx: ActivityContext) -> Result<(), ActivityError> { // Timeout the first 4 attempts, or all of them if we intend to fail - if attempts.fetch_add(1, Ordering::AcqRel) < 4 || !la_completes { + if self.attempts.fetch_add(1, Ordering::AcqRel) < 4 || !self.la_completes { select! 
{ _ = tokio::time::sleep(Duration::from_millis(100)) => (), _ = ctx.cancelled() => { - cancels.fetch_add(1, Ordering::AcqRel); + self.cancels.fetch_add(1, Ordering::AcqRel); return Err(ActivityError::cancelled()); } } } Ok(()) - }, - ); + } + } + + worker.register_activities(ActivityWithRetriesAndCancellation { + attempts: attempts.clone(), + cancels: cancels.clone(), + la_completes, + }); worker .submit_wf( wf_id.to_owned(), @@ -1879,11 +1969,11 @@ async fn wft_failure_cancels_running_las() { worker.register_wf( DEFAULT_WORKFLOW_TYPE.to_owned(), |ctx: WfContext| async move { - let la_handle = ctx.local_activity(LocalActivityOptions { - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - ..Default::default() - }); + let la_handle = ctx.start_local_activity( + ActivityThatExpectsCancellation::go, + (), + Default::default(), + )?; tokio::join!( async { ctx.timer(Duration::from_secs(1)).await; @@ -1894,16 +1984,21 @@ async fn wft_failure_cancels_running_las() { Ok(().into()) }, ); - worker.register_activity( - DEFAULT_ACTIVITY_TYPE, - move |ctx: ActivityContext, _: String| async move { + + struct ActivityThatExpectsCancellation; + #[activities] + impl ActivityThatExpectsCancellation { + #[activity] + async fn go(ctx: ActivityContext) -> Result<(), ActivityError> { let res = tokio::time::timeout(Duration::from_millis(500), ctx.cancelled()).await; if res.is_err() { panic!("Activity must be cancelled!!!!"); } - Result::<(), _>::Err(ActivityError::cancelled()) - }, - ); + Err(ActivityError::cancelled()) + } + } + + worker.register_activities(ActivityThatExpectsCancellation); worker .submit_wf( wf_id.to_owned(), @@ -1946,18 +2041,18 @@ async fn resolved_las_not_recorded_if_wft_fails_many_times() { worker.register_wf( DEFAULT_WORKFLOW_TYPE.to_owned(), WorkflowFunction::new::<_, _, ()>(|ctx: WfContext| async move { - ctx.local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: 
"hi".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_local_activity( + StdActivities::echo, + "hi".to_string(), + LocalActivityOptions { + ..Default::default() + }, + )? .await; panic!() }), ); - worker.register_activity("echo", move |_: ActivityContext, _: String| async move { - Ok(()) - }); + worker.register_activities(StdActivities); worker .submit_wf( wf_id.to_owned(), @@ -2000,25 +2095,25 @@ async fn local_act_records_nonfirst_attempts_ok() { worker.register_wf( DEFAULT_WORKFLOW_TYPE.to_owned(), |ctx: WfContext| async move { - ctx.local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - initial_interval: Some(prost_dur!(from_millis(10))), - backoff_coefficient: 1.0, - maximum_interval: None, - maximum_attempts: 0, - non_retryable_error_types: vec![], + ctx.start_local_activity( + StdActivities::always_fail, + (), + LocalActivityOptions { + retry_policy: RetryPolicy { + initial_interval: Some(prost_dur!(from_millis(10))), + backoff_coefficient: 1.0, + maximum_interval: None, + maximum_attempts: 0, + non_retryable_error_types: vec![], + }, + ..Default::default() }, - ..Default::default() - }) + )? 
.await; Ok(().into()) }, ); - worker.register_activity("echo", move |_ctx: ActivityContext, _: String| async move { - Result::<(), _>::Err(anyhow!("I fail").into()) - }); + worker.register_activities(StdActivities); worker .submit_wf( wf_id.to_owned(), @@ -2319,36 +2414,50 @@ async fn local_act_retry_explicit_delay() { DEFAULT_WORKFLOW_TYPE.to_owned(), move |ctx: WfContext| async move { let la_res = ctx - .local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: "hi".as_json_payload().expect("serializes fine"), - retry_policy: RetryPolicy { - initial_interval: Some(prost_dur!(from_millis(50))), - backoff_coefficient: 1.0, - maximum_attempts: 5, + .start_local_activity( + ActivityWithExplicitBackoff::go, + (), + LocalActivityOptions { + retry_policy: RetryPolicy { + initial_interval: Some(prost_dur!(from_millis(50))), + backoff_coefficient: 1.0, + maximum_attempts: 5, + ..Default::default() + }, ..Default::default() }, - ..Default::default() - }) + )? .await; assert!(la_res.completed_ok()); Ok(().into()) }, ); - let attempts: &'static _ = Box::leak(Box::new(AtomicUsize::new(0))); - worker.register_activity("echo", move |_ctx: ActivityContext, _: String| async move { - // Succeed on 3rd attempt (which is ==2 since fetch_add returns prev val) - let last_attempt = attempts.fetch_add(1, Ordering::Relaxed); - if 0 == last_attempt { - Err(ActivityError::Retryable { - source: anyhow!("Explicit backoff error"), - explicit_delay: Some(Duration::from_millis(300)), - }) - } else if 2 == last_attempt { - Ok(()) - } else { - Err(anyhow!("Oh no I failed!").into()) + let attempts = Arc::new(AtomicUsize::new(0)); + + struct ActivityWithExplicitBackoff { + attempts: Arc, + } + #[activities] + impl ActivityWithExplicitBackoff { + #[activity] + async fn go(self: Arc, _: ActivityContext) -> Result<(), ActivityError> { + // Succeed on 3rd attempt (which is ==2 since fetch_add returns prev val) + let last_attempt = self.attempts.fetch_add(1, Ordering::Relaxed); 
+ if 0 == last_attempt { + Err(ActivityError::Retryable { + source: anyhow!("Explicit backoff error").into_boxed_dyn_error(), + explicit_delay: Some(Duration::from_millis(300)), + }) + } else if 2 == last_attempt { + Ok(()) + } else { + Err(anyhow!("Oh no I failed!").into()) + } } + } + + worker.register_activities(ActivityWithExplicitBackoff { + attempts: attempts.clone(), }); worker .submit_wf( @@ -2368,15 +2477,17 @@ async fn local_act_retry_explicit_delay() { } async fn la_wf(ctx: WfContext) -> WorkflowResult<()> { - ctx.local_activity(LocalActivityOptions { - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), - input: ().as_json_payload().unwrap(), - retry_policy: RetryPolicy { - maximum_attempts: 1, + ctx.start_local_activity( + StdActivities::default, + (), + LocalActivityOptions { + retry_policy: RetryPolicy { + maximum_attempts: 1, + ..Default::default() + }, ..Default::default() }, - ..Default::default() - }) + )? .await; Ok(().into()) } @@ -2448,54 +2559,64 @@ async fn one_la_success(#[case] replay: bool, #[case] completes_ok: bool) { let mut worker = build_fake_sdk(mock_cfg); worker.register_wf(DEFAULT_WORKFLOW_TYPE, la_wf); - worker.register_activity( - DEFAULT_ACTIVITY_TYPE, - move |_ctx: ActivityContext, _: ()| async move { - if replay { + + struct ActivityWithReplayCheck { + replay: bool, + completes_ok: bool, + } + #[activities] + impl ActivityWithReplayCheck { + #[activity(name = DEFAULT_ACTIVITY_TYPE)] + #[allow(unused)] + async fn echo( + self: Arc, + _: ActivityContext, + _: (), + ) -> Result<&'static str, ActivityError> { + if self.replay { panic!("Should not be invoked on replay"); } - if completes_ok { + if self.completes_ok { Ok("hi") } else { Err(anyhow!("Oh no I failed!").into()) } - }, - ); + } + } + + worker.register_activities(ActivityWithReplayCheck { + replay, + completes_ok, + }); worker.run().await.unwrap(); } async fn two_la_wf(ctx: WfContext) -> WorkflowResult<()> { - ctx.local_activity(LocalActivityOptions { - activity_type: 
DEFAULT_ACTIVITY_TYPE.to_string(), - input: ().as_json_payload().unwrap(), - ..Default::default() - }) - .await; - ctx.local_activity(LocalActivityOptions { - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), - input: ().as_json_payload().unwrap(), - ..Default::default() - }) - .await; + ctx.start_local_activity(StdActivities::default, (), LocalActivityOptions::default())? + .await; + ctx.start_local_activity(StdActivities::default, (), LocalActivityOptions::default())? + .await; Ok(().into()) } async fn two_la_wf_parallel(ctx: WfContext) -> WorkflowResult<()> { tokio::join!( - ctx.local_activity(LocalActivityOptions { - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), - input: ().as_json_payload().unwrap(), - ..Default::default() - }), - ctx.local_activity(LocalActivityOptions { - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), - input: ().as_json_payload().unwrap(), - ..Default::default() - }) + ctx.start_local_activity(StdActivities::default, (), LocalActivityOptions::default())?, + ctx.start_local_activity(StdActivities::default, (), LocalActivityOptions::default())? 
); Ok(().into()) } +struct ResolvedActivity; +#[activities] +impl ResolvedActivity { + #[allow(unused)] + #[activity(name = DEFAULT_ACTIVITY_TYPE)] + async fn echo(_: ActivityContext, _: ()) -> Result<&'static str, ActivityError> { + Ok("Resolved") + } +} + #[rstest] #[tokio::test] async fn two_sequential_las( @@ -2585,27 +2706,16 @@ async fn two_sequential_las( } else { worker.register_wf(DEFAULT_WORKFLOW_TYPE, two_la_wf); } - worker.register_activity( - DEFAULT_ACTIVITY_TYPE, - move |_ctx: ActivityContext, _: ()| async move { Ok("Resolved") }, - ); + worker.register_activities(ResolvedActivity); worker.run().await.unwrap(); } async fn la_timer_la(ctx: WfContext) -> WorkflowResult<()> { - ctx.local_activity(LocalActivityOptions { - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), - input: ().as_json_payload().unwrap(), - ..Default::default() - }) - .await; + ctx.start_local_activity(StdActivities::default, (), LocalActivityOptions::default())? + .await; ctx.timer(Duration::from_secs(5)).await; - ctx.local_activity(LocalActivityOptions { - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), - input: ().as_json_payload().unwrap(), - ..Default::default() - }) - .await; + ctx.start_local_activity(StdActivities::default, (), LocalActivityOptions::default())? 
+ .await; Ok(().into()) } @@ -2680,10 +2790,7 @@ async fn las_separated_by_timer(#[case] replay: bool) { let mut worker = build_fake_sdk(mock_cfg); worker.set_worker_interceptor(aai); worker.register_wf(DEFAULT_WORKFLOW_TYPE, la_timer_la); - worker.register_activity( - DEFAULT_ACTIVITY_TYPE, - move |_ctx: ActivityContext, _: ()| async move { Ok("Resolved") }, - ); + worker.register_activities(ResolvedActivity); worker.run().await.unwrap(); } @@ -2715,10 +2822,7 @@ async fn one_la_heartbeating_wft_failure_still_executes() { let mut worker = build_fake_sdk(mock_cfg); worker.register_wf(DEFAULT_WORKFLOW_TYPE, la_wf); - worker.register_activity( - DEFAULT_ACTIVITY_TYPE, - move |_ctx: ActivityContext, _: ()| async move { Ok("Resolved") }, - ); + worker.register_activities(ResolvedActivity); worker.run().await.unwrap(); } @@ -2752,10 +2856,14 @@ async fn immediate_cancel( let mut worker = build_fake_sdk(mock_cfg); worker.register_wf(DEFAULT_WORKFLOW_TYPE, move |ctx: WfContext| async move { - let la = ctx.local_activity(LocalActivityOptions { - cancel_type, - ..Default::default() - }); + let la = ctx.start_local_activity( + StdActivities::default, + (), + LocalActivityOptions { + cancel_type, + ..Default::default() + }, + )?; la.cancel(&ctx); la.await; Ok(().into()) @@ -2849,12 +2957,14 @@ async fn cancel_after_act_starts_canned( let mut worker = build_fake_sdk(mock_cfg); worker.register_wf(DEFAULT_WORKFLOW_TYPE, move |ctx: WfContext| async move { - let la = ctx.local_activity(LocalActivityOptions { - cancel_type, - input: ().as_json_payload().unwrap(), - activity_type: DEFAULT_ACTIVITY_TYPE.to_string(), - ..Default::default() - }); + let la = ctx.start_local_activity( + ActivityWithConditionalCancelWait::echo, + (), + LocalActivityOptions { + cancel_type, + ..Default::default() + }, + )?; ctx.timer(Duration::from_secs(1)).await; la.cancel(&ctx); // This extra timer is here to ensure the presence of another WF task doesn't mess up @@ -2873,15 +2983,26 @@ async fn 
cancel_after_act_starts_canned( ); Ok(().into()) }); - worker.register_activity(DEFAULT_ACTIVITY_TYPE, move |ctx: ActivityContext, _: ()| { - let allow_cancel_barr_clone = allow_cancel_barr_clone.clone(); - async move { - if cancel_type == ActivityCancellationType::WaitCancellationCompleted { + + struct ActivityWithConditionalCancelWait { + cancel_type: ActivityCancellationType, + allow_cancel_barr: CancellationToken, + } + #[activities] + impl ActivityWithConditionalCancelWait { + #[activity(name = DEFAULT_ACTIVITY_TYPE)] + async fn echo(self: Arc, ctx: ActivityContext, _: ()) -> Result<(), ActivityError> { + if self.cancel_type == ActivityCancellationType::WaitCancellationCompleted { ctx.cancelled().await; } - allow_cancel_barr_clone.cancelled().await; - Result::<(), _>::Err(ActivityError::cancelled()) + self.allow_cancel_barr.cancelled().await; + Err(ActivityError::cancelled()) } + } + + worker.register_activities(ActivityWithConditionalCancelWait { + cancel_type, + allow_cancel_barr: allow_cancel_barr_clone, }); worker.run().await.unwrap(); } diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/modify_wf_properties.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/modify_wf_properties.rs index 4af1162e8..3e2b801b6 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/modify_wf_properties.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/modify_wf_properties.rs @@ -32,7 +32,7 @@ async fn sends_modify_wf_props() { let wf_name = "can_upsert_memo"; let wf_id = Uuid::new_v4(); let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name, memo_upserter); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/nexus.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/nexus.rs index 708123408..5cdbd0231 100644 --- 
a/crates/sdk-core/tests/integ_tests/workflow_tests/nexus.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/nexus.rs @@ -58,7 +58,7 @@ async fn nexus_basic( ) { let wf_name = "nexus_basic"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes { + starter.sdk_config.task_types = WorkerTaskTypes { enable_workflows: true, enable_local_activities: false, enable_remote_activities: false, @@ -208,7 +208,7 @@ async fn nexus_async( ) { let wf_name = "nexus_async"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes { + starter.sdk_config.task_types = WorkerTaskTypes { enable_workflows: true, enable_local_activities: false, enable_remote_activities: false, @@ -440,7 +440,7 @@ async fn nexus_async( async fn nexus_cancel_before_start() { let wf_name = "nexus_cancel_before_start"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes { + starter.sdk_config.task_types = WorkerTaskTypes { enable_workflows: true, enable_local_activities: false, enable_remote_activities: false, @@ -487,14 +487,14 @@ async fn nexus_cancel_before_start() { async fn nexus_must_complete_task_to_shutdown(#[values(true, false)] use_grace_period: bool) { let wf_name = "nexus_must_complete_task_to_shutdown"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes { + starter.sdk_config.task_types = WorkerTaskTypes { enable_workflows: true, enable_local_activities: false, enable_remote_activities: false, enable_nexus: true, }; if use_grace_period { - starter.worker_config.graceful_shutdown_period = Some(Duration::from_millis(500)); + starter.sdk_config.graceful_shutdown_period = Some(Duration::from_millis(500)); } let mut worker = starter.worker().await; let core_worker = starter.get_worker().await; @@ -590,7 +590,7 @@ async fn nexus_cancellation_types( ) { let wf_name = "nexus_cancellation_types"; let mut starter = 
CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes { + starter.sdk_config.task_types = WorkerTaskTypes { enable_workflows: true, enable_local_activities: false, enable_remote_activities: false, diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/patches.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/patches.rs index 18e6d78b4..c80e6fb55 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/patches.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/patches.rs @@ -31,7 +31,11 @@ use temporalio_common::protos::{ }; use temporalio_common::worker::WorkerTaskTypes; -use temporalio_sdk::{ActivityOptions, WfContext, WorkflowResult}; +use temporalio_macros::activities; +use temporalio_sdk::{ + ActivityOptions, WfContext, WorkflowResult, + activities::{ActivityContext, ActivityError}, +}; use temporalio_sdk_core::test_help::{CoreInternalFlags, MockPollCfg, ResponseType}; use tokio::{join, sync::Notify}; use tokio_stream::StreamExt; @@ -57,7 +61,7 @@ pub(crate) async fn changes_wf(ctx: WfContext) -> WorkflowResult<()> { async fn writes_change_markers() { let wf_name = "writes_change_markers"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), changes_wf); @@ -91,7 +95,7 @@ pub(crate) async fn no_change_then_change_wf(ctx: WfContext) -> WorkflowResult<( async fn can_add_change_markers() { let wf_name = "can_add_change_markers"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), no_change_then_change_wf); @@ -115,7 +119,7 @@ pub(crate) async fn replay_with_change_marker_wf(ctx: WfContext) -> 
WorkflowResu async fn replaying_with_patch_marker() { let wf_name = "replaying_with_patch_marker"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), replay_with_change_marker_wf); @@ -131,8 +135,8 @@ async fn patched_on_second_workflow_task_is_deterministic() { let wf_name = "timer_patched_timer"; let mut starter = CoreWfStarter::new(wf_name); // Disable caching to force replay from beginning - starter.worker_config.max_cached_workflows = 0_usize; - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.max_cached_workflows = 0_usize; + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; // Include a task failure as well to make sure that works static FAIL_ONCE: AtomicBool = AtomicBool::new(true); @@ -155,7 +159,7 @@ async fn patched_on_second_workflow_task_is_deterministic() { async fn can_remove_deprecated_patch_near_other_patch() { let wf_name = "can_add_change_markers"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let did_die = Arc::new(AtomicBool::new(false)); worker.register_wf(wf_name.to_owned(), move |ctx: WfContext| { @@ -186,7 +190,7 @@ async fn can_remove_deprecated_patch_near_other_patch() { async fn deprecated_patch_removal() { let wf_name = "deprecated_patch_removal"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let client = starter.get_client().await; let wf_id = 
starter.get_task_queue().to_string(); @@ -312,27 +316,51 @@ fn patch_marker_single_activity( t } +struct FakeAct; +#[activities] +impl FakeAct { + #[activity(name = "")] + fn nameless(_: ActivityContext) -> Result<(), ActivityError> { + unimplemented!() + } +} + async fn v1(ctx: &mut WfContext) { - ctx.activity(ActivityOptions { - activity_id: Some("no_change".to_owned()), - ..Default::default() - }) + ctx.start_activity( + FakeAct::nameless, + (), + ActivityOptions { + activity_id: Some("no_change".to_owned()), + ..Default::default() + }, + ) + .unwrap() .await; } async fn v2(ctx: &mut WfContext) -> bool { if ctx.patched(MY_PATCH_ID) { - ctx.activity(ActivityOptions { - activity_id: Some("had_change".to_owned()), - ..Default::default() - }) + ctx.start_activity( + FakeAct::nameless, + (), + ActivityOptions { + activity_id: Some("had_change".to_owned()), + ..Default::default() + }, + ) + .unwrap() .await; true } else { - ctx.activity(ActivityOptions { - activity_id: Some("no_change".to_owned()), - ..Default::default() - }) + ctx.start_activity( + FakeAct::nameless, + (), + ActivityOptions { + activity_id: Some("no_change".to_owned()), + ..Default::default() + }, + ) + .unwrap() .await; false } @@ -340,18 +368,28 @@ async fn v2(ctx: &mut WfContext) -> bool { async fn v3(ctx: &mut WfContext) { ctx.deprecate_patch(MY_PATCH_ID); - ctx.activity(ActivityOptions { - activity_id: Some("had_change".to_owned()), - ..Default::default() - }) + ctx.start_activity( + FakeAct::nameless, + (), + ActivityOptions { + activity_id: Some("had_change".to_owned()), + ..Default::default() + }, + ) + .unwrap() .await; } async fn v4(ctx: &mut WfContext) { - ctx.activity(ActivityOptions { - activity_id: Some("had_change".to_owned()), - ..Default::default() - }) + ctx.start_activity( + FakeAct::nameless, + (), + ActivityOptions { + activity_id: Some("had_change".to_owned()), + ..Default::default() + }, + ) + .unwrap() .await; } @@ -642,13 +680,15 @@ async fn same_change_multiple_spots(#[case] 
have_marker_in_hist: bool, #[case] r let mut worker = build_fake_sdk(mock_cfg); worker.register_wf(DEFAULT_WORKFLOW_TYPE, move |ctx: WfContext| async move { if ctx.patched(MY_PATCH_ID) { - ctx.activity(ActivityOptions::default()).await; + ctx.start_activity(FakeAct::nameless, (), ActivityOptions::default())? + .await; } else { ctx.timer(ONE_SECOND).await; } ctx.timer(ONE_SECOND).await; if ctx.patched(MY_PATCH_ID) { - ctx.activity(ActivityOptions::default()).await; + ctx.start_activity(FakeAct::nameless, (), ActivityOptions::default())? + .await; } else { ctx.timer(ONE_SECOND).await; } diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/resets.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/resets.rs index 034a060b6..d75d2429d 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/resets.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/resets.rs @@ -1,7 +1,4 @@ -use crate::{ - common::{CoreWfStarter, NAMESPACE}, - integ_tests::activity_functions::echo, -}; +use crate::common::{CoreWfStarter, NAMESPACE, activity_functions::StdActivities}; use futures_util::StreamExt; use std::{ sync::{ @@ -11,11 +8,8 @@ use std::{ time::Duration, }; use temporalio_client::{WfClientExt, WorkflowClientTrait, WorkflowOptions, WorkflowService}; -use temporalio_common::protos::{ - coresdk::AsJsonPayloadExt, - temporal::api::{ - common::v1::WorkflowExecution, workflowservice::v1::ResetWorkflowExecutionRequest, - }, +use temporalio_common::protos::temporal::api::{ + common::v1::WorkflowExecution, workflowservice::v1::ResetWorkflowExecutionRequest, }; use temporalio_common::worker::WorkerTaskTypes; @@ -29,7 +23,7 @@ const POST_RESET_SIG: &str = "post-reset"; async fn reset_workflow() { let wf_name = "reset_me_wf"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.fetch_results = 
false; let notify = Arc::new(Notify::new()); @@ -114,7 +108,7 @@ async fn reset_workflow() { async fn reset_randomseed() { let wf_name = "reset_randomseed"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes { + starter.sdk_config.task_types = WorkerTaskTypes { enable_workflows: true, enable_local_activities: true, enable_remote_activities: false, @@ -153,11 +147,11 @@ async fn reset_randomseed() { if RAND_SEED.load(Ordering::Relaxed) == ctx.random_seed() { ctx.timer(Duration::from_millis(100)).await; } else { - ctx.local_activity(LocalActivityOptions { - activity_type: "echo".to_string(), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_local_activity( + StdActivities::echo, + "hi!".to_string(), + LocalActivityOptions::default(), + )? .await; } // Wait for the post-task-fail signal @@ -172,7 +166,7 @@ async fn reset_randomseed() { Ok(().into()) } }); - worker.register_activity("echo", echo); + worker.register_activities(StdActivities); let run_id = worker .submit_wf( diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/signals.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/signals.rs index fabe71fbc..49fc43049 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/signals.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/signals.rs @@ -48,7 +48,7 @@ async fn signal_sender(ctx: WfContext) -> WorkflowResult<()> { async fn sends_signal_to_missing_wf() { let wf_name = "sends_signal_to_missing_wf"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), signal_sender); @@ -87,7 +87,7 @@ async fn signal_with_create_wf_receiver(ctx: WfContext) -> WorkflowResult<()> { #[tokio::test] async fn sends_signal_to_other_wf() { let mut starter = 
CoreWfStarter::new("sends_signal_to_other_wf"); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf("sender", signal_sender); worker.register_wf("receiver", signal_receiver); @@ -116,7 +116,7 @@ async fn sends_signal_to_other_wf() { #[tokio::test] async fn sends_signal_with_create_wf() { let mut starter = CoreWfStarter::new("sends_signal_with_create_wf"); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf("receiver_signal", signal_with_create_wf_receiver); @@ -161,7 +161,7 @@ async fn signals_child(ctx: WfContext) -> WorkflowResult<()> { #[tokio::test] async fn sends_signal_to_child() { let mut starter = CoreWfStarter::new("sends_signal_to_child"); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf("child_signaler", signals_child); worker.register_wf("child_receiver", signal_receiver); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs index dd7975571..4eec1bd5c 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs @@ -1,20 +1,23 @@ use crate::{common::CoreWfStarter, integ_tests::workflow_tests::timers::timer_wf}; use std::{ - sync::atomic::{AtomicBool, AtomicUsize, Ordering}, + sync::{ + Arc, + atomic::{AtomicBool, AtomicUsize, Ordering}, + }, time::Duration, }; use temporalio_client::WorkflowOptions; use temporalio_common::worker::WorkerTaskTypes; use temporalio_sdk::{WfContext, WorkflowResult}; -use temporalio_sdk_core::PollerBehavior; +use 
temporalio_sdk_core::{PollerBehavior, TunerHolder}; use tokio::sync::Barrier; #[tokio::test] async fn timer_workflow_not_sticky() { let wf_name = "timer_wf_not_sticky"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); - starter.worker_config.max_cached_workflows = 0_usize; + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.max_cached_workflows = 0_usize; let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), timer_wf); @@ -41,7 +44,7 @@ async fn timer_workflow_timeout_on_sticky() { // on a not-sticky queue let wf_name = "timer_workflow_timeout_on_sticky"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); starter.workflow_options.task_timeout = Some(Duration::from_secs(2)); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), timer_timeout_wf); @@ -56,10 +59,10 @@ async fn timer_workflow_timeout_on_sticky() { async fn cache_miss_ok() { let wf_name = "cache_miss_ok"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); - starter.worker_config.max_outstanding_workflow_tasks = Some(2_usize); - starter.worker_config.max_cached_workflows = 0_usize; - starter.worker_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(1_usize); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(2, 1, 1, 1)); + starter.sdk_config.max_cached_workflows = 0_usize; + starter.sdk_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(1_usize); let mut worker = starter.worker().await; let barr: &'static Barrier = Box::leak(Box::new(Barrier::new(2))); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/timers.rs 
b/crates/sdk-core/tests/integ_tests/workflow_tests/timers.rs index 5c0cbab57..a5ec217a5 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/timers.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/timers.rs @@ -28,7 +28,7 @@ pub(crate) async fn timer_wf(command_sink: WfContext) -> WorkflowResult<()> { async fn timer_workflow_workflow_driver() { let wf_name = "timer_wf_new"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), timer_wf); @@ -40,7 +40,7 @@ async fn timer_workflow_workflow_driver() { async fn timer_workflow_manual() { let mut starter = init_core_and_create_wf("timer_workflow").await; let core = starter.get_worker().await; - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let task = core.poll_workflow_activation().await.unwrap(); core.complete_workflow_activation(WorkflowActivationCompletion::from_cmds( task.run_id, @@ -64,7 +64,7 @@ async fn timer_workflow_manual() { async fn timer_cancel_workflow() { let mut starter = init_core_and_create_wf("timer_cancel_workflow").await; let core = starter.get_worker().await; - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let task = core.poll_workflow_activation().await.unwrap(); core.complete_workflow_activation(WorkflowActivationCompletion::from_cmds( task.run_id, @@ -123,7 +123,7 @@ async fn parallel_timer_wf(command_sink: WfContext) -> WorkflowResult<()> { async fn parallel_timers() { let wf_name = "parallel_timers"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut 
worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), parallel_timer_wf); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs index 81ebdc7a7..df068fe2b 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs @@ -47,7 +47,7 @@ async fn sends_upsert() { let wf_name = "sends_upsert_search_attrs"; let wf_id = Uuid::new_v4(); let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name, search_attr_updater); diff --git a/crates/sdk-core/tests/main.rs b/crates/sdk-core/tests/main.rs index d9c0d5b56..0abe176ff 100644 --- a/crates/sdk-core/tests/main.rs +++ b/crates/sdk-core/tests/main.rs @@ -12,7 +12,6 @@ mod shared_tests; #[cfg(test)] mod integ_tests { - mod activity_functions; mod client_tests; mod ephemeral_server_tests; mod heartbeat_tests; diff --git a/crates/sdk-core/tests/manual_tests.rs b/crates/sdk-core/tests/manual_tests.rs index 7c15450b2..ee4dfb759 100644 --- a/crates/sdk-core/tests/manual_tests.rs +++ b/crates/sdk-core/tests/manual_tests.rs @@ -17,19 +17,33 @@ use rand::{Rng, SeedableRng}; use std::{ mem, net::SocketAddr, + sync::Arc, time::{Duration, Instant}, }; use temporalio_client::{ GetWorkflowResultOptions, WfClientExt, WorkflowClientTrait, WorkflowOptions, }; -use temporalio_common::{ - protos::coresdk::AsJsonPayloadExt, telemetry::PrometheusExporterOptions, - worker::WorkerTaskTypes, +use temporalio_common::{telemetry::PrometheusExporterOptions, worker::WorkerTaskTypes}; +use temporalio_macros::activities; +use temporalio_sdk::{ + ActivityOptions, WfContext, + activities::{ActivityContext, ActivityError}, }; -use 
temporalio_sdk::{ActivityOptions, WfContext, activities::ActivityContext}; -use temporalio_sdk_core::{CoreRuntime, PollerBehavior}; +use temporalio_sdk_core::{CoreRuntime, PollerBehavior, TunerHolder}; use tracing::info; +struct JitteryEchoActivities; +#[activities] +impl JitteryEchoActivities { + #[activity] + async fn echo(_ctx: ActivityContext, echo: String) -> Result { + // Add some jitter to completions + let rand_millis = rand::rng().random_range(0..500); + tokio::time::sleep(Duration::from_millis(rand_millis)).await; + Ok(echo) + } +} + #[tokio::test] async fn poller_load_spiky() { const SIGNAME: &str = "signame"; @@ -47,33 +61,37 @@ async fn poller_load_spiky() { }; let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let mut starter = CoreWfStarter::new_with_runtime("poller_load", rt); - starter.worker_config.max_cached_workflows = 5000; - starter.worker_config.max_outstanding_workflow_tasks = Some(1000); - starter.worker_config.max_outstanding_activities = Some(1000); - starter.worker_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { + starter.sdk_config.max_cached_workflows = 5000; + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(1000, 1000, 100, 100)); + starter.sdk_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { minimum: 1, maximum: 200, initial: 5, }; - starter.worker_config.activity_task_poller_behavior = PollerBehavior::Autoscaling { + starter.sdk_config.activity_task_poller_behavior = PollerBehavior::Autoscaling { minimum: 1, maximum: 200, initial: 5, }; let mut worker = starter.worker().await; let submitter = worker.get_submitter_handle(); + + worker.register_activities(JitteryEchoActivities); worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { let sigchan = ctx.make_signal_channel(SIGNAME).map(Ok); let drained_fut = sigchan.forward(sink::drain()); let real_stuff = async move { for _ in 0..5 { - ctx.activity(ActivityOptions { - activity_type: 
"echo".to_string(), - start_to_close_timeout: Some(Duration::from_secs(5)), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_activity( + JitteryEchoActivities::echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + ) + .unwrap() .await; } }; @@ -84,12 +102,6 @@ async fn poller_load_spiky() { Ok(().into()) }); - worker.register_activity("echo", |_: ActivityContext, echo: String| async move { - // Add some jitter to completions - let rand_millis = rand::rng().random_range(0..500); - tokio::time::sleep(Duration::from_millis(rand_millis)).await; - Ok(echo) - }); let client = starter.get_client().await; info!("Prom bound to {:?}", addr); @@ -203,14 +215,14 @@ async fn poller_load_sustained() { }; let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let mut starter = CoreWfStarter::new_with_runtime("poller_load", rt); - starter.worker_config.max_cached_workflows = 5000; - starter.worker_config.max_outstanding_workflow_tasks = Some(1000); - starter.worker_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { + starter.sdk_config.max_cached_workflows = 5000; + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(1000, 100, 100, 100)); + starter.sdk_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { minimum: 1, maximum: 200, initial: 5, }; - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { let sigchan = ctx.make_signal_channel(SIGNAME).map(Ok); @@ -291,32 +303,37 @@ async fn poller_load_spike_then_sustained() { }; let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let mut starter = CoreWfStarter::new_with_runtime("poller_load", rt); - 
starter.worker_config.max_cached_workflows = 5000; - starter.worker_config.max_outstanding_workflow_tasks = Some(1000); - starter.worker_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { + starter.sdk_config.max_cached_workflows = 5000; + starter.sdk_config.tuner = Arc::new(TunerHolder::fixed_size(1000, 100, 100, 100)); + starter.sdk_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { minimum: 1, maximum: 200, initial: 5, }; - starter.worker_config.activity_task_poller_behavior = PollerBehavior::Autoscaling { + starter.sdk_config.activity_task_poller_behavior = PollerBehavior::Autoscaling { minimum: 1, maximum: 200, initial: 5, }; let mut worker = starter.worker().await; let submitter = worker.get_submitter_handle(); + + worker.register_activities(JitteryEchoActivities); worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { let sigchan = ctx.make_signal_channel(SIGNAME).map(Ok); let drained_fut = sigchan.forward(sink::drain()); let real_stuff = async move { for _ in 0..5 { - ctx.activity(ActivityOptions { - activity_type: "echo".to_string(), - start_to_close_timeout: Some(Duration::from_secs(5)), - input: "hi!".as_json_payload().expect("serializes fine"), - ..Default::default() - }) + ctx.start_activity( + JitteryEchoActivities::echo, + "hi!".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + ..Default::default() + }, + ) + .unwrap() .await; } }; @@ -327,12 +344,6 @@ async fn poller_load_spike_then_sustained() { Ok(().into()) }); - worker.register_activity("echo", |_: ActivityContext, echo: String| async move { - // Add some jitter to completions - let rand_millis = rand::rng().random_range(0..500); - tokio::time::sleep(Duration::from_millis(rand_millis)).await; - Ok(echo) - }); let client = starter.get_client().await; info!("Prom bound to {:?}", addr); diff --git a/crates/sdk-core/tests/shared_tests/mod.rs b/crates/sdk-core/tests/shared_tests/mod.rs index 
e89e47d8b..772317dda 100644 --- a/crates/sdk-core/tests/shared_tests/mod.rs +++ b/crates/sdk-core/tests/shared_tests/mod.rs @@ -20,7 +20,7 @@ pub(crate) async fn grpc_message_too_large() { let mut starter = CoreWfStarter::new_cloud_or_local(wf_name, "") .await .unwrap(); - starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.sdk_config.task_types = WorkerTaskTypes::workflow_only(); let mut core = starter.worker().await; static OVERSIZE_GRPC_MESSAGE_RUN: AtomicBool = AtomicBool::new(false); diff --git a/crates/sdk-core/tests/shared_tests/priority.rs b/crates/sdk-core/tests/shared_tests/priority.rs index 6e4253e83..bd2679d26 100644 --- a/crates/sdk-core/tests/shared_tests/priority.rs +++ b/crates/sdk-core/tests/shared_tests/priority.rs @@ -3,12 +3,11 @@ use std::time::Duration; use temporalio_client::{ GetWorkflowResultOptions, Priority, WfClientExt, WorkflowClientTrait, WorkflowOptions, }; -use temporalio_common::protos::{ - coresdk::AsJsonPayloadExt, - temporal::api::{common, history::v1::history_event::Attributes}, -}; +use temporalio_common::protos::temporal::api::{common, history::v1::history_event::Attributes}; +use temporalio_macros::activities; use temporalio_sdk::{ - ActivityOptions, ChildWorkflowOptions, WfContext, activities::ActivityContext, + ActivityOptions, ChildWorkflowOptions, WfContext, + activities::{ActivityContext, ActivityError}, }; pub(crate) async fn priority_values_sent_to_server() { @@ -27,6 +26,24 @@ pub(crate) async fn priority_values_sent_to_server() { let mut worker = starter.worker().await; let child_type = "child-wf"; + struct PriorityActivities; + #[activities] + impl PriorityActivities { + #[activity] + async fn echo(ctx: ActivityContext, echo_me: String) -> Result { + assert_eq!( + ctx.get_info().priority, + Priority { + priority_key: 5, + fairness_key: "fair-act".to_string(), + fairness_weight: 1.1 + } + ); + Ok(echo_me) + } + } + + worker.register_activities(PriorityActivities); 
worker.register_wf(starter.get_task_queue(), move |ctx: WfContext| async move { let child = ctx.child_workflow(ChildWorkflowOptions { workflow_id: format!("{}-child", ctx.task_queue()), @@ -47,19 +64,23 @@ pub(crate) async fn priority_values_sent_to_server() { .await .into_started() .expect("Child should start OK"); - let activity = ctx.activity(ActivityOptions { - activity_type: "echo".to_owned(), - input: "hello".as_json_payload().unwrap(), - start_to_close_timeout: Some(Duration::from_secs(5)), - priority: Some(Priority { - priority_key: 5, - fairness_key: "fair-act".to_string(), - fairness_weight: 1.1, - }), - // Currently no priority info attached to eagerly run activities - do_not_eagerly_execute: true, - ..Default::default() - }); + let activity = ctx + .start_activity( + PriorityActivities::echo, + "hello".to_string(), + ActivityOptions { + start_to_close_timeout: Some(Duration::from_secs(5)), + priority: Some(Priority { + priority_key: 5, + fairness_key: "fair-act".to_string(), + fairness_weight: 1.1, + }), + // Currently no priority info attached to eagerly run activities + do_not_eagerly_execute: true, + ..Default::default() + }, + ) + .unwrap(); started.result().await; activity.await.unwrap_ok_payload(); Ok(().into()) @@ -75,17 +96,6 @@ pub(crate) async fn priority_values_sent_to_server() { ); Ok(().into()) }); - worker.register_activity("echo", |ctx: ActivityContext, echo_me: String| async move { - assert_eq!( - ctx.get_info().priority, - Priority { - priority_key: 5, - fairness_key: "fair-act".to_string(), - fairness_weight: 1.1 - } - ); - Ok(echo_me) - }); starter .start_with_worker(starter.get_task_queue(), &mut worker) diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml index 5d55226e6..768fe5c95 100644 --- a/crates/sdk/Cargo.toml +++ b/crates/sdk/Cargo.toml @@ -29,6 +29,7 @@ tokio = { version = "1.47", features = [ tokio-util = { version = "0.7" } tokio-stream = "0.1" tracing = "0.1" +uuid = { version = "1.18", features = ["v4"] } 
[dependencies.temporalio-sdk-core] path = "../sdk-core" @@ -43,13 +44,13 @@ version = "0.1" path = "../client" version = "0.1" -[dev-dependencies] -futures = "0.3" - -[dev-dependencies.temporalio-macros] +[dependencies.temporalio-macros] path = "../macros" version = "0.1" +[dev-dependencies] +futures = "0.3" + [features] default = [] antithesis_assertions = ["temporalio-sdk-core/antithesis_assertions"] diff --git a/crates/sdk/src/activities.rs b/crates/sdk/src/activities.rs index 9df1ce997..985339f5f 100644 --- a/crates/sdk/src/activities.rs +++ b/crates/sdk/src/activities.rs @@ -1,18 +1,67 @@ //! Functionality related to defining and interacting with activities +//! +//! +//! An example of defining an activity: +//! ``` +//! use std::sync::{ +//! Arc, +//! atomic::{AtomicUsize, Ordering}, +//! }; +//! use temporalio_macros::activities; +//! use temporalio_sdk::activities::{ActivityContext, ActivityError}; +//! +//! struct MyActivities { +//! counter: AtomicUsize, +//! } +//! +//! #[activities] +//! impl MyActivities { +//! #[activity] +//! async fn echo(_ctx: ActivityContext, e: String) -> Result { +//! Ok(e) +//! } +//! +//! #[activity] +//! async fn uses_self(self: Arc, _ctx: ActivityContext) -> Result<(), ActivityError> { +//! self.counter.fetch_add(1, Ordering::Relaxed); +//! Ok(()) +//! } +//! } +//! +//! // If you need to refer to an activity that is defined externally, in a different codebase or +//! // possibly a differenet language, you can simply leave the function body unimplemented like so: +//! +//! struct ExternalActivities; +//! #[activities] +//! impl ExternalActivities { +//! #[activity(name = "foo")] +//! async fn foo(_ctx: ActivityContext, _: String) -> Result { +//! unimplemented!() +//! } +//! } +//! ``` +//! +//! This will allows you to call the activity from workflow code still, but the actual function +//! will never be invoked, since you won't have registered it with the worker. 
-use crate::{WorkerOptionsBuilder, app_data::AppData, worker_options_builder}; +#[doc(inline)] +pub use temporalio_macros::activities; -use futures_util::future::BoxFuture; +use crate::app_data::AppData; +use futures_util::{FutureExt, future::BoxFuture}; use prost_types::{Duration, Timestamp}; use std::{ collections::HashMap, + fmt::Debug, sync::Arc, time::{Duration as StdDuration, SystemTime}, }; use temporalio_client::Priority; use temporalio_common::{ ActivityDefinition, - data_converters::{PayloadConversionError, PayloadConverter}, + data_converters::{ + GenericPayloadConverter, PayloadConversionError, PayloadConverter, SerializationContext, + }, protos::{ coresdk::{ActivityHeartbeat, activity_task}, temporal::api::common::v1::{Payload, RetryPolicy, WorkflowExecution}, @@ -192,8 +241,7 @@ pub struct ActivityInfo { pub priority: Priority, } -// TODO [rust-sdk-branch]: Remove anyhow from public interfaces -/// Returned as errors from activity functions +/// Returned as errors from activity functions. #[derive(Debug)] pub enum ActivityError { /// This error can be returned from activities to allow the explicit configuration of certain @@ -201,7 +249,7 @@ pub enum ActivityError { /// into. Retryable { /// The underlying error - source: anyhow::Error, + source: Box, /// If specified, the next retry (if there is one) will occur after this delay explicit_delay: Option, }, @@ -211,7 +259,10 @@ pub enum ActivityError { details: Option, }, /// Return this error to indicate that the activity should not be retried. - NonRetryable(anyhow::Error), + NonRetryable(Box), + /// Return this error to indicate that the activity will be completed outside of this activity + /// definition, by an external client. 
+ WillCompleteAsync, } impl ActivityError { @@ -227,7 +278,7 @@ where { fn from(source: E) -> Self { Self::Retryable { - source: source.into(), + source: source.into().into_boxed_dyn_error(), explicit_delay: None, } } @@ -295,29 +346,25 @@ fn maybe_convert_timestamp(timestamp: &Timestamp) -> Option { }) } -pub(crate) type ActivityInvocation = Box< +pub(crate) type ActivityInvocation = Arc< dyn Fn( - Payload, - PayloadConverter, - ActivityContext, - ) - -> Result>, PayloadConversionError>, + Payload, + PayloadConverter, + ActivityContext, + ) + -> Result>, PayloadConversionError> + + Send + + Sync, >; #[doc(hidden)] pub trait ActivityImplementer { - fn register_all_static( - worker_options: &mut WorkerOptionsBuilder, - ); - fn register_all_instance( - self: Arc, - worker_options: &mut WorkerOptionsBuilder, - ); + fn register_all(self: Arc, defs: &mut ActivityDefinitions); } #[doc(hidden)] pub trait ExecutableActivity: ActivityDefinition { - type Implementer: ActivityImplementer + 'static; + type Implementer: ActivityImplementer + Send + Sync + 'static; fn execute( receiver: Option>, ctx: ActivityContext, @@ -327,3 +374,56 @@ pub trait ExecutableActivity: ActivityDefinition { #[doc(hidden)] pub trait HasOnlyStaticMethods {} + +/// Contains activity registrations in a form ready for execution by workers. +#[derive(Default, Clone)] +pub struct ActivityDefinitions { + activities: HashMap<&'static str, ActivityInvocation>, +} + +impl ActivityDefinitions { + /// Registers all activities on an activity implementer. + pub fn register_activities(&mut self, instance: AI) -> &mut Self { + let arcd = Arc::new(instance); + AI::register_all(arcd, self); + self + } + /// Registers a specific activitiy. 
+ pub fn register_activity( + &mut self, + instance: Arc, + ) -> &mut Self { + self.activities.insert( + AD::name(), + Arc::new(move |p, pc, c| { + let deserialized = pc.from_payload(&SerializationContext::Activity, p)?; + let pc2 = pc.clone(); + Ok(AD::execute(Some(instance.clone()), c, deserialized) + .map(move |v| match v { + Ok(okv) => pc2 + .to_payload(&SerializationContext::Activity, &okv) + .map_err(|e| e.into()), + Err(e) => Err(e), + }) + .boxed()) + }), + ); + self + } + + pub(crate) fn is_empty(&self) -> bool { + self.activities.is_empty() + } + + pub(crate) fn get(&self, act_type: &str) -> Option { + self.activities.get(act_type).cloned() + } +} + +impl Debug for ActivityDefinitions { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ActivityDefinitions") + .field("activities", &self.activities.keys()) + .finish() + } +} diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index 1eb00d6ab..c8d168156 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -8,40 +8,60 @@ //! //! An example of running an activity worker: //! ```no_run -//! use std::{str::FromStr, sync::Arc}; -//! use temporalio_client::{ConnectionOptions, ClientOptions, Connection, Client}; -//! use temporalio_sdk::{activities::ActivityContext, Worker}; -//! use temporalio_sdk_core::{init_worker, Url, CoreRuntime, RuntimeOptions, WorkerConfig, WorkerVersioningStrategy }; +//! use std::str::FromStr; +//! use temporalio_client::{Client, ClientOptions, Connection, ConnectionOptions}; //! use temporalio_common::{ -//! worker::WorkerTaskTypes, -//! telemetry::TelemetryOptions +//! telemetry::TelemetryOptions, +//! worker::{WorkerDeploymentOptions, WorkerDeploymentVersion, WorkerTaskTypes}, //! }; +//! use temporalio_macros::activities; +//! use temporalio_sdk::{ +//! Worker, WorkerOptions, +//! activities::{ActivityContext, ActivityError}, +//! }; +//! use temporalio_sdk_core::{CoreRuntime, RuntimeOptions, Url}; +//! +//! 
struct MyActivities; +//! +//! #[activities] +//! impl MyActivities { +//! #[activity] +//! pub(crate) async fn echo( +//! _ctx: ActivityContext, +//! e: String, +//! ) -> Result { +//! Ok(e) +//! } +//! } //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { -//! let connection_options = ConnectionOptions::new(Url::from_str("http://localhost:7233")?).build(); +//! let connection_options = +//! ConnectionOptions::new(Url::from_str("http://localhost:7233")?).build(); //! let telemetry_options = TelemetryOptions::builder().build(); -//! let runtime_options = RuntimeOptions::builder().telemetry_options(telemetry_options).build().unwrap(); +//! let runtime_options = RuntimeOptions::builder() +//! .telemetry_options(telemetry_options) +//! .build() +//! .unwrap(); //! let runtime = CoreRuntime::new_assume_tokio(runtime_options)?; //! //! let connection = Connection::connect(connection_options).await?; +//! let client = Client::new(connection, ClientOptions::new("my_namespace").build()); //! -//! let worker_config = WorkerConfig::builder() -//! .namespace("default") -//! .task_queue("task_queue") +//! let worker_options = WorkerOptions::new("task_queue") //! .task_types(WorkerTaskTypes::activity_only()) -//! .versioning_strategy(WorkerVersioningStrategy::None { build_id: "rust-sdk".to_owned() }) -//! .build() -//! .unwrap(); -//! -//! let core_worker = init_worker(&runtime, worker_config, connection)?; -//! -//! let mut worker = Worker::new_from_core(Arc::new(core_worker), "task_queue"); -//! worker.register_activity( -//! "echo_activity", -//! |_ctx: ActivityContext, echo_me: String| async move { Ok(echo_me) }, -//! ); +//! .deployment_options(WorkerDeploymentOptions { +//! version: WorkerDeploymentVersion { +//! deployment_name: "my_deployment".to_owned(), +//! build_id: "my_build_id".to_owned(), +//! }, +//! use_worker_versioning: false, +//! default_versioning_behavior: None, +//! }) +//! .register_activities(MyActivities) +//! .build(); //! +//! 
let mut worker = Worker::new(&runtime, client, worker_options)?; //! worker.run().await?; //! //! Ok(()) @@ -59,7 +79,6 @@ mod workflow_context; mod workflow_future; pub use temporalio_client::Namespace; -use tracing::{Instrument, Span, field}; pub use workflow_context::{ ActivityOptions, CancellableFuture, ChildWorkflow, ChildWorkflowOptions, LocalActivityOptions, NexusOperationOptions, PendingChildWorkflow, Signal, SignalData, SignalWorkflowOptions, @@ -68,8 +87,8 @@ pub use workflow_context::{ use crate::{ activities::{ - ActivityContext, ActivityError, ActivityImplementer, ActivityInvocation, - ExecutableActivity, HasOnlyStaticMethods, + ActivityContext, ActivityDefinitions, ActivityError, ActivityImplementer, + ExecutableActivity, }, interceptors::WorkerInterceptor, workflow_context::{ChildWfCommon, NexusUnblockData, StartedNexusOperation}, @@ -81,16 +100,17 @@ use serde::Serialize; use std::{ any::{Any, TypeId}, cell::RefCell, - collections::HashMap, + collections::{HashMap, HashSet}, fmt::{Debug, Display, Formatter}, future::Future, panic::AssertUnwindSafe, sync::Arc, + time::Duration, }; -use temporalio_client::{ConnectionOptions, ConnectionOptionsBuilder, connection_options_builder}; +use temporalio_client::{Client, NamespacedClient}; use temporalio_common::{ ActivityDefinition, - data_converters::{GenericPayloadConverter, SerializationContext}, + data_converters::PayloadConverter, protos::{ TaskToken, coresdk::{ @@ -116,8 +136,12 @@ use temporalio_common::{ failure::v1::{Failure, failure}, }, }, + worker::{WorkerDeploymentOptions, WorkerTaskTypes, build_id_from_current_exe}, +}; +use temporalio_sdk_core::{ + CoreRuntime, PollError, PollerBehavior, TunerBuilder, Worker as CoreWorker, WorkerConfig, + WorkerTuner, WorkerVersioningStrategy, WorkflowErrorType, init_worker, }; -use temporalio_sdk_core::{PollError, Url, Worker as CoreWorker}; use tokio::{ sync::{ Notify, @@ -128,98 +152,173 @@ use tokio::{ }; use tokio_stream::wrappers::UnboundedReceiverStream; 
use tokio_util::sync::CancellationToken; +use tracing::{Instrument, Span, field}; +use uuid::Uuid; -const VERSION: &str = env!("CARGO_PKG_VERSION"); - -// TODO [rust-sdk-branch]: Stubbed while working on macros -#[allow(unused)] /// Contains options for configuring a worker. -#[derive(bon::Builder)] -#[builder(state_mod(vis = "pub"))] +#[derive(bon::Builder, Clone)] +#[builder(start_fn = new, on(String, into), state_mod(vis = "pub"))] #[non_exhaustive] pub struct WorkerOptions { + /// What task queue will this worker poll from? This task queue name will be used for both + /// workflow and activity polling. + #[builder(start_fn)] + pub task_queue: String, + #[builder(field)] - activities: HashMap<&'static str, ActivityInvocation>, + activities: ActivityDefinitions, + + /// Set the deployment options for this worker. Defaults to a hash of the currently running + /// executable. + #[builder(default = def_build_id())] + pub deployment_options: WorkerDeploymentOptions, + /// A human-readable string that can identify this worker. Using something like sdk version + /// and host name is a good default. If set, overrides the identity set (if any) on the client + /// used by this worker. + pub client_identity_override: Option, + /// If set nonzero, workflows will be cached and sticky task queues will be used, meaning that + /// history updates are applied incrementally to suspended instances of workflow execution. + /// Workflows are evicted according to a least-recently-used policy once the cache maximum is + /// reached. Workflows may also be explicitly evicted at any time, or as a result of errors + /// or failures. + #[builder(default = 1000)] + pub max_cached_workflows: usize, + /// Set a [crate::WorkerTuner] for this worker, which controls how many slots are available for + /// the different kinds of tasks. 
+ #[builder(default = Arc::new(TunerBuilder::default().build()))] + pub tuner: Arc, + /// Controls how polling for Workflow tasks will happen on this worker's task queue. See also + /// [WorkerConfig::nonsticky_to_sticky_poll_ratio]. If using SimpleMaximum, Must be at least 2 + /// when `max_cached_workflows` > 0, or is an error. + #[builder(default = PollerBehavior::SimpleMaximum(5))] + pub workflow_task_poller_behavior: PollerBehavior, + /// Only applies when using [PollerBehavior::SimpleMaximum] + /// + /// (max workflow task polls * this number) = the number of max pollers that will be allowed for + /// the nonsticky queue when sticky tasks are enabled. If both defaults are used, the sticky + /// queue will allow 4 max pollers while the nonsticky queue will allow one. The minimum for + /// either poller is 1, so if the maximum allowed is 1 and sticky queues are enabled, there will + /// be 2 concurrent polls. + #[builder(default = 0.2)] + pub nonsticky_to_sticky_poll_ratio: f32, + /// Controls how polling for Activity tasks will happen on this worker's task queue. + #[builder(default = PollerBehavior::SimpleMaximum(5))] + pub activity_task_poller_behavior: PollerBehavior, + /// Controls how polling for Nexus tasks will happen on this worker's task queue. + #[builder(default = PollerBehavior::SimpleMaximum(5))] + pub nexus_task_poller_behavior: PollerBehavior, + /// Specifies which task types this worker will poll for. + /// + /// Note: At least one task type must be specified or the worker will fail validation. + #[builder(default = WorkerTaskTypes::all())] + pub task_types: WorkerTaskTypes, + /// How long a workflow task is allowed to sit on the sticky queue before it is timed out + /// and moved to the non-sticky queue where it may be picked up by any worker. 
+ #[builder(default = Duration::from_secs(10))] + pub sticky_queue_schedule_to_start_timeout: Duration, + /// Longest interval for throttling activity heartbeats + #[builder(default = Duration::from_secs(60))] + pub max_heartbeat_throttle_interval: Duration, + /// Default interval for throttling activity heartbeats in case + /// `ActivityOptions.heartbeat_timeout` is unset. + /// When the timeout *is* set in the `ActivityOptions`, throttling is set to + /// `heartbeat_timeout * 0.8`. + #[builder(default = Duration::from_secs(30))] + pub default_heartbeat_throttle_interval: Duration, + /// Sets the maximum number of activities per second the task queue will dispatch, controlled + /// server-side. Note that this only takes effect upon an activity poll request. If multiple + /// workers on the same queue have different values set, they will thrash with the last poller + /// winning. + /// + /// Setting this to a nonzero value will also disable eager activity execution. + pub max_task_queue_activities_per_second: Option, + /// Limits the number of activities per second that this worker will process. The worker will + /// not poll for new activities if by doing so it might receive and execute an activity which + /// would cause it to exceed this limit. Negative, zero, or NaN values will cause building + /// the options to fail. + pub max_worker_activities_per_second: Option, + /// Any error types listed here will cause any workflow being processed by this worker to fail, + /// rather than simply failing the workflow task. + #[builder(default)] + pub workflow_failure_errors: HashSet, + /// Like [WorkerConfig::workflow_failure_errors], but specific to certain workflow types (the + /// map key). + #[builder(default)] + pub workflow_types_to_failure_errors: HashMap>, + /// If set, the worker will issue cancels for all outstanding activities and nexus operations after + /// shutdown has been initiated and this amount of time has elapsed. 
+ pub graceful_shutdown_period: Option, } impl WorkerOptionsBuilder { - /// You shouldn't have to call this directly, instead rely on the `#[activities]` macro. - /// - /// Registers all activities on an activity implementer that don't take a receiver. - pub fn register_activities_static(&mut self) -> &mut Self - where - AI: ActivityImplementer + HasOnlyStaticMethods, - { - AI::register_all_static(self); + /// Registers all activities on an activity implementer. + pub fn register_activities(mut self, instance: AI) -> Self { + self.activities.register_activities::(instance); self } - /// You shouldn't have to call this directly, instead rely on the `#[activities]` macro. - /// - /// Registers all activities on an activity implementer that take a receiver. - pub fn register_activities(&mut self, instance: AI) -> &mut Self { - AI::register_all_static(self); - let arcd = Arc::new(instance); - AI::register_all_instance(arcd, self); + /// Registers a specific activitiy. + pub fn register_activity( + mut self, + instance: Arc, + ) -> Self { + self.activities.register_activity::(instance); self } - /// You shouldn't have to call this directly, instead rely on the `#[activities]` macro. - /// - /// Registers a specific activitiy that does not take a receiver. - pub fn register_activity(&mut self) -> &mut Self { - self.activities.insert( - AD::name(), - Box::new(move |p, pc, c| { - let deserialized = pc.from_payload(p, &SerializationContext::Activity)?; - let pc2 = pc.clone(); - Ok(AD::execute(None, c, deserialized) - .map(move |v| match v { - Ok(okv) => pc2 - .to_payload(&okv, &SerializationContext::Activity) - .map_err(|_| todo!()), - Err(e) => Err(e), - }) - .boxed()) - }), - ); +} + +// Needs to exist to avoid https://github.com/elastio/bon/issues/359 +fn def_build_id() -> WorkerDeploymentOptions { + WorkerDeploymentOptions::from_build_id(build_id_from_current_exe().to_owned()) +} + +impl WorkerOptions { + /// Registers all activities on an activity implementer. 
+ pub fn register_activities(&mut self, instance: AI) -> &mut Self { + self.activities.register_activities::(instance); self } - /// You shouldn't have to call this directly, instead rely on the `#[activities]` macro. - /// - /// Registers a specific activitiy that takes a receiver. - pub fn register_activity_with_instance( + /// Registers a specific activity. + pub fn register_activity( &mut self, instance: Arc, ) -> &mut Self { - self.activities.insert( - AD::name(), - Box::new(move |p, pc, c| { - let deserialized = pc.from_payload(p, &SerializationContext::Activity)?; - let pc2 = pc.clone(); - Ok(AD::execute(Some(instance.clone()), c, deserialized) - .map(move |v| match v { - Ok(okv) => pc2 - .to_payload(&okv, &SerializationContext::Activity) - .map_err(|_| todo!()), - Err(e) => Err(e), - }) - .boxed()) - }), - ); + self.activities.register_activity::(instance); self } -} + /// Returns all the registered activities by cloning the current set. + pub fn activities(&self) -> ActivityDefinitions { + self.activities.clone() + } -/// Returns connection options with required fields set to appropriate values for the Rust SDK. 
-pub fn sdk_connection_options( - url: impl Into, -) -> ConnectionOptionsBuilder { - ConnectionOptions::new(url) - .client_name("temporal-rust".to_string()) - .client_version(VERSION.to_string()) + #[doc(hidden)] + pub fn to_core_options(&self, namespace: String) -> Result { + WorkerConfig::builder() + .namespace(namespace) + .task_queue(self.task_queue.clone()) + .maybe_client_identity_override(self.client_identity_override.clone()) + .max_cached_workflows(self.max_cached_workflows) + .tuner(self.tuner.clone()) + .workflow_task_poller_behavior(self.workflow_task_poller_behavior) + .activity_task_poller_behavior(self.activity_task_poller_behavior) + .nexus_task_poller_behavior(self.nexus_task_poller_behavior) + .task_types(self.task_types) + .sticky_queue_schedule_to_start_timeout(self.sticky_queue_schedule_to_start_timeout) + .max_heartbeat_throttle_interval(self.max_heartbeat_throttle_interval) + .default_heartbeat_throttle_interval(self.default_heartbeat_throttle_interval) + .maybe_max_task_queue_activities_per_second(self.max_task_queue_activities_per_second) + .maybe_max_worker_activities_per_second(self.max_worker_activities_per_second) + .maybe_graceful_shutdown_period(self.graceful_shutdown_period) + .versioning_strategy(WorkerVersioningStrategy::WorkerDeploymentBased( + self.deployment_options.clone(), + )) + .workflow_failure_errors(self.workflow_failure_errors.clone()) + .workflow_types_to_failure_errors(self.workflow_types_to_failure_errors.clone()) + .build() + } } /// A worker that can poll for and respond to workflow tasks by using [WorkflowFunction]s, -/// and activity tasks by using [ActivityFunction]s +/// and activity tasks by using activities defined with [temporalio_macros::activities]. 
pub struct Worker { common: CommonWorker, workflow_half: WorkflowHalf, @@ -233,6 +332,7 @@ struct CommonWorker { worker_interceptor: Option>, } +#[derive(Default)] struct WorkflowHalf { /// Maps run id to cached workflow state workflows: RefCell>, @@ -250,32 +350,53 @@ struct WorkflowFutureHandle, J run_id: String, } +#[derive(Default)] struct ActivityHalf { /// Maps activity type to the function for executing activities of that type - activity_fns: HashMap, + activities: ActivityDefinitions, task_tokens_to_cancels: HashMap, } impl Worker { - // /// Create a new worker from an existing connection, and options. - // pub fn new(connection: Connection, options: WorkerOptions) -> Self {} + // TODO [rust-sdk-branch]: Not 100% sure I like passing runtime here + /// Create a new worker from an existing connection, and options. + pub fn new( + runtime: &CoreRuntime, + client: Client, + mut options: WorkerOptions, + ) -> Result> { + let acts = std::mem::take(&mut options.activities); + let wc = options + .to_core_options(client.namespace()) + .map_err(|s| anyhow::anyhow!("{s}"))?; + let core = init_worker(runtime, wc, client.connection().clone())?; + let mut me = Self::new_from_core(Arc::new(core)); + me.activity_half.activities = acts; + Ok(me) + } + + // TODO [rust-sdk-branch]: Eliminate this constructor in favor of passing in fake connection + #[doc(hidden)] + pub fn new_from_core(worker: Arc) -> Self { + Self::new_from_core_activities(worker, Default::default()) + } - /// Create a new Rust SDK worker from a core worker - pub fn new_from_core(worker: Arc, task_queue: impl Into) -> Self { + // TODO [rust-sdk-branch]: Eliminate this constructor in favor of passing in fake connection + #[doc(hidden)] + pub fn new_from_core_activities( + worker: Arc, + activities: ActivityDefinitions, + ) -> Self { Self { common: CommonWorker { + task_queue: worker.get_config().task_queue.clone(), worker, - task_queue: task_queue.into(), worker_interceptor: None, }, - workflow_half: 
WorkflowHalf { - workflows: Default::default(), - workflow_fns: Default::default(), - workflow_removed_from_map: Default::default(), - }, + workflow_half: Default::default(), activity_half: ActivityHalf { - activity_fns: Default::default(), - task_tokens_to_cancels: Default::default(), + activities, + ..Default::default() }, app_data: Some(Default::default()), } @@ -306,19 +427,22 @@ impl Worker { .insert(workflow_type.into(), wf_function.into()); } - /// Register an Activity function to invoke when the Worker is asked to run an activity of - /// `activity_type` - pub fn register_activity( + /// Registers all activities on an activity implementer. + pub fn register_activities(&mut self, instance: AI) -> &mut Self { + self.activity_half + .activities + .register_activities::(instance); + self + } + /// Registers a specific activity. + pub fn register_activity( &mut self, - activity_type: impl Into, - act_function: impl IntoActivityFunc, - ) { - self.activity_half.activity_fns.insert( - activity_type.into(), - ActivityFunction { - act_func: act_function.into_activity_fn(), - }, - ); + instance: Arc, + ) -> &mut Self { + self.activity_half + .activities + .register_activity::(instance); + self } /// Insert Custom App Context for Workflows and Activities @@ -410,7 +534,7 @@ impl Worker { // Only poll on the activity queue if activity functions have been registered. This // makes tests which use mocks dramatically more manageable. async { - if !act_half.activity_fns.is_empty() { + if !act_half.activities.is_empty() { loop { let activity = common.worker.poll_activity_task().await; if matches!(activity, Err(PollError::ShutDown)) { @@ -461,6 +585,16 @@ impl Worker { self.workflow_half.workflows.borrow().len() } + /// Returns the instance key for this worker, used for worker heartbeating. 
+ pub fn worker_instance_key(&self) -> Uuid { + self.common.worker.worker_instance_key() + } + + #[doc(hidden)] + pub fn core_worker(&self) -> Arc { + self.common.worker.clone() + } + fn split_apart( &mut self, ) -> ( @@ -604,16 +738,12 @@ impl ActivityHalf { ) -> Result<(), anyhow::Error> { match activity.variant { Some(activity_task::Variant::Start(start)) => { - let act_fn = self - .activity_fns - .get(&start.activity_type) - .ok_or_else(|| { - anyhow!( - "No function registered for activity type {}", - start.activity_type - ) - })? - .clone(); + let act_fn = self.activities.get(&start.activity_type).ok_or_else(|| { + anyhow!( + "No function registered for activity type {}", + start.activity_type + ) + })?; let span = info_span!( "RunActivity", "otel.name" = format!("RunActivity:{}", start.activity_type), @@ -635,6 +765,8 @@ impl ActivityHalf { task_token.clone(), start, ); + // TODO [rust-sdk-branch]: Get payload converter from client + let payload_converter = PayloadConverter::serde_json(); tokio::spawn(async move { let act_fut = async move { @@ -643,7 +775,7 @@ impl ActivityHalf { .record("temporalWorkflowID", &info.workflow_id) .record("temporalRunID", &info.run_id); } - (act_fn.act_func)(ctx, arg).await + (act_fn)(arg, payload_converter, ctx)?.await } .instrument(span); let output = AssertUnwindSafe(act_fut).catch_unwind().await; @@ -652,16 +784,16 @@ impl ActivityHalf { format!("Activity function panicked: {}", panic_formatter(e)), true, )), - Ok(Ok(ActExitValue::Normal(p))) => ActivityExecutionResult::ok(p), - Ok(Ok(ActExitValue::WillCompleteAsync)) => { - ActivityExecutionResult::will_complete_async() - } + Ok(Ok(p)) => ActivityExecutionResult::ok(p), Ok(Err(err)) => match err { ActivityError::Retryable { source, explicit_delay, } => ActivityExecutionResult::fail({ - let mut f = Failure::application_failure_from_error(source, false); + let mut f = Failure::application_failure_from_error( + anyhow::Error::from_boxed(source), + false, + ); if let Some(d) = 
explicit_delay && let Some(failure::FailureInfo::ApplicationFailureInfo(fi)) = f.failure_info.as_mut() @@ -674,8 +806,14 @@ impl ActivityHalf { ActivityExecutionResult::cancel_from_details(details) } ActivityError::NonRetryable(nre) => ActivityExecutionResult::fail( - Failure::application_failure_from_error(nre, true), + Failure::application_failure_from_error( + anyhow::Error::from_boxed(nre), + true, + ), ), + ActivityError::WillCompleteAsync => { + ActivityExecutionResult::will_complete_async() + } }, }; worker @@ -1033,12 +1171,6 @@ type BoxActFn = Arc< + Sync, >; -/// Container for user-defined activity functions -#[derive(Clone)] -pub struct ActivityFunction { - act_func: BoxActFn, -} - /// Closures / functions which can be turned into activity functions implement this trait pub trait IntoActivityFunc { /// Consume the closure or fn pointer and turned it into a boxed activity function @@ -1067,7 +1199,9 @@ where } ActExitValue::Normal(x) => match x.as_json_payload() { Ok(v) => Ok(ActExitValue::Normal(v)), - Err(e) => Err(ActivityError::NonRetryable(e)), + Err(e) => { + Err(ActivityError::NonRetryable(e.into_boxed_dyn_error())) + } }, } }) @@ -1208,11 +1342,39 @@ mod tests { async fn my_activity(_ctx: ActivityContext) -> Result<(), ActivityError> { Ok(()) } + + #[activity] + async fn takes_self( + self: Arc, + _ctx: ActivityContext, + _: String, + ) -> Result<(), ActivityError> { + Ok(()) + } } #[test] fn test_activity_registration() { let act_instance = MyActivities {}; - WorkerOptions::builder().register_activities(act_instance); + let _ = WorkerOptions::new("task_q").register_activities(act_instance); + } + + // Compile-only test for workflow context invocation + #[allow(dead_code, unreachable_code, unused, clippy::diverging_sub_expression)] + fn test_activity_via_workflow_context() { + let wf_ctx: WfContext = unimplemented!(); + wf_ctx.start_activity(MyActivities::my_activity, (), ActivityOptions::default()); + wf_ctx.start_activity( + 
MyActivities::takes_self, + "Hi".to_owned(), + ActivityOptions::default(), + ); + } + + // Compile-only test for direct invocation via .run() + #[allow(dead_code, unreachable_code, unused, clippy::diverging_sub_expression)] + async fn test_activity_direct_invocation() { + let ctx: ActivityContext = unimplemented!(); + let _result = MyActivities::my_activity.run(ctx).await; } } diff --git a/crates/sdk/src/workflow_context.rs b/crates/sdk/src/workflow_context.rs index 4743a41bc..f5e3c3866 100644 --- a/crates/sdk/src/workflow_context.rs +++ b/crates/sdk/src/workflow_context.rs @@ -29,6 +29,10 @@ use std::{ time::{Duration, SystemTime}, }; use temporalio_common::{ + ActivityDefinition, + data_converters::{ + GenericPayloadConverter, PayloadConversionError, PayloadConverter, SerializationContext, + }, protos::{ coresdk::{ activity_result::{ActivityResolution, activity_resolution}, @@ -213,43 +217,60 @@ impl WfContext { } /// Request to run an activity - pub fn activity( + pub fn start_activity( &self, + _activity: AD, + input: AD::Input, mut opts: ActivityOptions, - ) -> impl CancellableFuture { + ) -> Result, PayloadConversionError> { + // TODO [rust-sdk-branch]: Get payload converter properly + let pc = PayloadConverter::serde_json(); + let payload = pc.to_payload(&SerializationContext::Workflow, &input)?; + let seq = self.seq_nums.write().next_activity_seq(); + let (cmd, unblocker) = CancellableWFCommandFut::new(CancellableID::Activity(seq)); if opts.task_queue.is_none() { opts.task_queue = Some(self.task_queue.clone()); } - let seq = self.seq_nums.write().next_activity_seq(); - let (cmd, unblocker) = CancellableWFCommandFut::new(CancellableID::Activity(seq)); self.send( CommandCreateRequest { - cmd: opts.into_command(seq), + cmd: opts.into_command(AD::name().to_string(), payload, seq), unblocker, } .into(), ); - cmd + Ok(cmd) } /// Request to run a local activity - pub fn local_activity( + pub fn start_local_activity( &self, + _activity: AD, + input: AD::Input, 
opts: LocalActivityOptions, - ) -> impl CancellableFuture + '_ { - LATimerBackoffFut::new(opts, self) + ) -> Result + '_, PayloadConversionError> { + // TODO [rust-sdk-branch]: Get payload converter properly + let pc = PayloadConverter::serde_json(); + let payload = pc.to_payload(&SerializationContext::Workflow, &input)?; + Ok(LATimerBackoffFut::new( + AD::name().to_string(), + payload, + opts, + self, + )) } /// Request to run a local activity with no implementation of timer-backoff based retrying. fn local_activity_no_timer_retry( &self, + activity_type: String, + input: Payload, opts: LocalActivityOptions, ) -> impl CancellableFuture { let seq = self.seq_nums.write().next_activity_seq(); let (cmd, unblocker) = CancellableWFCommandFut::new(CancellableID::LocalActivity(seq)); self.send( CommandCreateRequest { - cmd: opts.into_command(seq), + cmd: opts.into_command(activity_type, input, seq), unblocker, } .into(), @@ -668,6 +689,8 @@ where struct LATimerBackoffFut<'a> { la_opts: LocalActivityOptions, + activity_type: String, + input: Payload, current_fut: Pin + Send + Unpin + 'a>>, timer_fut: Option + Send + Unpin + 'a>>>, ctx: &'a WfContext, @@ -676,10 +699,17 @@ struct LATimerBackoffFut<'a> { did_cancel: AtomicBool, } impl<'a> LATimerBackoffFut<'a> { - pub(crate) fn new(opts: LocalActivityOptions, ctx: &'a WfContext) -> Self { + pub(crate) fn new( + activity_type: String, + input: Payload, + opts: LocalActivityOptions, + ctx: &'a WfContext, + ) -> Self { Self { la_opts: opts.clone(), - current_fut: Box::pin(ctx.local_activity_no_timer_retry(opts)), + activity_type: activity_type.clone(), + input: input.clone(), + current_fut: Box::pin(ctx.local_activity_no_timer_retry(activity_type, input, opts)), timer_fut: None, ctx, next_attempt: 1, @@ -704,7 +734,11 @@ impl Future for LATimerBackoffFut<'_> { opts.attempt = Some(self.next_attempt); opts.original_schedule_time .clone_from(&self.next_sched_time); - self.current_fut = 
Box::pin(self.ctx.local_activity_no_timer_retry(opts)); + self.current_fut = Box::pin(self.ctx.local_activity_no_timer_retry( + self.activity_type.clone(), + self.input.clone(), + opts, + )); Poll::Pending } else { Poll::Ready(ActivityResolution { diff --git a/crates/sdk/src/workflow_context/options.rs b/crates/sdk/src/workflow_context/options.rs index 06ac526f1..4218c5690 100644 --- a/crates/sdk/src/workflow_context/options.rs +++ b/crates/sdk/src/workflow_context/options.rs @@ -33,10 +33,6 @@ pub struct ActivityOptions { /// /// If `None` use the context's sequence number pub activity_id: Option, - /// Type of activity to schedule - pub activity_type: String, - /// Input to the activity - pub input: Payload, /// Task queue to schedule the activity in /// /// If `None`, use the same task queue as the parent workflow. @@ -76,8 +72,13 @@ pub struct ActivityOptions { pub do_not_eagerly_execute: bool, } -impl IntoWorkflowCommand for ActivityOptions { - fn into_command(self, seq: u32) -> WorkflowCommand { +impl ActivityOptions { + pub(crate) fn into_command( + self, + activity_type: String, + input: Payload, + seq: u32, + ) -> WorkflowCommand { WorkflowCommand { variant: Some( ScheduleActivity { @@ -86,7 +87,7 @@ impl IntoWorkflowCommand for ActivityOptions { None => seq.to_string(), Some(aid) => aid, }, - activity_type: self.activity_type, + activity_type, task_queue: self.task_queue.unwrap_or_default(), schedule_to_close_timeout: self .schedule_to_close_timeout @@ -99,7 +100,8 @@ impl IntoWorkflowCommand for ActivityOptions { .and_then(|d| d.try_into().ok()), heartbeat_timeout: self.heartbeat_timeout.and_then(|d| d.try_into().ok()), cancellation_type: self.cancellation_type as i32, - arguments: vec![self.input], + // TODO [rust-sdk-branch]: Handle multi-args + arguments: vec![input], retry_policy: self.retry_policy, priority: self.priority.map(Into::into), do_not_eagerly_execute: self.do_not_eagerly_execute, @@ -124,11 +126,6 @@ pub struct LocalActivityOptions { /// 
/// If `None` use the context's sequence number pub activity_id: Option, - /// Type of activity to schedule - pub activity_type: String, - /// Input to the activity - // TODO: Make optional - pub input: Payload, /// Retry policy pub retry_policy: RetryPolicy, /// Override attempt number rather than using 1. @@ -160,8 +157,13 @@ pub struct LocalActivityOptions { pub summary: Option, } -impl IntoWorkflowCommand for LocalActivityOptions { - fn into_command(mut self, seq: u32) -> WorkflowCommand { +impl LocalActivityOptions { + pub(crate) fn into_command( + mut self, + activity_type: String, + input: Payload, + seq: u32, + ) -> WorkflowCommand { // Allow tests to avoid extra verbosity when they don't care about timeouts // TODO: Builderize LA options self.schedule_to_close_timeout @@ -177,8 +179,8 @@ impl IntoWorkflowCommand for LocalActivityOptions { None => seq.to_string(), Some(aid) => aid, }, - activity_type: self.activity_type, - arguments: vec![self.input], + activity_type, + arguments: vec![input], retry_policy: Some(self.retry_policy), local_retry_threshold: self .timer_backoff_threshold diff --git a/rustfmt.toml b/rustfmt.toml index d3db3454a..c6b99ae57 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1 +1,2 @@ -imports_granularity="Crate" \ No newline at end of file +imports_granularity="Crate" +format_code_in_doc_comments=true