diff --git a/Cargo.toml b/Cargo.toml index 45bfd8964..246a98ef8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ license = "MIT" license-file = "LICENSE.txt" [workspace.dependencies] -derive_builder = "0.20" +bon = { version = "3", features = ["implied-bounds"] } derive_more = { version = "2.0", features = [ "constructor", "display", diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index 5d013587e..877087de3 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -23,8 +23,8 @@ test-utilities = ["history_builders"] anyhow = "1.0" async-trait = "0.1" base64 = "0.22" +bon = { workspace = true } dirs = { version = "6.0", optional = true } -derive_builder = { workspace = true } derive_more = { workspace = true } opentelemetry = { workspace = true, optional = true } prost = { workspace = true } diff --git a/crates/common/src/telemetry.rs b/crates/common/src/telemetry.rs index 4ad3d2eb3..fc8cd343e 100644 --- a/crates/common/src/telemetry.rs +++ b/crates/common/src/telemetry.rs @@ -28,31 +28,30 @@ pub trait CoreTelemetry { fn fetch_buffered_logs(&self) -> Vec; } -/// Telemetry configuration options. Construct with [TelemetryOptionsBuilder] -#[derive(Clone, derive_builder::Builder)] +/// Telemetry configuration options. Construct with [TelemetryOptions::builder] +#[derive(Clone, bon::Builder)] #[non_exhaustive] pub struct TelemetryOptions { /// Optional logger - set as None to disable. - #[builder(setter(into, strip_option), default)] + #[builder(into)] pub logging: Option, /// Optional metrics exporter - set as None to disable. - #[builder(setter(into, strip_option), default)] + #[builder(into)] pub metrics: Option>, /// If set true (the default) explicitly attach a `service_name` label to all metrics. Turn this /// off if your collection system supports the `target_info` metric from the OpenMetrics spec. 
/// For more, see /// [here](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#supporting-target-metadata-in-both-push-based-and-pull-based-systems) - #[builder(default = "true")] + #[builder(default = true)] pub attach_service_name: bool, /// A prefix to be applied to all core-created metrics. Defaults to "temporal_". - #[builder(default = "METRIC_PREFIX.to_string()")] + #[builder(default = METRIC_PREFIX.to_string())] pub metric_prefix: String, /// If provided, logging config will be ignored and this explicit subscriber will be used for /// all logging and traces. - #[builder(setter(strip_option), default)] pub subscriber_override: Option<Arc<dyn Subscriber + Send + Sync>>, /// See [TaskQueueLabelStrategy]. - #[builder(default = "TaskQueueLabelStrategy::UseNormal")] + #[builder(default = TaskQueueLabelStrategy::UseNormal)] pub task_queue_label_strategy: TaskQueueLabelStrategy, } impl Debug for TelemetryOptions { @@ -96,19 +95,19 @@ pub enum TaskQueueLabelStrategy { } /// Options for exporting to an OpenTelemetry Collector -#[derive(Debug, Clone, derive_builder::Builder)] +#[derive(Debug, Clone, bon::Builder)] pub struct OtelCollectorOptions { /// The url of the OTel collector to export telemetry and metrics to. Lang SDK should also /// export to this same collector. pub url: Url, /// Optional set of HTTP headers to send to the Collector, e.g for authentication. - #[builder(default = "HashMap::new()")] + #[builder(default = HashMap::new())] pub headers: HashMap<String, String>, /// Optionally specify how frequently metrics should be exported. Defaults to 1 second. - #[builder(default = "Duration::from_secs(1)")] + #[builder(default = Duration::from_secs(1))] pub metric_periodicity: Duration, /// Specifies the aggregation temporality for metric export. Defaults to cumulative. 
- #[builder(default = "MetricTemporality::Cumulative")] + #[builder(default = MetricTemporality::Cumulative)] pub metric_temporality: MetricTemporality, /// A map of tags to be applied to all metrics #[builder(default)] @@ -120,12 +119,12 @@ pub struct OtelCollectorOptions { #[builder(default)] pub histogram_bucket_overrides: HistogramBucketOverrides, /// Protocol to use for communication with the collector - #[builder(default = "OtlpProtocol::Grpc")] + #[builder(default = OtlpProtocol::Grpc)] pub protocol: OtlpProtocol, } /// Options for exporting metrics to Prometheus -#[derive(Debug, Clone, derive_builder::Builder)] +#[derive(Debug, Clone, bon::Builder)] pub struct PrometheusExporterOptions { pub socket_addr: SocketAddr, // A map of tags to be applied to all metrics @@ -205,7 +204,7 @@ pub enum OtlpProtocol { impl Default for TelemetryOptions { fn default() -> Self { - TelemetryOptionsBuilder::default().build().unwrap() + TelemetryOptions::builder().build() } } diff --git a/crates/common/src/telemetry/metrics.rs b/crates/common/src/telemetry/metrics.rs index 94b2b488d..7cb790067 100644 --- a/crates/common/src/telemetry/metrics.rs +++ b/crates/common/src/telemetry/metrics.rs @@ -259,16 +259,16 @@ impl WorkerHeartbeatMetrics { } } -#[derive(Debug, Clone, derive_builder::Builder)] +#[derive(Debug, Clone, bon::Builder)] pub struct MetricParameters { /// The name for the new metric/instrument - #[builder(setter(into))] + #[builder(into)] pub name: Cow<'static, str>, /// A description that will appear in metadata if the backend supports it - #[builder(setter(into), default = "\"\".into()")] + #[builder(into, default = Cow::Borrowed(""))] pub description: Cow<'static, str>, /// Unit information that will appear in metadata if the backend supports it - #[builder(setter(into), default = "\"\".into()")] + #[builder(into, default = Cow::Borrowed(""))] pub unit: Cow<'static, str>, } impl From<&'static str> for MetricParameters { diff --git a/crates/common/src/worker.rs 
b/crates/common/src/worker.rs index 8666583fc..714967572 100644 --- a/crates/common/src/worker.rs +++ b/crates/common/src/worker.rs @@ -88,8 +88,8 @@ impl WorkerTaskTypes { } /// Defines per-worker configuration options -#[derive(Clone, derive_builder::Builder)] -#[builder(setter(into), build_fn(validate = "Self::validate"))] +#[derive(Clone, bon::Builder)] +#[builder(on(String, into), state_mod(vis = "pub"), finish_fn(vis = "", name = build_internal))] #[non_exhaustive] pub struct WorkerConfig { /// The Temporal service namespace this worker is bound to @@ -100,23 +100,21 @@ pub struct WorkerConfig { /// A human-readable string that can identify this worker. Using something like sdk version /// and host name is a good default. If set, overrides the identity set (if any) on the client /// used by this worker. - #[builder(default)] pub client_identity_override: Option, /// If set nonzero, workflows will be cached and sticky task queues will be used, meaning that /// history updates are applied incrementally to suspended instances of workflow execution. /// Workflows are evicted according to a least-recently-used policy one the cache maximum is /// reached. Workflows may also be explicitly evicted at any time, or as a result of errors /// or failures. - #[builder(default = "0")] + #[builder(default = 0)] pub max_cached_workflows: usize, /// Set a [WorkerTuner] for this worker. Either this or at least one of the `max_outstanding_*` /// fields must be set. - #[builder(setter(into = false, strip_option), default)] pub tuner: Option>, /// Maximum number of concurrent poll workflow task requests we will perform at a time on this /// worker's task queue. See also [WorkerConfig::nonsticky_to_sticky_poll_ratio]. /// If using SimpleMaximum, Must be at least 2 when `max_cached_workflows` > 0, or is an error. 
- #[builder(default = "PollerBehavior::SimpleMaximum(5)")] + #[builder(default = PollerBehavior::SimpleMaximum(5))] pub workflow_task_poller_behavior: PollerBehavior, /// Only applies when using [PollerBehavior::SimpleMaximum] /// @@ -125,15 +123,15 @@ pub struct WorkerConfig { /// queue will allow 4 max pollers while the nonsticky queue will allow one. The minimum for /// either poller is 1, so if the maximum allowed is 1 and sticky queues are enabled, there will /// be 2 concurrent polls. - #[builder(default = "0.2")] + #[builder(default = 0.2)] pub nonsticky_to_sticky_poll_ratio: f32, /// Maximum number of concurrent poll activity task requests we will perform at a time on this /// worker's task queue - #[builder(default = "PollerBehavior::SimpleMaximum(5)")] + #[builder(default = PollerBehavior::SimpleMaximum(5))] pub activity_task_poller_behavior: PollerBehavior, /// Maximum number of concurrent poll nexus task requests we will perform at a time on this /// worker's task queue - #[builder(default = "PollerBehavior::SimpleMaximum(5)")] + #[builder(default = PollerBehavior::SimpleMaximum(5))] pub nexus_task_poller_behavior: PollerBehavior, /// Specifies which task types this worker will poll for. /// @@ -141,18 +139,18 @@ pub struct WorkerConfig { pub task_types: WorkerTaskTypes, /// How long a workflow task is allowed to sit on the sticky queue before it is timed out /// and moved to the non-sticky queue where it may be picked up by any worker. - #[builder(default = "Duration::from_secs(10)")] + #[builder(default = Duration::from_secs(10))] pub sticky_queue_schedule_to_start_timeout: Duration, /// Longest interval for throttling activity heartbeats - #[builder(default = "Duration::from_secs(60)")] + #[builder(default = Duration::from_secs(60))] pub max_heartbeat_throttle_interval: Duration, /// Default interval for throttling activity heartbeats in case /// `ActivityOptions.heartbeat_timeout` is unset. 
/// When the timeout *is* set in the `ActivityOptions`, throttling is set to /// `heartbeat_timeout * 0.8`. - #[builder(default = "Duration::from_secs(30)")] + #[builder(default = Duration::from_secs(30))] pub default_heartbeat_throttle_interval: Duration, /// Sets the maximum number of activities per second the task queue will dispatch, controlled @@ -161,14 +159,12 @@ /// winning. /// /// Setting this to a nonzero value will also disable eager activity execution. - #[builder(default)] pub max_task_queue_activities_per_second: Option<f64>, /// Limits the number of activities per second that this worker will process. The worker will /// not poll for new activities if by doing so it might receive and execute an activity which /// would cause it to exceed this limit. Negative, zero, or NaN values will cause building /// the options to fail. - #[builder(default)] pub max_worker_activities_per_second: Option<f64>, /// If set false (default), shutdown will not finish until all pending evictions have been @@ -178,23 +174,22 @@ /// This flag is useful during tests to avoid needing to deal with lots of uninteresting /// evictions during shutdown. Alternatively, if a lang implementation finds it easy to clean /// up during shutdown, setting this true saves some back-and-forth. - #[builder(default = "false")] + #[builder(default = false)] pub ignore_evicts_on_shutdown: bool, /// Maximum number of next page (or initial) history event listing requests we'll make /// concurrently. I don't this it's worth exposing this to users until we encounter a reason. - #[builder(default = "5")] + #[builder(default = 5)] pub fetching_concurrency: usize, /// If set, core will issue cancels for all outstanding activities and nexus operations after /// shutdown has been initiated and this amount of time has elapsed. 
- #[builder(default)] pub graceful_shutdown_period: Option<Duration>, /// The amount of time core will wait before timing out activities using its own local timers /// after one of them elapses. This is to avoid racing with server's own tracking of the /// timeout. - #[builder(default = "Duration::from_secs(5)")] + #[builder(default = Duration::from_secs(5))] pub local_timeout_buffer_for_activities: Duration, /// Any error types listed here will cause any workflow being processed by this worker to fail, @@ -213,24 +208,24 @@ /// `max_cached_workflows` is > 0, or is an error. /// /// Mutually exclusive with `tuner` - #[builder(setter(into, strip_option), default)] + #[builder(into)] pub max_outstanding_workflow_tasks: Option<usize>, /// The maximum number of activity tasks that will ever be given to this worker concurrently. /// /// Mutually exclusive with `tuner` - #[builder(setter(into, strip_option), default)] + #[builder(into)] pub max_outstanding_activities: Option<usize>, /// The maximum number of local activity tasks that will ever be given to this worker /// concurrently. /// /// Mutually exclusive with `tuner` - #[builder(setter(into, strip_option), default)] + #[builder(into)] pub max_outstanding_local_activities: Option<usize>, /// The maximum number of nexus tasks that will ever be given to this worker /// concurrently. /// /// Mutually exclusive with `tuner` - #[builder(setter(into, strip_option), default)] + #[builder(into)] pub max_outstanding_nexus_tasks: Option<usize>, /// A versioning strategy for this worker. 
@@ -241,7 +236,7 @@ pub struct WorkerConfig { pub plugins: HashSet, /// Skips the single worker+client+namespace+task_queue check - #[builder(default = "false")] + #[builder(default = false)] pub skip_client_worker_set_check: bool, } @@ -279,115 +274,87 @@ impl WorkerConfig { } } -impl WorkerConfigBuilder { - /// Unset all `max_outstanding_*` fields - pub fn clear_max_outstanding_opts(&mut self) -> &mut Self { - self.max_outstanding_workflow_tasks = None; - self.max_outstanding_activities = None; - self.max_outstanding_local_activities = None; - self - } - - fn validate(&self) -> Result<(), String> { - let task_types = self - .task_types - .as_ref() - .cloned() - .unwrap_or_else(WorkerTaskTypes::all); +impl WorkerConfigBuilder { + pub fn build(self) -> Result { + let config = self.build_internal(); + let task_types = &config.task_types; if task_types.is_empty() { - return Err("At least one task type must be enabled in `task_types`".to_owned()); + return Err("At least one task type must be enabled in `task_types`".to_string()); } if !task_types.enable_workflows && task_types.enable_local_activities { - return Err("`task_types` cannot enable local activities without workflows".to_owned()); + return Err( + "`task_types` cannot enable local activities without workflows".to_string(), + ); } - if let Some(b) = self.workflow_task_poller_behavior.as_ref() { - b.validate()? - } - if let Some(b) = self.activity_task_poller_behavior.as_ref() { - b.validate()? - } - if let Some(b) = self.nexus_task_poller_behavior.as_ref() { - b.validate()? 
- } + config.workflow_task_poller_behavior.validate()?; + config.activity_task_poller_behavior.validate()?; + config.nexus_task_poller_behavior.validate()?; - if let Some(Some(ref x)) = self.max_worker_activities_per_second + if let Some(ref x) = config.max_worker_activities_per_second && (!x.is_normal() || x.is_sign_negative()) { return Err( - "`max_worker_activities_per_second` must be positive and nonzero".to_owned(), + "`max_worker_activities_per_second` must be positive and nonzero".to_string(), ); } - if matches!(self.max_outstanding_workflow_tasks.as_ref(), Some(Some(v)) if *v == 0) { - return Err("`max_outstanding_workflow_tasks` must be > 0".to_owned()); + if matches!(config.max_outstanding_workflow_tasks, Some(v) if v == 0) { + return Err("`max_outstanding_workflow_tasks` must be > 0".to_string()); } - if matches!(self.max_outstanding_activities.as_ref(), Some(Some(v)) if *v == 0) { - return Err("`max_outstanding_activities` must be > 0".to_owned()); + if matches!(config.max_outstanding_activities, Some(v) if v == 0) { + return Err("`max_outstanding_activities` must be > 0".to_string()); } - if matches!(self.max_outstanding_local_activities.as_ref(), Some(Some(v)) if *v == 0) { - return Err("`max_outstanding_local_activities` must be > 0".to_owned()); + if matches!(config.max_outstanding_local_activities, Some(v) if v == 0) { + return Err("`max_outstanding_local_activities` must be > 0".to_string()); } - if matches!(self.max_outstanding_nexus_tasks.as_ref(), Some(Some(v)) if *v == 0) { - return Err("`max_outstanding_nexus_tasks` must be > 0".to_owned()); + if matches!(config.max_outstanding_nexus_tasks, Some(v) if v == 0) { + return Err("`max_outstanding_nexus_tasks` must be > 0".to_string()); } - if let Some(cache) = self.max_cached_workflows.as_ref() - && *cache > 0 - { - if let Some(Some(max_wft)) = self.max_outstanding_workflow_tasks.as_ref() - && *max_wft < 2 + if config.max_cached_workflows > 0 { + if let Some(max_wft) = 
config.max_outstanding_workflow_tasks + && max_wft < 2 { return Err( "`max_cached_workflows` > 0 requires `max_outstanding_workflow_tasks` >= 2" - .to_owned(), + .to_string(), ); } - if let Some(b) = self.workflow_task_poller_behavior.as_ref() { - if matches!(b, PollerBehavior::SimpleMaximum(u) if *u < 2) { - return Err( - "`max_cached_workflows` > 0 requires `workflow_task_poller_behavior` to be at least 2" - .to_owned(), - ); - } - b.validate()? + if matches!(config.workflow_task_poller_behavior, PollerBehavior::SimpleMaximum(u) if u < 2) + { + return Err("`max_cached_workflows` > 0 requires `workflow_task_poller_behavior` to be at least 2".to_string()); } } - if self.tuner.is_some() - && (self.max_outstanding_workflow_tasks.is_some() - || self.max_outstanding_activities.is_some() - || self.max_outstanding_local_activities.is_some()) + if config.tuner.is_some() + && (config.max_outstanding_workflow_tasks.is_some() + || config.max_outstanding_activities.is_some() + || config.max_outstanding_local_activities.is_some()) { - return Err("max_outstanding_* fields are mutually exclusive with `tuner`".to_owned()); + return Err("max_outstanding_* fields are mutually exclusive with `tuner`".to_string()); } - if let Some(wv) = self.versioning_strategy.as_ref() { - match wv { - WorkerVersioningStrategy::None { .. } => {} - WorkerVersioningStrategy::WorkerDeploymentBased(d) => { - if d.use_worker_versioning - && (d.version.build_id.is_empty() || d.version.deployment_name.is_empty()) - { - return Err( - "WorkerDeploymentVersion must have a non-empty build_id and \ - deployment_name when deployment-based versioning is enabled" - .to_owned(), - ); - } + match &config.versioning_strategy { + WorkerVersioningStrategy::None { .. 
} => {} + WorkerVersioningStrategy::WorkerDeploymentBased(d) => { + if d.use_worker_versioning + && (d.version.build_id.is_empty() || d.version.deployment_name.is_empty()) + { + return Err("WorkerDeploymentVersion must have a non-empty build_id and deployment_name when deployment-based versioning is enabled".to_string()); } - WorkerVersioningStrategy::LegacyBuildIdBased { build_id } => { - if build_id.is_empty() { - return Err( - "Legacy build id-based versioning must have a non-empty build_id" - .to_owned(), - ); - } + } + WorkerVersioningStrategy::LegacyBuildIdBased { build_id } => { + if build_id.is_empty() { + return Err( + "Legacy build id-based versioning must have a non-empty build_id" + .to_string(), + ); } } } - Ok(()) + Ok(config) } } diff --git a/crates/common/tests/worker_task_types_test.rs b/crates/common/tests/worker_task_types_test.rs index da19cc310..35f14bcd8 100644 --- a/crates/common/tests/worker_task_types_test.rs +++ b/crates/common/tests/worker_task_types_test.rs @@ -1,4 +1,4 @@ -use temporalio_common::worker::{WorkerConfigBuilder, WorkerTaskTypes, WorkerVersioningStrategy}; +use temporalio_common::worker::{WorkerConfig, WorkerTaskTypes, WorkerVersioningStrategy}; fn default_versioning_strategy() -> WorkerVersioningStrategy { WorkerVersioningStrategy::None { @@ -8,13 +8,13 @@ fn default_versioning_strategy() -> WorkerVersioningStrategy { #[test] fn test_default_configuration_polls_all_types() { - let config = WorkerConfigBuilder::default() + let config = WorkerConfig::builder() .namespace("default") .task_queue("test-queue") .versioning_strategy(default_versioning_strategy()) .task_types(WorkerTaskTypes::all()) .build() - .expect("Failed to build default config"); + .unwrap(); let effective = &config.task_types; assert!( @@ -35,7 +35,7 @@ fn test_default_configuration_polls_all_types() { #[test] fn test_invalid_task_types_fails_validation() { // empty task types - let result = WorkerConfigBuilder::default() + let result = 
WorkerConfig::builder() .namespace("default") .task_queue("test-queue") .versioning_strategy(default_versioning_strategy()) @@ -48,14 +48,14 @@ fn test_invalid_task_types_fails_validation() { .build(); assert!(result.is_err(), "Empty task_types should fail validation"); - let err = result.err().unwrap().to_string(); + let err = result.err().unwrap(); assert!( err.contains("At least one task type"), "Error should mention task types: {err}", ); // local activities with no workflows - let result = WorkerConfigBuilder::default() + let result = WorkerConfig::builder() .namespace("default") .task_queue("test-queue") .versioning_strategy(default_versioning_strategy()) @@ -68,7 +68,7 @@ fn test_invalid_task_types_fails_validation() { .build(); assert!(result.is_err(), "Empty task_types should fail validation"); - let err = result.err().unwrap().to_string(); + let err = result.err().unwrap(); assert!( err.contains("cannot enable local activities without workflows"), "Error should mention task types: {err}", @@ -112,13 +112,13 @@ fn test_all_combinations() { ]; for (task_types, description) in combinations { - let config = WorkerConfigBuilder::default() + let config = WorkerConfig::builder() .namespace("default") .task_queue("test-queue") .versioning_strategy(default_versioning_strategy()) .task_types(task_types) .build() - .unwrap_or_else(|e| panic!("Failed to build config for {description}: {e:?}")); + .unwrap(); let effective = config.task_types; assert_eq!( diff --git a/crates/sdk-core-c-bridge/src/metric.rs b/crates/sdk-core-c-bridge/src/metric.rs index 6a35a2eb9..6ab9140d1 100644 --- a/crates/sdk-core-c-bridge/src/metric.rs +++ b/crates/sdk-core-c-bridge/src/metric.rs @@ -231,12 +231,11 @@ pub extern "C" fn temporal_core_metric_record_duration( impl From<&MetricOptions> for metrics::MetricParameters { fn from(options: &MetricOptions) -> Self { - metrics::MetricParametersBuilder::default() + metrics::MetricParameters::builder() .name(options.name.to_string()) 
.description(options.description.to_string()) .unit(options.unit.to_string()) .build() - .unwrap() } } diff --git a/crates/sdk-core-c-bridge/src/runtime.rs b/crates/sdk-core-c-bridge/src/runtime.rs index 710645f69..14f60428d 100644 --- a/crates/sdk-core-c-bridge/src/runtime.rs +++ b/crates/sdk-core-c-bridge/src/runtime.rs @@ -17,12 +17,11 @@ use std::{ }; use temporalio_common::telemetry::{ CoreLog, CoreLogConsumer, HistogramBucketOverrides, Logger, MetricTemporality, - OtelCollectorOptionsBuilder, PrometheusExporterOptionsBuilder, - TelemetryOptions as CoreTelemetryOptions, TelemetryOptionsBuilder, metrics::CoreMeter, + OtelCollectorOptions, PrometheusExporterOptions, TelemetryOptions as CoreTelemetryOptions, + metrics::CoreMeter, }; use temporalio_sdk_core::{ - CoreRuntime, RuntimeOptions as CoreRuntimeOptions, - RuntimeOptionsBuilder as CoreRuntimeOptionsBuilder, TokioRuntimeBuilder, + CoreRuntime, RuntimeOptions as CoreRuntimeOptions, TokioRuntimeBuilder as TokioRuntime, telemetry::{build_otlp_metric_exporter, start_prometheus_metric_exporter}, }; use tracing::Level; @@ -143,11 +142,8 @@ pub extern "C" fn temporal_core_runtime_new(options: *const RuntimeOptions) -> R // freeable let mut runtime = Runtime { core: Arc::new( - CoreRuntime::new( - CoreRuntimeOptions::default(), - TokioRuntimeBuilder::default(), - ) - .unwrap(), + CoreRuntime::new(CoreRuntimeOptions::default(), TokioRuntime::default()) + .unwrap(), ), log_forwarder: None, }; @@ -207,19 +203,15 @@ impl Runtime { // Build telemetry options let mut log_forwarder = None; let telemetry_options = if let Some(v) = unsafe { options.telemetry.as_ref() } { - let mut build = TelemetryOptionsBuilder::default(); - - // Metrics options (note, metrics meter is late-bound later) - if let Some(v) = unsafe { v.metrics.as_ref() } { - build.attach_service_name(v.attach_service_name); - if let Some(metric_prefix) = v.metric_prefix.to_option_string() { - build.metric_prefix(metric_prefix); - } - } + let 
(attach_service_name, metric_prefix) = + if let Some(v) = unsafe { v.metrics.as_ref() } { + (v.attach_service_name, v.metric_prefix.to_option_string()) + } else { + (true, None) + }; - // Logging options - if let Some(v) = unsafe { v.logging.as_ref() } { - build.logging(if let Some(callback) = v.forward_to { + let logging = unsafe { v.logging.as_ref() }.map(|v| { + if let Some(callback) = v.forward_to { let consumer = Arc::new(LogForwarder { callback, active: AtomicBool::new(false), @@ -233,28 +225,32 @@ impl Runtime { Logger::Console { filter: v.filter.to_string(), } - }); - } - build.build()? - } else { - CoreTelemetryOptions::default() - }; + } + }); - let heartbeat_interval = if options.worker_heartbeat_interval_millis == 0 { - None + CoreTelemetryOptions::builder() + .attach_service_name(attach_service_name) + .maybe_metric_prefix(metric_prefix) + .maybe_logging(logging) + .build() } else { - Some(Duration::from_millis( - options.worker_heartbeat_interval_millis, - )) + CoreTelemetryOptions::default() }; - let core_runtime_options = CoreRuntimeOptionsBuilder::default() + let core_runtime_options = CoreRuntimeOptions::builder() .telemetry_options(telemetry_options) - .heartbeat_interval(heartbeat_interval) - .build()?; + .maybe_heartbeat_interval(if options.worker_heartbeat_interval_millis == 0 { + None + } else { + Some(Duration::from_millis( + options.worker_heartbeat_interval_millis, + )) + }) + .build() + .map_err(|e| anyhow::anyhow!(e))?; // Build core runtime - let mut core = CoreRuntime::new(core_runtime_options, TokioRuntimeBuilder::default())?; + let mut core = CoreRuntime::new(core_runtime_options, TokioRuntime::default())?; // We late-bind the metrics after core runtime is created since it needs // the Tokio handle @@ -392,27 +388,30 @@ fn create_meter( )); } // Build OTel exporter - let mut build = OtelCollectorOptionsBuilder::default(); - build - .url(Url::parse(otel_options.url.to_str())?) 
- .headers(otel_options.headers.to_string_map_on_newlines()) - .metric_temporality(match otel_options.metric_temporality { - OpenTelemetryMetricTemporality::Cumulative => MetricTemporality::Cumulative, - OpenTelemetryMetricTemporality::Delta => MetricTemporality::Delta, - }) - .global_tags(options.global_tags.to_string_map_on_newlines()) - .use_seconds_for_durations(otel_options.durations_as_seconds) - .histogram_bucket_overrides(HistogramBucketOverrides { - overrides: parse_histogram_bucket_overrides( - &otel_options.histogram_bucket_overrides, - )?, - }); - if otel_options.metric_periodicity_millis > 0 { - build.metric_periodicity(Duration::from_millis( - otel_options.metric_periodicity_millis.into(), - )); - } - Ok(Arc::new(build_otlp_metric_exporter(build.build()?)?)) + Ok(Arc::new(build_otlp_metric_exporter( + OtelCollectorOptions::builder() + .url(Url::parse(otel_options.url.to_str())?) + .headers(otel_options.headers.to_string_map_on_newlines()) + .metric_temporality(match otel_options.metric_temporality { + OpenTelemetryMetricTemporality::Cumulative => MetricTemporality::Cumulative, + OpenTelemetryMetricTemporality::Delta => MetricTemporality::Delta, + }) + .global_tags(options.global_tags.to_string_map_on_newlines()) + .use_seconds_for_durations(otel_options.durations_as_seconds) + .histogram_bucket_overrides(HistogramBucketOverrides { + overrides: parse_histogram_bucket_overrides( + &otel_options.histogram_bucket_overrides, + )?, + }) + .maybe_metric_periodicity(if otel_options.metric_periodicity_millis > 0 { + Some(Duration::from_millis( + otel_options.metric_periodicity_millis.into(), + )) + } else { + None + }) + .build(), + )?)) } else if let Some(prom_options) = unsafe { options.prometheus.as_ref() } { if custom_meter.is_some() { return Err(anyhow::anyhow!( @@ -420,8 +419,7 @@ fn create_meter( )); } // Start prom exporter - let mut build = PrometheusExporterOptionsBuilder::default(); - build + let build = PrometheusExporterOptions::builder() 
.socket_addr(SocketAddr::from_str(prom_options.bind_address.to_str())?) .global_tags(options.global_tags.to_string_map_on_newlines()) .counters_total_suffix(prom_options.counters_total_suffix) @@ -432,7 +430,7 @@ fn create_meter( &prom_options.histogram_bucket_overrides, )?, }); - Ok(start_prometheus_metric_exporter(build.build()?)?.meter) + Ok(start_prometheus_metric_exporter(build.build())?.meter) } else if let Some(custom_meter) = custom_meter { Ok(Arc::new(custom_meter)) } else { diff --git a/crates/sdk-core-c-bridge/src/testing.rs b/crates/sdk-core-c-bridge/src/testing.rs index 4d3982fb6..a2fde474b 100644 --- a/crates/sdk-core-c-bridge/src/testing.rs +++ b/crates/sdk-core-c-bridge/src/testing.rs @@ -203,24 +203,24 @@ impl TryFrom<&DevServerOptions> for ephemeral_server::TemporalDevServerConfig { fn try_from(options: &DevServerOptions) -> anyhow::Result { let test_server_options = unsafe { &*options.test_server }; - Ok(ephemeral_server::TemporalDevServerConfigBuilder::default() + Ok(ephemeral_server::TemporalDevServerConfig::builder() .exe(test_server_options.exe()) .namespace(options.namespace.to_string()) .ip(options.ip.to_string()) - .port(test_server_options.port()) - .db_filename(options.database_filename.to_option_string()) + .maybe_port(test_server_options.port()) + .maybe_db_filename(options.database_filename.to_option_string()) .ui(options.ui) - .ui_port(if options.ui_port == 0 || !options.ui { - None - } else { + .maybe_ui_port(if options.ui_port != 0 && options.ui { Some(options.ui_port) + } else { + None }) .log(( options.log_format.to_string(), options.log_level.to_string(), )) .extra_args(test_server_options.extra_args()) - .build()?) 
+ .build()) } } @@ -228,11 +228,11 @@ impl TryFrom<&TestServerOptions> for ephemeral_server::TestServerConfig { type Error = anyhow::Error; fn try_from(options: &TestServerOptions) -> anyhow::Result { - Ok(ephemeral_server::TestServerConfigBuilder::default() + Ok(ephemeral_server::TestServerConfig::builder() .exe(options.exe()) - .port(options.port()) + .maybe_port(options.port()) .extra_args(options.extra_args()) - .build()?) + .build()) } } diff --git a/crates/sdk-core-c-bridge/src/tests/utils.rs b/crates/sdk-core-c-bridge/src/tests/utils.rs index f9e222738..9ec36263f 100644 --- a/crates/sdk-core-c-bridge/src/tests/utils.rs +++ b/crates/sdk-core-c-bridge/src/tests/utils.rs @@ -5,9 +5,7 @@ use crate::{ }; use std::{collections::HashMap, ops::Deref}; use temporalio_client::ClientOptions; -use temporalio_sdk_core::ephemeral_server::{ - TemporalDevServerConfig, TemporalDevServerConfigBuilder, default_cached_download, -}; +use temporalio_sdk_core::ephemeral_server::{TemporalDevServerConfig, default_cached_download}; use url::Url; pub fn byte_array_to_vec(runtime: *mut Runtime, byte_array: *const ByteArray) -> Option> { @@ -31,10 +29,9 @@ pub fn pointer_or_null(x: Option>) -> *const T { } pub fn default_server_config() -> TemporalDevServerConfig { - TemporalDevServerConfigBuilder::default() + TemporalDevServerConfig::builder() .exe(default_cached_download()) .build() - .unwrap() } pub fn default_client_options(target: &str) -> ClientOptions { diff --git a/crates/sdk-core-c-bridge/src/worker.rs b/crates/sdk-core-c-bridge/src/worker.rs index 16074ea2d..78f50e7b7 100644 --- a/crates/sdk-core-c-bridge/src/worker.rs +++ b/crates/sdk-core-c-bridge/src/worker.rs @@ -26,7 +26,7 @@ use temporalio_common::{ }, }; use temporalio_sdk_core::{ - WorkerConfigBuilder, + WorkerConfig, replay::{HistoryForReplay, ReplayWorkerInput}, }; use tokio::sync::{ @@ -1164,7 +1164,7 @@ impl TryFrom<&WorkerOptions> for temporalio_sdk_core::WorkerConfig { fn try_from(opt: &WorkerOptions) -> 
anyhow::Result { let converted_tuner: temporalio_sdk_core::TunerHolder = (&opt.tuner).try_into()?; - WorkerConfigBuilder::default() + WorkerConfig::builder() .namespace(opt.namespace.to_str()) .task_queue(opt.task_queue.to_str()) .versioning_strategy({ @@ -1201,7 +1201,7 @@ impl TryFrom<&WorkerOptions> for temporalio_sdk_core::WorkerConfig { } } }) - .client_identity_override(opt.identity_override.to_option_string()) + .maybe_client_identity_override(opt.identity_override.to_option_string()) .max_cached_workflows(opt.max_cached_workflows as usize) .tuner(Arc::new(converted_tuner)) .task_types(temporalio_common::worker::WorkerTaskTypes::from( @@ -1216,12 +1216,12 @@ impl TryFrom<&WorkerOptions> for temporalio_sdk_core::WorkerConfig { .default_heartbeat_throttle_interval(Duration::from_millis( opt.default_heartbeat_throttle_interval_millis, )) - .max_worker_activities_per_second(if opt.max_activities_per_second == 0.0 { + .maybe_max_worker_activities_per_second(if opt.max_activities_per_second == 0.0 { None } else { Some(opt.max_activities_per_second) }) - .max_task_queue_activities_per_second( + .maybe_max_task_queue_activities_per_second( if opt.max_task_queue_activities_per_second == 0.0 { None } else { @@ -1324,23 +1324,19 @@ impl TryFrom<&TunerHolder> for temporalio_sdk_core::TunerHolder { bail!("All resource-based slot suppliers must have the same ResourceBasedTunerOptions",); } - let mut options = temporalio_sdk_core::TunerHolderOptionsBuilder::default(); - if let Some(first) = first { - options.resource_based_options( - temporalio_sdk_core::ResourceBasedSlotsOptionsBuilder::default() - .target_mem_usage(first.target_memory_usage) - .target_cpu_usage(first.target_cpu_usage) - .build() - .expect("Building ResourceBasedSlotsOptions is infallible"), - ); - }; - options + temporalio_sdk_core::TunerHolderOptions::builder() .workflow_slot_options(holder.workflow_slot_supplier.try_into()?) .activity_slot_options(holder.activity_slot_supplier.try_into()?) 
.local_activity_slot_options(holder.local_activity_slot_supplier.try_into()?) .nexus_slot_options(holder.nexus_task_slot_supplier.try_into()?) + .maybe_resource_based_options(first.map(|f| { + temporalio_sdk_core::ResourceBasedSlotsOptions::builder() + .target_mem_usage(f.target_memory_usage) + .target_cpu_usage(f.target_cpu_usage) + .build() + })) .build() - .context("Invalid tuner holder options")? + .map_err(|e| anyhow::anyhow!("Failed building tuner holder options: {}", e))? .build_tuner_holder() .context("Failed building tuner holder") } diff --git a/crates/sdk-core/Cargo.toml b/crates/sdk-core/Cargo.toml index c4d6d964f..c2131d9f9 100644 --- a/crates/sdk-core/Cargo.toml +++ b/crates/sdk-core/Cargo.toml @@ -35,12 +35,12 @@ antithesis_sdk = { version = "0.2.1", optional = true, default-features = false, assert_matches = { version = "1.5", optional = true } bimap = { version = "0.6.3", optional = true } async-trait = "0.1" +bon = { workspace = true } console-subscriber = { version = "0.4", optional = true } crossbeam-channel = "0.5" crossbeam-queue = "0.3" crossbeam-utils = "0.8" dashmap = "6.1" -derive_builder = { workspace = true } derive_more = { workspace = true } enum_dispatch = "0.3" enum-iterator = "2" diff --git a/crates/sdk-core/benches/workflow_replay_bench.rs b/crates/sdk-core/benches/workflow_replay_bench.rs index 6762e3a4d..ab1287419 100644 --- a/crates/sdk-core/benches/workflow_replay_bench.rs +++ b/crates/sdk-core/benches/workflow_replay_bench.rs @@ -15,7 +15,7 @@ use std::{ }; use temporalio_common::{ protos::{DEFAULT_WORKFLOW_TYPE, canned_histories}, - telemetry::metrics::{MetricKeyValue, MetricParametersBuilder, NewAttributes}, + telemetry::metrics::{MetricKeyValue, MetricParameters, NewAttributes}, }; use temporalio_sdk::{WfContext, WorkflowFunction}; use temporalio_sdk_core::{CoreRuntime, replay::HistoryForReplay}; @@ -86,24 +86,9 @@ pub fn bench_metrics(c: &mut Criterion) { c.bench_function("Record with new attributes on each call", move |b| 
{ b.iter_batched( || { - let c = meter.counter( - MetricParametersBuilder::default() - .name("c") - .build() - .unwrap(), - ); - let h = meter.histogram( - MetricParametersBuilder::default() - .name("h") - .build() - .unwrap(), - ); - let g = meter.gauge( - MetricParametersBuilder::default() - .name("g") - .build() - .unwrap(), - ); + let c = meter.counter(MetricParameters::builder().name("c").build()); + let h = meter.histogram(MetricParameters::builder().name("h").build()); + let g = meter.gauge(MetricParameters::builder().name("g").build()); let vals = [1, 2, 3, 4, 5]; let labels = ["l1", "l2"]; diff --git a/crates/sdk-core/src/core_tests/mod.rs b/crates/sdk-core/src/core_tests/mod.rs index 8d59c6510..3a652e1c2 100644 --- a/crates/sdk-core/src/core_tests/mod.rs +++ b/crates/sdk-core/src/core_tests/mod.rs @@ -85,12 +85,15 @@ async fn shutdown_interrupts_both_polls() { }); let worker = Worker::new_test( - test_worker_cfg() - // Need only 1 concurrent pollers for mock expectations to work here - .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(1_usize)) - .activity_task_poller_behavior(PollerBehavior::SimpleMaximum(1_usize)) - .build() - .unwrap(), + { + let mut cfg = test_worker_cfg() + // Need only 1 concurrent pollers for mock expectations to work here + .activity_task_poller_behavior(PollerBehavior::SimpleMaximum(1_usize)) + .build() + .unwrap(); + cfg.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(1_usize); + cfg + }, mock_client, ); tokio::join! 
{ diff --git a/crates/sdk-core/src/core_tests/workflow_tasks.rs b/crates/sdk-core/src/core_tests/workflow_tasks.rs index ffa7444f6..c49b5dce3 100644 --- a/crates/sdk-core/src/core_tests/workflow_tasks.rs +++ b/crates/sdk-core/src/core_tests/workflow_tasks.rs @@ -2022,12 +2022,15 @@ async fn no_race_acquiring_permits() { .returning(|_| async move { Ok(Default::default()) }.boxed()); let worker = Worker::new_test( - test_worker_cfg() - .max_outstanding_workflow_tasks(2_usize) - .max_cached_workflows(0_usize) - .ignore_evicts_on_shutdown(false) - .build() - .unwrap(), + { + let mut cfg = test_worker_cfg() + .max_outstanding_workflow_tasks(2_usize) + .max_cached_workflows(0_usize) + .build() + .unwrap(); + cfg.ignore_evicts_on_shutdown = false; + cfg + }, mock_client, ); @@ -2667,13 +2670,14 @@ async fn poller_wont_run_ahead_of_task_slots() { .returning(|_| Ok(Default::default())); let worker = Worker::new_test( - test_worker_cfg() - .max_cached_workflows(10_usize) - .max_outstanding_workflow_tasks(10_usize) - .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(10_usize)) - .task_types(WorkerTaskTypes::workflow_only()) - .build() - .unwrap(), + { + let mut cfg = test_worker_cfg().build().unwrap(); + cfg.max_cached_workflows = 10_usize; + cfg.max_outstanding_workflow_tasks = Some(10_usize); + cfg.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(10_usize); + cfg.task_types = WorkerTaskTypes::workflow_only(); + cfg + }, mock_client, ); @@ -2730,10 +2734,11 @@ async fn poller_wont_poll_until_lang_polls() { }); let worker = Worker::new_test( - test_worker_cfg() - .task_types(WorkerTaskTypes::workflow_only()) - .build() - .unwrap(), + { + let mut cfg = test_worker_cfg().build().unwrap(); + cfg.task_types = WorkerTaskTypes::workflow_only(); + cfg + }, mock_client, ); @@ -2868,17 +2873,18 @@ async fn slot_provider_cant_hand_out_more_permits_than_cache_size() { } let worker = Worker::new_test( - test_worker_cfg() - .max_cached_workflows(10_usize) - 
.tuner(Arc::new( + { + let mut cfg = test_worker_cfg().build().unwrap(); + cfg.max_cached_workflows = 10_usize; + cfg.tuner = Some(Arc::new( TunerBuilder::default() .workflow_slot_supplier(Arc::new(EndlessSupplier {})) .build(), - )) - .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(10_usize)) - .task_types(WorkerTaskTypes::workflow_only()) - .build() - .unwrap(), + )); + cfg.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(10_usize); + cfg.task_types = WorkerTaskTypes::workflow_only(); + cfg + }, mock_client, ); @@ -3019,14 +3025,15 @@ async fn both_normal_and_sticky_pollers_poll_concurrently() { }); let worker = Worker::new( - test_worker_cfg() - .max_cached_workflows(500_usize) // We need cache, but don't want to deal with evictions - .max_outstanding_workflow_tasks(2_usize) - .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(2_usize)) - .nonsticky_to_sticky_poll_ratio(0.2) - .task_types(WorkerTaskTypes::workflow_only()) - .build() - .unwrap(), + { + let mut cfg = test_worker_cfg().build().unwrap(); + cfg.max_cached_workflows = 500_usize; // We need cache, but don't want to deal with evictions + cfg.max_outstanding_workflow_tasks = Some(2_usize); + cfg.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(2_usize); + cfg.nonsticky_to_sticky_poll_ratio = 0.2; + cfg.task_types = WorkerTaskTypes::workflow_only(); + cfg + }, Some("stickytq".to_string()), Arc::new(mock_client), None, diff --git a/crates/sdk-core/src/ephemeral_server/mod.rs b/crates/sdk-core/src/ephemeral_server/mod.rs index d6ce38ca1..d5ed194b0 100644 --- a/crates/sdk-core/src/ephemeral_server/mod.rs +++ b/crates/sdk-core/src/ephemeral_server/mod.rs @@ -24,30 +24,28 @@ use std::os::unix::fs::OpenOptionsExt; use std::process::Stdio; /// Configuration for Temporal CLI dev server. 
-#[derive(Debug, Clone, derive_builder::Builder)] +#[derive(Debug, Clone, bon::Builder)] +#[builder(on(String, into))] pub struct TemporalDevServerConfig { /// Required path to executable or download info. pub exe: EphemeralExe, /// Namespace to use. - #[builder(default = "\"default\".to_owned()")] + #[builder(default = "default".to_owned())] pub namespace: String, /// IP to bind to. - #[builder(default = "\"127.0.0.1\".to_owned()")] + #[builder(default = "127.0.0.1".to_owned())] pub ip: String, /// Port to use or obtains a free one if none given. - #[builder(default)] pub port: Option, /// Port to use for the UI server or obtains a free one if none given. - #[builder(default)] pub ui_port: Option, /// Sqlite DB filename if persisting or non-persistent if none. - #[builder(default)] pub db_filename: Option, /// Whether to enable the UI. If ui_port is set, assumes true. #[builder(default)] pub ui: bool, /// Log format and level - #[builder(default = "(\"pretty\".to_owned(), \"warn\".to_owned())")] + #[builder(default = ("pretty".to_owned(), "warn".to_owned()))] pub log: (String, String), /// Additional arguments to Temporal dev server. #[builder(default)] @@ -131,12 +129,11 @@ impl TemporalDevServerConfig { } /// Configuration for the test server. -#[derive(Debug, Clone, derive_builder::Builder)] +#[derive(Debug, Clone, bon::Builder)] pub struct TestServerConfig { /// Required path to executable or download info. pub exe: EphemeralExe, /// Port to use or obtains a free one if none given. - #[builder(default)] pub port: Option, /// Additional arguments to the test server. #[builder(default)] diff --git a/crates/sdk-core/src/lib.rs b/crates/sdk-core/src/lib.rs index 10e8f98df..dc91c07a4 100644 --- a/crates/sdk-core/src/lib.rs +++ b/crates/sdk-core/src/lib.rs @@ -240,9 +240,9 @@ pub struct CoreRuntime { heartbeat_interval: Option, } -/// Holds telemetry options, as well as worker heartbeat_interval. 
Construct with [RuntimeOptionsBuilder] -#[derive(Default, derive_builder::Builder)] -#[builder(build_fn(validate = "Self::validate"))] +/// Holds telemetry options, as well as worker heartbeat_interval. Construct with [RuntimeOptions::builder] +#[derive(Default, bon::Builder)] +#[builder(finish_fn(vis = "", name = build_internal))] #[non_exhaustive] pub struct RuntimeOptions { /// Telemetry configuration options. @@ -252,21 +252,28 @@ pub struct RuntimeOptions { /// workers created using this runtime. /// /// Interval must be between 1s and 60s, inclusive. - #[builder(default = "Some(Duration::from_secs(60))")] + #[builder(required, default = Some(Duration::from_secs(60)))] heartbeat_interval: Option, } -impl RuntimeOptionsBuilder { - fn validate(&self) -> Result<(), String> { - if let Some(Some(interval)) = self.heartbeat_interval - && (interval < Duration::from_secs(1) || interval > Duration::from_secs(60)) +impl RuntimeOptionsBuilder { + /// Builds the RuntimeOptions + /// + /// # Errors + /// Returns an error if heartbeat_interval is set but not between 1s and 60s inclusive. 
+ pub fn build(self) -> Result { + let options = self.build_internal(); { - return Err(format!( - "heartbeat_interval ({interval:?}) must be between 1s and 60s", - )); + if let Some(interval) = options.heartbeat_interval + && (interval < Duration::from_secs(1) || interval > Duration::from_secs(60)) + { + return Err(format!( + "heartbeat_interval ({interval:?}) must be between 1s and 60s", + )); + } + + Ok(options) } - - Ok(()) } } diff --git a/crates/sdk-core/src/telemetry/log_export.rs b/crates/sdk-core/src/telemetry/log_export.rs index 8f20ad095..be06ac10a 100644 --- a/crates/sdk-core/src/telemetry/log_export.rs +++ b/crates/sdk-core/src/telemetry/log_export.rs @@ -225,7 +225,7 @@ mod tests { sync::{Arc, Mutex}, }; use temporalio_common::telemetry::{ - CoreLog, CoreLogConsumer, CoreTelemetry, Logger, TelemetryOptionsBuilder, + CoreLog, CoreLogConsumer, CoreTelemetry, Logger, TelemetryOptions, }; use tracing::Level; @@ -260,12 +260,11 @@ mod tests { #[tokio::test] async fn test_forwarding_output() { - let opts = TelemetryOptionsBuilder::default() + let opts = TelemetryOptions::builder() .logging(Logger::Forward { filter: construct_filter_string(Level::INFO, Level::WARN), }) - .build() - .unwrap(); + .build(); let instance = telemetry_init(opts).unwrap(); let _g = tracing::subscriber::set_default(instance.trace_subscriber().unwrap().clone()); @@ -290,13 +289,12 @@ mod tests { #[tokio::test] async fn test_push_output() { let consumer = Arc::new(CaptureConsumer(Mutex::new(Vec::new()))); - let opts = TelemetryOptionsBuilder::default() + let opts = TelemetryOptions::builder() .logging(Logger::Push { filter: construct_filter_string(Level::INFO, Level::WARN), consumer: consumer.clone(), }) - .build() - .unwrap(); + .build(); let instance = telemetry_init(opts).unwrap(); let _g = tracing::subscriber::set_default(instance.trace_subscriber().unwrap().clone()); @@ -308,13 +306,12 @@ mod tests { async fn test_push_stream_output() { let (consumer, stream) = 
CoreLogStreamConsumer::new(100); let consumer = Arc::new(consumer); - let opts = TelemetryOptionsBuilder::default() + let opts = TelemetryOptions::builder() .logging(Logger::Push { filter: construct_filter_string(Level::INFO, Level::WARN), consumer: consumer.clone(), }) - .build() - .unwrap(); + .build(); let instance = telemetry_init(opts).unwrap(); let _g = tracing::subscriber::set_default(instance.trace_subscriber().unwrap().clone()); diff --git a/crates/sdk-core/src/telemetry/mod.rs b/crates/sdk-core/src/telemetry/mod.rs index 357477903..c73e4a03a 100644 --- a/crates/sdk-core/src/telemetry/mod.rs +++ b/crates/sdk-core/src/telemetry/mod.rs @@ -41,7 +41,6 @@ use std::{ }; use temporalio_common::telemetry::{ CoreLog, CoreTelemetry, Logger, TaskQueueLabelStrategy, TelemetryOptions, - TelemetryOptionsBuilder, metrics::{CoreMeter, MetricKeyValue, NewAttributes, TemporalMeter}, }; use tracing::{Level, Subscriber}; @@ -277,11 +276,11 @@ pub fn telemetry_init_global(opts: TelemetryOptions) -> Result<(), anyhow::Error /// that uses the default console logger. 
pub fn telemetry_init_fallback() -> Result<(), anyhow::Error> { telemetry_init_global( - TelemetryOptionsBuilder::default() + TelemetryOptions::builder() .logging(Logger::Console { filter: construct_filter_string(Level::DEBUG, Level::WARN), }) - .build()?, + .build(), )?; Ok(()) } diff --git a/crates/sdk-core/src/test_help/integ_helpers.rs b/crates/sdk-core/src/test_help/integ_helpers.rs index 86fe7e1e9..184eff339 100644 --- a/crates/sdk-core/src/test_help/integ_helpers.rs +++ b/crates/sdk-core/src/test_help/integ_helpers.rs @@ -58,7 +58,7 @@ use temporalio_common::{ }, utilities::pack_any, }, - worker::{PollerBehavior, WorkerTaskTypes, WorkerVersioningStrategy}, + worker::{PollerBehavior, WorkerTaskTypes, WorkerVersioningStrategy, worker_config_builder}, }; use tokio::sync::{Notify, mpsc::unbounded_channel}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -97,9 +97,20 @@ pub async fn drain_pollers_and_shutdown(worker: &dyn WorkerTrait) { worker.shutdown().await; } -pub fn test_worker_cfg() -> WorkerConfigBuilder { - let mut wcb = WorkerConfigBuilder::default(); - wcb.namespace(NAMESPACE) +#[allow(clippy::type_complexity)] +pub fn test_worker_cfg() -> WorkerConfigBuilder< + worker_config_builder::SetWorkflowTaskPollerBehavior< + worker_config_builder::SetTaskTypes< + worker_config_builder::SetIgnoreEvictsOnShutdown< + worker_config_builder::SetVersioningStrategy< + worker_config_builder::SetTaskQueue, + >, + >, + >, + >, +> { + WorkerConfig::builder() + .namespace(NAMESPACE) .task_queue(Uuid::new_v4().to_string()) .versioning_strategy(WorkerVersioningStrategy::None { build_id: "test_bin_id".to_string(), @@ -107,8 +118,7 @@ pub fn test_worker_cfg() -> WorkerConfigBuilder { .ignore_evicts_on_shutdown(true) .task_types(WorkerTaskTypes::all()) // Serial polling since it makes mocking much easier. 
- .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(1_usize)); - wcb + .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(1_usize)) } /// When constructing responses for mocks, indicates how a given response should be built diff --git a/crates/sdk-core/src/worker/heartbeat.rs b/crates/sdk-core/src/worker/heartbeat.rs index 09fccb835..31b400c3e 100644 --- a/crates/sdk-core/src/worker/heartbeat.rs +++ b/crates/sdk-core/src/worker/heartbeat.rs @@ -7,7 +7,7 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; use temporalio_client::worker::SharedNamespaceWorkerTrait; use temporalio_common::{ protos::temporal::api::worker::v1::WorkerHeartbeat, - worker::{PollerBehavior, WorkerConfigBuilder, WorkerTaskTypes, WorkerVersioningStrategy}, + worker::{PollerBehavior, WorkerConfig, WorkerTaskTypes, WorkerVersioningStrategy}, }; use tokio::sync::Notify; use tokio_util::sync::CancellationToken; @@ -32,7 +32,7 @@ impl SharedNamespaceWorker { heartbeat_interval: Duration, telemetry: Option, ) -> Result { - let config = WorkerConfigBuilder::default() + let config = WorkerConfig::builder() .namespace(namespace.clone()) .task_queue(format!( "temporal-sys/worker-commands/{namespace}/{}", @@ -45,7 +45,7 @@ impl SharedNamespaceWorker { }) .nexus_task_poller_behavior(PollerBehavior::SimpleMaximum(1_usize)) .build() - .expect("all required fields should be implemented"); + .map_err(|e| anyhow::anyhow!(e))?; let worker = crate::worker::Worker::new_with_pollers( config, None, diff --git a/crates/sdk-core/src/worker/mod.rs b/crates/sdk-core/src/worker/mod.rs index 177b6df6f..a001a9f09 100644 --- a/crates/sdk-core/src/worker/mod.rs +++ b/crates/sdk-core/src/worker/mod.rs @@ -10,8 +10,10 @@ pub use temporalio_common::worker::{WorkerConfig, WorkerConfigBuilder}; pub use tuner::{ FixedSizeSlotSupplier, ResourceBasedSlotsOptions, ResourceBasedSlotsOptionsBuilder, ResourceBasedTuner, ResourceSlotOptions, SlotSupplierOptions, TunerBuilder, TunerHolder, - 
TunerHolderOptions, TunerHolderOptionsBuilder, + TunerHolderOptions, }; +// Re-export the generated builder (it's in the tuner module) +pub use tuner::TunerHolderOptionsBuilder; pub(crate) use tuner::{RealSysInfo, SystemResourceInfo}; pub(crate) use activities::{ @@ -1436,10 +1438,11 @@ mod tests { #[test] fn max_polls_calculated_properly() { - let cfg = test_worker_cfg() - .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(5_usize)) - .build() - .unwrap(); + let cfg = { + let mut cfg = test_worker_cfg().build().unwrap(); + cfg.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(5_usize); + cfg + }; assert_eq!( wft_poller_behavior(&cfg, false), PollerBehavior::SimpleMaximum(1) @@ -1452,8 +1455,15 @@ mod tests { #[test] fn max_polls_zero_is_err() { + use temporalio_common::worker::{WorkerConfig, WorkerTaskTypes, WorkerVersioningStrategy}; assert!( - test_worker_cfg() + WorkerConfig::builder() + .namespace("test") + .task_queue("test") + .versioning_strategy(WorkerVersioningStrategy::None { + build_id: "test".to_string(), + }) + .task_types(WorkerTaskTypes::all()) .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(0_usize)) .build() .is_err() diff --git a/crates/sdk-core/src/worker/tuner.rs b/crates/sdk-core/src/worker/tuner.rs index 4c7d4d51c..836c0567c 100644 --- a/crates/sdk-core/src/worker/tuner.rs +++ b/crates/sdk-core/src/worker/tuner.rs @@ -25,25 +25,20 @@ pub struct TunerHolder { /// Can be used to construct a [TunerHolder] without needing to manually construct each /// [SlotSupplier]. Useful for lang bridges to allow more easily passing through user options. 
-#[derive(Clone, Debug, derive_builder::Builder)] -#[builder(build_fn(validate = "Self::validate"))] +#[derive(Clone, Debug, bon::Builder)] +#[builder(finish_fn(vis = "", name = build_internal))] #[non_exhaustive] pub struct TunerHolderOptions { /// Options for workflow slots - #[builder(default, setter(strip_option))] pub workflow_slot_options: Option>, /// Options for activity slots - #[builder(default, setter(strip_option))] pub activity_slot_options: Option>, /// Options for local activity slots - #[builder(default, setter(strip_option))] pub local_activity_slot_options: Option>, /// Options for nexus slots - #[builder(default, setter(strip_option))] pub nexus_slot_options: Option>, /// Options that will apply to all resource based slot suppliers. Must be set if any slot /// options are [SlotSupplierOptions::ResourceBased] - #[builder(default, setter(strip_option))] pub resource_based_options: Option, } @@ -150,35 +145,42 @@ pub enum SlotSupplierOptions { Custom(Arc + Send + Sync>), } -impl TunerHolderOptionsBuilder { +impl TunerHolderOptionsBuilder { + /// Build the [TunerHolderOptions] with validation + pub fn build(self) -> Result { + let options = self.build_internal(); + validate_tuner_holder_options(&options)?; + Ok(options) + } + /// Create a [TunerHolder] from this builder pub fn build_tuner_holder(self) -> Result { - let s = self.build()?; + let s = self.build().map_err(|e: String| anyhow::anyhow!(e))?; s.build_tuner_holder() } +} - fn validate(&self) -> Result<(), String> { - let any_is_resource_based = matches!( - self.workflow_slot_options, - Some(Some(SlotSupplierOptions::ResourceBased(_))) - ) || matches!( - self.activity_slot_options, - Some(Some(SlotSupplierOptions::ResourceBased(_))) - ) || matches!( - self.local_activity_slot_options, - Some(Some(SlotSupplierOptions::ResourceBased(_))) - ) || matches!( - self.nexus_slot_options, - Some(Some(SlotSupplierOptions::ResourceBased(_))) +fn validate_tuner_holder_options(options: &TunerHolderOptions) 
-> Result<(), String> { + let any_is_resource_based = matches!( + options.workflow_slot_options, + Some(SlotSupplierOptions::ResourceBased(_)) + ) || matches!( + options.activity_slot_options, + Some(SlotSupplierOptions::ResourceBased(_)) + ) || matches!( + options.local_activity_slot_options, + Some(SlotSupplierOptions::ResourceBased(_)) + ) || matches!( + options.nexus_slot_options, + Some(SlotSupplierOptions::ResourceBased(_)) + ); + if any_is_resource_based && options.resource_based_options.is_none() { + return Err( + "`resource_based_options` must be set if any slot options are ResourceBased" + .to_string(), ); - if any_is_resource_based && matches!(self.resource_based_options, None | Some(None)) { - return Err( - "`resource_based_options` must be set if any slot options are ResourceBased" - .to_string(), - ); - } - Ok(()) } + Ok(()) } /// Can be used to construct a `TunerHolder` from individual slot suppliers. Any supplier which is @@ -348,11 +350,10 @@ mod tests { #[test] fn tuner_holder_options_nexus_resource_based() { - let resource_opts = ResourceBasedSlotsOptionsBuilder::default() + let resource_opts = ResourceBasedSlotsOptions::builder() .target_mem_usage(0.8) .target_cpu_usage(0.9) - .build() - .unwrap(); + .build(); let options = TunerHolderOptions { workflow_slot_options: None, @@ -403,7 +404,7 @@ mod tests { #[test] fn tuner_holder_options_builder_validates_resource_based_requirements() { // Should fail when nexus uses ResourceBased but resource_based_options is not set - let result = TunerHolderOptionsBuilder::default() + let result = TunerHolderOptions::builder() .nexus_slot_options(SlotSupplierOptions::ResourceBased( ResourceSlotOptions::new(5, 100, Duration::from_millis(100)), )) @@ -420,11 +421,10 @@ mod tests { #[test] fn tuner_holder_options_all_slot_types() { - let resource_opts = ResourceBasedSlotsOptionsBuilder::default() + let resource_opts = ResourceBasedSlotsOptions::builder() .target_mem_usage(0.8) .target_cpu_usage(0.9) - .build() - 
.unwrap(); + .build(); let options = TunerHolderOptions { workflow_slot_options: Some(SlotSupplierOptions::FixedSize { slots: 10 }), diff --git a/crates/sdk-core/src/worker/tuner/resource_based.rs b/crates/sdk-core/src/worker/tuner/resource_based.rs index 823b56854..13c7851fe 100644 --- a/crates/sdk-core/src/worker/tuner/resource_based.rs +++ b/crates/sdk-core/src/worker/tuner/resource_based.rs @@ -43,11 +43,10 @@ impl ResourceBasedTuner { /// Create an instance attempting to target the provided memory and cpu thresholds as values /// between 0 and 1. pub fn new(target_mem_usage: f64, target_cpu_usage: f64) -> Self { - let opts = ResourceBasedSlotsOptionsBuilder::default() + let opts = ResourceBasedSlotsOptions::builder() .target_mem_usage(target_mem_usage) .target_cpu_usage(target_cpu_usage) - .build() - .expect("default resource based slot options can't fail to build"); + .build(); let controller = ResourceController::new_with_sysinfo(opts, Arc::new(RealSysInfo::new())); Self::new_from_controller(controller) } @@ -163,7 +162,7 @@ pub(crate) struct ResourceBasedSlotsForType { _slot_kind: PhantomData, } /// Allows for the full customization of the PID options for a resource based tuner -#[derive(Clone, Debug, derive_builder::Builder)] +#[derive(Clone, Debug, bon::Builder)] #[non_exhaustive] pub struct ResourceBasedSlotsOptions { /// A value in the range [0.0, 1.0] representing the target memory usage. 
@@ -172,30 +171,30 @@ pub struct ResourceBasedSlotsOptions { pub target_cpu_usage: f64, /// See [pid::Pid::p] - #[builder(default = "5.0")] + #[builder(default = 5.0)] pub mem_p_gain: f64, /// See [pid::Pid::i] - #[builder(default = "0.0")] + #[builder(default = 0.0)] pub mem_i_gain: f64, /// See [pid::Pid::d] - #[builder(default = "1.0")] + #[builder(default = 1.0)] pub mem_d_gain: f64, /// If the mem PID controller outputs a value higher than this, we say the mem half of things /// will allow a slot - #[builder(default = "0.25")] + #[builder(default = 0.25)] pub mem_output_threshold: f64, /// See [pid::Pid::d] - #[builder(default = "5.0")] + #[builder(default = 5.0)] pub cpu_p_gain: f64, /// See [pid::Pid::i] - #[builder(default = "0.0")] + #[builder(default = 0.0)] pub cpu_i_gain: f64, /// See [pid::Pid::d] - #[builder(default = "1.0")] + #[builder(default = 1.0)] pub cpu_d_gain: f64, /// If the CPU PID controller outputs a value higher than this, we say the CPU half of things /// will allow a slot - #[builder(default = "0.05")] + #[builder(default = 0.05)] pub cpu_output_threshold: f64, } struct PidControllers { @@ -765,11 +764,10 @@ mod tests { } fn test_options() -> ResourceBasedSlotsOptions { - ResourceBasedSlotsOptionsBuilder::default() + ResourceBasedSlotsOptions::builder() .target_mem_usage(0.8) .target_cpu_usage(1.0) .build() - .expect("default resource based slot options can't fail to build") } #[test] diff --git a/crates/sdk-core/src/worker/workflow/mod.rs b/crates/sdk-core/src/worker/workflow/mod.rs index 32906e60f..688a5f3f3 100644 --- a/crates/sdk-core/src/worker/workflow/mod.rs +++ b/crates/sdk-core/src/worker/workflow/mod.rs @@ -98,8 +98,7 @@ use tokio::{ task::{LocalSet, spawn_blocking}, }; use tokio_stream::wrappers::UnboundedReceiverStream; -use tokio_util::either::Either; -use tokio_util::sync::CancellationToken; +use tokio_util::{either::Either, sync::CancellationToken}; use tracing::{Span, Subscriber}; /// Id used by server for "legacy" 
queries. IE: Queries that come in the `query` rather than diff --git a/crates/sdk-core/tests/common/mod.rs b/crates/sdk-core/tests/common/mod.rs index 9b9fe554d..7d7f4a1aa 100644 --- a/crates/sdk-core/tests/common/mod.rs +++ b/crates/sdk-core/tests/common/mod.rs @@ -46,8 +46,7 @@ use temporalio_common::{ }, }, telemetry::{ - Logger, OtelCollectorOptionsBuilder, PrometheusExporterOptions, - PrometheusExporterOptionsBuilder, TelemetryOptions, TelemetryOptionsBuilder, + Logger, OtelCollectorOptions, PrometheusExporterOptions, TelemetryOptions, metrics::CoreMeter, }, worker::{WorkerTaskTypes, WorkerVersioningStrategy}, @@ -62,8 +61,7 @@ use temporalio_sdk::{ #[cfg(any(feature = "test-utilities", test))] pub(crate) use temporalio_sdk_core::test_help::NAMESPACE; use temporalio_sdk_core::{ - ClientOptions, CoreRuntime, RuntimeOptions, RuntimeOptionsBuilder, WorkerConfig, - WorkerConfigBuilder, init_replay_worker, init_worker, + ClientOptions, CoreRuntime, RuntimeOptions, WorkerConfig, init_replay_worker, init_worker, replay::{HistoryForReplay, ReplayWorkerInput}, telemetry::{build_otlp_metric_exporter, start_prometheus_metric_exporter}, test_help::{MockPollCfg, build_mock_pollers, mock_worker}, @@ -101,9 +99,9 @@ pub(crate) async fn init_core_and_create_wf(test_name: &str) -> CoreWfStarter { starter } -pub(crate) fn integ_worker_config(tq: &str) -> WorkerConfigBuilder { - let mut b = WorkerConfigBuilder::default(); - b.namespace(NAMESPACE) +pub(crate) fn integ_worker_config(tq: &str) -> WorkerConfig { + WorkerConfig::builder() + .namespace(env::var(INTEG_NAMESPACE_ENV_VAR).unwrap_or(NAMESPACE.to_string())) .task_queue(tq) .max_outstanding_activities(100_usize) .max_outstanding_local_activities(100_usize) @@ -112,8 +110,9 @@ pub(crate) fn integ_worker_config(tq: &str) -> WorkerConfigBuilder { build_id: "test_build_id".to_owned(), }) .task_types(WorkerTaskTypes::all()) - .skip_client_worker_set_check(true); - b + .skip_client_worker_set_check(true) + .build() + 
.expect("Configuration options construct properly") } /// Create a worker replay instance preloaded with provided histories. Returns the worker impl. @@ -129,9 +128,7 @@ where I: Stream + Send + 'static, { init_integ_telem(); - let worker_cfg = integ_worker_config(test_name) - .build() - .expect("Configuration options construct properly"); + let worker_cfg = integ_worker_config(test_name); let worker = init_replay_worker(ReplayWorkerInput::new(worker_cfg, histories)) .expect("Replay worker must init properly"); Arc::new(worker) @@ -177,7 +174,7 @@ pub(crate) fn init_integ_telem() -> Option<&'static CoreRuntime> { } Some(INTEG_TESTS_RT.get_or_init(|| { let telemetry_options = get_integ_telem_options(); - let runtime_options = RuntimeOptionsBuilder::default() + let runtime_options = RuntimeOptions::builder() .telemetry_options(telemetry_options) .build() .expect("Runtime options build cleanly"); @@ -224,7 +221,7 @@ pub(crate) async fn get_cloud_client() -> RetryClient { pub(crate) struct CoreWfStarter { /// Used for both the task queue and workflow id task_queue_name: String, - pub worker_config: WorkerConfigBuilder, + pub worker_config: WorkerConfig, /// Options to use when starting workflow(s) pub workflow_options: WorkflowOptions, initted_worker: OnceCell, @@ -299,9 +296,7 @@ impl CoreWfStarter { let task_q_salt = rand_6_chars(); let task_queue = format!("{test_name}_{task_q_salt}"); let mut worker_config = integ_worker_config(&task_queue); - worker_config - .namespace(env::var(INTEG_NAMESPACE_ENV_VAR).unwrap_or(NAMESPACE.to_string())) - .max_cached_workflows(1000_usize); + worker_config.max_cached_workflows = 1000_usize; Self { task_queue_name: task_queue, worker_config, @@ -452,10 +447,7 @@ impl CoreWfStarter { } else { init_integ_telem().unwrap() }; - let cfg = self - .worker_config - .build() - .expect("Worker config must be valid"); + let cfg = self.worker_config.clone(); let client = if let Some(client) = self.client_override.take() { client } else { @@ 
-794,41 +786,47 @@ pub(crate) fn get_integ_tls_config() -> Option { } pub(crate) fn get_integ_telem_options() -> TelemetryOptions { - let mut ob = TelemetryOptionsBuilder::default(); let filter_string = env::var("RUST_LOG").unwrap_or_else(|_| "INFO,temporalio_sdk_core=INFO".to_string()); + if let Some(url) = env::var(OTEL_URL_ENV_VAR) .ok() .map(|x| x.parse::().unwrap()) { - let opts = OtelCollectorOptionsBuilder::default() - .url(url) + let opts = OtelCollectorOptions::builder().url(url).build(); + TelemetryOptions::builder() + .metrics(Arc::new(build_otlp_metric_exporter(opts).unwrap()) as Arc) + .logging(Logger::Console { + filter: filter_string, + }) .build() - .unwrap(); - ob.metrics(Arc::new(build_otlp_metric_exporter(opts).unwrap()) as Arc); - } - if let Some(addr) = env::var(PROM_ENABLE_ENV_VAR) + } else if let Some(addr) = env::var(PROM_ENABLE_ENV_VAR) .ok() .map(|x| SocketAddr::new([127, 0, 0, 1].into(), x.parse().unwrap())) { let prom_info = start_prometheus_metric_exporter( - PrometheusExporterOptionsBuilder::default() + PrometheusExporterOptions::builder() .socket_addr(addr) - .build() - .unwrap(), + .build(), ) .unwrap(); - ob.metrics(prom_info.meter as Arc); + TelemetryOptions::builder() + .metrics(prom_info.meter as Arc) + .logging(Logger::Console { + filter: filter_string, + }) + .build() + } else { + TelemetryOptions::builder() + .logging(Logger::Console { + filter: filter_string, + }) + .build() } - ob.logging(Logger::Console { - filter: filter_string, - }) - .build() - .unwrap() } pub(crate) fn get_integ_runtime_options(telemopts: TelemetryOptions) -> RuntimeOptions { - RuntimeOptionsBuilder::default() + RuntimeOptions::builder() .telemetry_options(telemopts) .build() .unwrap() @@ -886,10 +884,9 @@ pub(crate) fn prom_metrics( options_override: Option, ) -> (TelemetryOptions, SocketAddr, AbortOnDrop) { let prom_exp_opts = options_override.unwrap_or_else(|| { - PrometheusExporterOptionsBuilder::default() + PrometheusExporterOptions::builder() 
.socket_addr(ANY_PORT.parse().unwrap()) .build() - .unwrap() }); let mut telemopts = get_integ_telem_options(); let prom_info = start_prometheus_metric_exporter(prom_exp_opts).unwrap(); @@ -998,13 +995,14 @@ impl Drop for ActivationAssertionsInterceptor { #[cfg(feature = "ephemeral-server")] use temporalio_sdk_core::ephemeral_server::{ - EphemeralExe, EphemeralExeVersion, TemporalDevServerConfigBuilder, default_cached_download, + EphemeralExe, EphemeralExeVersion, TemporalDevServerConfig, default_cached_download, }; #[cfg(feature = "ephemeral-server")] pub(crate) fn integ_dev_server_config( mut extra_args: Vec, -) -> TemporalDevServerConfigBuilder { + ui: bool, +) -> TemporalDevServerConfig { let cli_version = if let Ok(ver_override) = env::var(CLI_VERSION_OVERRIDE_ENV_VAR) { EphemeralExe::CachedDownload { version: EphemeralExeVersion::Fixed(ver_override.to_owned()), @@ -1041,7 +1039,9 @@ pub(crate) fn integ_dev_server_config( .map(Into::into), ); - let mut config = TemporalDevServerConfigBuilder::default(); - config.exe(cli_version).extra_args(extra_args); - config + TemporalDevServerConfig::builder() + .exe(cli_version) + .extra_args(extra_args) + .ui(ui) + .build() } diff --git a/crates/sdk-core/tests/global_metric_tests.rs b/crates/sdk-core/tests/global_metric_tests.rs index 8b52d935b..576415028 100644 --- a/crates/sdk-core/tests/global_metric_tests.rs +++ b/crates/sdk-core/tests/global_metric_tests.rs @@ -7,7 +7,7 @@ use common::CoreWfStarter; use parking_lot::Mutex; use std::{sync::Arc, time::Duration}; use temporalio_common::telemetry::{ - Logger, OtelCollectorOptionsBuilder, TelemetryOptionsBuilder, metrics::CoreMeter, + Logger, OtelCollectorOptions, TelemetryOptions, metrics::CoreMeter, }; use temporalio_sdk_core::{ CoreRuntime, @@ -58,28 +58,25 @@ async fn otel_errors_logged_as_errors() { .with_env_filter("debug") .finish(), ); - let opts = OtelCollectorOptionsBuilder::default() + let opts = OtelCollectorOptions::builder() 
.url("https://localhost:12345/v1/metrics".parse().unwrap()) // Nothing bound on that port - .build() - .unwrap(); + .build(); let exporter = build_otlp_metric_exporter(opts).unwrap(); // Global initialization is needed to capture (some) otel logging. telemetry_init_global( - TelemetryOptionsBuilder::default() + TelemetryOptions::builder() .subscriber_override(subscriber) - .build() - .unwrap(), + .build(), ) .unwrap(); - let telemopts = TelemetryOptionsBuilder::default() + let telemopts = TelemetryOptions::builder() .metrics(Arc::new(exporter) as Arc) // Importantly, _not_ using subscriber override, is using console. .logging(Logger::Console { filter: construct_filter_string(Level::INFO, Level::WARN), }) - .build() - .unwrap(); + .build(); let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let mut starter = CoreWfStarter::new_with_runtime("otel_errors_logged_as_errors", rt); diff --git a/crates/sdk-core/tests/heavy_tests.rs b/crates/sdk-core/tests/heavy_tests.rs index aa4924ac8..caaa920c1 100644 --- a/crates/sdk-core/tests/heavy_tests.rs +++ b/crates/sdk-core/tests/heavy_tests.rs @@ -40,12 +40,10 @@ async fn activity_load() { const CONCURRENCY: usize = 512; let mut starter = CoreWfStarter::new("activity_load"); - starter - .worker_config - .max_outstanding_workflow_tasks(CONCURRENCY) - .max_cached_workflows(CONCURRENCY) - .activity_task_poller_behavior(PollerBehavior::SimpleMaximum(10_usize)) - .max_outstanding_activities(CONCURRENCY); + starter.worker_config.max_outstanding_workflow_tasks = Some(CONCURRENCY); + starter.worker_config.max_cached_workflows = CONCURRENCY; + starter.worker_config.activity_task_poller_behavior = PollerBehavior::SimpleMaximum(10); + starter.worker_config.max_outstanding_activities = Some(CONCURRENCY); let mut worker = starter.worker().await; let activity_id = "act-1"; @@ -110,11 +108,12 @@ async fn chunky_activities_resource_based() { const WORKFLOWS: usize = 100; let mut starter = 
CoreWfStarter::new("chunky_activities_resource_based"); - starter - .worker_config - .clear_max_outstanding_opts() - .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(10_usize)) - .activity_task_poller_behavior(PollerBehavior::SimpleMaximum(10_usize)); + starter.worker_config.max_outstanding_workflow_tasks = None; + starter.worker_config.max_outstanding_local_activities = None; + starter.worker_config.max_outstanding_activities = None; + starter.worker_config.max_outstanding_nexus_tasks = None; + starter.worker_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(10_usize); + starter.worker_config.activity_task_poller_behavior = PollerBehavior::SimpleMaximum(10_usize); let mut tuner = ResourceBasedTuner::new(0.7, 0.7); tuner .with_workflow_slots_options(ResourceSlotOptions::new( @@ -123,7 +122,7 @@ async fn chunky_activities_resource_based() { Duration::from_millis(0), )) .with_activity_slots_options(ResourceSlotOptions::new(5, 1000, Duration::from_millis(50))); - starter.worker_config.tuner(Arc::new(tuner)); + starter.worker_config.tuner = Some(Arc::new(tuner)); let mut worker = starter.worker().await; let activity_id = "act-1"; @@ -203,12 +202,10 @@ async fn workflow_load() { init_integ_telem(); let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let mut starter = CoreWfStarter::new_with_runtime("workflow_load", rt); - starter - .worker_config - .max_outstanding_workflow_tasks(5_usize) - .max_cached_workflows(200_usize) - .activity_task_poller_behavior(PollerBehavior::SimpleMaximum(10_usize)) - .max_outstanding_activities(100_usize); + starter.worker_config.max_outstanding_workflow_tasks = Some(5); + starter.worker_config.max_cached_workflows = 200; + starter.worker_config.activity_task_poller_behavior = PollerBehavior::SimpleMaximum(10); + starter.worker_config.max_outstanding_activities = Some(100); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async 
move { let sigchan = ctx.make_signal_channel(SIGNAME).map(Ok); @@ -282,10 +279,8 @@ async fn workflow_load() { async fn evict_while_la_running_no_interference() { let wf_name = "evict_while_la_running_no_interference"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .max_outstanding_local_activities(20_usize) - .max_cached_workflows(20_usize); + starter.worker_config.max_outstanding_local_activities = Some(20); + starter.worker_config.max_cached_workflows = 20; // Though it doesn't make sense to set wft higher than cached workflows, leaving this commented // introduces more instability that can be useful in the test. // starter.max_wft(20); @@ -350,11 +345,9 @@ pub async fn many_parallel_timers_longhist(ctx: WfContext) -> WorkflowResult<()> async fn can_paginate_long_history() { let wf_name = "can_paginate_long_history"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()) - // Do not use sticky queues so we are forced to paginate once history gets long - .max_cached_workflows(0_usize); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + // Do not use sticky queues so we are forced to paginate once history gets long + starter.worker_config.max_cached_workflows = 0; let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), many_parallel_timers_longhist); @@ -394,21 +387,19 @@ async fn poller_autoscaling_basic_loadtest() { let num_workflows = 100; let wf_name = "poller_load"; let mut starter = CoreWfStarter::new("poller_load"); - starter - .worker_config - .max_cached_workflows(5000_usize) - .max_outstanding_workflow_tasks(1000_usize) - .max_outstanding_activities(1000_usize) - .workflow_task_poller_behavior(PollerBehavior::Autoscaling { - minimum: 1, - maximum: 200, - initial: 5, - }) - .activity_task_poller_behavior(PollerBehavior::Autoscaling { - minimum: 1, - maximum: 200, - initial: 5, - }); + 
starter.worker_config.max_cached_workflows = 5000; + starter.worker_config.max_outstanding_workflow_tasks = Some(1000); + starter.worker_config.max_outstanding_activities = Some(1000); + starter.worker_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { + minimum: 1, + maximum: 200, + initial: 5, + }; + starter.worker_config.activity_task_poller_behavior = PollerBehavior::Autoscaling { + minimum: 1, + maximum: 200, + initial: 5, + }; let mut worker = starter.worker().await; let shutdown_handle = worker.inner_mut().shutdown_handle(); worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { diff --git a/crates/sdk-core/tests/heavy_tests/fuzzy_workflow.rs b/crates/sdk-core/tests/heavy_tests/fuzzy_workflow.rs index 4390f8517..71fb33877 100644 --- a/crates/sdk-core/tests/heavy_tests/fuzzy_workflow.rs +++ b/crates/sdk-core/tests/heavy_tests/fuzzy_workflow.rs @@ -77,11 +77,9 @@ async fn fuzzy_workflow() { let num_workflows = 200; let wf_name = "fuzzy_wf"; let mut starter = CoreWfStarter::new("fuzzy_workflow"); - starter - .worker_config - .max_outstanding_workflow_tasks(25_usize) - .max_cached_workflows(25_usize) - .max_outstanding_activities(25_usize); + starter.worker_config.max_outstanding_workflow_tasks = Some(25); + starter.worker_config.max_cached_workflows = 25; + starter.worker_config.max_outstanding_activities = Some(25); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), fuzzy_wf_def); worker.register_activity("echo_activity", echo); diff --git a/crates/sdk-core/tests/integ_tests/ephemeral_server_tests.rs b/crates/sdk-core/tests/integ_tests/ephemeral_server_tests.rs index f3c65311d..e249ac120 100644 --- a/crates/sdk-core/tests/integ_tests/ephemeral_server_tests.rs +++ b/crates/sdk-core/tests/integ_tests/ephemeral_server_tests.rs @@ -4,7 +4,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use temporalio_client::{ClientOptions, TestService, WorkflowService}; use 
temporalio_common::protos::temporal::api::workflowservice::v1::DescribeNamespaceRequest; use temporalio_sdk_core::ephemeral_server::{ - EphemeralExe, EphemeralExeVersion, EphemeralServer, TemporalDevServerConfigBuilder, + EphemeralExe, EphemeralExeVersion, EphemeralServer, TemporalDevServerConfig, default_cached_download, }; use tonic::IntoRequest; @@ -12,10 +12,9 @@ use url::Url; #[tokio::test] async fn temporal_cli_default() { - let config = TemporalDevServerConfigBuilder::default() + let config = TemporalDevServerConfig::builder() .exe(default_cached_download()) - .build() - .unwrap(); + .build(); let mut server = config.start_server().await.unwrap(); assert_ephemeral_server(&server).await; @@ -28,10 +27,9 @@ async fn temporal_cli_default() { #[tokio::test] async fn temporal_cli_fixed() { - let config = TemporalDevServerConfigBuilder::default() + let config = TemporalDevServerConfig::builder() .exe(fixed_cached_download("v1.2.0")) - .build() - .unwrap(); + .build(); let mut server = config.start_server().await.unwrap(); assert_ephemeral_server(&server).await; server.shutdown().await.unwrap(); @@ -41,11 +39,10 @@ async fn temporal_cli_fixed() { async fn temporal_cli_shutdown_port_reuse() { // Start, test shutdown, do again immediately on same port to ensure we can // reuse after shutdown - let config = TemporalDevServerConfigBuilder::default() + let config = TemporalDevServerConfig::builder() .exe(default_cached_download()) - .port(Some(10123)) - .build() - .unwrap(); + .port(10123) + .build(); let mut server = config.start_server().await.unwrap(); assert_ephemeral_server(&server).await; server.shutdown().await.unwrap(); @@ -66,10 +63,11 @@ async fn temporal_cli_shutdown_port_reuse() { #[ignore] async fn temporal_cli_concurrent_starts() -> Result<(), Box> { stream::iter((0..80).map(|_| { - TemporalDevServerConfigBuilder::default() - .exe(default_cached_download()) - .build() - .map_err(anyhow::Error::from) + Ok::>( + TemporalDevServerConfig::builder() + 
.exe(default_cached_download()) + .build(), + ) })) .try_for_each_concurrent(8, |config| async move { let mut server = config.start_server().await?; @@ -85,14 +83,13 @@ async fn temporal_cli_concurrent_starts() -> Result<(), Box().unwrap()) { - let opts = OtelCollectorOptionsBuilder::default() - .url(url) - .build() - .unwrap(); + let opts = OtelCollectorOptions::builder().url(url).build(); build_otlp_metric_exporter(opts).unwrap() } else { // skip return; }; - let mut telemopts = TelemetryOptionsBuilder::default(); let exporter = Arc::new(exporter); - telemopts.metrics(exporter as Arc); - let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts.build().unwrap())) - .unwrap(); + let telemopts = TelemetryOptions::builder().metrics(exporter as Arc); + let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts.build())).unwrap(); let opts = get_integ_server_options(); let mut client = opts .connect(NAMESPACE, rt.telemetry().get_temporal_metric_meter()) @@ -717,18 +713,16 @@ async fn docker_metrics_with_prometheus( ); // Configure the OTLP exporter with HTTP - let opts = OtelCollectorOptionsBuilder::default() + let opts = OtelCollectorOptions::builder() .url(otel_collector_addr.parse().unwrap()) .protocol(otel_protocol) .global_tags(HashMap::from([("test_id".to_string(), test_uid.clone())])) - .build() - .unwrap(); + .build(); let exporter = Arc::new(build_otlp_metric_exporter(opts).unwrap()); - let telemopts = TelemetryOptionsBuilder::default() + let telemopts = TelemetryOptions::builder() .metrics(exporter as Arc) .metric_prefix(test_uid.clone()) - .build() - .unwrap(); + .build(); let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let test_name = "docker_metrics_with_prometheus"; let mut starter = CoreWfStarter::new_with_runtime(test_name, rt); @@ -786,9 +780,7 @@ async fn activity_metrics() { let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let wf_name = 
"activity_metrics"; let mut starter = CoreWfStarter::new_with_runtime(wf_name, rt); - starter - .worker_config - .graceful_shutdown_period(Duration::from_secs(1)); + starter.worker_config.graceful_shutdown_period = Some(Duration::from_secs(1)); let task_queue = starter.get_task_queue().to_owned(); let mut worker = starter.worker().await; @@ -920,12 +912,12 @@ async fn nexus_metrics() { let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let wf_name = "nexus_metrics"; let mut starter = CoreWfStarter::new_with_runtime(wf_name, rt); - starter.worker_config.task_types(WorkerTaskTypes { + starter.worker_config.task_types = WorkerTaskTypes { enable_workflows: true, enable_local_activities: false, enable_remote_activities: false, enable_nexus: true, - }); + }; let task_queue = starter.get_task_queue().to_owned(); let mut worker = starter.worker().await; let core_worker = starter.get_worker().await; @@ -1102,9 +1094,7 @@ async fn evict_on_complete_does_not_count_as_forced_eviction() { let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let wf_name = "evict_on_complete_does_not_count_as_forced_eviction"; let mut starter = CoreWfStarter::new_with_runtime(wf_name, rt); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf( @@ -1187,16 +1177,17 @@ async fn metrics_available_from_custom_slot_supplier() { let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let mut starter = CoreWfStarter::new_with_runtime("metrics_available_from_custom_slot_supplier", rt); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); - starter.worker_config.clear_max_outstanding_opts(); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.worker_config.max_outstanding_workflow_tasks = None; + 
starter.worker_config.max_outstanding_local_activities = None; + starter.worker_config.max_outstanding_activities = None; + starter.worker_config.max_outstanding_nexus_tasks = None; let mut tb = TunerBuilder::default(); tb.workflow_slot_supplier(Arc::new(MetricRecordingSlotSupplier:: { inner: FixedSizeSlotSupplier::new(5), metrics: OnceLock::new(), })); - starter.worker_config.tuner(Arc::new(tb.build())); + starter.worker_config.tuner = Some(Arc::new(tb.build())); let mut worker = starter.worker().await; worker.register_wf( @@ -1314,17 +1305,11 @@ async fn test_prometheus_metric_format_consistency() { #[tokio::test] async fn prometheus_label_nonsense() { - let mut opts_builder = PrometheusExporterOptionsBuilder::default(); - opts_builder.socket_addr(ANY_PORT.parse().unwrap()); - let (telemopts, addr, _aborter) = prom_metrics(Some(opts_builder.build().unwrap())); + let opts_builder = PrometheusExporterOptions::builder().socket_addr(ANY_PORT.parse().unwrap()); + let (telemopts, addr, _aborter) = prom_metrics(Some(opts_builder.build())); let meter = telemopts.metrics.clone().unwrap(); - let ctr = meter.counter( - MetricParametersBuilder::default() - .name("some_counter") - .build() - .unwrap(), - ); + let ctr = meter.counter(MetricParameters::builder().name("some_counter").build()); let a1 = meter.new_attributes(NewAttributes::from([MetricKeyValue::new("thing", "foo")])); let a2 = meter.new_attributes(NewAttributes::from([MetricKeyValue::new("blerp", "baz")])); ctr.add(1, &a1); @@ -1347,20 +1332,17 @@ async fn sticky_queue_label_strategy( strategy: TaskQueueLabelStrategy, ) { let (mut telemopts, addr, _aborter) = prom_metrics(Some( - PrometheusExporterOptionsBuilder::default() + PrometheusExporterOptions::builder() .socket_addr(ANY_PORT.parse().unwrap()) - .build() - .unwrap(), + .build(), )); telemopts.task_queue_label_strategy = strategy; let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let wf_name = 
format!("sticky_queue_label_strategy_{strategy:?}"); let mut starter = CoreWfStarter::new_with_runtime(&wf_name, rt); // Enable sticky queues by setting a reasonable cache size - starter.worker_config.max_cached_workflows(10_usize); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.max_cached_workflows = 10_usize; + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let task_queue = starter.get_task_queue().to_owned(); let mut worker = starter.worker().await; @@ -1436,14 +1418,15 @@ async fn resource_based_tuner_metrics() { let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let wf_name = "resource_based_tuner_metrics"; let mut starter = CoreWfStarter::new_with_runtime(wf_name, rt); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); - starter.worker_config.clear_max_outstanding_opts(); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.worker_config.max_outstanding_workflow_tasks = None; + starter.worker_config.max_outstanding_local_activities = None; + starter.worker_config.max_outstanding_activities = None; + starter.worker_config.max_outstanding_nexus_tasks = None; // Create a resource-based tuner with reasonable thresholds let tuner = ResourceBasedTuner::new(0.8, 0.8); - starter.worker_config.tuner(Arc::new(tuner)); + starter.worker_config.tuner = Some(Arc::new(tuner)); let mut worker = starter.worker().await; diff --git a/crates/sdk-core/tests/integ_tests/polling_tests.rs b/crates/sdk-core/tests/integ_tests/polling_tests.rs index a8832a225..91b7d1460 100644 --- a/crates/sdk-core/tests/integ_tests/polling_tests.rs +++ b/crates/sdk-core/tests/integ_tests/polling_tests.rs @@ -29,13 +29,13 @@ use temporalio_common::{ temporal::api::enums::v1::EventType, test_utils::schedule_activity_cmd, }, - telemetry::{Logger, TelemetryOptionsBuilder}, + telemetry::{Logger, TelemetryOptions}, worker::PollerBehavior, }; use 
temporalio_sdk::{ActivityOptions, WfContext}; use temporalio_sdk_core::{ - ClientOptions, CoreRuntime, RuntimeOptionsBuilder, - ephemeral_server::{TemporalDevServerConfigBuilder, default_cached_download}, + ClientOptions, CoreRuntime, RuntimeOptions, + ephemeral_server::{TemporalDevServerConfig, default_cached_download}, init_worker, telemetry::CoreLogStreamConsumer, test_help::{NAMESPACE, WorkerTestHelpers, drain_pollers_and_shutdown}, @@ -125,15 +125,14 @@ async fn out_of_order_completion_doesnt_hang() { async fn switching_worker_client_changes_poll() { // Start two servers info!("Starting servers"); - let server_config = TemporalDevServerConfigBuilder::default() + let server_config = TemporalDevServerConfig::builder() .exe(default_cached_download()) // We need to lower the poll timeout so the poll call rolls over .extra_args(vec![ "--dynamic-config-value".to_string(), "matching.longPollExpirationInterval=\"1s\"".to_string(), ]) - .build() - .unwrap(); + .build(); let mut server1 = server_config .start_server_with_output(Stdio::null(), Stdio::null()) .await @@ -191,16 +190,10 @@ async fn switching_worker_client_changes_poll() { .unwrap(); // Create a worker only on the first server - let worker = init_worker( - init_integ_telem().unwrap(), - integ_worker_config("my-task-queue") - // We want a cache so we don't get extra remove-job activations - .max_cached_workflows(100_usize) - .build() - .unwrap(), - client1.clone(), - ) - .unwrap(); + let mut config = integ_worker_config("my-task-queue"); + // We want a cache so we don't get extra remove-job activations + config.max_cached_workflows = 100_usize; + let worker = init_worker(init_integ_telem().unwrap(), config, client1.clone()).unwrap(); // Poll for first task, confirm it's first wf, complete, and wait for complete info!("Doing initial poll"); @@ -252,24 +245,18 @@ async fn small_workflow_slots_and_pollers(#[values(false, true)] use_autoscaling let wf_name = "only_one_workflow_slot_and_two_pollers"; let mut 
starter = CoreWfStarter::new(wf_name); if use_autoscaling { - starter - .worker_config - .workflow_task_poller_behavior(PollerBehavior::Autoscaling { - minimum: 1, - maximum: 5, - initial: 1, - }); + starter.worker_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { + minimum: 1, + maximum: 5, + initial: 1, + }; } else { - starter - .worker_config - .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(2)); + starter.worker_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(2); } - starter - .worker_config - .max_outstanding_workflow_tasks(2_usize) - .max_outstanding_local_activities(1_usize) - .activity_task_poller_behavior(PollerBehavior::SimpleMaximum(1)) - .max_outstanding_activities(1_usize); + starter.worker_config.max_outstanding_workflow_tasks = Some(2_usize); + starter.worker_config.max_outstanding_local_activities = Some(1_usize); + starter.worker_config.activity_task_poller_behavior = PollerBehavior::SimpleMaximum(1); + starter.worker_config.max_outstanding_activities = Some(1_usize); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { for _ in 0..3 { @@ -330,14 +317,13 @@ async fn small_workflow_slots_and_pollers(#[values(false, true)] use_autoscaling #[tokio::test] async fn replace_client_works_after_polling_failure() { let (log_consumer, mut log_rx) = CoreLogStreamConsumer::new(100); - let telem_opts = TelemetryOptionsBuilder::default() + let telem_opts = TelemetryOptions::builder() .logging(Logger::Push { filter: "OFF,temporalio_client=DEBUG".into(), consumer: Arc::new(log_consumer), }) - .build() - .unwrap(); - let runtime_opts = RuntimeOptionsBuilder::default() + .build(); + let runtime_opts = RuntimeOptions::builder() .telemetry_options(telem_opts) .build() .unwrap(); @@ -376,7 +362,7 @@ async fn replace_client_works_after_polling_failure() { // Starting a second dev server for the worker to connect to initially. 
Later this server will be shut down // and the worker client replaced with a client connected to the main integration test server. - let initial_server_config = integ_dev_server_config(vec![]).build().unwrap(); + let initial_server_config = integ_dev_server_config(vec![], false); let initial_server = Arc::new(Mutex::new(Some( initial_server_config .start_server_with_output(Stdio::null(), Stdio::null()) @@ -405,17 +391,10 @@ async fn replace_client_works_after_polling_failure() { let wf_name = "replace_client_works_after_polling_failure"; let task_queue = format!("{wf_name}_tq"); - let worker = Arc::new( - init_worker( - &rt, - integ_worker_config(&task_queue) - .max_cached_workflows(100_usize) - .build() - .unwrap(), - client_for_initial_server.clone(), - ) - .unwrap(), - ); + let mut config = integ_worker_config(&task_queue); + config.max_cached_workflows = 100_usize; + let worker = + Arc::new(init_worker(&rt, config, client_for_initial_server.clone()).unwrap()); // Polling the initial server the first time is successful. 
let wf_1 = client_for_initial_server diff --git a/crates/sdk-core/tests/integ_tests/update_tests.rs b/crates/sdk-core/tests/integ_tests/update_tests.rs index 3a1ab27c8..18f77273a 100644 --- a/crates/sdk-core/tests/integ_tests/update_tests.rs +++ b/crates/sdk-core/tests/integ_tests/update_tests.rs @@ -724,9 +724,7 @@ async fn update_with_local_acts() { async fn update_rejection_sdk() { let wf_name = "update_rejection_sdk"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let client = starter.get_client().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { @@ -770,9 +768,7 @@ async fn update_rejection_sdk() { async fn update_fail_sdk() { let wf_name = "update_fail_sdk"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let client = starter.get_client().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { @@ -816,9 +812,7 @@ async fn update_fail_sdk() { async fn update_timer_sequence() { let wf_name = "update_timer_sequence"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let client = starter.get_client().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { @@ -866,9 +860,7 @@ async fn update_timer_sequence() { async fn task_failure_during_validation() { let wf_name = "task_failure_during_validation"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + 
starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); starter.workflow_options.task_timeout = Some(Duration::from_secs(1)); let mut worker = starter.worker().await; let client = starter.get_client().await; @@ -929,9 +921,7 @@ async fn task_failure_during_validation() { async fn task_failure_after_update() { let wf_name = "task_failure_after_update"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); starter.workflow_options.task_timeout = Some(Duration::from_secs(1)); let mut worker = starter.worker().await; let client = starter.get_client().await; diff --git a/crates/sdk-core/tests/integ_tests/worker_heartbeat_tests.rs b/crates/sdk-core/tests/integ_tests/worker_heartbeat_tests.rs index 3847f0c7e..b32162952 100644 --- a/crates/sdk-core/tests/integ_tests/worker_heartbeat_tests.rs +++ b/crates/sdk-core/tests/integ_tests/worker_heartbeat_tests.rs @@ -26,14 +26,12 @@ use temporalio_common::{ workflowservice::v1::{DescribeWorkerRequest, ListWorkersRequest}, }, }, - telemetry::{ - OtelCollectorOptionsBuilder, PrometheusExporterOptionsBuilder, TelemetryOptionsBuilder, - }, + telemetry::{OtelCollectorOptions, PrometheusExporterOptions, TelemetryOptions}, worker::PollerBehavior, }; use temporalio_sdk::{ActContext, ActivityOptions, WfContext}; use temporalio_sdk_core::{ - CoreRuntime, ResourceBasedTuner, ResourceSlotOptions, RuntimeOptionsBuilder, + CoreRuntime, ResourceBasedTuner, ResourceSlotOptions, RuntimeOptions, telemetry::{build_otlp_metric_exporter, start_prometheus_metric_exporter}, }; use tokio::{sync::Notify, time::sleep}; @@ -54,8 +52,8 @@ fn within_duration(dur: PbDuration, threshold: Duration) -> bool { } fn new_no_metrics_starter(wf_name: &str) -> CoreWfStarter { - let runtimeopts = RuntimeOptionsBuilder::default() - .telemetry_options(TelemetryOptionsBuilder::default().build().unwrap()) + let runtimeopts 
= RuntimeOptions::builder() + .telemetry_options(TelemetryOptions::builder().build()) .heartbeat_interval(Some(Duration::from_secs(1))) .build() .unwrap(); @@ -99,11 +97,11 @@ async fn docker_worker_heartbeat_basic(#[values("otel", "prom", "no_metrics")] b return; } let telemopts = if backing == "no_metrics" { - TelemetryOptionsBuilder::default().build().unwrap() + TelemetryOptions::builder().build() } else { get_integ_telem_options() }; - let runtimeopts = RuntimeOptionsBuilder::default() + let runtimeopts = RuntimeOptions::builder() .telemetry_options(telemopts) .heartbeat_interval(Some(Duration::from_secs(1))) .build() @@ -114,15 +112,15 @@ async fn docker_worker_heartbeat_basic(#[values("otel", "prom", "no_metrics")] b let url = Some("grpc://localhost:4317") .map(|x| x.parse::().unwrap()) .unwrap(); - let mut opts_build = OtelCollectorOptionsBuilder::default(); - let opts = opts_build.url(url).build().unwrap(); + let opts_build = OtelCollectorOptions::builder(); + let opts = opts_build.url(url).build(); rt.telemetry_mut() .attach_late_init_metrics(Arc::new(build_otlp_metric_exporter(opts).unwrap())); } "prom" => { - let mut opts_build = PrometheusExporterOptionsBuilder::default(); - opts_build.socket_addr(ANY_PORT.parse().unwrap()); - let opts = opts_build.build().unwrap(); + let opts_build = + PrometheusExporterOptions::builder().socket_addr(ANY_PORT.parse().unwrap()); + let opts = opts_build.build(); rt.telemetry_mut() .attach_late_init_metrics(start_prometheus_metric_exporter(opts).unwrap().meter); } @@ -131,25 +129,21 @@ async fn docker_worker_heartbeat_basic(#[values("otel", "prom", "no_metrics")] b } let wf_name = format!("worker_heartbeat_basic_{backing}"); let mut starter = CoreWfStarter::new_with_runtime(&wf_name, rt); - starter - .worker_config - .max_outstanding_workflow_tasks(5_usize) - .max_cached_workflows(5_usize) - .max_outstanding_activities(5_usize) - .plugins( - [ - PluginInfo { - name: "plugin1".to_string(), - version: "1".to_string(), - }, 
- PluginInfo { - name: "plugin2".to_string(), - version: "2".to_string(), - }, - ] - .into_iter() - .collect::>(), - ); + starter.worker_config.max_outstanding_workflow_tasks = Some(5_usize); + starter.worker_config.max_cached_workflows = 5_usize; + starter.worker_config.max_outstanding_activities = Some(5_usize); + starter.worker_config.plugins = vec![ + PluginInfo { + name: "plugin1".to_string(), + version: "1".to_string(), + }, + PluginInfo { + name: "plugin2".to_string(), + version: "2".to_string(), + }, + ] + .into_iter() + .collect(); let mut worker = starter.worker().await; let worker_instance_key = worker.worker_instance_key(); @@ -269,7 +263,7 @@ async fn docker_worker_heartbeat_tuner() { if env::var("DOCKER_PROMETHEUS_RUNNING").is_err() { return; } - let runtimeopts = RuntimeOptionsBuilder::default() + let runtimeopts = RuntimeOptions::builder() .telemetry_options(get_integ_telem_options()) .heartbeat_interval(Some(Duration::from_secs(1))) .build() @@ -279,8 +273,8 @@ async fn docker_worker_heartbeat_tuner() { let url = Some("grpc://localhost:4317") .map(|x| x.parse::().unwrap()) .unwrap(); - let mut opts_build = OtelCollectorOptionsBuilder::default(); - let opts = opts_build.url(url).build().unwrap(); + let opts_build = OtelCollectorOptions::builder(); + let opts = opts_build.url(url).build(); rt.telemetry_mut() .attach_late_init_metrics(Arc::new(build_otlp_metric_exporter(opts).unwrap())); @@ -290,20 +284,21 @@ async fn docker_worker_heartbeat_tuner() { tuner .with_workflow_slots_options(ResourceSlotOptions::new(2, 10, Duration::from_millis(0))) .with_activity_slots_options(ResourceSlotOptions::new(5, 10, Duration::from_millis(50))); - starter - .worker_config - .workflow_task_poller_behavior(PollerBehavior::Autoscaling { - minimum: 1, - maximum: 200, - initial: 5, - }) - .nexus_task_poller_behavior(PollerBehavior::Autoscaling { - minimum: 1, - maximum: 200, - initial: 5, - }) - .clear_max_outstanding_opts() - .tuner(Arc::new(tuner)); + 
starter.worker_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { + minimum: 1, + maximum: 200, + initial: 5, + }; + starter.worker_config.nexus_task_poller_behavior = PollerBehavior::Autoscaling { + minimum: 1, + maximum: 200, + initial: 5, + }; + starter.worker_config.max_outstanding_workflow_tasks = None; + starter.worker_config.max_outstanding_local_activities = None; + starter.worker_config.max_outstanding_activities = None; + starter.worker_config.max_outstanding_nexus_tasks = None; + starter.worker_config.tuner = Some(Arc::new(tuner)); let mut worker = starter.worker().await; let worker_instance_key = worker.worker_instance_key(); @@ -545,10 +540,8 @@ fn after_shutdown_checks( async fn worker_heartbeat_sticky_cache_miss() { let wf_name = "worker_heartbeat_cache_miss"; let mut starter = new_no_metrics_starter(wf_name); - starter - .worker_config - .max_cached_workflows(1_usize) - .max_outstanding_workflow_tasks(2_usize); + starter.worker_config.max_cached_workflows = 1_usize; + starter.worker_config.max_outstanding_workflow_tasks = Some(2_usize); let mut worker = starter.worker().await; worker.fetch_results = false; @@ -665,10 +658,8 @@ async fn worker_heartbeat_sticky_cache_miss() { async fn worker_heartbeat_multiple_workers() { let wf_name = "worker_heartbeat_multi_workers"; let mut starter = new_no_metrics_starter(wf_name); - starter - .worker_config - .max_outstanding_workflow_tasks(5_usize) - .max_cached_workflows(5_usize); + starter.worker_config.max_outstanding_workflow_tasks = Some(5_usize); + starter.worker_config.max_cached_workflows = 5_usize; let client = starter.get_client().await; let starting_hb_len = list_worker_heartbeats(&client, String::new()).await.len(); @@ -767,7 +758,7 @@ async fn worker_heartbeat_failure_metrics() { let wf_name = "worker_heartbeat_failure_metrics"; let mut starter = new_no_metrics_starter(wf_name); - starter.worker_config.max_outstanding_activities(5_usize); + 
starter.worker_config.max_outstanding_activities = Some(5_usize); let mut worker = starter.worker().await; let worker_instance_key = worker.worker_instance_key(); @@ -942,9 +933,9 @@ async fn worker_heartbeat_failure_metrics() { #[tokio::test] async fn worker_heartbeat_no_runtime_heartbeat() { let wf_name = "worker_heartbeat_no_runtime_heartbeat"; - let runtimeopts = RuntimeOptionsBuilder::default() + let runtimeopts = RuntimeOptions::builder() .telemetry_options(get_integ_telem_options()) - .heartbeat_interval(None) // Turn heartbeating off + .heartbeat_interval(None) .build() .unwrap(); let rt = CoreRuntime::new_assume_tokio(runtimeopts).unwrap(); @@ -1002,14 +993,14 @@ async fn worker_heartbeat_no_runtime_heartbeat() { #[tokio::test] async fn worker_heartbeat_skip_client_worker_set_check() { let wf_name = "worker_heartbeat_skip_client_worker_set_check"; - let runtimeopts = RuntimeOptionsBuilder::default() + let runtimeopts = RuntimeOptions::builder() .telemetry_options(get_integ_telem_options()) .heartbeat_interval(Some(Duration::from_secs(1))) .build() .unwrap(); let rt = CoreRuntime::new_assume_tokio(runtimeopts).unwrap(); let mut starter = CoreWfStarter::new_with_runtime(wf_name, rt); - starter.worker_config.skip_client_worker_set_check(true); + starter.worker_config.skip_client_worker_set_check = true; let mut worker = starter.worker().await; let worker_instance_key = worker.worker_instance_key(); diff --git a/crates/sdk-core/tests/integ_tests/worker_tests.rs b/crates/sdk-core/tests/integ_tests/worker_tests.rs index cb780aeb1..2b50549b4 100644 --- a/crates/sdk-core/tests/integ_tests/worker_tests.rs +++ b/crates/sdk-core/tests/integ_tests/worker_tests.rs @@ -53,7 +53,7 @@ use temporalio_common::{ worker::{ ActivitySlotKind, LocalActivitySlotKind, PollerBehavior, SlotInfo, SlotInfoTrait, SlotMarkUsedContext, SlotReleaseContext, SlotReservationContext, SlotSupplier, - SlotSupplierPermit, WorkerConfigBuilder, WorkerTaskTypes, WorkerVersioningStrategy, + 
SlotSupplierPermit, WorkerConfig, WorkerTaskTypes, WorkerVersioningStrategy, WorkflowSlotKind, }, }; @@ -84,7 +84,7 @@ async fn worker_validation_fails_on_nonexistent_namespace() { let worker = init_worker( &runtime, - WorkerConfigBuilder::default() + WorkerConfig::builder() .namespace("i_dont_exist") .task_queue("Wheee!") .versioning_strategy(WorkerVersioningStrategy::None { @@ -176,16 +176,17 @@ async fn worker_handles_unknown_workflow_types_gracefully() { async fn resource_based_few_pollers_guarantees_non_sticky_poll() { let wf_name = "resource_based_few_pollers_guarantees_non_sticky_poll"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .clear_max_outstanding_opts() - .task_types(WorkerTaskTypes::workflow_only()) - // 3 pollers so the minimum slots of 2 can both be handed out to a sticky poller - .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(3_usize)); + starter.worker_config.max_outstanding_workflow_tasks = None; + starter.worker_config.max_outstanding_local_activities = None; + starter.worker_config.max_outstanding_activities = None; + starter.worker_config.max_outstanding_nexus_tasks = None; + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + // 3 pollers so the minimum slots of 2 can both be handed out to a sticky poller + starter.worker_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(3_usize); // Set the limits to zero so it's essentially unwilling to hand out slots let mut tuner = ResourceBasedTuner::new(0.0, 0.0); tuner.with_workflow_slots_options(ResourceSlotOptions::new(2, 10, Duration::from_millis(0))); - starter.worker_config.tuner(Arc::new(tuner)); + starter.worker_config.tuner = Some(Arc::new(tuner)); let mut worker = starter.worker().await; // Workflow doesn't actually need to do anything. 
We just need to see that we don't get stuck @@ -216,9 +217,7 @@ async fn oversize_grpc_message() { let (telemopts, addr, _aborter) = prom_metrics(None); let runtime = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let mut starter = CoreWfStarter::new_with_runtime(wf_name, runtime); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut core = starter.worker().await; static OVERSIZE_GRPC_MESSAGE_RUN: AtomicBool = AtomicBool::new(false); @@ -712,13 +711,16 @@ async fn test_custom_slot_supplier_simple() { )); let mut starter = CoreWfStarter::new("test_custom_slot_supplier_simple"); - starter.worker_config.clear_max_outstanding_opts(); + starter.worker_config.max_outstanding_workflow_tasks = None; + starter.worker_config.max_outstanding_local_activities = None; + starter.worker_config.max_outstanding_activities = None; + starter.worker_config.max_outstanding_nexus_tasks = None; let mut tb = TunerBuilder::default(); tb.workflow_slot_supplier(wf_supplier.clone()); tb.activity_slot_supplier(activity_supplier.clone()); tb.local_activity_slot_supplier(local_activity_supplier.clone()); - starter.worker_config.tuner(Arc::new(tb.build())); + starter.worker_config.tuner = Some(Arc::new(tb.build())); let mut worker = starter.worker().await; diff --git a/crates/sdk-core/tests/integ_tests/worker_versioning_tests.rs b/crates/sdk-core/tests/integ_tests/worker_versioning_tests.rs index e7ac54f09..56af43960 100644 --- a/crates/sdk-core/tests/integ_tests/worker_versioning_tests.rs +++ b/crates/sdk-core/tests/integ_tests/worker_versioning_tests.rs @@ -37,16 +37,13 @@ async fn sets_deployment_info_on_task_responses(#[values(true, false)] use_defau deployment_name: deploy_name.clone(), build_id: "1.0".to_string(), }; - starter - .worker_config - .versioning_strategy(WorkerVersioningStrategy::WorkerDeploymentBased( - WorkerDeploymentOptions { - version: 
version.clone(), - use_worker_versioning: true, - default_versioning_behavior: VersioningBehavior::AutoUpgrade.into(), - }, - )) - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.versioning_strategy = + WorkerVersioningStrategy::WorkerDeploymentBased(WorkerDeploymentOptions { + version: version.clone(), + use_worker_versioning: true, + default_versioning_behavior: VersioningBehavior::AutoUpgrade.into(), + }); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let core = starter.get_worker().await; let client = starter.get_client().await; @@ -153,18 +150,15 @@ async fn activity_has_deployment_stamp() { let wf_name = "activity_has_deployment_stamp"; let mut starter = CoreWfStarter::new(wf_name); let deploy_name = format!("deployment-{}", starter.get_task_queue()); - starter - .worker_config - .versioning_strategy(WorkerVersioningStrategy::WorkerDeploymentBased( - WorkerDeploymentOptions { - version: WorkerDeploymentVersion { - deployment_name: deploy_name.clone(), - build_id: "1.0".to_string(), - }, - use_worker_versioning: true, - default_versioning_behavior: VersioningBehavior::AutoUpgrade.into(), + starter.worker_config.versioning_strategy = + WorkerVersioningStrategy::WorkerDeploymentBased(WorkerDeploymentOptions { + version: WorkerDeploymentVersion { + deployment_name: deploy_name.clone(), + build_id: "1.0".to_string(), }, - )); + use_worker_versioning: true, + default_versioning_behavior: VersioningBehavior::AutoUpgrade.into(), + }); let mut worker = starter.worker().await; let client = starter.get_client().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests.rs b/crates/sdk-core/tests/integ_tests/workflow_tests.rs index ddab142ef..3a6265aba 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests.rs @@ -78,9 +78,7 @@ use tokio::{join, sync::Notify, time::sleep}; async fn 
parallel_workflows_same_queue() { let wf_name = "parallel_workflows_same_queue"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut core = starter.worker().await; core.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { @@ -366,14 +364,10 @@ async fn wft_timeout_doesnt_create_unsolvable_autocomplete() { let signal_at_start = "at-start"; let signal_at_complete = "at-complete"; let mut wf_starter = CoreWfStarter::new("wft_timeout_doesnt_create_unsolvable_autocomplete"); - wf_starter - .worker_config - // Test needs eviction on and a short timeout - .max_cached_workflows(0_usize) - .max_outstanding_workflow_tasks(1_usize); - wf_starter - .worker_config - .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(1_usize)); + // Test needs eviction on and a short timeout + wf_starter.worker_config.max_cached_workflows = 0_usize; + wf_starter.worker_config.max_outstanding_workflow_tasks = Some(1_usize); + wf_starter.worker_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(1_usize); wf_starter.workflow_options.task_timeout = Some(Duration::from_secs(1)); let core = wf_starter.get_worker().await; let client = wf_starter.get_client().await; @@ -479,10 +473,8 @@ async fn wft_timeout_doesnt_create_unsolvable_autocomplete() { async fn slow_completes_with_small_cache() { let wf_name = "slow_completes_with_small_cache"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .max_outstanding_workflow_tasks(5_usize) - .max_cached_workflows(5_usize); + starter.worker_config.max_outstanding_workflow_tasks = Some(5_usize); + starter.worker_config.max_cached_workflows = 5_usize; let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { for _ in 0..3 { @@ -544,10 +536,8 @@ async fn deployment_version_correct_in_wf_info(#[values(true, 
false)] use_only_b default_versioning_behavior: None, }) }; - starter - .worker_config - .versioning_strategy(version_strat) - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.versioning_strategy = version_strat; + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let core = starter.get_worker().await; starter.start_wf().await; let client = starter.get_client().await; @@ -651,7 +641,7 @@ async fn deployment_version_correct_in_wf_info(#[values(true, false)] use_only_b default_versioning_behavior: None, }) }; - starter.worker_config.versioning_strategy(version_strat); + starter.worker_config.versioning_strategy = version_strat; let core = starter.get_worker().await; @@ -770,16 +760,13 @@ async fn nondeterminism_errors_fail_workflow_when_configured_to( let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let wf_name = "nondeterminism_errors_fail_workflow_when_configured_to"; let mut starter = CoreWfStarter::new_with_runtime(wf_name, rt); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let typeset = HashSet::from([WorkflowErrorType::Nondeterminism]); if whole_worker { - starter.worker_config.workflow_failure_errors(typeset); + starter.worker_config.workflow_failure_errors = typeset; } else { - starter - .worker_config - .workflow_types_to_failure_errors(HashMap::from([(wf_name.to_owned(), typeset)])); + starter.worker_config.workflow_types_to_failure_errors = + HashMap::from([(wf_name.to_owned(), typeset)]); } let wf_id = starter.get_task_queue().to_owned(); let mut worker = starter.worker().await; @@ -851,9 +838,8 @@ async fn nondeterminism_errors_fail_workflow_when_configured_to( async fn history_out_of_order_on_restart() { let wf_name = "history_out_of_order_on_restart"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - 
.workflow_failure_errors([WorkflowErrorType::Nondeterminism]); + starter.worker_config.workflow_failure_errors = + HashSet::from([WorkflowErrorType::Nondeterminism]); let mut worker = starter.worker().await; let mut starter2 = starter.clone_no_worker(); let mut worker2 = starter2.worker().await; diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/activities.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/activities.rs index 34854a14f..8e2076957 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/activities.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/activities.rs @@ -1057,9 +1057,7 @@ async fn it_can_complete_async() { async fn graceful_shutdown() { let wf_name = "graceful_shutdown"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .graceful_shutdown_period(Some(Duration::from_millis(500))); + starter.worker_config.graceful_shutdown_period = Some(Duration::from_millis(500)); let mut worker = starter.worker().await; let client = starter.get_client().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { @@ -1122,9 +1120,7 @@ async fn graceful_shutdown() { async fn activity_can_be_cancelled_by_local_timeout() { let wf_name = "activity_can_be_cancelled_by_local_timeout"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .local_timeout_buffer_for_activities(Duration::from_secs(0)); + starter.worker_config.local_timeout_buffer_for_activities = Duration::from_secs(0); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { let res = ctx @@ -1165,19 +1161,17 @@ async fn activity_can_be_cancelled_by_local_timeout() { async fn long_activity_timeout_repro() { let wf_name = "long_activity_timeout_repro"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .workflow_task_poller_behavior(PollerBehavior::Autoscaling { - minimum: 1, - maximum: 10, - initial: 5, - }) - 
.activity_task_poller_behavior(PollerBehavior::Autoscaling { - minimum: 1, - maximum: 10, - initial: 5, - }) - .local_timeout_buffer_for_activities(Duration::from_secs(0)); + starter.worker_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { + minimum: 1, + maximum: 10, + initial: 5, + }; + starter.worker_config.activity_task_poller_behavior = PollerBehavior::Autoscaling { + minimum: 1, + maximum: 10, + initial: 5, + }; + starter.worker_config.local_timeout_buffer_for_activities = Duration::from_secs(0); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { let mut iter = 1; diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs index 7aaf352ba..d695bbecb 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_external.rs @@ -42,9 +42,7 @@ async fn cancel_receiver(ctx: WfContext) -> WorkflowResult { #[tokio::test] async fn sends_cancel_to_other_wf() { let mut starter = CoreWfStarter::new("sends_cancel_to_other_wf"); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf("sender", cancel_sender); worker.register_wf("receiver", cancel_receiver); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs index f5aa10675..580d80995 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/cancel_wf.rs @@ -35,9 +35,7 @@ async fn cancelled_wf(ctx: WfContext) -> WorkflowResult<()> { async fn cancel_during_timer() { let wf_name = "cancel_during_timer"; let mut starter = CoreWfStarter::new(wf_name); - starter - 
.worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let client = starter.get_client().await; worker.register_wf(wf_name.to_string(), cancelled_wf); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs index 305a48103..db458b48b 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/child_workflows.rs @@ -86,9 +86,7 @@ async fn happy_parent(ctx: WfContext) -> WorkflowResult<()> { #[tokio::test] async fn child_workflow_happy_path() { let mut starter = CoreWfStarter::new("child-workflows"); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(PARENT_WF_TYPE.to_string(), happy_parent); @@ -109,9 +107,7 @@ async fn child_workflow_happy_path() { #[tokio::test] async fn abandoned_child_bug_repro() { let mut starter = CoreWfStarter::new("child-workflow-abandon-bug"); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let barr: &'static Barrier = Box::leak(Box::new(Barrier::new(2))); @@ -182,9 +178,7 @@ async fn abandoned_child_bug_repro() { #[tokio::test] async fn abandoned_child_resolves_post_cancel() { let mut starter = CoreWfStarter::new("child-workflow-resolves-post-cancel"); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let barr: &'static Barrier = Box::leak(Box::new(Barrier::new(2))); @@ -251,9 +245,7 @@ async fn 
abandoned_child_resolves_post_cancel() { async fn cancelled_child_gets_reason() { let wf_name = "cancelled-child-gets-reason"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_string(), move |ctx: WfContext| async move { diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs index 8038ec8e4..fef4bb2c5 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/continue_as_new.rs @@ -29,9 +29,7 @@ async fn continue_as_new_wf(ctx: WfContext) -> WorkflowResult<()> { async fn continue_as_new_happy_path() { let wf_name = "continue_as_new_happy_path"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_string(), continue_as_new_wf); @@ -51,11 +49,9 @@ async fn continue_as_new_happy_path() { async fn continue_as_new_multiple_concurrent() { let wf_name = "continue_as_new_multiple_concurrent"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()) - .max_cached_workflows(5_usize) - .max_outstanding_workflow_tasks(5_usize); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.worker_config.max_cached_workflows = 5_usize; + starter.worker_config.max_outstanding_workflow_tasks = Some(5_usize); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_string(), continue_as_new_wf); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/determinism.rs 
b/crates/sdk-core/tests/integ_tests/workflow_tests/determinism.rs index 9565020b2..b8e2b49bb 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/determinism.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/determinism.rs @@ -55,9 +55,7 @@ pub(crate) async fn timer_wf_nondeterministic(ctx: WfContext) -> WorkflowResult< async fn test_determinism_error_then_recovers() { let wf_name = "test_determinism_error_then_recovers"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), timer_wf_nondeterministic); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/eager.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/eager.rs index 6d3c3c1cb..df11b82c8 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/eager.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/eager.rs @@ -15,9 +15,7 @@ async fn eager_wf_start() { starter.workflow_options.enable_eager_workflow_start = true; // hang the test if eager task dispatch failed starter.workflow_options.task_timeout = Some(Duration::from_secs(1500)); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), eager_wf); starter.eager_start_with_worker(wf_name, &mut worker).await; @@ -31,9 +29,7 @@ async fn eager_wf_start_different_clients() { starter.workflow_options.enable_eager_workflow_start = true; // hang the test if wf task needs retry starter.workflow_options.task_timeout = Some(Duration::from_secs(1500)); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; 
worker.register_wf(wf_name.to_owned(), eager_wf); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs index b3e99cb79..02ad1ea68 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/local_activities.rs @@ -177,9 +177,7 @@ pub(crate) async fn local_act_fanout_wf(ctx: WfContext) -> WorkflowResult<()> { async fn local_act_fanout() { let wf_name = "local_act_fanout"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .max_outstanding_local_activities(1_usize); + starter.worker_config.max_outstanding_local_activities = Some(1_usize); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), local_act_fanout_wf); worker.register_activity("echo_activity", echo); @@ -468,7 +466,7 @@ async fn schedule_to_close_timeout_across_timer_backoff(#[case] cached: bool) { ); let mut starter = CoreWfStarter::new(&wf_name); if !cached { - starter.worker_config.max_cached_workflows(0_usize); + starter.worker_config.max_cached_workflows = 0_usize; } let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { @@ -509,7 +507,7 @@ async fn schedule_to_close_timeout_across_timer_backoff(#[case] cached: bool) { async fn eviction_wont_make_local_act_get_dropped(#[values(true, false)] short_wft_timeout: bool) { let wf_name = format!("eviction_wont_make_local_act_get_dropped_{short_wft_timeout}"); let mut starter = CoreWfStarter::new(&wf_name); - starter.worker_config.max_cached_workflows(0_usize); + starter.worker_config.max_cached_workflows = 0_usize; let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), local_act_then_timer_then_wait); worker.register_activity("echo_activity", |_ctx: ActContext, str: String| async { @@ -722,9 +720,7 @@ async fn la_resolve_same_time_as_other_cancel() { let 
wf_name = "la_resolve_same_time_as_other_cancel"; let mut starter = CoreWfStarter::new(wf_name); // The activity won't get a chance to receive the cancel so make sure we still exit fast - starter - .worker_config - .graceful_shutdown_period(Duration::from_millis(100)); + starter.worker_config.graceful_shutdown_period = Some(Duration::from_millis(100)); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/modify_wf_properties.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/modify_wf_properties.rs index 760e889ad..4af1162e8 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/modify_wf_properties.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/modify_wf_properties.rs @@ -32,9 +32,7 @@ async fn sends_modify_wf_props() { let wf_name = "can_upsert_memo"; let wf_id = Uuid::new_v4(); let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name, memo_upserter); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/nexus.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/nexus.rs index c4f913d19..4cfeb1fe7 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/nexus.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/nexus.rs @@ -58,12 +58,12 @@ async fn nexus_basic( ) { let wf_name = "nexus_basic"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types(WorkerTaskTypes { + starter.worker_config.task_types = WorkerTaskTypes { enable_workflows: true, enable_local_activities: false, enable_remote_activities: false, enable_nexus: true, - }); + }; let mut worker = starter.worker().await; let core_worker = starter.get_worker().await; @@ -208,12 +208,12 @@ async fn nexus_async( ) { let 
wf_name = "nexus_async"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types(WorkerTaskTypes { + starter.worker_config.task_types = WorkerTaskTypes { enable_workflows: true, enable_local_activities: false, enable_remote_activities: false, enable_nexus: true, - }); + }; let mut worker = starter.worker().await; let core_worker = starter.get_worker().await; @@ -440,12 +440,12 @@ async fn nexus_async( async fn nexus_cancel_before_start() { let wf_name = "nexus_cancel_before_start"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types(WorkerTaskTypes { + starter.worker_config.task_types = WorkerTaskTypes { enable_workflows: true, enable_local_activities: false, enable_remote_activities: false, enable_nexus: true, - }); + }; let mut worker = starter.worker().await; let endpoint = mk_nexus_endpoint(&mut starter).await; @@ -487,16 +487,14 @@ async fn nexus_cancel_before_start() { async fn nexus_must_complete_task_to_shutdown(#[values(true, false)] use_grace_period: bool) { let wf_name = "nexus_must_complete_task_to_shutdown"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types(WorkerTaskTypes { + starter.worker_config.task_types = WorkerTaskTypes { enable_workflows: true, enable_local_activities: false, enable_remote_activities: false, enable_nexus: true, - }); + }; if use_grace_period { - starter - .worker_config - .graceful_shutdown_period(Duration::from_millis(500)); + starter.worker_config.graceful_shutdown_period = Some(Duration::from_millis(500)); } let mut worker = starter.worker().await; let core_worker = starter.get_worker().await; @@ -592,12 +590,12 @@ async fn nexus_cancellation_types( ) { let wf_name = "nexus_cancellation_types"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types(WorkerTaskTypes { + starter.worker_config.task_types = WorkerTaskTypes { enable_workflows: true, enable_local_activities: false, enable_remote_activities: false, 
enable_nexus: true, - }); + }; let mut worker = starter.worker().await; let core_worker = starter.get_worker().await; diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/patches.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/patches.rs index 4b700c2e1..18e6d78b4 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/patches.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/patches.rs @@ -57,9 +57,7 @@ pub(crate) async fn changes_wf(ctx: WfContext) -> WorkflowResult<()> { async fn writes_change_markers() { let wf_name = "writes_change_markers"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), changes_wf); @@ -93,9 +91,7 @@ pub(crate) async fn no_change_then_change_wf(ctx: WfContext) -> WorkflowResult<( async fn can_add_change_markers() { let wf_name = "can_add_change_markers"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), no_change_then_change_wf); @@ -119,9 +115,7 @@ pub(crate) async fn replay_with_change_marker_wf(ctx: WfContext) -> WorkflowResu async fn replaying_with_patch_marker() { let wf_name = "replaying_with_patch_marker"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), replay_with_change_marker_wf); @@ -137,10 +131,8 @@ async fn patched_on_second_workflow_task_is_deterministic() { let wf_name = "timer_patched_timer"; let mut starter = 
CoreWfStarter::new(wf_name); // Disable caching to force replay from beginning - starter - .worker_config - .max_cached_workflows(0_usize) - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.max_cached_workflows = 0_usize; + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; // Include a task failure as well to make sure that works static FAIL_ONCE: AtomicBool = AtomicBool::new(true); @@ -163,9 +155,7 @@ async fn patched_on_second_workflow_task_is_deterministic() { async fn can_remove_deprecated_patch_near_other_patch() { let wf_name = "can_add_change_markers"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let did_die = Arc::new(AtomicBool::new(false)); worker.register_wf(wf_name.to_owned(), move |ctx: WfContext| { @@ -196,9 +186,7 @@ async fn can_remove_deprecated_patch_near_other_patch() { async fn deprecated_patch_removal() { let wf_name = "deprecated_patch_removal"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; let client = starter.get_client().await; let wf_id = starter.get_task_queue().to_string(); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/resets.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/resets.rs index 4b2cf154d..9ccb4d542 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/resets.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/resets.rs @@ -29,9 +29,7 @@ const POST_RESET_SIG: &str = "post-reset"; async fn reset_workflow() { let wf_name = "reset_me_wf"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - 
.task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.fetch_results = false; let notify = Arc::new(Notify::new()); @@ -117,12 +115,12 @@ async fn reset_workflow() { async fn reset_randomseed() { let wf_name = "reset_randomseed"; let mut starter = CoreWfStarter::new(wf_name); - starter.worker_config.task_types(WorkerTaskTypes { + starter.worker_config.task_types = WorkerTaskTypes { enable_workflows: true, enable_local_activities: true, enable_remote_activities: false, enable_nexus: true, - }); + }; let mut worker = starter.worker().await; worker.fetch_results = false; let notify = Arc::new(Notify::new()); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/signals.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/signals.rs index 899884ac9..fabe71fbc 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/signals.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/signals.rs @@ -48,9 +48,7 @@ async fn signal_sender(ctx: WfContext) -> WorkflowResult<()> { async fn sends_signal_to_missing_wf() { let wf_name = "sends_signal_to_missing_wf"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), signal_sender); @@ -89,9 +87,7 @@ async fn signal_with_create_wf_receiver(ctx: WfContext) -> WorkflowResult<()> { #[tokio::test] async fn sends_signal_to_other_wf() { let mut starter = CoreWfStarter::new("sends_signal_to_other_wf"); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf("sender", signal_sender); worker.register_wf("receiver", signal_receiver); @@ -120,9 +116,7 @@ 
async fn sends_signal_to_other_wf() { #[tokio::test] async fn sends_signal_with_create_wf() { let mut starter = CoreWfStarter::new("sends_signal_with_create_wf"); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf("receiver_signal", signal_with_create_wf_receiver); @@ -167,9 +161,7 @@ async fn signals_child(ctx: WfContext) -> WorkflowResult<()> { #[tokio::test] async fn sends_signal_to_child() { let mut starter = CoreWfStarter::new("sends_signal_to_child"); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf("child_signaler", signals_child); worker.register_wf("child_receiver", signal_receiver); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs index e00c976c2..4d68bcbb1 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/stickyness.rs @@ -12,10 +12,8 @@ use tokio::sync::Barrier; async fn timer_workflow_not_sticky() { let wf_name = "timer_wf_not_sticky"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()) - .max_cached_workflows(0_usize); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.worker_config.max_cached_workflows = 0_usize; let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), timer_wf); @@ -42,9 +40,7 @@ async fn timer_workflow_timeout_on_sticky() { // on a not-sticky queue let wf_name = "timer_workflow_timeout_on_sticky"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + 
starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); starter.workflow_options.task_timeout = Some(Duration::from_secs(2)); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), timer_timeout_wf); @@ -59,12 +55,10 @@ async fn timer_workflow_timeout_on_sticky() { async fn cache_miss_ok() { let wf_name = "cache_miss_ok"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()) - .max_outstanding_workflow_tasks(2_usize) - .max_cached_workflows(0_usize) - .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(1_usize)); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); + starter.worker_config.max_outstanding_workflow_tasks = Some(2_usize); + starter.worker_config.max_cached_workflows = 0_usize; + starter.worker_config.workflow_task_poller_behavior = PollerBehavior::SimpleMaximum(1_usize); let mut worker = starter.worker().await; let barr: &'static Barrier = Box::leak(Box::new(Barrier::new(2))); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/timers.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/timers.rs index 979b9c255..5c0cbab57 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/timers.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/timers.rs @@ -28,9 +28,7 @@ pub(crate) async fn timer_wf(command_sink: WfContext) -> WorkflowResult<()> { async fn timer_workflow_workflow_driver() { let wf_name = "timer_wf_new"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), timer_wf); @@ -42,9 +40,7 @@ async fn timer_workflow_workflow_driver() { async fn timer_workflow_manual() { let mut starter = init_core_and_create_wf("timer_workflow").await; let core = starter.get_worker().await; - 
starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let task = core.poll_workflow_activation().await.unwrap(); core.complete_workflow_activation(WorkflowActivationCompletion::from_cmds( task.run_id, @@ -68,9 +64,7 @@ async fn timer_workflow_manual() { async fn timer_cancel_workflow() { let mut starter = init_core_and_create_wf("timer_cancel_workflow").await; let core = starter.get_worker().await; - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let task = core.poll_workflow_activation().await.unwrap(); core.complete_workflow_activation(WorkflowActivationCompletion::from_cmds( task.run_id, @@ -129,9 +123,7 @@ async fn parallel_timer_wf(command_sink: WfContext) -> WorkflowResult<()> { async fn parallel_timers() { let wf_name = "parallel_timers"; let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), parallel_timer_wf); diff --git a/crates/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs b/crates/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs index 75a492adb..81ebdc7a7 100644 --- a/crates/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs +++ b/crates/sdk-core/tests/integ_tests/workflow_tests/upsert_search_attrs.rs @@ -47,9 +47,7 @@ async fn sends_upsert() { let wf_name = "sends_upsert_search_attrs"; let wf_id = Uuid::new_v4(); let mut starter = CoreWfStarter::new(wf_name); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name, search_attr_updater); diff --git 
a/crates/sdk-core/tests/main.rs b/crates/sdk-core/tests/main.rs index d77a88351..307be1adf 100644 --- a/crates/sdk-core/tests/main.rs +++ b/crates/sdk-core/tests/main.rs @@ -39,7 +39,7 @@ mod integ_tests { operatorservice::v1::CreateNexusEndpointRequest, workflowservice::v1::ListNamespacesRequest, }, - worker::WorkerConfigBuilder, + worker::{WorkerConfig, WorkerTaskTypes, WorkerVersioningStrategy}, }; use temporalio_sdk_core::{CoreRuntime, init_worker}; use tonic::IntoRequest; @@ -59,9 +59,13 @@ mod integ_tests { let _worker = init_worker( &runtime, - WorkerConfigBuilder::default() + WorkerConfig::builder() .namespace("default") .task_queue("Wheee!") + .task_types(WorkerTaskTypes::all()) + .versioning_strategy(WorkerVersioningStrategy::None { + build_id: "test".to_owned(), + }) .build() .unwrap(), // clone the client if you intend to use it later. Strip off the retry wrapper since diff --git a/crates/sdk-core/tests/manual_tests.rs b/crates/sdk-core/tests/manual_tests.rs index a1a2e896d..ccf5b2777 100644 --- a/crates/sdk-core/tests/manual_tests.rs +++ b/crates/sdk-core/tests/manual_tests.rs @@ -24,7 +24,7 @@ use temporalio_client::{ }; use temporalio_common::{ protos::coresdk::AsJsonPayloadExt, - telemetry::PrometheusExporterOptionsBuilder, + telemetry::PrometheusExporterOptions, worker::{PollerBehavior, WorkerTaskTypes}, }; use temporalio_sdk::{ActContext, ActivityOptions, WfContext}; @@ -39,31 +39,28 @@ async fn poller_load_spiky() { let (telemopts, addr, _aborter) = if std::env::var("PAR_JOBNUM").unwrap_or("1".to_string()) == "1" { prom_metrics(Some( - PrometheusExporterOptionsBuilder::default() + PrometheusExporterOptions::builder() .socket_addr(SocketAddr::V4("0.0.0.0:9999".parse().unwrap())) - .build() - .unwrap(), + .build(), )) } else { prom_metrics(None) }; let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let mut starter = CoreWfStarter::new_with_runtime("poller_load", rt); - starter - .worker_config - 
.max_cached_workflows(5000_usize) - .max_outstanding_workflow_tasks(1000_usize) - .max_outstanding_activities(1000_usize) - .workflow_task_poller_behavior(PollerBehavior::Autoscaling { - minimum: 1, - maximum: 200, - initial: 5, - }) - .activity_task_poller_behavior(PollerBehavior::Autoscaling { - minimum: 1, - maximum: 200, - initial: 5, - }); + starter.worker_config.max_cached_workflows = 5000; + starter.worker_config.max_outstanding_workflow_tasks = Some(1000); + starter.worker_config.max_outstanding_activities = Some(1000); + starter.worker_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { + minimum: 1, + maximum: 200, + initial: 5, + }; + starter.worker_config.activity_task_poller_behavior = PollerBehavior::Autoscaling { + minimum: 1, + maximum: 200, + initial: 5, + }; let mut worker = starter.worker().await; let submitter = worker.get_submitter_handle(); worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { @@ -198,26 +195,23 @@ async fn poller_load_sustained() { let (telemopts, addr, _aborter) = if std::env::var("PAR_JOBNUM").unwrap_or("1".to_string()) == "1" { prom_metrics(Some( - PrometheusExporterOptionsBuilder::default() + PrometheusExporterOptions::builder() .socket_addr(SocketAddr::V4("0.0.0.0:9999".parse().unwrap())) - .build() - .unwrap(), + .build(), )) } else { prom_metrics(None) }; let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let mut starter = CoreWfStarter::new_with_runtime("poller_load", rt); - starter - .worker_config - .max_cached_workflows(5000_usize) - .max_outstanding_workflow_tasks(1000_usize) - .workflow_task_poller_behavior(PollerBehavior::Autoscaling { - minimum: 1, - maximum: 200, - initial: 5, - }) - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.max_cached_workflows = 5000; + starter.worker_config.max_outstanding_workflow_tasks = Some(1000); + starter.worker_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { + 
minimum: 1, + maximum: 200, + initial: 5, + }; + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut worker = starter.worker().await; worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { let sigchan = ctx.make_signal_channel(SIGNAME).map(Ok); @@ -289,30 +283,27 @@ async fn poller_load_spike_then_sustained() { let (telemopts, addr, _aborter) = if std::env::var("PAR_JOBNUM").unwrap_or("1".to_string()) == "1" { prom_metrics(Some( - PrometheusExporterOptionsBuilder::default() + PrometheusExporterOptions::builder() .socket_addr(SocketAddr::V4("0.0.0.0:9999".parse().unwrap())) - .build() - .unwrap(), + .build(), )) } else { prom_metrics(None) }; let rt = CoreRuntime::new_assume_tokio(get_integ_runtime_options(telemopts)).unwrap(); let mut starter = CoreWfStarter::new_with_runtime("poller_load", rt); - starter - .worker_config - .max_cached_workflows(5000_usize) - .max_outstanding_workflow_tasks(1000_usize) - .workflow_task_poller_behavior(PollerBehavior::Autoscaling { - minimum: 1, - maximum: 200, - initial: 5, - }) - .activity_task_poller_behavior(PollerBehavior::Autoscaling { - minimum: 1, - maximum: 200, - initial: 5, - }); + starter.worker_config.max_cached_workflows = 5000; + starter.worker_config.max_outstanding_workflow_tasks = Some(1000); + starter.worker_config.workflow_task_poller_behavior = PollerBehavior::Autoscaling { + minimum: 1, + maximum: 200, + initial: 5, + }; + starter.worker_config.activity_task_poller_behavior = PollerBehavior::Autoscaling { + minimum: 1, + maximum: 200, + initial: 5, + }; let mut worker = starter.worker().await; let submitter = worker.get_submitter_handle(); worker.register_wf(wf_name.to_owned(), |ctx: WfContext| async move { diff --git a/crates/sdk-core/tests/runner.rs b/crates/sdk-core/tests/runner.rs index 9a30d9aae..542207b8e 100644 --- a/crates/sdk-core/tests/runner.rs +++ b/crates/sdk-core/tests/runner.rs @@ -11,7 +11,7 @@ use std::{ path::{Path, PathBuf}, process::Stdio, }; -use 
temporalio_sdk_core::ephemeral_server::{TestServerConfigBuilder, default_cached_download}; +use temporalio_sdk_core::ephemeral_server::{TestServerConfig, default_cached_download}; use tokio::{self, process::Command}; /// This env var is set (to any value) if temporal CLI dev server is in use @@ -95,9 +95,7 @@ async fn main() -> Result<(), anyhow::Error> { let (server, envs) = match server_kind { ServerKind::TemporalCLI => { let config = - integ_dev_server_config(vec!["--http-port".to_string(), "7243".to_string()]) - .ui(true) - .build()?; + integ_dev_server_config(vec!["--http-port".to_string(), "7243".to_string()], true); println!("Using temporal CLI: {config:?}"); ( Some( @@ -109,9 +107,9 @@ async fn main() -> Result<(), anyhow::Error> { ) } ServerKind::TestServer => { - let config = TestServerConfigBuilder::default() + let config = TestServerConfig::builder() .exe(default_cached_download()) - .build()?; + .build(); println!("Using java test server"); ( Some( diff --git a/crates/sdk-core/tests/shared_tests/mod.rs b/crates/sdk-core/tests/shared_tests/mod.rs index 29ac06177..97e8d8b8f 100644 --- a/crates/sdk-core/tests/shared_tests/mod.rs +++ b/crates/sdk-core/tests/shared_tests/mod.rs @@ -18,9 +18,7 @@ pub(crate) async fn grpc_message_too_large() { let mut starter = CoreWfStarter::new_cloud_or_local(wf_name, "") .await .unwrap(); - starter - .worker_config - .task_types(WorkerTaskTypes::workflow_only()); + starter.worker_config.task_types = WorkerTaskTypes::workflow_only(); let mut core = starter.worker().await; static OVERSIZE_GRPC_MESSAGE_RUN: AtomicBool = AtomicBool::new(false); diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index 3dfd05283..82e892429 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -10,27 +10,29 @@ //! ```no_run //! use std::{str::FromStr, sync::Arc}; //! use temporalio_sdk::{sdk_client_options, ActContext, Worker}; -//! use temporalio_sdk_core::{init_worker, Url, CoreRuntime, RuntimeOptionsBuilder}; +//! 
use temporalio_sdk_core::{init_worker, Url, CoreRuntime, RuntimeOptions}; //! use temporalio_common::{ -//! worker::{WorkerConfigBuilder, WorkerVersioningStrategy}, -//! telemetry::TelemetryOptionsBuilder +//! worker::{WorkerConfig, WorkerTaskTypes, WorkerVersioningStrategy}, +//! telemetry::TelemetryOptions //! }; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { //! let server_options = sdk_client_options(Url::from_str("http://localhost:7233")?).build(); //! -//! let telemetry_options = TelemetryOptionsBuilder::default().build()?; -//! let runtime_options = RuntimeOptionsBuilder::default().telemetry_options(telemetry_options).build().unwrap(); +//! let telemetry_options = TelemetryOptions::builder().build(); +//! let runtime_options = RuntimeOptions::builder().telemetry_options(telemetry_options).build().unwrap(); //! let runtime = CoreRuntime::new_assume_tokio(runtime_options)?; //! //! let client = server_options.connect("default", None).await?; //! -//! let worker_config = WorkerConfigBuilder::default() +//! let worker_config = WorkerConfig::builder() //! .namespace("default") //! .task_queue("task_queue") +//! .task_types(WorkerTaskTypes::activity_only()) //! .versioning_strategy(WorkerVersioningStrategy::None { build_id: "rust-sdk".to_owned() }) -//! .build()?; +//! .build() +//! .unwrap(); //! //! let core_worker = init_worker(&runtime, worker_config, client)?; //! @@ -346,7 +348,7 @@ impl Worker { wf_completion_processor, )?; - info!("Polling loops exited"); + debug!("Polling loops exited"); if let Some(i) = self.common.worker_interceptor.as_ref() { i.on_shutdown(self); }