From 5c80501000e56e8b8bc4e24d27ecfc1424a3fe01 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Mon, 11 Aug 2025 13:38:17 -0700
Subject: [PATCH 01/53] save
---
Directory.Packages.props | 6 ++
src/Abstractions/Abstractions.csproj | 1 +
.../Converters/BlobPayloadStore.cs | 98 +++++++++++++++++++
src/Abstractions/Converters/IPayloadStore.cs | 27 +++++
.../Converters/LargePayloadDataConverter.cs | 83 ++++++++++++++++
.../Converters/LargePayloadStorageOptions.cs | 41 ++++++++
src/Abstractions/DataConverter.cs | 11 ++-
.../DurableTaskClientBuilderExtensions.cs | 36 +++++++
.../DurableTaskWorkerBuilderExtensions.cs | 33 +++++++
src/Worker/Core/DurableTaskWorkerOptions.cs | 1 -
10 files changed, 333 insertions(+), 4 deletions(-)
create mode 100644 src/Abstractions/Converters/BlobPayloadStore.cs
create mode 100644 src/Abstractions/Converters/IPayloadStore.cs
create mode 100644 src/Abstractions/Converters/LargePayloadDataConverter.cs
create mode 100644 src/Abstractions/Converters/LargePayloadStorageOptions.cs
diff --git a/Directory.Packages.props b/Directory.Packages.props
index 8388030f..7a24d9b7 100644
--- a/Directory.Packages.props
+++ b/Directory.Packages.props
@@ -23,6 +23,7 @@
+
@@ -33,6 +34,11 @@
+
+
+
+
+
diff --git a/src/Abstractions/Abstractions.csproj b/src/Abstractions/Abstractions.csproj
index db8be76a..ab32b488 100644
--- a/src/Abstractions/Abstractions.csproj
+++ b/src/Abstractions/Abstractions.csproj
@@ -13,6 +13,7 @@
+
diff --git a/src/Abstractions/Converters/BlobPayloadStore.cs b/src/Abstractions/Converters/BlobPayloadStore.cs
new file mode 100644
index 00000000..c746a517
--- /dev/null
+++ b/src/Abstractions/Converters/BlobPayloadStore.cs
@@ -0,0 +1,98 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+using System.Globalization;
+using System.IO.Compression;
+using System.Text;
+using Azure;
+using Azure.Storage.Blobs;
+using Azure.Storage.Blobs.Models;
+
+namespace Microsoft.DurableTask.Converters;
+
+/// <summary>
+/// Azure Blob Storage implementation of <see cref="IPayloadStore"/>.
+/// Stores payloads as blobs and returns opaque tokens in the form "dtp:v1:&lt;container&gt;:&lt;blobName&gt;".
+/// </summary>
+public sealed class BlobPayloadStore : IPayloadStore
+{
+ readonly BlobContainerClient containerClient;
+ readonly LargePayloadStorageOptions options;
+
+ /// <summary>
+ /// Initializes a new instance of the <see cref="BlobPayloadStore"/> class.
+ /// </summary>
+ /// <param name="options">The options for the blob payload store.</param>
+ /// <exception cref="ArgumentNullException">Thrown when <paramref name="options"/> is null.</exception>
+ /// <exception cref="ArgumentException">Thrown when the connection string or container name is null or empty.</exception>
+ public BlobPayloadStore(LargePayloadStorageOptions options)
+ {
+ this.options = options ?? throw new ArgumentNullException(nameof(options));
+
+ Check.NotNullOrEmpty(options.ConnectionString, nameof(options.ConnectionString));
+ Check.NotNullOrEmpty(options.ContainerName, nameof(options.ContainerName));
+
+ BlobServiceClient serviceClient = new(options.ConnectionString);
+ this.containerClient = serviceClient.GetBlobContainerClient(options.ContainerName);
+ }
+
+ /// <inheritdoc/>
+ public async Task<string> UploadAsync(string contentType, ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
+ {
+ // Ensure container exists
+ await this.containerClient.CreateIfNotExistsAsync(PublicAccessType.None, cancellationToken: cancellationToken).ConfigureAwait(false);
+
+ // One blob per payload using GUID-based name for uniqueness
+ string timestamp = DateTimeOffset.UtcNow.ToString("yyyy/MM/dd/HH", CultureInfo.InvariantCulture);
+ string blobName = $"{timestamp}/{Guid.NewGuid():N}.bin";
+ BlobClient blob = this.containerClient.GetBlobClient(blobName);
+
+ byte[] payloadBuffer = payloadBytes.ToArray();
+
+ // Compress and upload streaming
+ using Stream blobStream = await blob.OpenWriteAsync(overwrite: true, cancellationToken: cancellationToken).ConfigureAwait(false);
+ using GZipStream compressedBlobStream = new(blobStream, CompressionLevel.Optimal, leaveOpen: true);
+ using MemoryStream payloadStream = new(payloadBuffer, writable: false);
+
+ await payloadStream.CopyToAsync(compressedBlobStream, bufferSize: 81920, cancellationToken).ConfigureAwait(false);
+ await compressedBlobStream.FlushAsync(cancellationToken).ConfigureAwait(false);
+ await blobStream.FlushAsync(cancellationToken).ConfigureAwait(false);
+
+ return EncodeToken(this.containerClient.Name, blobName);
+ }
+
+ /// <inheritdoc/>
+ public async Task<string> DownloadAsync(string token, CancellationToken cancellationToken)
+ {
+ (string container, string name) = DecodeToken(token);
+ if (!string.Equals(container, this.containerClient.Name, StringComparison.Ordinal))
+ {
+ throw new ArgumentException("Token container does not match configured container.", nameof(token));
+ }
+
+ BlobClient blob = this.containerClient.GetBlobClient(name);
+ using BlobDownloadStreamingResult result = await blob.DownloadStreamingAsync(cancellationToken: cancellationToken).ConfigureAwait(false);
+ using GZipStream decompressedBlobStream = new GZipStream(result.Content, CompressionMode.Decompress);
+ using StreamReader reader = new(decompressedBlobStream, Encoding.UTF8);
+ return await reader.ReadToEndAsync();
+ }
+
+ static string EncodeToken(string container, string name) => $"dtp:v1:{container}:{name}";
+
+ static (string Container, string Name) DecodeToken(string token)
+ {
+ if (!token.StartsWith("dtp:v1:", StringComparison.Ordinal))
+ {
+ throw new ArgumentException("Invalid external payload token.", nameof(token));
+ }
+
+ string rest = token.Substring("dtp:v1:".Length);
+ int sep = rest.IndexOf(':');
+ if (sep <= 0 || sep >= rest.Length - 1)
+ {
+ throw new ArgumentException("Invalid external payload token format.", nameof(token));
+ }
+
+ return (rest.Substring(0, sep), rest.Substring(sep + 1));
+ }
+}
diff --git a/src/Abstractions/Converters/IPayloadStore.cs b/src/Abstractions/Converters/IPayloadStore.cs
new file mode 100644
index 00000000..2dc09465
--- /dev/null
+++ b/src/Abstractions/Converters/IPayloadStore.cs
@@ -0,0 +1,27 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+namespace Microsoft.DurableTask.Converters;
+
+/// <summary>
+/// Abstraction for storing and retrieving large payloads out-of-band.
+/// </summary>
+public interface IPayloadStore
+{
+ /// <summary>
+ /// Uploads a payload and returns an opaque reference token that can be embedded in orchestration messages.
+ /// </summary>
+ /// <param name="contentType">The content type of the payload (e.g., application/json).</param>
+ /// <param name="payloadBytes">The payload bytes.</param>
+ /// <param name="cancellationToken">Cancellation token.</param>
+ /// <returns>Opaque reference token.</returns>
+ Task<string> UploadAsync(string contentType, ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken);
+
+ /// <summary>
+ /// Downloads the payload referenced by the token.
+ /// </summary>
+ /// <param name="token">The opaque reference token.</param>
+ /// <param name="cancellationToken">Cancellation token.</param>
+ /// <returns>Payload string.</returns>
+ Task<string> DownloadAsync(string token, CancellationToken cancellationToken);
+}
diff --git a/src/Abstractions/Converters/LargePayloadDataConverter.cs b/src/Abstractions/Converters/LargePayloadDataConverter.cs
new file mode 100644
index 00000000..327d1751
--- /dev/null
+++ b/src/Abstractions/Converters/LargePayloadDataConverter.cs
@@ -0,0 +1,83 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+using System.Text;
+
+namespace Microsoft.DurableTask.Converters;
+
+/// <summary>
+/// A DataConverter that wraps another DataConverter and externalizes payloads larger than a configured threshold.
+/// It uploads large payloads to an <see cref="IPayloadStore"/> and returns a reference token string.
+/// On deserialization, it resolves tokens and feeds the underlying converter the original content.
+/// </summary>
+/// <remarks>
+/// Initializes a new instance of the <see cref="LargePayloadDataConverter"/> class.
+/// </remarks>
+/// <param name="innerConverter">The inner data converter to wrap.</param>
+/// <param name="payloadStore">The external payload store to use.</param>
+/// <param name="largePayloadStorageOptions">The options for the externalizing data converter.</param>
+/// <exception cref="ArgumentNullException">Thrown when <paramref name="innerConverter"/>, <paramref name="payloadStore"/>, or <paramref name="largePayloadStorageOptions"/> is null.</exception>
+public sealed class LargePayloadDataConverter(DataConverter innerConverter, IPayloadStore payloadStore, LargePayloadStorageOptions largePayloadStorageOptions) : DataConverter
+{
+ const string TokenPrefix = "dtp:v1:"; // matches BlobPayloadStore
+
+ readonly DataConverter innerConverter = innerConverter ?? throw new ArgumentNullException(nameof(innerConverter));
+ readonly IPayloadStore payLoadStore = payloadStore ?? throw new ArgumentNullException(nameof(payloadStore));
+ readonly LargePayloadStorageOptions largePayloadStorageOptions = largePayloadStorageOptions ?? throw new ArgumentNullException(nameof(largePayloadStorageOptions));
+ readonly Encoding utf8 = new UTF8Encoding(false);
+
+ /// <inheritdoc/>
+ public override bool UsesExternalStorage => this.largePayloadStorageOptions.Enabled || this.innerConverter.UsesExternalStorage;
+
+ /// <summary>
+ /// Serializes the value to a JSON string and uploads it to the external payload store if it exceeds the configured threshold.
+ /// </summary>
+ /// <param name="value">The value to serialize.</param>
+ /// <returns>The serialized value or the token if externalized.</returns>
+ public override string? Serialize(object? value)
+ {
+ if (value is null)
+ {
+ return null;
+ }
+
+ string json = this.innerConverter.Serialize(value) ?? "null";
+ if (!this.largePayloadStorageOptions.Enabled)
+ {
+ return json;
+ }
+
+ int byteCount = this.utf8.GetByteCount(json);
+ if (byteCount < this.largePayloadStorageOptions.ExternalizeThresholdBytes)
+ {
+ return json;
+ }
+
+ // Upload synchronously in this context by blocking on async. SDK call sites already run on threadpool.
+ byte[] bytes = this.utf8.GetBytes(json);
+ string token = this.payLoadStore.UploadAsync("application/json", bytes, CancellationToken.None).GetAwaiter().GetResult();
+ return token;
+ }
+
+ /// <summary>
+ /// Deserializes the JSON string or resolves the token to the original value.
+ /// </summary>
+ /// <param name="data">The JSON string or token.</param>
+ /// <param name="targetType">The type to deserialize to.</param>
+ /// <returns>The deserialized value.</returns>
+ public override object? Deserialize(string? data, Type targetType)
+ {
+ if (data is null)
+ {
+ return null;
+ }
+
+ string toDeserialize = data;
+ if (this.largePayloadStorageOptions.Enabled && data.StartsWith(TokenPrefix, StringComparison.Ordinal))
+ {
+ toDeserialize = this.payLoadStore.DownloadAsync(data, CancellationToken.None).GetAwaiter().GetResult();
+ }
+
+ return this.innerConverter.Deserialize(toDeserialize, targetType);
+ }
+}
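For illustration, a minimal round-trip sketch of the converter defined above; the InMemoryStore test double, the literal threshold, and the connection string are assumptions for the sketch, not part of this patch.

// Illustration only: round-tripping a large value through LargePayloadDataConverter
// with a hypothetical in-memory IPayloadStore (InMemoryStore is not part of this patch).
using System.Collections.Concurrent;
using System.Text;
using Microsoft.DurableTask.Converters;

var options = new LargePayloadStorageOptions("UseDevelopmentStorage=true") { ExternalizeThresholdBytes = 16 };
var converter = new LargePayloadDataConverter(JsonDataConverter.Default, new InMemoryStore(), options);

string? serialized = converter.Serialize(new string('A', 1024));               // above threshold -> "dtp:v1:..." token
string? restored = (string?)converter.Deserialize(serialized, typeof(string)); // token resolved back to the 1 KB string
Console.WriteLine($"{serialized} -> {restored!.Length} chars");

sealed class InMemoryStore : IPayloadStore
{
    readonly ConcurrentDictionary<string, string> payloads = new();

    public Task<string> UploadAsync(string contentType, ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
    {
        // Hand back a token with the same prefix the converter recognizes.
        string token = $"dtp:v1:mem:{Guid.NewGuid():N}";
        this.payloads[token] = Encoding.UTF8.GetString(payloadBytes.ToArray());
        return Task.FromResult(token);
    }

    public Task<string> DownloadAsync(string token, CancellationToken cancellationToken)
        => Task.FromResult(this.payloads[token]);
}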
diff --git a/src/Abstractions/Converters/LargePayloadStorageOptions.cs b/src/Abstractions/Converters/LargePayloadStorageOptions.cs
new file mode 100644
index 00000000..19585f60
--- /dev/null
+++ b/src/Abstractions/Converters/LargePayloadStorageOptions.cs
@@ -0,0 +1,41 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+// Intentionally no DataAnnotations to avoid extra package requirements in minimal hosts.
+namespace Microsoft.DurableTask.Converters;
+
+/// <summary>
+/// Options for externalized payload storage, used by SDKs to store large payloads out-of-band.
+/// </summary>
+public sealed class LargePayloadStorageOptions
+{
+ /// <summary>
+ /// Initializes a new instance of the <see cref="LargePayloadStorageOptions"/> class.
+ /// </summary>
+ /// <param name="connectionString">The Azure Storage connection string to the customer's storage account.</param>
+ public LargePayloadStorageOptions(string connectionString)
+ {
+ Check.NotNullOrEmpty(connectionString, nameof(connectionString));
+ this.ConnectionString = connectionString;
+ }
+
+ /// <summary>
+ /// Gets or sets a value indicating whether externalized payload storage is enabled.
+ /// </summary>
+ public bool Enabled { get; set; } = true;
+
+ /// <summary>
+ /// Gets or sets the threshold in bytes at which payloads are externalized. Default is 900_000 bytes.
+ /// </summary>
+ public int ExternalizeThresholdBytes { get; set; } = 900_000; // leave headroom below 1MB
+
+ /// <summary>
+ /// Gets or sets the Azure Storage connection string to the customer's storage account. Required.
+ /// </summary>
+ public string ConnectionString { get; set; }
+
+ /// <summary>
+ /// Gets or sets the blob container name to use for payloads. Defaults to "durabletask-payloads".
+ /// </summary>
+ public string ContainerName { get; set; } = "durabletask-payloads";
+}
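Since these options usually come from host configuration rather than hard-coded values, a small binding sketch may help; the "DurableTask:LargePayloads" section name and the in-memory values are assumptions, not something this patch defines.

// Illustration only: binding LargePayloadStorageOptions from configuration.
using Microsoft.DurableTask.Converters;
using Microsoft.Extensions.Configuration;

IConfiguration config = new ConfigurationBuilder()
    .AddInMemoryCollection(new Dictionary<string, string?>
    {
        ["DurableTask:LargePayloads:Enabled"] = "true",
        ["DurableTask:LargePayloads:ExternalizeThresholdBytes"] = "900000",
        ["DurableTask:LargePayloads:ConnectionString"] = "UseDevelopmentStorage=true",
        ["DurableTask:LargePayloads:ContainerName"] = "durabletask-payloads",
    })
    .Build();

var options = new LargePayloadStorageOptions("UseDevelopmentStorage=true");
config.GetSection("DurableTask:LargePayloads").Bind(options); // overwrites ConnectionString and friends from config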
diff --git a/src/Abstractions/DataConverter.cs b/src/Abstractions/DataConverter.cs
index 3248761a..6d33be6a 100644
--- a/src/Abstractions/DataConverter.cs
+++ b/src/Abstractions/DataConverter.cs
@@ -10,12 +10,17 @@ namespace Microsoft.DurableTask;
///
///
/// Implementations of this abstract class are free to use any serialization method. The default implementation
-/// uses the JSON serializer from the System.Text.Json namespace. Currently only strings are supported as
-/// the serialized representation of data. Byte array payloads and streams are not supported by this abstraction.
-/// Note that these methods all accept null values, in which case the return value should also be null.
+/// uses the JSON serializer from the System.Text.Json namespace. Implementations may optionally externalize
+/// large payloads and return an opaque reference string that can be resolved during deserialization.
+/// These methods all accept null values, in which case the return value should also be null.
///
public abstract class DataConverter
{
+ /// <summary>
+ /// Gets a value indicating whether this converter may return an external reference token instead of inline JSON.
+ /// </summary>
+ public virtual bool UsesExternalStorage => false;
+
///
/// Serializes into a text string.
///
diff --git a/src/Client/Core/DependencyInjection/DurableTaskClientBuilderExtensions.cs b/src/Client/Core/DependencyInjection/DurableTaskClientBuilderExtensions.cs
index 5c854759..9e072a9a 100644
--- a/src/Client/Core/DependencyInjection/DurableTaskClientBuilderExtensions.cs
+++ b/src/Client/Core/DependencyInjection/DurableTaskClientBuilderExtensions.cs
@@ -1,6 +1,7 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
+using Microsoft.DurableTask.Converters;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Options;
@@ -101,4 +102,39 @@ public static IDurableTaskClientBuilder UseDefaultVersion(this IDurableTaskClien
builder.Configure(options => options.DefaultVersion = version);
return builder;
}
+
+ /// <summary>
+ /// Enables externalized payload storage using Azure Blob Storage.
+ /// Registers <see cref="BlobPayloadStore"/>, and wraps the
+ /// configured <see cref="DataConverter"/> in a <see cref="LargePayloadDataConverter"/> for this client name.
+ /// </summary>
+ /// <param name="builder">The <see cref="IDurableTaskClientBuilder"/> to configure.</param>
+ /// <param name="configure">The action to configure the <see cref="LargePayloadStorageOptions"/>.</param>
+ /// <returns>The <see cref="IDurableTaskClientBuilder"/>.</returns>
+ public static IDurableTaskClientBuilder UseExternalizedPayloads(
+ this IDurableTaskClientBuilder builder,
+ Action<LargePayloadStorageOptions> configure)
+ {
+ Check.NotNull(builder);
+ Check.NotNull(configure);
+
+ builder.Services.Configure(builder.Name, configure);
+ builder.Services.AddSingleton<IPayloadStore>(sp =>
+ {
+ LargePayloadStorageOptions opts = sp.GetRequiredService<IOptionsMonitor<LargePayloadStorageOptions>>().Get(builder.Name);
+ return new BlobPayloadStore(opts);
+ });
+
+ // Wrap DataConverter for this named client without building a ServiceProvider
+ builder.Services
+ .AddOptions<DurableTaskClientOptions>(builder.Name)
+ .PostConfigure<IPayloadStore, IOptionsMonitor<LargePayloadStorageOptions>>((opt, store, monitor) =>
+ {
+ LargePayloadStorageOptions opts = monitor.Get(builder.Name);
+ DataConverter inner = opt.DataConverter ?? Converters.JsonDataConverter.Default;
+ opt.DataConverter = new LargePayloadDataConverter(inner, store, opts);
+ });
+
+ return builder;
+ }
}
diff --git a/src/Worker/Core/DependencyInjection/DurableTaskWorkerBuilderExtensions.cs b/src/Worker/Core/DependencyInjection/DurableTaskWorkerBuilderExtensions.cs
index 3f349b71..61e0d21d 100644
--- a/src/Worker/Core/DependencyInjection/DurableTaskWorkerBuilderExtensions.cs
+++ b/src/Worker/Core/DependencyInjection/DurableTaskWorkerBuilderExtensions.cs
@@ -4,6 +4,7 @@
using Microsoft.DurableTask.Worker.Hosting;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Options;
+using Microsoft.DurableTask.Converters;
using static Microsoft.DurableTask.Worker.DurableTaskWorkerOptions;
namespace Microsoft.DurableTask.Worker;
@@ -137,4 +138,36 @@ public static IDurableTaskWorkerBuilder UseOrchestrationFilter(this IDurableTask
builder.Services.AddSingleton(filter);
return builder;
}
+
+ /// <summary>
+ /// Enables externalized payload storage for the worker's data converter to mirror client behavior.
+ /// </summary>
+ /// <param name="builder">The <see cref="IDurableTaskWorkerBuilder"/> to configure.</param>
+ /// <param name="configure">The action to configure the <see cref="LargePayloadStorageOptions"/>.</param>
+ /// <returns>The <see cref="IDurableTaskWorkerBuilder"/>.</returns>
+ public static IDurableTaskWorkerBuilder UseExternalizedPayloads(
+ this IDurableTaskWorkerBuilder builder,
+ Action<LargePayloadStorageOptions> configure)
+ {
+ Check.NotNull(builder);
+ Check.NotNull(configure);
+
+ builder.Services.Configure(builder.Name, configure);
+ builder.Services.AddSingleton<IPayloadStore>(sp =>
+ {
+ LargePayloadStorageOptions opts = sp.GetRequiredService<IOptionsMonitor<LargePayloadStorageOptions>>().Get(builder.Name);
+ return new BlobPayloadStore(opts);
+ });
+
+ builder.Services
+ .AddOptions<DurableTaskWorkerOptions>(builder.Name)
+ .PostConfigure<IPayloadStore, IOptionsMonitor<LargePayloadStorageOptions>>((opt, store, monitor) =>
+ {
+ LargePayloadStorageOptions opts = monitor.Get(builder.Name);
+ DataConverter inner = opt.DataConverter ?? Converters.JsonDataConverter.Default;
+ opt.DataConverter = new LargePayloadDataConverter(inner, store, opts);
+ });
+
+ return builder;
+ }
}
diff --git a/src/Worker/Core/DurableTaskWorkerOptions.cs b/src/Worker/Core/DurableTaskWorkerOptions.cs
index 703bbbd4..c65ccdbd 100644
--- a/src/Worker/Core/DurableTaskWorkerOptions.cs
+++ b/src/Worker/Core/DurableTaskWorkerOptions.cs
@@ -162,7 +162,6 @@ public DataConverter DataConverter
///
internal bool DataConverterExplicitlySet { get; private set; }
-
///
/// Applies these option values to another.
///
From 560ecabeb320ca90bc5a1fd9b3fa8e12f723ef68 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Tue, 12 Aug 2025 17:56:09 -0700
Subject: [PATCH 02/53] tests
---
.../Converters/LargePayloadDataConverter.cs | 13 +-
.../Converters/LargePayloadStorageOptions.cs | 10 +-
.../LargePayloadTests.cs | 398 ++++++++++++++++++
3 files changed, 419 insertions(+), 2 deletions(-)
create mode 100644 test/Grpc.IntegrationTests/LargePayloadTests.cs
diff --git a/src/Abstractions/Converters/LargePayloadDataConverter.cs b/src/Abstractions/Converters/LargePayloadDataConverter.cs
index 327d1751..5748198d 100644
--- a/src/Abstractions/Converters/LargePayloadDataConverter.cs
+++ b/src/Abstractions/Converters/LargePayloadDataConverter.cs
@@ -78,6 +78,17 @@ public sealed class LargePayloadDataConverter(DataConverter innerConverter, IPay
toDeserialize = this.payLoadStore.DownloadAsync(data, CancellationToken.None).GetAwaiter().GetResult();
}
- return this.innerConverter.Deserialize(toDeserialize, targetType);
+ return this.innerConverter.Deserialize(StripArrayCharacters(toDeserialize), targetType);
+ }
+
+ static string? StripArrayCharacters(string? input)
+ {
+ if (input != null && input.StartsWith('[') && input.EndsWith(']'))
+ {
+ // Strip the outer bracket characters
+ return input[1..^1];
+ }
+
+ return input;
}
}
diff --git a/src/Abstractions/Converters/LargePayloadStorageOptions.cs b/src/Abstractions/Converters/LargePayloadStorageOptions.cs
index 19585f60..3e4b1b0c 100644
--- a/src/Abstractions/Converters/LargePayloadStorageOptions.cs
+++ b/src/Abstractions/Converters/LargePayloadStorageOptions.cs
@@ -9,6 +9,14 @@ namespace Microsoft.DurableTask.Converters;
///
public sealed class LargePayloadStorageOptions
{
+ /// <summary>
+ /// Initializes a new instance of the <see cref="LargePayloadStorageOptions"/> class.
+ /// Parameterless constructor required for options activation.
+ /// </summary>
+ public LargePayloadStorageOptions()
+ {
+ }
+
///
/// Initializes a new instance of the class.
///
@@ -32,7 +40,7 @@ public LargePayloadStorageOptions(string connectionString)
///
/// Gets or sets the Azure Storage connection string to the customer's storage account. Required.
///
- public string ConnectionString { get; set; }
+ public string ConnectionString { get; set; } = string.Empty;
///
/// Gets or sets the blob container name to use for payloads. Defaults to "durabletask-payloads".
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
new file mode 100644
index 00000000..d3cf89db
--- /dev/null
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -0,0 +1,398 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+using Microsoft.DurableTask.Client;
+using Microsoft.DurableTask.Converters;
+using Microsoft.DurableTask.Worker;
+using Microsoft.Extensions.DependencyInjection;
+using Xunit.Abstractions;
+
+namespace Microsoft.DurableTask.Grpc.Tests;
+
+public class LargePayloadTests(ITestOutputHelper output, GrpcSidecarFixture sidecarFixture) : IntegrationTestBase(output, sidecarFixture)
+{
+ [Fact]
+ public async Task OrchestrationInput_IsExternalizedByClient_ResolvedByWorker()
+ {
+ string largeInput = new string('A', 1024 * 1024); // 1MB
+ TaskName orchestratorName = nameof(OrchestrationInput_IsExternalizedByClient_ResolvedByWorker);
+
+ InMemoryPayloadStore fakeStore = new InMemoryPayloadStore();
+
+ await using HostTestLifetime server = await this.StartWorkerAsync(
+ worker =>
+ {
+ worker.AddTasks(tasks => tasks.AddOrchestratorFunc<string, string>(
+ orchestratorName,
+ (ctx, input) => Task.FromResult(input)));
+
+ // Enable externalization on the worker
+ worker.UseExternalizedPayloads(opts =>
+ {
+ opts.Enabled = true;
+ opts.ExternalizeThresholdBytes = 1024; // small threshold to force externalization for test data
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
+
+ // Override store with in-memory test double
+ worker.Services.AddSingleton<IPayloadStore>(fakeStore);
+ },
+ client =>
+ {
+ // Enable externalization on the client
+ client.UseExternalizedPayloads(opts =>
+ {
+ opts.Enabled = true;
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
+
+ // Override store with in-memory test double
+ client.Services.AddSingleton<IPayloadStore>(fakeStore);
+ });
+
+ string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orchestratorName, input: largeInput);
+
+ OrchestrationMetadata completed = await server.Client.WaitForInstanceCompletionAsync(
+ instanceId, getInputsAndOutputs: true, this.TimeoutToken);
+
+ Assert.Equal(OrchestrationRuntimeStatus.Completed, completed.RuntimeStatus);
+
+ // Validate that the input made a roundtrip and was resolved on the worker
+ string? echoed = completed.ReadOutputAs<string>();
+ Assert.NotNull(echoed);
+ Assert.Equal(largeInput.Length, echoed!.Length);
+
+ // Ensure client externalized the input
+ Assert.True(fakeStore.UploadCount >= 1);
+ }
+
+ [Fact]
+ public async Task ActivityInput_IsExternalizedByWorker_ResolvedByActivity()
+ {
+ string largeParam = new string('P', 700 * 1024); // 700KB
+ TaskName orchestratorName = nameof(ActivityInput_IsExternalizedByWorker_ResolvedByActivity);
+ TaskName activityName = "EchoLength";
+
+ InMemoryPayloadStore workerStore = new InMemoryPayloadStore();
+
+ await using HostTestLifetime server = await this.StartWorkerAsync(
+ worker =>
+ {
+ worker.AddTasks(tasks => tasks
+ .AddOrchestratorFunc
diff --git a/src/Abstractions/Abstractions.csproj b/src/Abstractions/Abstractions.csproj
index ab32b488..db8be76a 100644
--- a/src/Abstractions/Abstractions.csproj
+++ b/src/Abstractions/Abstractions.csproj
@@ -13,7 +13,6 @@
-
diff --git a/src/Client/Core/DependencyInjection/DurableTaskClientBuilderExtensions.cs b/src/Client/Core/DependencyInjection/DurableTaskClientBuilderExtensions.cs
index 9e072a9a..b3e91a8f 100644
--- a/src/Client/Core/DependencyInjection/DurableTaskClientBuilderExtensions.cs
+++ b/src/Client/Core/DependencyInjection/DurableTaskClientBuilderExtensions.cs
@@ -1,7 +1,6 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
-using Microsoft.DurableTask.Converters;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Options;
@@ -103,38 +102,5 @@ public static IDurableTaskClientBuilder UseDefaultVersion(this IDurableTaskClien
return builder;
}
- ///
- /// Enables externalized payload storage using Azure Blob Storage.
- /// Registers , and wraps the
- /// configured in an for this client name.
- ///
- /// The to configure.
- /// The action to configure the .
- /// The .
- public static IDurableTaskClientBuilder UseExternalizedPayloads(
- this IDurableTaskClientBuilder builder,
- Action configure)
- {
- Check.NotNull(builder);
- Check.NotNull(configure);
-
- builder.Services.Configure(builder.Name, configure);
- builder.Services.AddSingleton(sp =>
- {
- LargePayloadStorageOptions opts = sp.GetRequiredService>().Get(builder.Name);
- return new BlobPayloadStore(opts);
- });
-
- // Wrap DataConverter for this named client without building a ServiceProvider
- builder.Services
- .AddOptions(builder.Name)
- .PostConfigure>((opt, store, monitor) =>
- {
- LargePayloadStorageOptions opts = monitor.Get(builder.Name);
- DataConverter inner = opt.DataConverter ?? Converters.JsonDataConverter.Default;
- opt.DataConverter = new LargePayloadDataConverter(inner, store, opts);
- });
-
- return builder;
- }
+ // Large payload enablement moved to Microsoft.DurableTask.Extensions.AzureBlobPayloads package.
}
diff --git a/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj b/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj
new file mode 100644
index 00000000..54883acf
--- /dev/null
+++ b/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj
@@ -0,0 +1,28 @@
+
+
+
+ netstandard2.0
+ Azure Blob Storage externalized payload support for Durable Task.
+ Microsoft.DurableTask.Extensions.AzureBlobPayloads
+ Microsoft.DurableTask
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/Abstractions/Converters/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
similarity index 99%
rename from src/Abstractions/Converters/BlobPayloadStore.cs
rename to src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
index 637ccf12..4e1275c6 100644
--- a/src/Abstractions/Converters/BlobPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
@@ -49,7 +49,7 @@ public async Task UploadAsync(ReadOnlyMemory payloadBytes, Cancell
byte[] payloadBuffer = payloadBytes.ToArray();
- // Compress and upload streaming
+ // Compress and upload streaming
using Stream blobStream = await blob.OpenWriteAsync(overwrite: true, cancellationToken: cancellationToken).ConfigureAwait(false);
using GZipStream compressedBlobStream = new(blobStream, CompressionLevel.Optimal, leaveOpen: true);
using MemoryStream payloadStream = new(payloadBuffer, writable: false);
@@ -96,3 +96,5 @@ public async Task DownloadAsync(string token, CancellationToken cancella
return (rest.Substring(0, sep), rest.Substring(sep + 1));
}
}
+
+
diff --git a/src/Abstractions/Converters/LargePayloadDataConverter.cs b/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
similarity index 99%
rename from src/Abstractions/Converters/LargePayloadDataConverter.cs
rename to src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
index 48db66e6..344c226c 100644
--- a/src/Abstractions/Converters/LargePayloadDataConverter.cs
+++ b/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
@@ -88,3 +88,5 @@ public sealed class LargePayloadDataConverter(DataConverter innerConverter, IPay
return input;
}
}
+
+
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
new file mode 100644
index 00000000..ef77d50f
--- /dev/null
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
@@ -0,0 +1,47 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+using Microsoft.DurableTask.Client;
+using Microsoft.DurableTask.Converters;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.DependencyInjection.Extensions;
+using Microsoft.Extensions.Options;
+
+namespace Microsoft.DurableTask.Client;
+
+/// <summary>
+/// Extension methods to enable externalized payloads using Azure Blob Storage for Durable Task Client.
+/// </summary>
+public static class DurableTaskClientBuilderExtensionsAzureBlobPayloads
+{
+ /// <summary>
+ /// Enables externalized payload storage using Azure Blob Storage for the specified client builder.
+ /// </summary>
+ public static IDurableTaskClientBuilder UseExternalizedPayloads(
+ this IDurableTaskClientBuilder builder,
+ Action<LargePayloadStorageOptions> configure)
+ {
+ Check.NotNull(builder);
+ Check.NotNull(configure);
+
+ builder.Services.Configure(builder.Name, configure);
+ builder.Services.AddSingleton<IPayloadStore>(sp =>
+ {
+ LargePayloadStorageOptions opts = sp.GetRequiredService<IOptionsMonitor<LargePayloadStorageOptions>>().Get(builder.Name);
+ return new BlobPayloadStore(opts);
+ });
+
+ builder.Services
+ .AddOptions<DurableTaskClientOptions>(builder.Name)
+ .PostConfigure<IPayloadStore, IOptionsMonitor<LargePayloadStorageOptions>>((opt, store, monitor) =>
+ {
+ LargePayloadStorageOptions opts = monitor.Get(builder.Name);
+ DataConverter inner = opt.DataConverter ?? Converters.JsonDataConverter.Default;
+ opt.DataConverter = new LargePayloadDataConverter(inner, store, opts);
+ });
+
+ return builder;
+ }
+}
+
+
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
new file mode 100644
index 00000000..8ae08fff
--- /dev/null
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
@@ -0,0 +1,46 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+using Microsoft.DurableTask.Converters;
+using Microsoft.DurableTask.Worker;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Options;
+
+namespace Microsoft.DurableTask.Worker;
+
+/// <summary>
+/// Extension methods to enable externalized payloads using Azure Blob Storage for Durable Task Worker.
+/// </summary>
+public static class DurableTaskWorkerBuilderExtensionsAzureBlobPayloads
+{
+ /// <summary>
+ /// Enables externalized payload storage using Azure Blob Storage for the specified worker builder.
+ /// </summary>
+ public static IDurableTaskWorkerBuilder UseExternalizedPayloads(
+ this IDurableTaskWorkerBuilder builder,
+ Action<LargePayloadStorageOptions> configure)
+ {
+ Check.NotNull(builder);
+ Check.NotNull(configure);
+
+ builder.Services.Configure(builder.Name, configure);
+ builder.Services.AddSingleton<IPayloadStore>(sp =>
+ {
+ LargePayloadStorageOptions opts = sp.GetRequiredService<IOptionsMonitor<LargePayloadStorageOptions>>().Get(builder.Name);
+ return new BlobPayloadStore(opts);
+ });
+
+ builder.Services
+ .AddOptions<DurableTaskWorkerOptions>(builder.Name)
+ .PostConfigure<IPayloadStore, IOptionsMonitor<LargePayloadStorageOptions>>((opt, store, monitor) =>
+ {
+ LargePayloadStorageOptions opts = monitor.Get(builder.Name);
+ DataConverter inner = opt.DataConverter ?? Converters.JsonDataConverter.Default;
+ opt.DataConverter = new LargePayloadDataConverter(inner, store, opts);
+ });
+
+ return builder;
+ }
+}
+
+
diff --git a/src/Worker/Core/DependencyInjection/DurableTaskWorkerBuilderExtensions.cs b/src/Worker/Core/DependencyInjection/DurableTaskWorkerBuilderExtensions.cs
index 61e0d21d..05cb4bc4 100644
--- a/src/Worker/Core/DependencyInjection/DurableTaskWorkerBuilderExtensions.cs
+++ b/src/Worker/Core/DependencyInjection/DurableTaskWorkerBuilderExtensions.cs
@@ -4,7 +4,6 @@
using Microsoft.DurableTask.Worker.Hosting;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Options;
-using Microsoft.DurableTask.Converters;
using static Microsoft.DurableTask.Worker.DurableTaskWorkerOptions;
namespace Microsoft.DurableTask.Worker;
@@ -139,35 +138,5 @@ public static IDurableTaskWorkerBuilder UseOrchestrationFilter(this IDurableTask
return builder;
}
- ///
- /// Enables externalized payload storage for the worker's data converter to mirror client behavior.
- ///
- /// The to configure.
- /// The action to configure the .
- /// The .
- public static IDurableTaskWorkerBuilder UseExternalizedPayloads(
- this IDurableTaskWorkerBuilder builder,
- Action configure)
- {
- Check.NotNull(builder);
- Check.NotNull(configure);
-
- builder.Services.Configure(builder.Name, configure);
- builder.Services.AddSingleton(sp =>
- {
- LargePayloadStorageOptions opts = sp.GetRequiredService>().Get(builder.Name);
- return new BlobPayloadStore(opts);
- });
-
- builder.Services
- .AddOptions(builder.Name)
- .PostConfigure>((opt, store, monitor) =>
- {
- LargePayloadStorageOptions opts = monitor.Get(builder.Name);
- DataConverter inner = opt.DataConverter ?? Converters.JsonDataConverter.Default;
- opt.DataConverter = new LargePayloadDataConverter(inner, store, opts);
- });
-
- return builder;
- }
+ // Large payload enablement moved to Microsoft.DurableTask.Extensions.AzureBlobPayloads package.
}
diff --git a/test/Grpc.IntegrationTests/Grpc.IntegrationTests.csproj b/test/Grpc.IntegrationTests/Grpc.IntegrationTests.csproj
index e6b0aee7..ba2c8b68 100644
--- a/test/Grpc.IntegrationTests/Grpc.IntegrationTests.csproj
+++ b/test/Grpc.IntegrationTests/Grpc.IntegrationTests.csproj
@@ -7,6 +7,7 @@
+
From b5bedd081c6a2a6d31090187e2b7e1fc63edff5b Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Sun, 31 Aug 2025 21:08:19 -0700
Subject: [PATCH 10/53] update sample
---
samples/LargePayloadConsoleApp/Program.cs | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/samples/LargePayloadConsoleApp/Program.cs b/samples/LargePayloadConsoleApp/Program.cs
index d962df4b..6ee592c3 100644
--- a/samples/LargePayloadConsoleApp/Program.cs
+++ b/samples/LargePayloadConsoleApp/Program.cs
@@ -28,9 +28,8 @@
{
// Keep threshold small to force externalization for demo purposes
opts.ExternalizeThresholdBytes = 1024; // 1KB
- // Default to local Azurite/emulator. Override via environment or appsettings if desired.
- opts.ConnectionString = Environment.GetEnvironmentVariable("DURABLETASK_STORAGE") ?? "UseDevelopmentStorage=true";
- opts.ContainerName = Environment.GetEnvironmentVariable("DURABLETASK_PAYLOAD_CONTAINER") ?? "durabletask-payloads";
+ opts.ConnectionString = builder.Configuration.GetValue<string>("DURABLETASK_STORAGE") ?? "UseDevelopmentStorage=true";
+ opts.ContainerName = builder.Configuration.GetValue<string>("DURABLETASK_PAYLOAD_CONTAINER");
});
});
@@ -67,8 +66,8 @@
b.UseExternalizedPayloads(opts =>
{
opts.ExternalizeThresholdBytes = 1024; // mirror client
- opts.ConnectionString = Environment.GetEnvironmentVariable("DURABLETASK_STORAGE") ?? "UseDevelopmentStorage=true";
- opts.ContainerName = Environment.GetEnvironmentVariable("DURABLETASK_PAYLOAD_CONTAINER") ?? "durabletask-payloads";
+ opts.ConnectionString = builder.Configuration.GetValue<string>("DURABLETASK_STORAGE") ?? "UseDevelopmentStorage=true";
+ opts.ContainerName = builder.Configuration.GetValue<string>("DURABLETASK_PAYLOAD_CONTAINER");
});
});
From 6cebf0e822f874c35ddb54d1decb5fb1c5ddf635 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Sun, 31 Aug 2025 21:33:39 -0700
Subject: [PATCH 11/53] remove enablestorage
---
samples/LargePayloadConsoleApp/Program.cs | 1 -
src/Abstractions/DataConverter.cs | 5 -----
.../Converters/LargePayloadDataConverter.cs | 3 ---
3 files changed, 9 deletions(-)
diff --git a/samples/LargePayloadConsoleApp/Program.cs b/samples/LargePayloadConsoleApp/Program.cs
index 6ee592c3..10984292 100644
--- a/samples/LargePayloadConsoleApp/Program.cs
+++ b/samples/LargePayloadConsoleApp/Program.cs
@@ -89,7 +89,6 @@
cts.Token);
Console.WriteLine($"RuntimeStatus: {result.RuntimeStatus}");
-Console.WriteLine($"UsesExternalStorage (result converter): {result.DataConverter?.UsesExternalStorage ?? false}");
string deserializedInput = result.ReadInputAs<string>() ?? string.Empty;
string deserializedOutput = result.ReadOutputAs<string>() ?? string.Empty;
diff --git a/src/Abstractions/DataConverter.cs b/src/Abstractions/DataConverter.cs
index a37f8081..6c623c81 100644
--- a/src/Abstractions/DataConverter.cs
+++ b/src/Abstractions/DataConverter.cs
@@ -18,11 +18,6 @@ namespace Microsoft.DurableTask;
///
public abstract class DataConverter
{
- ///
- /// Gets a value indicating whether this converter may return an external reference token instead of inline JSON.
- ///
- public virtual bool UsesExternalStorage => false;
-
///
/// Serializes into a text string.
///
diff --git a/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs b/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
index 344c226c..a2823794 100644
--- a/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
+++ b/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
@@ -26,9 +26,6 @@ public sealed class LargePayloadDataConverter(DataConverter innerConverter, IPay
readonly LargePayloadStorageOptions largePayloadStorageOptions = largePayloadStorageOptions ?? throw new ArgumentNullException(nameof(largePayloadStorageOptions));
readonly Encoding utf8 = new UTF8Encoding(false);
- ///
- public override bool UsesExternalStorage => true;
-
///
/// Serializes the value to a JSON string and uploads it to the external payload store if it exceeds the configured threshold.
///
From ce51c0d130481af3a231b3de4b7374a57c459698 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Mon, 1 Sep 2025 09:50:33 -0700
Subject: [PATCH 12/53] comment
---
.../AzureBlobPayloads/Converters/LargePayloadDataConverter.cs | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs b/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
index a2823794..5c106ba1 100644
--- a/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
+++ b/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
@@ -24,6 +24,10 @@ public sealed class LargePayloadDataConverter(DataConverter innerConverter, IPay
readonly DataConverter innerConverter = innerConverter ?? throw new ArgumentNullException(nameof(innerConverter));
readonly IPayloadStore payLoadStore = payloadStore ?? throw new ArgumentNullException(nameof(payloadStore));
readonly LargePayloadStorageOptions largePayloadStorageOptions = largePayloadStorageOptions ?? throw new ArgumentNullException(nameof(largePayloadStorageOptions));
+ // Use UTF-8 without a BOM (encoderShouldEmitUTF8Identifier=false). JSON in UTF-8 should not include a
+ // byte order mark per RFC 8259, and omitting it avoids hidden extra bytes that could skew the
+ // externalization threshold calculation and prevents interop issues with strict JSON parsers.
+ // A few legacy tools rely on a BOM for encoding detection, but modern JSON tooling assumes BOM-less UTF-8.
readonly Encoding utf8 = new UTF8Encoding(false);
///
From ecf89de5de3edb3d835df7a04e5c10bab6d53d33 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Mon, 1 Sep 2025 10:13:55 -0700
Subject: [PATCH 13/53] testname update
---
.../LargePayloadTests.cs | 28 +++++++++----------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
index 5f64f8e5..dc7150b3 100644
--- a/test/Grpc.IntegrationTests/LargePayloadTests.cs
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -15,10 +15,10 @@ public class LargePayloadTests(ITestOutputHelper output, GrpcSidecarFixture side
{
// Validates client externalizes a large orchestration input and worker resolves it.
[Fact]
- public async Task OrchestrationInput_IsExternalizedByClient_ResolvedByWorker()
+ public async Task LargeOrchestrationInput()
{
string largeInput = new string('A', 1024 * 1024); // 1MB
- TaskName orchestratorName = nameof(OrchestrationInput_IsExternalizedByClient_ResolvedByWorker);
+ TaskName orchestratorName = nameof(LargeOrchestrationInput);
InMemoryPayloadStore fakeStore = new InMemoryPayloadStore();
@@ -72,10 +72,10 @@ public async Task OrchestrationInput_IsExternalizedByClient_ResolvedByWorker()
// Validates worker externalizes large activity input and delivers resolved payload to activity.
[Fact]
- public async Task ActivityInput_IsExternalizedByWorker_ResolvedByActivity()
+ public async Task LargeActivityInput()
{
string largeParam = new string('P', 700 * 1024); // 700KB
- TaskName orchestratorName = nameof(ActivityInput_IsExternalizedByWorker_ResolvedByActivity);
+ TaskName orchestratorName = nameof(LargeActivityInput);
TaskName activityName = "EchoLength";
InMemoryPayloadStore workerStore = new InMemoryPayloadStore();
@@ -113,10 +113,10 @@ public async Task ActivityInput_IsExternalizedByWorker_ResolvedByActivity()
// Validates worker externalizes large activity output which is resolved by the orchestrator.
[Fact]
- public async Task ActivityOutput_IsExternalizedByWorker_ResolvedByOrchestrator()
+ public async Task LargeActivityOutput()
{
string largeResult = new string('R', 850 * 1024); // 850KB
- TaskName orchestratorName = nameof(ActivityOutput_IsExternalizedByWorker_ResolvedByOrchestrator);
+ TaskName orchestratorName = nameof(LargeActivityOutput);
TaskName activityName = "ProduceLarge";
InMemoryPayloadStore workerStore = new InMemoryPayloadStore();
@@ -154,11 +154,11 @@ public async Task ActivityOutput_IsExternalizedByWorker_ResolvedByOrchestrator()
// Ensures querying a completed instance downloads and resolves an externalized output on the client.
[Fact]
- public async Task QueryCompletedInstance_DownloadsExternalizedOutputOnClient()
+ public async Task LargeOrchestrationOutput()
{
string largeOutput = new string('Q', 900 * 1024); // 900KB
string smallInput = "input";
- TaskName orchestratorName = nameof(QueryCompletedInstance_DownloadsExternalizedOutputOnClient);
+ TaskName orchestratorName = nameof(LargeOrchestrationOutput);
Dictionary<string, string> shared = new System.Collections.Generic.Dictionary<string, string>();
InMemoryPayloadStore workerStore = new InMemoryPayloadStore(shared);
@@ -207,10 +207,10 @@ public async Task QueryCompletedInstance_DownloadsExternalizedOutputOnClient()
// Ensures payloads below the threshold are not externalized by client or worker.
[Fact]
- public async Task BelowThreshold_NotExternalized()
+ public async Task NoLargePayloads()
{
string smallPayload = new string('X', 64 * 1024); // 64KB
- TaskName orchestratorName = nameof(BelowThreshold_NotExternalized);
+ TaskName orchestratorName = nameof(NoLargePayloads);
InMemoryPayloadStore workerStore = new InMemoryPayloadStore();
InMemoryPayloadStore clientStore = new InMemoryPayloadStore();
@@ -256,10 +256,10 @@ public async Task BelowThreshold_NotExternalized()
// Validates client externalizes a large external event payload and worker resolves it.
[Fact]
- public async Task ExternalEventPayload_IsExternalizedByClient_ResolvedByWorker()
+ public async Task LargeExternalEvent()
{
string largeEvent = new string('E', 512 * 1024); // 512KB
- TaskName orchestratorName = nameof(ExternalEventPayload_IsExternalizedByClient_ResolvedByWorker);
+ TaskName orchestratorName = nameof(LargeExternalEvent);
const string EventName = "LargeEvent";
InMemoryPayloadStore fakeStore = new InMemoryPayloadStore();
@@ -306,11 +306,11 @@ public async Task ExternalEventPayload_IsExternalizedByClient_ResolvedByWorker()
// Validates worker externalizes both output and custom status; client resolves them on query.
[Fact]
- public async Task OutputAndCustomStatus_ExternalizedByWorker_ResolvedOnQuery()
+ public async Task LargeOutputAndCustomStatus()
{
string largeOutput = new string('O', 768 * 1024); // 768KB
string largeStatus = new string('S', 600 * 1024); // 600KB
- TaskName orchestratorName = nameof(OutputAndCustomStatus_ExternalizedByWorker_ResolvedOnQuery);
+ TaskName orchestratorName = nameof(LargeOutputAndCustomStatus);
InMemoryPayloadStore fakeStore = new InMemoryPayloadStore();
From 3bd6c9ae863b412f42b0ef54a9c1461a17cadbce Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Tue, 2 Sep 2025 15:30:27 -0700
Subject: [PATCH 14/53] add entity sample for largepayload
---
samples/LargePayloadConsoleApp/Program.cs | 101 +++++++++++++++++++++-
1 file changed, 97 insertions(+), 4 deletions(-)
diff --git a/samples/LargePayloadConsoleApp/Program.cs b/samples/LargePayloadConsoleApp/Program.cs
index 10984292..9baa35f9 100644
--- a/samples/LargePayloadConsoleApp/Program.cs
+++ b/samples/LargePayloadConsoleApp/Program.cs
@@ -1,15 +1,15 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
-using Microsoft.DurableTask;
using Microsoft.DurableTask.Client;
using Microsoft.DurableTask.Client.AzureManaged;
-using Microsoft.DurableTask.Converters;
+using Microsoft.DurableTask.Client.Entities;
+using Microsoft.DurableTask.Entities;
using Microsoft.DurableTask.Worker;
using Microsoft.DurableTask.Worker.AzureManaged;
+using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
-using Microsoft.Extensions.Configuration;
// Demonstrates Large Payload Externalization using Azure Blob Storage.
// This sample uses Azurite/emulator by default via UseDevelopmentStorage=true.
@@ -24,6 +24,8 @@
builder.Services.AddDurableTaskClient(b =>
{
b.UseDurableTaskScheduler(schedulerConnectionString);
+ // Ensure entity APIs are enabled for the client
+ b.Configure(o => o.EnableEntitySupport = true);
b.UseExternalizedPayloads(opts =>
{
// Keep threshold small to force externalization for demo purposes
@@ -62,6 +64,38 @@
return value;
});
+
+ // Entity samples
+ // 1) Large entity operation input (worker externalizes input; entity receives resolved payload)
+ tasks.AddOrchestratorFunc(
+ "LargeEntityOperationInput",
+ (ctx, _) => ctx.Entities.CallEntityAsync(
+ new EntityInstanceId(nameof(EchoLengthEntity), "1"),
+ operationName: "EchoLength",
+ input: new string('E', 700 * 1024)));
+ tasks.AddEntity(nameof(EchoLengthEntity));
+
+ // 2) Large entity operation output (worker externalizes output; orchestrator reads resolved payload)
+ tasks.AddOrchestratorFunc(
+ "LargeEntityOperationOutput",
+ async (ctx, _) => (await ctx.Entities.CallEntityAsync<string>(
+ new EntityInstanceId(nameof(LargeResultEntity), "1"),
+ operationName: "Produce",
+ input: 850 * 1024)).Length);
+ tasks.AddEntity(nameof(LargeResultEntity));
+
+ // 3) Large entity state (worker externalizes state; client resolves on query)
+ tasks.AddOrchestratorFunc(
+ "LargeEntityState",
+ async (ctx, _) =>
+ {
+ await ctx.Entities.CallEntityAsync(
+ new EntityInstanceId(nameof(StateEntity), "1"),
+ operationName: "Set",
+ input: new string('S', 900 * 1024));
+ return null;
+ });
+ tasks.AddEntity(nameof(StateEntity));
});
b.UseExternalizedPayloads(opts =>
{
@@ -69,6 +103,8 @@
opts.ConnectionString = builder.Configuration.GetValue("DURABLETASK_STORAGE") ?? "UseDevelopmentStorage=true";
opts.ContainerName = builder.Configuration.GetValue("DURABLETASK_PAYLOAD_CONTAINER");
});
+ // Ensure entity APIs are enabled for the worker
+ b.Configure(o => o.EnableEntitySupport = true);
});
IHost host = builder.Build();
@@ -82,7 +118,7 @@
Console.WriteLine($"Started orchestration with direct large input. Instance: {instanceId}");
-using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(120));
+using CancellationTokenSource cts = new CancellationTokenSource(TimeSpan.FromSeconds(120));
OrchestrationMetadata result = await client.WaitForInstanceCompletionAsync(
instanceId,
getInputsAndOutputs: true,
@@ -100,3 +136,60 @@
+// Run entity samples
+Console.WriteLine();
+Console.WriteLine("Running LargeEntityOperationInput...");
+string entityInputInstance = await client.ScheduleNewOrchestrationInstanceAsync("LargeEntityOperationInput");
+OrchestrationMetadata entityInputResult = await client.WaitForInstanceCompletionAsync(entityInputInstance, getInputsAndOutputs: true, cts.Token);
+Console.WriteLine($"Status: {entityInputResult.RuntimeStatus}, Output length: {entityInputResult.ReadOutputAs()}");
+
+Console.WriteLine();
+Console.WriteLine("Running LargeEntityOperationOutput...");
+string entityOutputInstance = await client.ScheduleNewOrchestrationInstanceAsync("LargeEntityOperationOutput");
+OrchestrationMetadata entityOutputResult = await client.WaitForInstanceCompletionAsync(entityOutputInstance, getInputsAndOutputs: true, cts.Token);
+Console.WriteLine($"Status: {entityOutputResult.RuntimeStatus}, Output length: {entityOutputResult.ReadOutputAs()}");
+
+Console.WriteLine();
+Console.WriteLine("Running LargeEntityState and querying state...");
+string entityStateInstance = await client.ScheduleNewOrchestrationInstanceAsync("LargeEntityState");
+OrchestrationMetadata entityStateOrch = await client.WaitForInstanceCompletionAsync(entityStateInstance, getInputsAndOutputs: true, cts.Token);
+Console.WriteLine($"Status: {entityStateOrch.RuntimeStatus}");
+EntityMetadata<string>? state = await client.Entities.GetEntityAsync<string>(new EntityInstanceId(nameof(StateEntity), "1"), includeState: true);
+Console.WriteLine($"State length: {state?.State?.Length ?? 0}");
+
+
+
+
+
+public class EchoLengthEntity : TaskEntity
+{
+ public int EchoLength(string input)
+ {
+ return input.Length;
+ }
+}
+
+public class LargeResultEntity : TaskEntity
+{
+ public string Produce(int length)
+ {
+ return new string('R', length);
+ }
+}
+
+public class StateEntity : TaskEntity<string>
+{
+ protected override string? InitializeState(TaskEntityOperation entityOperation)
+ {
+ // Avoid Activator.CreateInstance() which throws; start as null (no state)
+ return null;
+ }
+
+ public void Set(string value)
+ {
+ this.State = value;
+ }
+}
+
+
+
From 20d3e8ff038612caa63661e6c796a1e8278e6dc6 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Fri, 5 Sep 2025 17:12:35 -0700
Subject: [PATCH 15/53] some fb
---
.../DurableTaskClientBuilderExtensions.cs | 2 --
.../AzureBlobPayloads/Converters/BlobPayloadStore.cs | 12 ++++++------
.../DurableTaskWorkerBuilderExtensions.cs | 2 --
3 files changed, 6 insertions(+), 10 deletions(-)
diff --git a/src/Client/Core/DependencyInjection/DurableTaskClientBuilderExtensions.cs b/src/Client/Core/DependencyInjection/DurableTaskClientBuilderExtensions.cs
index b3e91a8f..5c854759 100644
--- a/src/Client/Core/DependencyInjection/DurableTaskClientBuilderExtensions.cs
+++ b/src/Client/Core/DependencyInjection/DurableTaskClientBuilderExtensions.cs
@@ -101,6 +101,4 @@ public static IDurableTaskClientBuilder UseDefaultVersion(this IDurableTaskClien
builder.Configure(options => options.DefaultVersion = version);
return builder;
}
-
- // Large payload enablement moved to Microsoft.DurableTask.Extensions.AzureBlobPayloads package.
}
diff --git a/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
index 4e1275c6..c9e287d2 100644
--- a/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
@@ -40,7 +40,7 @@ public BlobPayloadStore(LargePayloadStorageOptions options)
public async Task UploadAsync(ReadOnlyMemory payloadBytes, CancellationToken cancellationToken)
{
// Ensure container exists
- await this.containerClient.CreateIfNotExistsAsync(PublicAccessType.None, cancellationToken: cancellationToken).ConfigureAwait(false);
+ await this.containerClient.CreateIfNotExistsAsync(PublicAccessType.None, default, default, cancellationToken);
// One blob per payload using GUID-based name for uniqueness
string timestamp = DateTimeOffset.UtcNow.ToString("yyyy/MM/dd/HH/mm/ss", CultureInfo.InvariantCulture);
@@ -50,13 +50,13 @@ public async Task UploadAsync(ReadOnlyMemory payloadBytes, Cancell
byte[] payloadBuffer = payloadBytes.ToArray();
// Compress and upload streaming
- using Stream blobStream = await blob.OpenWriteAsync(overwrite: true, cancellationToken: cancellationToken).ConfigureAwait(false);
+ using Stream blobStream = await blob.OpenWriteAsync(true, default, cancellationToken);
using GZipStream compressedBlobStream = new(blobStream, CompressionLevel.Optimal, leaveOpen: true);
using MemoryStream payloadStream = new(payloadBuffer, writable: false);
- await payloadStream.CopyToAsync(compressedBlobStream, bufferSize: 81920, cancellationToken).ConfigureAwait(false);
- await compressedBlobStream.FlushAsync(cancellationToken).ConfigureAwait(false);
- await blobStream.FlushAsync(cancellationToken).ConfigureAwait(false);
+ await payloadStream.CopyToAsync(compressedBlobStream, bufferSize: 81920, cancellationToken);
+ await compressedBlobStream.FlushAsync(cancellationToken);
+ await blobStream.FlushAsync(cancellationToken);
return EncodeToken(this.containerClient.Name, blobName);
}
@@ -71,7 +71,7 @@ public async Task DownloadAsync(string token, CancellationToken cancella
}
BlobClient blob = this.containerClient.GetBlobClient(name);
- using BlobDownloadStreamingResult result = await blob.DownloadStreamingAsync(cancellationToken: cancellationToken).ConfigureAwait(false);
+ using BlobDownloadStreamingResult result = await blob.DownloadStreamingAsync(cancellationToken);
using GZipStream decompressedBlobStream = new GZipStream(result.Content, CompressionMode.Decompress);
using StreamReader reader = new(decompressedBlobStream, Encoding.UTF8);
return await reader.ReadToEndAsync();
diff --git a/src/Worker/Core/DependencyInjection/DurableTaskWorkerBuilderExtensions.cs b/src/Worker/Core/DependencyInjection/DurableTaskWorkerBuilderExtensions.cs
index 05cb4bc4..3f349b71 100644
--- a/src/Worker/Core/DependencyInjection/DurableTaskWorkerBuilderExtensions.cs
+++ b/src/Worker/Core/DependencyInjection/DurableTaskWorkerBuilderExtensions.cs
@@ -137,6 +137,4 @@ public static IDurableTaskWorkerBuilder UseOrchestrationFilter(this IDurableTask
builder.Services.AddSingleton(filter);
return builder;
}
-
- // Large payload enablement moved to Microsoft.DurableTask.Extensions.AzureBlobPayloads package.
}
From e6536804c432bb411496402f51f0ab1b4f637272 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Fri, 5 Sep 2025 20:11:12 -0700
Subject: [PATCH 16/53] fix
---
.../AzureBlobPayloads/Converters/BlobPayloadStore.cs | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
index c9e287d2..f24d4378 100644
--- a/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
@@ -4,7 +4,6 @@
using System.Globalization;
using System.IO.Compression;
using System.Text;
-using Azure;
using Azure.Storage.Blobs;
using Azure.Storage.Blobs.Models;
@@ -71,7 +70,7 @@ public async Task DownloadAsync(string token, CancellationToken cancella
}
BlobClient blob = this.containerClient.GetBlobClient(name);
- using BlobDownloadStreamingResult result = await blob.DownloadStreamingAsync(cancellationToken);
+ using BlobDownloadStreamingResult result = await blob.DownloadStreamingAsync(cancellationToken: cancellationToken);
using GZipStream decompressedBlobStream = new GZipStream(result.Content, CompressionMode.Decompress);
using StreamReader reader = new(decompressedBlobStream, Encoding.UTF8);
return await reader.ReadToEndAsync();
@@ -96,5 +95,3 @@ public async Task DownloadAsync(string token, CancellationToken cancella
return (rest.Substring(0, sep), rest.Substring(sep + 1));
}
}
-
-
From 499c2815d65fbd5bd64bf773ee89dda26eaefdca Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Fri, 5 Sep 2025 20:35:44 -0700
Subject: [PATCH 17/53] fb
---
src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj b/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj
index 54883acf..33802286 100644
--- a/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj
+++ b/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj
@@ -1,7 +1,7 @@
- netstandard2.0
+ netstandard2.0;net6.0
Azure Blob Storage externalized payload support for Durable Task.
Microsoft.DurableTask.Extensions.AzureBlobPayloads
Microsoft.DurableTask
From e841e5e40f658a72925b0b61d16b407e3c6a194a Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Fri, 5 Sep 2025 20:41:47 -0700
Subject: [PATCH 18/53] fb
---
.../Converters/LargePayloadDataConverter.cs | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs b/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
index 5c106ba1..1e80d593 100644
--- a/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
+++ b/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
@@ -37,13 +37,13 @@ public sealed class LargePayloadDataConverter(DataConverter innerConverter, IPay
/// The serialized value or the token if externalized.
public override string? Serialize(object? value)
{
- if (value is null)
+ string? json = this.innerConverter.Serialize(value);
+
+ if (string.IsNullOrEmpty(json))
{
return null;
}
- string json = this.innerConverter.Serialize(value) ?? "null";
-
int byteCount = this.utf8.GetByteCount(json);
if (byteCount < this.largePayloadStorageOptions.ExternalizeThresholdBytes)
{
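
With this reordering the inner converter always runs first, and its output alone decides whether to externalize. A hedged usage sketch of the resulting behavior (the helper and sample values are illustrative; the token prefix matches the "blob:v1:" format used elsewhere in these patches):

    using Microsoft.DurableTask;
    using Microsoft.DurableTask.Converters;

    static void Demo(DataConverter inner, IPayloadStore store, LargePayloadStorageOptions options)
    {
        var converter = new LargePayloadDataConverter(inner, store, options);

        string? none  = converter.Serialize(null);                        // inner returns null -> null
        string? small = converter.Serialize(new { id = 1 });              // below threshold -> inline JSON
        string? big   = converter.Serialize(new string('x', 1_000_000));  // above threshold -> "blob:v1:..." token
    }
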
From f182442a247a7d3f1c7609773c9e52fc1eb8a0cc Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Fri, 5 Sep 2025 20:42:58 -0700
Subject: [PATCH 19/53] fb
---
.../Converters/LargePayloadDataConverter.cs | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs b/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
index 1e80d593..8130b80e 100644
--- a/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
+++ b/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
@@ -17,7 +17,11 @@ namespace Microsoft.DurableTask.Converters;
/// The external payload store to use.
/// The options for the externalizing data converter.
/// Thrown when , , or is null.
-public sealed class LargePayloadDataConverter(DataConverter innerConverter, IPayloadStore payloadStore, LargePayloadStorageOptions largePayloadStorageOptions) : DataConverter
+public sealed class LargePayloadDataConverter(
+ DataConverter innerConverter,
+ IPayloadStore payloadStore,
+ LargePayloadStorageOptions largePayloadStorageOptions
+) : DataConverter
{
const string TokenPrefix = "blob:v1:"; // matches BlobExternalPayloadStore
From e484f4296b18b0a98bcb4332e1c447bc34561f92 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Fri, 5 Sep 2025 20:57:13 -0700
Subject: [PATCH 20/53] fb
---
src/Abstractions/Converters/IPayloadStore.cs | 8 ++++++++
.../AzureBlobPayloads/Converters/BlobPayloadStore.cs | 12 ++++++++++++
.../Converters/LargePayloadDataConverter.cs | 3 +--
3 files changed, 21 insertions(+), 2 deletions(-)
diff --git a/src/Abstractions/Converters/IPayloadStore.cs b/src/Abstractions/Converters/IPayloadStore.cs
index c7796abb..c2a6ff6c 100644
--- a/src/Abstractions/Converters/IPayloadStore.cs
+++ b/src/Abstractions/Converters/IPayloadStore.cs
@@ -23,4 +23,12 @@ public interface IPayloadStore
/// Cancellation token.
/// Payload string.
Task<string> DownloadAsync(string token, CancellationToken cancellationToken);
+
+ ///
+ /// Returns true if the specified value appears to be a token understood by this store.
+ /// Implementations should not throw for unknown tokens.
+ ///
+ /// The value to check.
+ /// true if the value is a token issued by this store; otherwise, false.
+ bool IsKnownPayloadToken(string value);
}
diff --git a/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
index f24d4378..92ce00b7 100644
--- a/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
@@ -15,6 +15,7 @@ namespace Microsoft.DurableTask.Converters;
///
public sealed class BlobPayloadStore : IPayloadStore
{
+ const string TokenPrefix = "blob:v1:";
readonly BlobContainerClient containerClient;
readonly LargePayloadStorageOptions options;
@@ -76,6 +77,17 @@ public async Task DownloadAsync(string token, CancellationToken cancella
return await reader.ReadToEndAsync();
}
+ ///
+ public bool IsKnownPayloadToken(string value)
+ {
+ if (string.IsNullOrEmpty(value))
+ {
+ return false;
+ }
+
+ return value.StartsWith(TokenPrefix, StringComparison.Ordinal);
+ }
+
static string EncodeToken(string container, string name) => $"blob:v1:{container}:{name}";
static (string Container, string Name) DecodeToken(string token)
diff --git a/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs b/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
index 8130b80e..e0f7315d 100644
--- a/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
+++ b/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
@@ -23,7 +23,6 @@ public sealed class LargePayloadDataConverter(
LargePayloadStorageOptions largePayloadStorageOptions
) : DataConverter
{
- const string TokenPrefix = "blob:v1:"; // matches BlobExternalPayloadStore
readonly DataConverter innerConverter = innerConverter ?? throw new ArgumentNullException(nameof(innerConverter));
readonly IPayloadStore payLoadStore = payloadStore ?? throw new ArgumentNullException(nameof(payloadStore));
@@ -74,7 +73,7 @@ LargePayloadStorageOptions largePayloadStorageOptions
}
string toDeserialize = data;
- if (data.StartsWith(TokenPrefix, StringComparison.Ordinal))
+ if (this.payLoadStore.IsKnownPayloadToken(data))
{
toDeserialize = this.payLoadStore.DownloadAsync(data, CancellationToken.None).GetAwaiter().GetResult();
}
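
Moving the prefix check behind IsKnownPayloadToken keeps the converter agnostic of the token format: each store decides what its own tokens look like. As a hypothetical illustration (not part of this change), a store with a different token scheme only needs to change its own check. The sketch assumes the two-argument UploadAsync shape used in the surrounding patches and net6.0-era file APIs:

    using System;
    using System.IO;
    using System.Threading;
    using System.Threading.Tasks;
    using Microsoft.DurableTask.Converters;

    // Hypothetical store that keeps payloads on local disk and issues "file:v1:" tokens.
    sealed class FilePayloadStore : IPayloadStore
    {
        const string TokenPrefix = "file:v1:";

        public bool IsKnownPayloadToken(string value) =>
            !string.IsNullOrEmpty(value) && value.StartsWith(TokenPrefix, StringComparison.Ordinal);

        public async Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
        {
            string path = Path.Combine(Path.GetTempPath(), $"{Guid.NewGuid():N}.json");
            await File.WriteAllBytesAsync(path, payloadBytes.ToArray(), cancellationToken);
            return TokenPrefix + path;
        }

        public Task<string> DownloadAsync(string token, CancellationToken cancellationToken) =>
            File.ReadAllTextAsync(token.Substring(TokenPrefix.Length), cancellationToken);
    }
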
From fb6d3fbf949648de5d1f523af4364e67ef147a07 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Fri, 5 Sep 2025 21:15:51 -0700
Subject: [PATCH 21/53] test
---
test/Grpc.IntegrationTests/LargePayloadTests.cs | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
index dc7150b3..a8a09209 100644
--- a/test/Grpc.IntegrationTests/LargePayloadTests.cs
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -360,6 +360,7 @@ public async Task LargeOutputAndCustomStatus()
class InMemoryPayloadStore : IPayloadStore
{
+ const string TokenPrefix = "blob:v1:";
readonly Dictionary<string, string> tokenToPayload;
public InMemoryPayloadStore()
@@ -391,5 +392,11 @@ public Task<string> DownloadAsync(string token, CancellationToken cancellationTo
Interlocked.Increment(ref this.downloadCount);
return Task.FromResult(this.tokenToPayload[token]);
}
+
+ public bool IsKnownPayloadToken(string value)
+ {
+ return value.StartsWith(TokenPrefix, StringComparison.Ordinal);
+ }
+
}
}
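
Pieced together, the test double above looks roughly like the sketch below; the counter fields and constructor details are assumptions filled in around the fragments shown in the diff, and the UploadAsync shape matches the two-argument form seen in the surrounding patches:

    using System;
    using System.Collections.Generic;
    using System.Text;
    using System.Threading;
    using System.Threading.Tasks;
    using Microsoft.DurableTask.Converters;

    sealed class InMemoryPayloadStore : IPayloadStore
    {
        const string TokenPrefix = "blob:v1:";
        readonly Dictionary<string, string> tokenToPayload = new();
        int uploadCount;
        int downloadCount;

        public Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
        {
            Interlocked.Increment(ref this.uploadCount);
            string token = $"{TokenPrefix}test:{Guid.NewGuid():N}";
            this.tokenToPayload[token] = Encoding.UTF8.GetString(payloadBytes.ToArray());
            return Task.FromResult(token);
        }

        public Task<string> DownloadAsync(string token, CancellationToken cancellationToken)
        {
            Interlocked.Increment(ref this.downloadCount);
            return Task.FromResult(this.tokenToPayload[token]);
        }

        public bool IsKnownPayloadToken(string value) =>
            value.StartsWith(TokenPrefix, StringComparison.Ordinal);
    }
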
From f734b1bbeb60d5cffcd6b40b602665dbf34d1d3a Mon Sep 17 00:00:00 2001
From: wangbill <12449837+YunchuWang@users.noreply.github.com>
Date: Mon, 8 Sep 2025 10:07:56 -0700
Subject: [PATCH 22/53] enable compression
---
.../Converters/LargePayloadStorageOptions.cs | 6 +++
.../Converters/BlobPayloadStore.cs | 39 ++++++++++++++-----
2 files changed, 35 insertions(+), 10 deletions(-)
diff --git a/src/Abstractions/Converters/LargePayloadStorageOptions.cs b/src/Abstractions/Converters/LargePayloadStorageOptions.cs
index d43e189d..af005854 100644
--- a/src/Abstractions/Converters/LargePayloadStorageOptions.cs
+++ b/src/Abstractions/Converters/LargePayloadStorageOptions.cs
@@ -41,4 +41,10 @@ public LargePayloadStorageOptions(string connectionString)
/// Gets or sets the blob container name to use for payloads. Defaults to "durabletask-payloads".
///
public string ContainerName { get; set; } = "durabletask-payloads";
+
+ ///
+ /// Gets or sets a value indicating whether payloads should be gzip-compressed when stored.
+ /// Defaults to true for reduced storage and bandwidth.
+ ///
+ public bool CompressPayloads { get; set; } = true;
}
diff --git a/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
index 92ce00b7..3b83bbd3 100644
--- a/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
@@ -49,14 +49,24 @@ public async Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, Cancell
byte[] payloadBuffer = payloadBytes.ToArray();
- // Compress and upload streaming
- using Stream blobStream = await blob.OpenWriteAsync(true, default, cancellationToken);
- using GZipStream compressedBlobStream = new(blobStream, CompressionLevel.Optimal, leaveOpen: true);
- using MemoryStream payloadStream = new(payloadBuffer, writable: false);
+ // Upload streaming, optionally compressing and marking ContentEncoding
+ if (this.options.CompressPayloads)
+ {
+ using Stream blobStream = await blob.OpenWriteAsync(true, default, cancellationToken);
+ using GZipStream compressedBlobStream = new(blobStream, CompressionLevel.Optimal, leaveOpen: true);
+ using MemoryStream payloadStream = new(payloadBuffer, writable: false);
- await payloadStream.CopyToAsync(compressedBlobStream, bufferSize: 81920, cancellationToken);
- await compressedBlobStream.FlushAsync(cancellationToken);
- await blobStream.FlushAsync(cancellationToken);
+ await payloadStream.CopyToAsync(compressedBlobStream, bufferSize: 81920, cancellationToken);
+ await compressedBlobStream.FlushAsync(cancellationToken);
+ await blobStream.FlushAsync(cancellationToken);
+ }
+ else
+ {
+ using Stream blobStream = await blob.OpenWriteAsync(true, default, cancellationToken);
+ using MemoryStream payloadStream = new(payloadBuffer, writable: false);
+ await payloadStream.CopyToAsync(blobStream, bufferSize: 81920, cancellationToken);
+ await blobStream.FlushAsync(cancellationToken);
+ }
return EncodeToken(this.containerClient.Name, blobName);
}
@@ -72,9 +82,18 @@ public async Task<string> DownloadAsync(string token, CancellationToken cancella
BlobClient blob = this.containerClient.GetBlobClient(name);
using BlobDownloadStreamingResult result = await blob.DownloadStreamingAsync(cancellationToken: cancellationToken);
- using GZipStream decompressedBlobStream = new GZipStream(result.Content, CompressionMode.Decompress);
- using StreamReader reader = new(decompressedBlobStream, Encoding.UTF8);
- return await reader.ReadToEndAsync();
+ Stream contentStream = result.Content;
+ if (this.options.CompressPayloads)
+ {
+ using GZipStream decompressedBlobStream = new(contentStream, CompressionMode.Decompress);
+ using StreamReader reader = new(decompressedBlobStream, Encoding.UTF8);
+ return await reader.ReadToEndAsync();
+ }
+ else
+ {
+ using StreamReader reader = new(contentStream, Encoding.UTF8);
+ return await reader.ReadToEndAsync();
+ }
}
///
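
The compressed branch above wraps the blob's write stream in a GZipStream. The same transformation in isolation looks like this (a standalone sketch that compresses a UTF-8 payload into a byte array rather than into blobStream):

    using System.IO;
    using System.IO.Compression;
    using System.Text;

    static byte[] GzipUtf8(string payload)
    {
        byte[] raw = Encoding.UTF8.GetBytes(payload);
        using MemoryStream buffer = new();
        using (GZipStream gzip = new(buffer, CompressionLevel.Optimal, leaveOpen: true))
        {
            gzip.Write(raw, 0, raw.Length); // the gzip stream must be closed to flush its footer
        }

        return buffer.ToArray();
    }

Leaving the destination stream open (leaveOpen: true) mirrors the patch: the GZipStream is disposed to flush its footer while the underlying destination stream is flushed and closed separately.
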
From 50a210e17590eb3129cbe2e50e113df0cf403a57 Mon Sep 17 00:00:00 2001
From: wangbill <12449837+YunchuWang@users.noreply.github.com>
Date: Mon, 8 Sep 2025 13:34:28 -0700
Subject: [PATCH 23/53] update sample
---
samples/LargePayloadConsoleApp/Program.cs | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/samples/LargePayloadConsoleApp/Program.cs b/samples/LargePayloadConsoleApp/Program.cs
index 9baa35f9..0cef4a60 100644
--- a/samples/LargePayloadConsoleApp/Program.cs
+++ b/samples/LargePayloadConsoleApp/Program.cs
@@ -139,23 +139,32 @@ await ctx.Entities.CallEntityAsync(
// Run entity samples
Console.WriteLine();
Console.WriteLine("Running LargeEntityOperationInput...");
+string largeEntityInput = new string('E', 700 * 1024); // 700KB
string entityInputInstance = await client.ScheduleNewOrchestrationInstanceAsync("LargeEntityOperationInput");
OrchestrationMetadata entityInputResult = await client.WaitForInstanceCompletionAsync(entityInputInstance, getInputsAndOutputs: true, cts.Token);
-Console.WriteLine($"Status: {entityInputResult.RuntimeStatus}, Output length: {entityInputResult.ReadOutputAs()}");
+int entityInputLength = entityInputResult.ReadOutputAs();
+Console.WriteLine($"Status: {entityInputResult.RuntimeStatus}, Output length: {entityInputLength}");
+Console.WriteLine($"Deserialized input length equals original: {entityInputLength == largeEntityInput.Length}");
Console.WriteLine();
Console.WriteLine("Running LargeEntityOperationOutput...");
+int largeEntityOutputLength = 850 * 1024; // 850KB
string entityOutputInstance = await client.ScheduleNewOrchestrationInstanceAsync("LargeEntityOperationOutput");
OrchestrationMetadata entityOutputResult = await client.WaitForInstanceCompletionAsync(entityOutputInstance, getInputsAndOutputs: true, cts.Token);
-Console.WriteLine($"Status: {entityOutputResult.RuntimeStatus}, Output length: {entityOutputResult.ReadOutputAs()}");
+int entityOutputLength = entityOutputResult.ReadOutputAs();
+Console.WriteLine($"Status: {entityOutputResult.RuntimeStatus}, Output length: {entityOutputLength}");
+Console.WriteLine($"Deserialized output length equals original: {entityOutputLength == largeEntityOutputLength}");
Console.WriteLine();
Console.WriteLine("Running LargeEntityState and querying state...");
+string largeEntityState = new string('S', 900 * 1024); // 900KB
string entityStateInstance = await client.ScheduleNewOrchestrationInstanceAsync("LargeEntityState");
OrchestrationMetadata entityStateOrch = await client.WaitForInstanceCompletionAsync(entityStateInstance, getInputsAndOutputs: true, cts.Token);
Console.WriteLine($"Status: {entityStateOrch.RuntimeStatus}");
EntityMetadata? state = await client.Entities.GetEntityAsync(new EntityInstanceId(nameof(StateEntity), "1"), includeState: true);
-Console.WriteLine($"State length: {state?.State?.Length ?? 0}");
+int stateLength = state?.State?.Length ?? 0;
+Console.WriteLine($"State length: {stateLength}");
+Console.WriteLine($"Deserialized state equals original: {state?.State == largeEntityState}");
From 833406fcd5cdb6583170c894fd08bb85685226f6 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Tue, 9 Sep 2025 07:50:20 -0700
Subject: [PATCH 24/53] add gzip content-encoding header and detect it for decompression
---
Microsoft.DurableTask.sln | 14 +++++++++++
.../Converters/BlobPayloadStore.cs | 23 +++++++++++--------
2 files changed, 28 insertions(+), 9 deletions(-)
diff --git a/Microsoft.DurableTask.sln b/Microsoft.DurableTask.sln
index 26c2e80d..383f2e15 100644
--- a/Microsoft.DurableTask.sln
+++ b/Microsoft.DurableTask.sln
@@ -93,6 +93,10 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ScheduleWebApp", "samples\S
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ScheduledTasks.Tests", "test\ScheduledTasks.Tests\ScheduledTasks.Tests.csproj", "{D2779F32-A548-44F8-B60A-6AC018966C79}"
EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LargePayloadConsoleApp", "samples\LargePayloadConsoleApp\LargePayloadConsoleApp.csproj", "{6EB9D002-62C8-D6C1-62A8-14C54CA6DBBC}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AzureBlobPayloads", "src\Extensions\AzureBlobPayloads\AzureBlobPayloads.csproj", "{FE1DA748-D6DB-E168-BC42-6DBBCEAF229C}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -247,6 +251,14 @@ Global
{D2779F32-A548-44F8-B60A-6AC018966C79}.Debug|Any CPU.Build.0 = Debug|Any CPU
{D2779F32-A548-44F8-B60A-6AC018966C79}.Release|Any CPU.ActiveCfg = Release|Any CPU
{D2779F32-A548-44F8-B60A-6AC018966C79}.Release|Any CPU.Build.0 = Release|Any CPU
+ {6EB9D002-62C8-D6C1-62A8-14C54CA6DBBC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {6EB9D002-62C8-D6C1-62A8-14C54CA6DBBC}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {6EB9D002-62C8-D6C1-62A8-14C54CA6DBBC}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {6EB9D002-62C8-D6C1-62A8-14C54CA6DBBC}.Release|Any CPU.Build.0 = Release|Any CPU
+ {FE1DA748-D6DB-E168-BC42-6DBBCEAF229C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {FE1DA748-D6DB-E168-BC42-6DBBCEAF229C}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {FE1DA748-D6DB-E168-BC42-6DBBCEAF229C}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {FE1DA748-D6DB-E168-BC42-6DBBCEAF229C}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
@@ -293,6 +305,8 @@ Global
{A89B766C-987F-4C9F-8937-D0AB9FE640C8} = {EFF7632B-821E-4CFC-B4A0-ED4B24296B17}
{100348B5-4D97-4A3F-B777-AB14F276F8FE} = {EFF7632B-821E-4CFC-B4A0-ED4B24296B17}
{D2779F32-A548-44F8-B60A-6AC018966C79} = {E5637F81-2FB9-4CD7-900D-455363B142A7}
+ {6EB9D002-62C8-D6C1-62A8-14C54CA6DBBC} = {EFF7632B-821E-4CFC-B4A0-ED4B24296B17}
+ {FE1DA748-D6DB-E168-BC42-6DBBCEAF229C} = {8AFC9781-F6F1-4696-BB4A-9ED7CA9D612B}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {AB41CB55-35EA-4986-A522-387AB3402E71}
diff --git a/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
index 3b83bbd3..1c6adf31 100644
--- a/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
@@ -52,7 +52,11 @@ public async Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, Cancell
// Upload streaming, optionally compressing and marking ContentEncoding
if (this.options.CompressPayloads)
{
- using Stream blobStream = await blob.OpenWriteAsync(true, default, cancellationToken);
+ BlobOpenWriteOptions writeOptions = new()
+ {
+ HttpHeaders = new BlobHttpHeaders { ContentEncoding = "gzip" },
+ };
+ using Stream blobStream = await blob.OpenWriteAsync(true, writeOptions, cancellationToken);
using GZipStream compressedBlobStream = new(blobStream, CompressionLevel.Optimal, leaveOpen: true);
using MemoryStream payloadStream = new(payloadBuffer, writable: false);
@@ -83,17 +87,18 @@ public async Task<string> DownloadAsync(string token, CancellationToken cancella
BlobClient blob = this.containerClient.GetBlobClient(name);
using BlobDownloadStreamingResult result = await blob.DownloadStreamingAsync(cancellationToken: cancellationToken);
Stream contentStream = result.Content;
- if (this.options.CompressPayloads)
- {
- using GZipStream decompressedBlobStream = new(contentStream, CompressionMode.Decompress);
- using StreamReader reader = new(decompressedBlobStream, Encoding.UTF8);
- return await reader.ReadToEndAsync();
- }
- else
+ bool isGzip = string.Equals(
+ result.Details.ContentEncoding, "gzip", StringComparison.OrdinalIgnoreCase);
+
+ if (isGzip)
{
- using StreamReader reader = new(contentStream, Encoding.UTF8);
+ using GZipStream decompressed = new(contentStream, CompressionMode.Decompress);
+ using StreamReader reader = new(decompressed, Encoding.UTF8);
return await reader.ReadToEndAsync();
}
+
+ using StreamReader uncompressedReader = new(contentStream, Encoding.UTF8);
+ return await uncompressedReader.ReadToEndAsync();
}
///
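
Condensed, the download path now trusts the blob's own Content-Encoding metadata rather than the current CompressPayloads setting, so blobs written before the option was toggled stay readable. A simplified sketch of that branch (same usings as the store file plus the Azure.Storage.Blobs types):

    using System;
    using System.IO;
    using System.IO.Compression;
    using System.Text;
    using System.Threading;
    using System.Threading.Tasks;
    using Azure.Storage.Blobs;
    using Azure.Storage.Blobs.Models;

    static async Task<string> ReadPayloadAsync(BlobClient blob, CancellationToken ct)
    {
        using BlobDownloadStreamingResult result =
            await blob.DownloadStreamingAsync(cancellationToken: ct);

        // Decompress only when the blob itself says it was written with gzip.
        bool isGzip = string.Equals(
            result.Details.ContentEncoding, "gzip", StringComparison.OrdinalIgnoreCase);

        using Stream content = isGzip
            ? new GZipStream(result.Content, CompressionMode.Decompress)
            : result.Content;
        using StreamReader reader = new(content, Encoding.UTF8);
        return await reader.ReadToEndAsync();
    }
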
From 8b6b9f48ccb8eeed977254a969f70b7fc2e0e5a2 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Tue, 16 Sep 2025 13:43:54 -0700
Subject: [PATCH 25/53] cleanup
---
src/Abstractions/DataConverter.cs | 2 -
.../Converters/LargePayloadDataConverter.cs | 96 -------------------
...ientBuilderExtensions.AzureBlobPayloads.cs | 16 ++--
...rkerBuilderExtensions.AzureBlobPayloads.cs | 16 ++--
.../AzureBlobPayloadsInterceptor.cs | 0
.../Interceptors/LargePayloadDataConverter.cs | 96 +++++++++++++++++++
.../BlobPayloadStore.cs | 0
src/Worker/Core/DurableTaskWorkerOptions.cs | 1 +
8 files changed, 113 insertions(+), 114 deletions(-)
delete mode 100644 src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
create mode 100644 src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
create mode 100644 src/Extensions/AzureBlobPayloads/Interceptors/LargePayloadDataConverter.cs
rename src/Extensions/AzureBlobPayloads/{Converters => PayloadStore}/BlobPayloadStore.cs (100%)
diff --git a/src/Abstractions/DataConverter.cs b/src/Abstractions/DataConverter.cs
index 6c623c81..3248761a 100644
--- a/src/Abstractions/DataConverter.cs
+++ b/src/Abstractions/DataConverter.cs
@@ -13,8 +13,6 @@ namespace Microsoft.DurableTask;
/// uses the JSON serializer from the System.Text.Json namespace. Currently only strings are supported as
/// the serialized representation of data. Byte array payloads and streams are not supported by this abstraction.
/// Note that these methods all accept null values, in which case the return value should also be null.
-/// Implementations may choose to return a pointer or reference (such as an external token) to the data
-/// instead of the actual serialized data itself.
///
public abstract class DataConverter
{
diff --git a/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs b/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
deleted file mode 100644
index e0f7315d..00000000
--- a/src/Extensions/AzureBlobPayloads/Converters/LargePayloadDataConverter.cs
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT License.
-
-using System.Text;
-
-namespace Microsoft.DurableTask.Converters;
-
-///
-/// A DataConverter that wraps another DataConverter and externalizes payloads larger than a configured threshold.
-/// It uploads large payloads to an and returns a reference token string.
-/// On deserialization, it resolves tokens and feeds the underlying converter the original content.
-///
-///
-/// Initializes a new instance of the class.
-///
-/// The inner data converter to wrap.
-/// The external payload store to use.
-/// The options for the externalizing data converter.
-/// Thrown when , , or is null.
-public sealed class LargePayloadDataConverter(
- DataConverter innerConverter,
- IPayloadStore payloadStore,
- LargePayloadStorageOptions largePayloadStorageOptions
-) : DataConverter
-{
-
- readonly DataConverter innerConverter = innerConverter ?? throw new ArgumentNullException(nameof(innerConverter));
- readonly IPayloadStore payLoadStore = payloadStore ?? throw new ArgumentNullException(nameof(payloadStore));
- readonly LargePayloadStorageOptions largePayloadStorageOptions = largePayloadStorageOptions ?? throw new ArgumentNullException(nameof(largePayloadStorageOptions));
- // Use UTF-8 without a BOM (encoderShouldEmitUTF8Identifier=false). JSON in UTF-8 should not include a
- // byte order mark per RFC 8259, and omitting it avoids hidden extra bytes that could skew the
- // externalization threshold calculation and prevents interop issues with strict JSON parsers.
- // A few legacy tools rely on a BOM for encoding detection, but modern JSON tooling assumes BOM-less UTF-8.
- readonly Encoding utf8 = new UTF8Encoding(false);
-
- ///
- /// Serializes the value to a JSON string and uploads it to the external payload store if it exceeds the configured threshold.
- ///
- /// The value to serialize.
- /// The serialized value or the token if externalized.
- public override string? Serialize(object? value)
- {
- string? json = this.innerConverter.Serialize(value);
-
- if (string.IsNullOrEmpty(json))
- {
- return null;
- }
-
- int byteCount = this.utf8.GetByteCount(json);
- if (byteCount < this.largePayloadStorageOptions.ExternalizeThresholdBytes)
- {
- return json;
- }
-
- // Upload synchronously in this context by blocking on async. SDK call sites already run on threadpool.
- byte[] bytes = this.utf8.GetBytes(json);
- string token = this.payLoadStore.UploadAsync(bytes, CancellationToken.None).GetAwaiter().GetResult();
- return token;
- }
-
- ///
- /// Deserializes the JSON string or resolves the token to the original value.
- ///
- /// The JSON string or token.
- /// The type to deserialize to.
- /// The deserialized value.
- public override object? Deserialize(string? data, Type targetType)
- {
- if (data is null)
- {
- return null;
- }
-
- string toDeserialize = data;
- if (this.payLoadStore.IsKnownPayloadToken(data))
- {
- toDeserialize = this.payLoadStore.DownloadAsync(data, CancellationToken.None).GetAwaiter().GetResult();
- }
-
- return this.innerConverter.Deserialize(StripArrayCharacters(toDeserialize), targetType);
- }
-
- static string? StripArrayCharacters(string? input)
- {
- if (input != null && input.StartsWith('[') && input.EndsWith(']'))
- {
- // Strip the outer bracket characters
- return input[1..^1];
- }
-
- return input;
- }
-}
-
-
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
index ef77d50f..d84be37d 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
@@ -31,14 +31,14 @@ public static IDurableTaskClientBuilder UseExternalizedPayloads(
return new BlobPayloadStore(opts);
});
- builder.Services
- .AddOptions(builder.Name)
- .PostConfigure>((opt, store, monitor) =>
- {
- LargePayloadStorageOptions opts = monitor.Get(builder.Name);
- DataConverter inner = opt.DataConverter ?? Converters.JsonDataConverter.Default;
- opt.DataConverter = new LargePayloadDataConverter(inner, store, opts);
- });
+ // builder.Services
+ // .AddOptions(builder.Name)
+ // .PostConfigure>((opt, store, monitor) =>
+ // {
+ // LargePayloadStorageOptions opts = monitor.Get(builder.Name);
+ // DataConverter inner = opt.DataConverter ?? Converters.JsonDataConverter.Default;
+ // opt.DataConverter = new LargePayloadDataConverter(inner, store, opts);
+ // });
return builder;
}
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
index 8ae08fff..0d638113 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
@@ -30,14 +30,14 @@ public static IDurableTaskWorkerBuilder UseExternalizedPayloads(
return new BlobPayloadStore(opts);
});
- builder.Services
- .AddOptions(builder.Name)
- .PostConfigure>((opt, store, monitor) =>
- {
- LargePayloadStorageOptions opts = monitor.Get(builder.Name);
- DataConverter inner = opt.DataConverter ?? Converters.JsonDataConverter.Default;
- opt.DataConverter = new LargePayloadDataConverter(inner, store, opts);
- });
+ // builder.Services
+ // .AddOptions(builder.Name)
+ // .PostConfigure>((opt, store, monitor) =>
+ // {
+ // LargePayloadStorageOptions opts = monitor.Get(builder.Name);
+ // DataConverter inner = opt.DataConverter ?? Converters.JsonDataConverter.Default;
+ // opt.DataConverter = new LargePayloadDataConverter(inner, store, opts);
+ // });
return builder;
}
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
new file mode 100644
index 00000000..e69de29b
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/LargePayloadDataConverter.cs b/src/Extensions/AzureBlobPayloads/Interceptors/LargePayloadDataConverter.cs
new file mode 100644
index 00000000..fcef82d2
--- /dev/null
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/LargePayloadDataConverter.cs
@@ -0,0 +1,96 @@
+// // Copyright (c) Microsoft Corporation.
+// // Licensed under the MIT License.
+
+// using System.Text;
+
+// namespace Microsoft.DurableTask.Converters;
+
+// ///
+// /// A DataConverter that wraps another DataConverter and externalizes payloads larger than a configured threshold.
+// /// It uploads large payloads to an and returns a reference token string.
+// /// On deserialization, it resolves tokens and feeds the underlying converter the original content.
+// ///
+// ///
+// /// Initializes a new instance of the class.
+// ///
+// /// The inner data converter to wrap.
+// /// The external payload store to use.
+// /// The options for the externalizing data converter.
+// /// Thrown when , , or is null.
+// public sealed class LargePayloadDataConverter(
+// DataConverter innerConverter,
+// IPayloadStore payloadStore,
+// LargePayloadStorageOptions largePayloadStorageOptions
+// ) : DataConverter
+// {
+
+// readonly DataConverter innerConverter = innerConverter ?? throw new ArgumentNullException(nameof(innerConverter));
+// readonly IPayloadStore payLoadStore = payloadStore ?? throw new ArgumentNullException(nameof(payloadStore));
+// readonly LargePayloadStorageOptions largePayloadStorageOptions = largePayloadStorageOptions ?? throw new ArgumentNullException(nameof(largePayloadStorageOptions));
+// // Use UTF-8 without a BOM (encoderShouldEmitUTF8Identifier=false). JSON in UTF-8 should not include a
+// // byte order mark per RFC 8259, and omitting it avoids hidden extra bytes that could skew the
+// // externalization threshold calculation and prevents interop issues with strict JSON parsers.
+// // A few legacy tools rely on a BOM for encoding detection, but modern JSON tooling assumes BOM-less UTF-8.
+// readonly Encoding utf8 = new UTF8Encoding(false);
+
+// ///
+// /// Serializes the value to a JSON string and uploads it to the external payload store if it exceeds the configured threshold.
+// ///
+// /// The value to serialize.
+// /// The serialized value or the token if externalized.
+// public override string? Serialize(object? value)
+// {
+// string? json = this.innerConverter.Serialize(value);
+
+// if (string.IsNullOrEmpty(json))
+// {
+// return null;
+// }
+
+// int byteCount = this.utf8.GetByteCount(json);
+// if (byteCount < this.largePayloadStorageOptions.ExternalizeThresholdBytes)
+// {
+// return json;
+// }
+
+// // Upload synchronously in this context by blocking on async. SDK call sites already run on threadpool.
+// byte[] bytes = this.utf8.GetBytes(json);
+// string token = this.payLoadStore.UploadAsync(bytes, CancellationToken.None).GetAwaiter().GetResult();
+// return token;
+// }
+
+// ///
+// /// Deserializes the JSON string or resolves the token to the original value.
+// ///
+// /// The JSON string or token.
+// /// The type to deserialize to.
+// /// The deserialized value.
+// public override object? Deserialize(string? data, Type targetType)
+// {
+// if (data is null)
+// {
+// return null;
+// }
+
+// string toDeserialize = data;
+// if (this.payLoadStore.IsKnownPayloadToken(data))
+// {
+// toDeserialize = this.payLoadStore.DownloadAsync(data, CancellationToken.None).GetAwaiter().GetResult();
+// }
+
+// return this.innerConverter.Deserialize(StripArrayCharacters(toDeserialize), targetType);
+// }
+
+// static string? StripArrayCharacters(string? input)
+// {
+// if (input != null && input.StartsWith('[') && input.EndsWith(']'))
+// {
+// // Strip the outer bracket characters
+// return input[1..^1];
+// }
+
+// return input;
+// }
+// }
+
+
diff --git a/src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
similarity index 100%
rename from src/Extensions/AzureBlobPayloads/Converters/BlobPayloadStore.cs
rename to src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
diff --git a/src/Worker/Core/DurableTaskWorkerOptions.cs b/src/Worker/Core/DurableTaskWorkerOptions.cs
index c65ccdbd..703bbbd4 100644
--- a/src/Worker/Core/DurableTaskWorkerOptions.cs
+++ b/src/Worker/Core/DurableTaskWorkerOptions.cs
@@ -162,6 +162,7 @@ public DataConverter DataConverter
///
internal bool DataConverterExplicitlySet { get; private set; }
+
///
/// Applies these option values to another.
///
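
The commented-out PostConfigure blocks mark a design shift: instead of wrapping the DataConverter, the feature moves toward intercepting the gRPC channel so payloads are externalized and resolved at the protocol boundary. The general shape of that approach, as a sketch (MyPayloadInterceptor is a no-op stand-in and the sidecar address is assumed):

    using Grpc.Core;
    using Grpc.Core.Interceptors;
    using Grpc.Net.Client;

    GrpcChannel channel = GrpcChannel.ForAddress("http://localhost:4001"); // assumed sidecar endpoint
    CallInvoker invoker = channel.Intercept(new MyPayloadInterceptor());   // every RPC now flows through the interceptor

    sealed class MyPayloadInterceptor : Interceptor { } // stand-in for the real AzureBlobPayloadsInterceptor
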
From 2d05b309fbb0f84858a01b78e386df8cc53e66c0 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Tue, 16 Sep 2025 14:34:54 -0700
Subject: [PATCH 26/53] add interceptor v1
---
...ientBuilderExtensions.AzureBlobPayloads.cs | 21 ++
...rkerBuilderExtensions.AzureBlobPayloads.cs | 21 ++
.../AzureBlobPayloadsInterceptor.cs | 229 ++++++++++++++++++
3 files changed, 271 insertions(+)
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
index d84be37d..5d6a5fb0 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
@@ -31,6 +31,27 @@ public static IDurableTaskClientBuilder UseExternalizedPayloads(
return new BlobPayloadStore(opts);
});
+ // Wrap the gRPC CallInvoker with our interceptor when using the gRPC client
+ builder.Services
+ .AddOptions<GrpcDurableTaskClientOptions>(builder.Name)
+ .PostConfigure<IPayloadStore, IOptionsMonitor<LargePayloadStorageOptions>>((opt, store, monitor) =>
+ {
+ LargePayloadStorageOptions opts = monitor.Get(builder.Name);
+ if (opt.Channel is not null)
+ {
+ var invoker = opt.Channel.Intercept(new Worker.Grpc.Internal.AzureBlobPayloadsInterceptor(store, opts));
+ opt.CallInvoker = invoker;
+ }
+ else if (opt.CallInvoker is not null)
+ {
+ opt.CallInvoker = opt.CallInvoker.Intercept(new Worker.Grpc.Internal.AzureBlobPayloadsInterceptor(store, opts));
+ }
+ else if (!string.IsNullOrEmpty(opt.Address))
+ {
+ // Channel will be built later; we can't intercept here. This will be handled in the client if CallInvoker is null.
+ }
+ });
+
// builder.Services
// .AddOptions(builder.Name)
// .PostConfigure>((opt, store, monitor) =>
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
index 0d638113..fc96354f 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
@@ -30,6 +30,27 @@ public static IDurableTaskWorkerBuilder UseExternalizedPayloads(
return new BlobPayloadStore(opts);
});
+ // Wrap the gRPC CallInvoker with our interceptor when using the gRPC worker
+ builder.Services
+ .AddOptions<GrpcDurableTaskWorkerOptions>(builder.Name)
+ .PostConfigure<IPayloadStore, IOptionsMonitor<LargePayloadStorageOptions>>((opt, store, monitor) =>
+ {
+ LargePayloadStorageOptions opts = monitor.Get(builder.Name);
+ if (opt.Channel is not null)
+ {
+ var invoker = opt.Channel.Intercept(new Grpc.Internal.AzureBlobPayloadsInterceptor(store, opts));
+ opt.CallInvoker = invoker;
+ }
+ else if (opt.CallInvoker is not null)
+ {
+ opt.CallInvoker = opt.CallInvoker.Intercept(new Grpc.Internal.AzureBlobPayloadsInterceptor(store, opts));
+ }
+ else if (!string.IsNullOrEmpty(opt.Address))
+ {
+ // Channel will be built later; worker will build it, intercept when possible through CallInvoker path.
+ }
+ });
+
// builder.Services
// .AddOptions(builder.Name)
// .PostConfigure>((opt, store, monitor) =>
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
index e69de29b..09a1b97e 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
@@ -0,0 +1,229 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+using System.Text;
+using Grpc.Core;
+using Grpc.Core.Interceptors;
+using Microsoft.DurableTask.Converters;
+using P = Microsoft.DurableTask.Protobuf;
+
+namespace Microsoft.DurableTask.Worker.Grpc.Internal;
+
+///
+/// gRPC interceptor that externalizes large payloads to an on requests
+/// and resolves known payload tokens on responses.
+///
+sealed class AzureBlobPayloadsInterceptor : Interceptor
+{
+ readonly IPayloadStore payloadStore;
+ readonly LargePayloadStorageOptions options;
+
+ public AzureBlobPayloadsInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
+ {
+ this.payloadStore = payloadStore;
+ this.options = options;
+ }
+
+ // Unary: externalize on request, resolve on response
+ public override AsyncUnaryCall<TResponse> AsyncUnaryCall<TRequest, TResponse>(
+ TRequest request,
+ ClientInterceptorContext<TRequest, TResponse> context,
+ AsyncUnaryCallContinuation<TRequest, TResponse> continuation)
+ {
+ // Mutate request payloads before sending
+ this.ExternalizeRequestPayloads(request, context);
+
+ AsyncUnaryCall call = continuation(request, context);
+
+ // Wrap response task to resolve payloads
+ async Task<TResponse> ResolveAsync(Task<TResponse> inner)
+ {
+ TResponse response = await inner.ConfigureAwait(false);
+ await this.ResolveResponsePayloadsAsync(response, context.CancellationToken);
+ return response;
+ }
+
+ return new AsyncUnaryCall<TResponse>(
+ ResolveAsync(call.ResponseAsync),
+ call.ResponseHeadersAsync,
+ call.GetStatus,
+ call.GetTrailers,
+ call.Dispose);
+ }
+
+ // Server streaming: resolve payloads in streamed responses (e.g., GetWorkItems)
+ public override AsyncServerStreamingCall<TResponse> AsyncServerStreamingCall<TRequest, TResponse>(
+ TRequest request,
+ ClientInterceptorContext<TRequest, TResponse> context,
+ AsyncServerStreamingCallContinuation<TRequest, TResponse> continuation)
+ {
+ this.ExternalizeRequestPayloads(request, context);
+
+ AsyncServerStreamingCall call = continuation(request, context);
+
+ IAsyncStreamReader<TResponse> wrapped = new TransformingStreamReader<TResponse>(call.ResponseStream, async (msg, ct) =>
+ {
+ await this.ResolveResponsePayloadsAsync(msg, ct).ConfigureAwait(false);
+ return msg;
+ });
+
+ return new AsyncServerStreamingCall<TResponse>(
+ wrapped,
+ call.ResponseHeadersAsync,
+ call.GetStatus,
+ call.GetTrailers,
+ call.Dispose);
+ }
+
+ void ExternalizeRequestPayloads<TRequest, TResponse>(TRequest request, ClientInterceptorContext<TRequest, TResponse> context)
+ {
+ // Client -> sidecar
+ switch (request)
+ {
+ case P.CreateInstanceRequest r:
+ this.MaybeExternalize(ref r.Input);
+ break;
+ case P.RaiseEventRequest r:
+ this.MaybeExternalize(ref r.Input);
+ break;
+ case P.TerminateRequest r:
+ this.MaybeExternalize(ref r.Output);
+ break;
+ case P.ActivityResponse r:
+ this.MaybeExternalize(ref r.Result);
+ break;
+ case P.OrchestratorResponse r:
+ this.MaybeExternalize(ref r.CustomStatus);
+ foreach (P.OrchestratorAction a in r.Actions)
+ {
+ if (a.CompleteOrchestration is { } complete)
+ {
+ this.MaybeExternalize(ref complete.Result);
+ }
+ }
+ break;
+ }
+ }
+
+ async Task ResolveResponsePayloadsAsync<TResponse>(TResponse response, CancellationToken cancellation)
+ {
+ // Sidecar -> client/worker
+ switch (response)
+ {
+ case P.GetInstanceResponse r when r.OrchestrationState is { } s:
+ this.MaybeResolve(ref s.Input, cancellation);
+ this.MaybeResolve(ref s.Output, cancellation);
+ this.MaybeResolve(ref s.CustomStatus, cancellation);
+ break;
+ case P.QueryInstancesResponse r:
+ foreach (P.OrchestrationState s in r.OrchestrationState)
+ {
+ this.MaybeResolve(ref s.Input, cancellation);
+ this.MaybeResolve(ref s.Output, cancellation);
+ this.MaybeResolve(ref s.CustomStatus, cancellation);
+ }
+ break;
+ case P.WorkItem wi:
+ // Resolve activity input
+ if (wi.ActivityRequest is { } ar)
+ {
+ this.MaybeResolve(ref ar.Input, cancellation);
+ }
+
+ // Resolve orchestration input embedded in ExecutionStarted event and external events
+ if (wi.OrchestratorRequest is { } or)
+ {
+ foreach (var e in or.PastEvents)
+ {
+ this.ResolveEventPayloads(e, cancellation);
+ }
+ foreach (var e in or.NewEvents)
+ {
+ this.ResolveEventPayloads(e, cancellation);
+ }
+ }
+ break;
+ }
+ await Task.CompletedTask;
+ }
+
+ void ResolveEventPayloads(P.HistoryEvent e, CancellationToken cancellation)
+ {
+ switch (e.EventTypeCase)
+ {
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionStarted:
+ if (e.ExecutionStarted is { } es)
+ {
+ this.MaybeResolve(ref es.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EventRaised:
+ if (e.EventRaised is { } er)
+ {
+ this.MaybeResolve(ref er.Input, cancellation);
+ }
+ break;
+ }
+ }
+
+ void MaybeExternalize(ref string? value)
+ {
+ if (string.IsNullOrEmpty(value))
+ {
+ return;
+ }
+
+ int size = Encoding.UTF8.GetByteCount(value);
+ if (size < this.options.ExternalizeThresholdBytes)
+ {
+ return;
+ }
+
+ // Upload synchronously via .GetAwaiter().GetResult() because interceptor API is sync for requests
+ string token = this.payloadStore.UploadAsync(Encoding.UTF8.GetBytes(value), CancellationToken.None)
+ .GetAwaiter()
+ .GetResult();
+ value = token;
+ }
+
+ void MaybeResolve(ref string? value, CancellationToken cancellation)
+ {
+ if (string.IsNullOrEmpty(value) || !this.payloadStore.IsKnownPayloadToken(value))
+ {
+ return;
+ }
+
+ string resolved = this.payloadStore.DownloadAsync(value, cancellation)
+ .GetAwaiter()
+ .GetResult();
+ value = resolved;
+ }
+
+ sealed class TransformingStreamReader<T> : IAsyncStreamReader<T>
+ {
+ readonly IAsyncStreamReader<T> inner;
+ readonly Func<T, CancellationToken, Task<T>> transform;
+
+ public TransformingStreamReader(IAsyncStreamReader<T> inner, Func<T, CancellationToken, Task<T>> transform)
+ {
+ this.inner = inner;
+ this.transform = transform;
+ }
+
+ public T Current { get; private set; } = default!;
+
+ public async Task<bool> MoveNext(CancellationToken cancellationToken)
+ {
+ bool hasNext = await this.inner.MoveNext(cancellationToken).ConfigureAwait(false);
+ if (!hasNext)
+ {
+ return false;
+ }
+
+ this.Current = await this.transform(this.inner.Current, cancellationToken).ConfigureAwait(false);
+ return true;
+ }
+ }
+}
+
+
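
End to end, wiring the feature into a host then looks roughly like the following. This is a sketch, not code from this repo: the sidecar address and the UseGrpc(channel) overload are assumptions, while the option names come from LargePayloadStorageOptions as shown in these patches.

    using Grpc.Net.Client;
    using Microsoft.DurableTask;
    using Microsoft.Extensions.DependencyInjection;
    using Microsoft.Extensions.Hosting;

    HostApplicationBuilder builder = Host.CreateApplicationBuilder(args);
    GrpcChannel channel = GrpcChannel.ForAddress("http://localhost:4001"); // assumed sidecar endpoint

    builder.Services.AddDurableTaskWorker(worker =>
    {
        worker.UseGrpc(channel); // the interceptor wiring needs a Channel or CallInvoker to wrap
        worker.UseExternalizedPayloads(opts =>
        {
            opts.ConnectionString = "UseDevelopmentStorage=true";
            opts.ContainerName = "durabletask-payloads";
            opts.ExternalizeThresholdBytes = 512 * 1024; // externalize payloads of 512 KB or more
        });
    });

    builder.Services.AddDurableTaskClient(client =>
    {
        client.UseGrpc(channel);
        client.UseExternalizedPayloads(opts => opts.ConnectionString = "UseDevelopmentStorage=true");
    });

    builder.Build().Run();
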
From 05c1287164edc12aaf2785ec87d10426fdb48473 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Tue, 16 Sep 2025 14:52:18 -0700
Subject: [PATCH 27/53] add interceptor v2
---
.../AzureBlobPayloads.csproj | 6 +-
...ientBuilderExtensions.AzureBlobPayloads.cs | 33 +-
...rkerBuilderExtensions.AzureBlobPayloads.cs | 29 +-
.../AzureBlobPayloadsInterceptor.cs | 370 ++++++++++++++----
.../PayloadStore/BlobPayloadStore.cs | 3 +-
5 files changed, 333 insertions(+), 108 deletions(-)
diff --git a/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj b/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj
index 33802286..9be33cac 100644
--- a/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj
+++ b/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj
@@ -12,17 +12,19 @@
+
+
+
-
-
+
\ No newline at end of file
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
index 5d6a5fb0..b284dc2f 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
@@ -1,13 +1,15 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
+using Grpc.Core.Interceptors;
using Microsoft.DurableTask.Client;
+using Microsoft.DurableTask.Client.Grpc;
using Microsoft.DurableTask.Converters;
+using Microsoft.DurableTask.Worker.Grpc.Internal;
using Microsoft.Extensions.DependencyInjection;
-using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Options;
-namespace Microsoft.DurableTask.Client;
+namespace Microsoft.DurableTask;
///
/// Extension methods to enable externalized payloads using Azure Blob Storage for Durable Task Client.
@@ -17,6 +19,10 @@ public static class DurableTaskClientBuilderExtensionsAzureBlobPayloads
///
/// Enables externalized payload storage using Azure Blob Storage for the specified client builder.
///
+ /// The builder to configure.
+ /// The callback to configure the storage options.
+ /// The original builder, for call chaining.
+ ///
public static IDurableTaskClientBuilder UseExternalizedPayloads(
this IDurableTaskClientBuilder builder,
Action configure)
@@ -39,30 +45,23 @@ public static IDurableTaskClientBuilder UseExternalizedPayloads(
LargePayloadStorageOptions opts = monitor.Get(builder.Name);
if (opt.Channel is not null)
{
- var invoker = opt.Channel.Intercept(new Worker.Grpc.Internal.AzureBlobPayloadsInterceptor(store, opts));
+ Grpc.Core.CallInvoker invoker = opt.Channel.Intercept(new AzureBlobPayloadsInterceptor(store, opts));
opt.CallInvoker = invoker;
+
+ // Ensure client uses the intercepted invoker path
+ opt.Channel = null;
}
else if (opt.CallInvoker is not null)
{
- opt.CallInvoker = opt.CallInvoker.Intercept(new Worker.Grpc.Internal.AzureBlobPayloadsInterceptor(store, opts));
+ opt.CallInvoker = opt.CallInvoker.Intercept(new AzureBlobPayloadsInterceptor(store, opts));
}
- else if (!string.IsNullOrEmpty(opt.Address))
+ else
{
- // Channel will be built later; we can't intercept here. This will be handled in the client if CallInvoker is null.
+ throw new ArgumentException(
+ "Channel or CallInvoker must be provided to use Azure Blob Payload Externalization feature");
}
});
- // builder.Services
- // .AddOptions(builder.Name)
- // .PostConfigure>((opt, store, monitor) =>
- // {
- // LargePayloadStorageOptions opts = monitor.Get(builder.Name);
- // DataConverter inner = opt.DataConverter ?? Converters.JsonDataConverter.Default;
- // opt.DataConverter = new LargePayloadDataConverter(inner, store, opts);
- // });
-
return builder;
}
}
-
-
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
index fc96354f..da9081dc 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
@@ -5,8 +5,11 @@
using Microsoft.DurableTask.Worker;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Options;
+using Microsoft.DurableTask.Worker.Grpc;
+using Grpc.Net.Client;
+using Grpc.Core.Interceptors;
-namespace Microsoft.DurableTask.Worker;
+namespace Microsoft.DurableTask;
///
/// Extension methods to enable externalized payloads using Azure Blob Storage for Durable Task Worker.
@@ -16,6 +19,9 @@ public static class DurableTaskWorkerBuilderExtensionsAzureBlobPayloads
///
/// Enables externalized payload storage using Azure Blob Storage for the specified worker builder.
///
+ /// The builder to configure.
+ /// The callback to configure the storage options.
+ /// The original builder, for call chaining.
public static IDurableTaskWorkerBuilder UseExternalizedPayloads(
this IDurableTaskWorkerBuilder builder,
Action configure)
@@ -38,28 +44,23 @@ public static IDurableTaskWorkerBuilder UseExternalizedPayloads(
LargePayloadStorageOptions opts = monitor.Get(builder.Name);
if (opt.Channel is not null)
{
- var invoker = opt.Channel.Intercept(new Grpc.Internal.AzureBlobPayloadsInterceptor(store, opts));
+ var invoker = opt.Channel.Intercept(new AzureBlobPayloadsInterceptor(store, opts));
opt.CallInvoker = invoker;
+ // Ensure worker uses the intercepted invoker path
+ opt.Channel = null;
}
else if (opt.CallInvoker is not null)
{
- opt.CallInvoker = opt.CallInvoker.Intercept(new Grpc.Internal.AzureBlobPayloadsInterceptor(store, opts));
+ opt.CallInvoker = opt.CallInvoker.Intercept(new AzureBlobPayloadsInterceptor(store, opts));
}
- else if (!string.IsNullOrEmpty(opt.Address))
+ else
{
- // Channel will be built later; worker will build it, intercept when possible through CallInvoker path.
+ throw new ArgumentException(
+ "Channel or CallInvoker must be provided to use Azure Blob Payload Externalization feature"
+ );
}
});
- // builder.Services
- // .AddOptions(builder.Name)
- // .PostConfigure>((opt, store, monitor) =>
- // {
- // LargePayloadStorageOptions opts = monitor.Get(builder.Name);
- // DataConverter inner = opt.DataConverter ?? Converters.JsonDataConverter.Default;
- // opt.DataConverter = new LargePayloadDataConverter(inner, store, opts);
- // });
-
return builder;
}
}
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
index 09a1b97e..3e0b1898 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
@@ -7,63 +7,110 @@
using Microsoft.DurableTask.Converters;
using P = Microsoft.DurableTask.Protobuf;
-namespace Microsoft.DurableTask.Worker.Grpc.Internal;
+namespace Microsoft.DurableTask;
///
/// gRPC interceptor that externalizes large payloads to an on requests
/// and resolves known payload tokens on responses.
///
-sealed class AzureBlobPayloadsInterceptor : Interceptor
+sealed class AzureBlobPayloadsInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options) : Interceptor
{
- readonly IPayloadStore payloadStore;
- readonly LargePayloadStorageOptions options;
-
- public AzureBlobPayloadsInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
- {
- this.payloadStore = payloadStore;
- this.options = options;
- }
+ readonly IPayloadStore payloadStore = payloadStore;
+ readonly LargePayloadStorageOptions options = options;
// Unary: externalize on request, resolve on response
+
+ ///
public override AsyncUnaryCall<TResponse> AsyncUnaryCall<TRequest, TResponse>(
TRequest request,
ClientInterceptorContext<TRequest, TResponse> context,
AsyncUnaryCallContinuation<TRequest, TResponse> continuation)
{
- // Mutate request payloads before sending
- this.ExternalizeRequestPayloads(request, context);
-
- AsyncUnaryCall call = continuation(request, context);
+ // Build the underlying call lazily after async externalization
+ Task<AsyncUnaryCall<TResponse>> startCallTask = Task.Run(async () =>
+ {
+ // Externalize first; if this fails, do not proceed to send the gRPC call
+ await this.ExternalizeRequestPayloadsAsync(request, context.Options.CancellationToken);
+ // Only if externalization succeeds, proceed with the continuation
+ return continuation(request, context);
+ });
- // Wrap response task to resolve payloads
- async Task ResolveAsync(Task inner)
+ async Task<TResponse> ResponseAsync()
{
- TResponse response = await inner.ConfigureAwait(false);
- await this.ResolveResponsePayloadsAsync(response, context.CancellationToken);
+ AsyncUnaryCall<TResponse> innerCall = await startCallTask;
+ TResponse response = await innerCall.ResponseAsync;
+ await this.ResolveResponsePayloadsAsync(response, context.Options.CancellationToken);
return response;
}
+ async Task<Metadata> ResponseHeadersAsync()
+ {
+ AsyncUnaryCall<TResponse> innerCall = await startCallTask;
+ return await innerCall.ResponseHeadersAsync;
+ }
+
+ Status GetStatus()
+ {
+ if (startCallTask.IsCanceled)
+ {
+ return new Status(StatusCode.Cancelled, "Call was cancelled.");
+ }
+
+ if (startCallTask.IsFaulted)
+ {
+ return new Status(StatusCode.Internal, startCallTask.Exception?.Message ?? "Unknown error");
+ }
+ if (startCallTask.Status == TaskStatus.RanToCompletion)
+ {
+ return startCallTask.Result.GetStatus();
+ }
+
+ // Not started yet; unknown
+ return new Status(StatusCode.Unknown, string.Empty);
+ }
+
+ Metadata GetTrailers()
+ {
+ return startCallTask.Status == TaskStatus.RanToCompletion ? startCallTask.Result.GetTrailers() : [];
+ }
+
+ void Dispose()
+ {
+ _ = startCallTask.ContinueWith(
+ t =>
+ {
+ if (t.Status == TaskStatus.RanToCompletion)
+ {
+ t.Result.Dispose();
+ }
+ },
+ CancellationToken.None,
+ TaskContinuationOptions.ExecuteSynchronously,
+ TaskScheduler.Default);
+ }
+
+ return new AsyncUnaryCall<TResponse>(
- ResolveAsync(call.ResponseAsync),
- call.ResponseHeadersAsync,
- call.GetStatus,
- call.GetTrailers,
- call.Dispose);
+ ResponseAsync(),
+ ResponseHeadersAsync(),
+ GetStatus,
+ GetTrailers,
+ Dispose);
}
// Server streaming: resolve payloads in streamed responses (e.g., GetWorkItems)
+
+ ///
public override AsyncServerStreamingCall<TResponse> AsyncServerStreamingCall<TRequest, TResponse>(
TRequest request,
ClientInterceptorContext<TRequest, TResponse> context,
AsyncServerStreamingCallContinuation<TRequest, TResponse> continuation)
{
- this.ExternalizeRequestPayloads(request, context);
-
+ // For streaming, request externalization is not needed currently
AsyncServerStreamingCall call = continuation(request, context);
IAsyncStreamReader<TResponse> wrapped = new TransformingStreamReader<TResponse>(call.ResponseStream, async (msg, ct) =>
{
- await this.ResolveResponsePayloadsAsync(msg, ct).ConfigureAwait(false);
+ await this.ResolveResponsePayloadsAsync(msg, ct);
return msg;
});
@@ -75,33 +122,112 @@ public override AsyncServerStreamingCall AsyncServerStreamingCall(TRequest request, ClientInterceptorContext context)
+ Task ExternalizeRequestPayloadsAsync<TRequest>(TRequest request, CancellationToken cancellation)
{
// Client -> sidecar
switch (request)
{
case P.CreateInstanceRequest r:
- this.MaybeExternalize(ref r.Input);
- break;
+ return this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);
case P.RaiseEventRequest r:
- this.MaybeExternalize(ref r.Input);
- break;
+ return this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);
case P.TerminateRequest r:
- this.MaybeExternalize(ref r.Output);
- break;
+ return this.MaybeExternalizeAsync(v => r.Output = v, r.Output, cancellation);
+ case P.SignalEntityRequest r:
+ return this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);
case P.ActivityResponse r:
- this.MaybeExternalize(ref r.Result);
- break;
+ return this.MaybeExternalizeAsync(v => r.Result = v, r.Result, cancellation);
case P.OrchestratorResponse r:
- this.MaybeExternalize(ref r.CustomStatus);
- foreach (P.OrchestratorAction a in r.Actions)
+ return this.ExternalizeOrchestratorResponseAsync(r, cancellation);
+ case P.EntityBatchResult r:
+ return this.ExternalizeEntityBatchResultAsync(r, cancellation);
+ case P.EntityBatchRequest r:
+ return this.ExternalizeEntityBatchRequestAsync(r, cancellation);
+ case P.EntityRequest r:
+ return this.MaybeExternalizeAsync(v => r.EntityState = v, r.EntityState, cancellation);
+ }
+
+ return Task.CompletedTask;
+ }
+
+ async Task ExternalizeOrchestratorResponseAsync(P.OrchestratorResponse r, CancellationToken cancellation)
+ {
+ await this.MaybeExternalizeAsync(v => r.CustomStatus = v, r.CustomStatus, cancellation);
+ foreach (P.OrchestratorAction a in r.Actions)
+ {
+ if (a.CompleteOrchestration is { } complete)
+ {
+ await this.MaybeExternalizeAsync(v => complete.Result = v, complete.Result, cancellation);
+ await this.MaybeExternalizeAsync(v => complete.Details = v, complete.Details, cancellation);
+ }
+ if (a.TerminateOrchestration is { } term)
+ {
+ await this.MaybeExternalizeAsync(v => term.Reason = v, term.Reason, cancellation);
+ }
+ if (a.ScheduleTask is { } schedule)
+ {
+ await this.MaybeExternalizeAsync(v => schedule.Input = v, schedule.Input, cancellation);
+ }
+ if (a.CreateSubOrchestration is { } sub)
+ {
+ await this.MaybeExternalizeAsync(v => sub.Input = v, sub.Input, cancellation);
+ }
+ if (a.SendEvent is { } sendEvt)
+ {
+ await this.MaybeExternalizeAsync(v => sendEvt.Data = v, sendEvt.Data, cancellation);
+ }
+ if (a.SendEntityMessage is { } entityMsg)
+ {
+ if (entityMsg.EntityOperationSignaled is { } sig)
{
- if (a.CompleteOrchestration is { } complete)
- {
- this.MaybeExternalize(ref complete.Result);
- }
+ await this.MaybeExternalizeAsync(v => sig.Input = v, sig.Input, cancellation);
}
- break;
+ if (entityMsg.EntityOperationCalled is { } called)
+ {
+ await this.MaybeExternalizeAsync(v => called.Input = v, called.Input, cancellation);
+ }
+ }
+ }
+ }
+
+ async Task ExternalizeEntityBatchResultAsync(P.EntityBatchResult r, CancellationToken cancellation)
+ {
+ await this.MaybeExternalizeAsync(v => r.EntityState = v, r.EntityState, cancellation);
+ if (r.Results != null)
+ {
+ foreach (P.OperationResult result in r.Results)
+ {
+ if (result.Success is { } success)
+ {
+ await this.MaybeExternalizeAsync(v => success.Result = v, success.Result, cancellation);
+ }
+ }
+ }
+ if (r.Actions != null)
+ {
+ foreach (P.OperationAction action in r.Actions)
+ {
+ if (action.SendSignal is { } sendSig)
+ {
+ await this.MaybeExternalizeAsync(v => sendSig.Input = v, sendSig.Input, cancellation);
+ }
+ if (action.StartNewOrchestration is { } start)
+ {
+ await this.MaybeExternalizeAsync(v => start.Input = v, start.Input, cancellation);
+ }
+ }
+ }
+ }
+
+ async Task ExternalizeEntityBatchRequestAsync(P.EntityBatchRequest r, CancellationToken cancellation)
+ {
+ await this.MaybeExternalizeAsync(v => r.EntityState = v, r.EntityState, cancellation);
+ if (r.Operations != null)
+ {
+ foreach (P.OperationRequest op in r.Operations)
+ {
+ await this.MaybeExternalizeAsync(v => op.Input = v, op.Input, cancellation);
+ }
}
}
@@ -111,92 +237,190 @@ async Task ResolveResponsePayloadsAsync<TResponse>(TResponse response, Cancellat
switch (response)
{
case P.GetInstanceResponse r when r.OrchestrationState is { } s:
- this.MaybeResolve(ref s.Input, cancellation);
- this.MaybeResolve(ref s.Output, cancellation);
- this.MaybeResolve(ref s.CustomStatus, cancellation);
+ await this.MaybeResolveAsync(v => s.Input = v, s.Input, cancellation);
+ await this.MaybeResolveAsync(v => s.Output = v, s.Output, cancellation);
+ await this.MaybeResolveAsync(v => s.CustomStatus = v, s.CustomStatus, cancellation);
break;
case P.QueryInstancesResponse r:
foreach (P.OrchestrationState s in r.OrchestrationState)
{
- this.MaybeResolve(ref s.Input, cancellation);
- this.MaybeResolve(ref s.Output, cancellation);
- this.MaybeResolve(ref s.CustomStatus, cancellation);
+ await this.MaybeResolveAsync(v => s.Input = v, s.Input, cancellation);
+ await this.MaybeResolveAsync(v => s.Output = v, s.Output, cancellation);
+ await this.MaybeResolveAsync(v => s.CustomStatus = v, s.CustomStatus, cancellation);
+ }
+
+ break;
+ case P.GetEntityResponse r when r.Entity is { } em:
+ await this.MaybeResolveAsync(v => em.SerializedState = v, em.SerializedState, cancellation);
+ break;
+ case P.QueryEntitiesResponse r:
+ foreach (P.EntityMetadata em in r.Entities)
+ {
+ await this.MaybeResolveAsync(v => em.SerializedState = v, em.SerializedState, cancellation);
}
break;
case P.WorkItem wi:
// Resolve activity input
if (wi.ActivityRequest is { } ar)
{
- this.MaybeResolve(ref ar.Input, cancellation);
+ await this.MaybeResolveAsync(v => ar.Input = v, ar.Input, cancellation);
}
// Resolve orchestration input embedded in ExecutionStarted event and external events
if (wi.OrchestratorRequest is { } or)
{
- foreach (var e in or.PastEvents)
+ foreach (P.HistoryEvent? e in or.PastEvents)
{
- this.ResolveEventPayloads(e, cancellation);
+ await this.ResolveEventPayloadsAsync(e, cancellation);
}
- foreach (var e in or.NewEvents)
+
+ foreach (P.HistoryEvent? e in or.NewEvents)
{
- this.ResolveEventPayloads(e, cancellation);
+ await this.ResolveEventPayloadsAsync(e, cancellation);
}
}
+
break;
}
- await Task.CompletedTask;
}
- void ResolveEventPayloads(P.HistoryEvent e, CancellationToken cancellation)
+ async Task ResolveEventPayloadsAsync(P.HistoryEvent e, CancellationToken cancellation)
{
switch (e.EventTypeCase)
{
case P.HistoryEvent.EventTypeOneofCase.ExecutionStarted:
if (e.ExecutionStarted is { } es)
{
- this.MaybeResolve(ref es.Input, cancellation);
+ await this.MaybeResolveAsync(v => es.Input = v, es.Input, cancellation);
}
+
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionCompleted:
+ if (e.ExecutionCompleted is { } ec)
+ {
+ await this.MaybeResolveAsync(v => ec.Result = v, ec.Result, cancellation);
+ }
+
break;
case P.HistoryEvent.EventTypeOneofCase.EventRaised:
if (e.EventRaised is { } er)
{
- this.MaybeResolve(ref er.Input, cancellation);
+ await this.MaybeResolveAsync(v => er.Input = v, er.Input, cancellation);
+ }
+
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.TaskScheduled:
+ if (e.TaskScheduled is { } ts)
+ {
+ await this.MaybeResolveAsync(v => ts.Input = v, ts.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.TaskCompleted:
+ if (e.TaskCompleted is { } tc)
+ {
+ await this.MaybeResolveAsync(v => tc.Result = v, tc.Result, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.SubOrchestrationInstanceCreated:
+ if (e.SubOrchestrationInstanceCreated is { } soc)
+ {
+ await this.MaybeResolveAsync(v => soc.Input = v, soc.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.SubOrchestrationInstanceCompleted:
+ if (e.SubOrchestrationInstanceCompleted is { } sox)
+ {
+ await this.MaybeResolveAsync(v => sox.Result = v, sox.Result, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EventSent:
+ if (e.EventSent is { } esent)
+ {
+ await this.MaybeResolveAsync(v => esent.Input = v, esent.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.GenericEvent:
+ if (e.GenericEvent is { } ge)
+ {
+ await this.MaybeResolveAsync(v => ge.Data = v, ge.Data, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ContinueAsNew:
+ if (e.ContinueAsNew is { } can)
+ {
+ await this.MaybeResolveAsync(v => can.Input = v, can.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionTerminated:
+ if (e.ExecutionTerminated is { } et)
+ {
+ await this.MaybeResolveAsync(v => et.Input = v, et.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionSuspended:
+ if (e.ExecutionSuspended is { } esus)
+ {
+ await this.MaybeResolveAsync(v => esus.Input = v, esus.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionResumed:
+ if (e.ExecutionResumed is { } eres)
+ {
+ await this.MaybeResolveAsync(v => eres.Input = v, eres.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EntityOperationSignaled:
+ if (e.EntityOperationSignaled is { } eos)
+ {
+ await this.MaybeResolveAsync(v => eos.Input = v, eos.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EntityOperationCalled:
+ if (e.EntityOperationCalled is { } eoc)
+ {
+ await this.MaybeResolveAsync(v => eoc.Input = v, eoc.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EntityOperationCompleted:
+ if (e.EntityOperationCompleted is { } ecomp)
+ {
+ await this.MaybeResolveAsync(v => ecomp.Output = v, ecomp.Output, cancellation);
}
break;
}
}
- void MaybeExternalize(ref string? value)
+    Task MaybeExternalizeAsync(Action<string> assign, string? value, CancellationToken cancellation)
{
if (string.IsNullOrEmpty(value))
{
- return;
+ return Task.CompletedTask;
}
int size = Encoding.UTF8.GetByteCount(value);
if (size < this.options.ExternalizeThresholdBytes)
{
- return;
+ return Task.CompletedTask;
}
- // Upload synchronously via .GetAwaiter().GetResult() because interceptor API is sync for requests
- string token = this.payloadStore.UploadAsync(Encoding.UTF8.GetBytes(value), CancellationToken.None)
- .GetAwaiter()
- .GetResult();
- value = token;
+ return UploadAsync();
+
+ async Task UploadAsync()
+ {
+ string token = await this.payloadStore.UploadAsync(Encoding.UTF8.GetBytes(value), cancellation);
+ assign(token);
+ }
}
- void MaybeResolve(ref string? value, CancellationToken cancellation)
+    async Task MaybeResolveAsync(Action<string> assign, string? value, CancellationToken cancellation)
{
if (string.IsNullOrEmpty(value) || !this.payloadStore.IsKnownPayloadToken(value))
{
return;
}
- string resolved = this.payloadStore.DownloadAsync(value, cancellation)
- .GetAwaiter()
- .GetResult();
- value = resolved;
+ string resolved = await this.payloadStore.DownloadAsync(value, cancellation);
+ assign(resolved);
}
sealed class TransformingStreamReader<T> : IAsyncStreamReader<T>
@@ -214,16 +438,14 @@ public TransformingStreamReader(IAsyncStreamReader<T> inner, Func<T, Cancellati
        public async Task<bool> MoveNext(CancellationToken cancellationToken)
{
- bool hasNext = await this.inner.MoveNext(cancellationToken).ConfigureAwait(false);
+ bool hasNext = await this.inner.MoveNext(cancellationToken);
if (!hasNext)
{
return false;
}
- this.Current = await this.transform(this.inner.Current, cancellationToken).ConfigureAwait(false);
+ this.Current = await this.transform(this.inner.Current, cancellationToken);
return true;
}
}
}
-
-
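Note on the refactor above: C# does not allow ref parameters on async methods, so the synchronous MaybeExternalize(ref string?) helpers cannot simply be made awaitable. The patch instead passes an assignment delegate that writes the uploaded token back into the protobuf message after the await completes. A minimal standalone sketch of that pattern (the Message type and the upload delegate are illustrative stand-ins, not SDK types):

using System;
using System.Text;
using System.Threading;
using System.Threading.Tasks;

class Message
{
    public string? Input { get; set; }
}

static class ExternalizeSketch
{
    // The caller supplies an assignment callback; the helper awaits the upload
    // and then writes the opaque token back through that callback.
    public static async Task MaybeExternalizeAsync(
        Action<string> assign,
        string? value,
        int thresholdBytes,
        Func<byte[], CancellationToken, Task<string>> upload,
        CancellationToken ct)
    {
        if (string.IsNullOrEmpty(value) || Encoding.UTF8.GetByteCount(value) < thresholdBytes)
        {
            return; // small payloads stay inline
        }

        string token = await upload(Encoding.UTF8.GetBytes(value), ct);
        assign(token);
    }

    public static async Task DemoAsync()
    {
        var msg = new Message { Input = new string('x', 2048) };
        await MaybeExternalizeAsync(
            v => msg.Input = v,
            msg.Input,
            thresholdBytes: 1024,
            upload: (bytes, ct) => Task.FromResult($"blob:v1:test:{Guid.NewGuid():N}"),
            ct: CancellationToken.None);
    }
}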
diff --git a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
index 1c6adf31..c580e227 100644
--- a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
@@ -6,8 +6,9 @@
using System.Text;
using Azure.Storage.Blobs;
using Azure.Storage.Blobs.Models;
+using Microsoft.DurableTask.Converters;
-namespace Microsoft.DurableTask.Converters;
+namespace Microsoft.DurableTask;
///
/// Azure Blob Storage implementation of .
From 23ebed132b022f8bfeb9952d9575d3019f46f31b Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Tue, 16 Sep 2025 17:54:53 -0700
Subject: [PATCH 28/53] sample good
---
samples/LargePayloadConsoleApp/Program.cs | 1 +
.../Properties/launchSettings.json | 12 +++
samples/LargePayloadConsoleApp/run.ps1 | 81 ----------------
.../AzureBlobPayloadsInterceptor.cs | 26 +++++
.../Interceptors/LargePayloadDataConverter.cs | 96 -------------------
5 files changed, 39 insertions(+), 177 deletions(-)
create mode 100644 samples/LargePayloadConsoleApp/Properties/launchSettings.json
delete mode 100644 samples/LargePayloadConsoleApp/run.ps1
delete mode 100644 src/Extensions/AzureBlobPayloads/Interceptors/LargePayloadDataConverter.cs
diff --git a/samples/LargePayloadConsoleApp/Program.cs b/samples/LargePayloadConsoleApp/Program.cs
index 0cef4a60..8886e17a 100644
--- a/samples/LargePayloadConsoleApp/Program.cs
+++ b/samples/LargePayloadConsoleApp/Program.cs
@@ -7,6 +7,7 @@
using Microsoft.DurableTask.Entities;
using Microsoft.DurableTask.Worker;
using Microsoft.DurableTask.Worker.AzureManaged;
+using Microsoft.DurableTask;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
diff --git a/samples/LargePayloadConsoleApp/Properties/launchSettings.json b/samples/LargePayloadConsoleApp/Properties/launchSettings.json
new file mode 100644
index 00000000..89f6b592
--- /dev/null
+++ b/samples/LargePayloadConsoleApp/Properties/launchSettings.json
@@ -0,0 +1,12 @@
+{
+ "profiles": {
+ "LargePayloadConsoleApp": {
+ "commandName": "Project",
+ "environmentVariables": {
+ "DURABLE_TASK_SCHEDULER_CONNECTION_STRING": "",
+ "DURABLETASK_STORAGE": "",
+ "DURABLETASK_PAYLOAD_CONTAINER": ""
+ }
+ }
+ }
+}
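The profile above only declares the environment variables; the sample still has to read them at startup. A small sketch of that lookup, with the fallback values taken from the run.ps1 script deleted just below (how Program.cs actually consumes these settings is an assumption):

using System;

string scheduler = Environment.GetEnvironmentVariable("DURABLE_TASK_SCHEDULER_CONNECTION_STRING")
    ?? throw new InvalidOperationException("DURABLE_TASK_SCHEDULER_CONNECTION_STRING is required.");
string storage = Environment.GetEnvironmentVariable("DURABLETASK_STORAGE") ?? "UseDevelopmentStorage=true";
string container = Environment.GetEnvironmentVariable("DURABLETASK_PAYLOAD_CONTAINER") ?? "durabletask-payloads";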
diff --git a/samples/LargePayloadConsoleApp/run.ps1 b/samples/LargePayloadConsoleApp/run.ps1
deleted file mode 100644
index 466f07d7..00000000
--- a/samples/LargePayloadConsoleApp/run.ps1
+++ /dev/null
@@ -1,81 +0,0 @@
-Param(
- [Parameter(Mandatory = $true)]
- [string]$SchedulerConnectionString,
-
- [string]$StorageConnectionString = "UseDevelopmentStorage=true",
-
- [string]$PayloadContainer = "durabletask-payloads",
-
- [switch]$StartAzurite,
-
- [switch]$VerboseLogging
-)
-
-$ErrorActionPreference = "Stop"
-
-function Write-Info($msg) {
- Write-Host "[info] $msg"
-}
-
-function Start-AzuriteDocker {
- param(
- [string]$ContainerName = "durabletask-azurite"
- )
-
- if (-not (Get-Command docker -ErrorAction SilentlyContinue)) {
- Write-Info "Docker not found; skipping Azurite startup."
- return $false
- }
-
- try {
- $existing = (docker ps -a --filter "name=$ContainerName" --format "{{.ID}}")
- if ($existing) {
- Write-Info "Starting existing Azurite container '$ContainerName'..."
- docker start $ContainerName | Out-Null
- return $true
- }
-
- Write-Info "Launching Azurite in Docker as '$ContainerName' on ports 10000-10002..."
- docker run -d -p 10000:10000 -p 10001:10001 -p 10002:10002 --name $ContainerName mcr.microsoft.com/azure-storage/azurite | Out-Null
- Start-Sleep -Seconds 2
- return $true
- }
- catch {
- Write-Warning "Failed to start Azurite via Docker: $_"
- return $false
- }
-}
-
-try {
- # Set required/optional environment variables for the sample
- $env:DURABLE_TASK_SCHEDULER_CONNECTION_STRING = $SchedulerConnectionString
- $env:DURABLETASK_STORAGE = $StorageConnectionString
- $env:DURABLETASK_PAYLOAD_CONTAINER = $PayloadContainer
-
- Write-Info "DURABLE_TASK_SCHEDULER_CONNECTION_STRING is set."
- Write-Info "DURABLETASK_STORAGE = '$($env:DURABLETASK_STORAGE)'"
- Write-Info "DURABLETASK_PAYLOAD_CONTAINER = '$($env:DURABLETASK_PAYLOAD_CONTAINER)'"
-
- if ($StartAzurite) {
- $started = Start-AzuriteDocker
- if ($started) {
- Write-Info "Azurite is running (Docker)."
- }
- }
-
- $projectPath = Join-Path $PSScriptRoot "LargePayloadConsoleApp.csproj"
- if (-not (Test-Path $projectPath)) {
- throw "Project file not found at $projectPath"
- }
-
- Write-Info "Running sample..."
- $argsList = @("run", "--project", $projectPath)
- if ($VerboseLogging) { $argsList += @("-v", "detailed") }
-
- & dotnet @argsList
-}
-catch {
- Write-Error $_
- exit 1
-}
-
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
index 3e0b1898..4bf9e8ca 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
@@ -280,6 +280,32 @@ async Task ResolveResponsePayloadsAsync(TResponse response, Cancellat
}
}
+ // Resolve entity V1 batch request (OperationRequest inputs and entity state)
+ if (wi.EntityRequest is { } er1)
+ {
+ await this.MaybeResolveAsync(v => er1.EntityState = v, er1.EntityState, cancellation);
+ if (er1.Operations != null)
+ {
+ foreach (P.OperationRequest op in er1.Operations)
+ {
+ await this.MaybeResolveAsync(v => op.Input = v, op.Input, cancellation);
+ }
+ }
+ }
+
+ // Resolve entity V2 request (history-based operation requests and entity state)
+ if (wi.EntityRequestV2 is { } er2)
+ {
+ await this.MaybeResolveAsync(v => er2.EntityState = v, er2.EntityState, cancellation);
+ if (er2.OperationRequests != null)
+ {
+ foreach (P.HistoryEvent opEvt in er2.OperationRequests)
+ {
+ await this.ResolveEventPayloadsAsync(opEvt, cancellation);
+ }
+ }
+ }
+
break;
}
}
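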
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/LargePayloadDataConverter.cs b/src/Extensions/AzureBlobPayloads/Interceptors/LargePayloadDataConverter.cs
deleted file mode 100644
index fcef82d2..00000000
--- a/src/Extensions/AzureBlobPayloads/Interceptors/LargePayloadDataConverter.cs
+++ /dev/null
@@ -1,96 +0,0 @@
-// // Copyright (c) Microsoft Corporation.
-// // Licensed under the MIT License.
-
-// using System.Text;
-
-// namespace Microsoft.DurableTask.Converters;
-
-// ///
-// /// A DataConverter that wraps another DataConverter and externalizes payloads larger than a configured threshold.
-// /// It uploads large payloads to an and returns a reference token string.
-// /// On deserialization, it resolves tokens and feeds the underlying converter the original content.
-// ///
-// ///
-// /// Initializes a new instance of the class.
-// ///
-// /// The inner data converter to wrap.
-// /// The external payload store to use.
-// /// The options for the externalizing data converter.
-// /// Thrown when , , or is null.
-// public sealed class LargePayloadDataConverter(
-// DataConverter innerConverter,
-// IPayloadStore payloadStore,
-// LargePayloadStorageOptions largePayloadStorageOptions
-// ) : DataConverter
-// {
-
-// readonly DataConverter innerConverter = innerConverter ?? throw new ArgumentNullException(nameof(innerConverter));
-// readonly IPayloadStore payLoadStore = payloadStore ?? throw new ArgumentNullException(nameof(payloadStore));
-// readonly LargePayloadStorageOptions largePayloadStorageOptions = largePayloadStorageOptions ?? throw new ArgumentNullException(nameof(largePayloadStorageOptions));
-// // Use UTF-8 without a BOM (encoderShouldEmitUTF8Identifier=false). JSON in UTF-8 should not include a
-// // byte order mark per RFC 8259, and omitting it avoids hidden extra bytes that could skew the
-// // externalization threshold calculation and prevents interop issues with strict JSON parsers.
-// // A few legacy tools rely on a BOM for encoding detection, but modern JSON tooling assumes BOM-less UTF-8.
-// readonly Encoding utf8 = new UTF8Encoding(false);
-
-// ///
-// /// Serializes the value to a JSON string and uploads it to the external payload store if it exceeds the configured threshold.
-// ///
-// /// The value to serialize.
-// /// The serialized value or the token if externalized.
-// public override string? Serialize(object? value)
-// {
-// string? json = this.innerConverter.Serialize(value);
-
-// if (string.IsNullOrEmpty(json))
-// {
-// return null;
-// }
-
-// int byteCount = this.utf8.GetByteCount(json);
-// if (byteCount < this.largePayloadStorageOptions.ExternalizeThresholdBytes)
-// {
-// return json;
-// }
-
-// // Upload synchronously in this context by blocking on async. SDK call sites already run on threadpool.
-// byte[] bytes = this.utf8.GetBytes(json);
-// string token = this.payLoadStore.UploadAsync(bytes, CancellationToken.None).GetAwaiter().GetResult();
-// return token;
-// }
-
-// ///
-// /// Deserializes the JSON string or resolves the token to the original value.
-// ///
-// /// The JSON string or token.
-// /// The type to deserialize to.
-// /// The deserialized value.
-// public override object? Deserialize(string? data, Type targetType)
-// {
-// if (data is null)
-// {
-// return null;
-// }
-
-// string toDeserialize = data;
-// if (this.payLoadStore.IsKnownPayloadToken(data))
-// {
-// toDeserialize = this.payLoadStore.DownloadAsync(data, CancellationToken.None).GetAwaiter().GetResult();
-// }
-
-// return this.innerConverter.Deserialize(StripArrayCharacters(toDeserialize), targetType);
-// }
-
-// static string? StripArrayCharacters(string? input)
-// {
-// if (input != null && input.StartsWith('[') && input.EndsWith(']'))
-// {
-// // Strip the outer bracket characters
-// return input[1..^1];
-// }
-
-// return input;
-// }
-// }
-
-
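One detail worth keeping from the deleted converter is its comment about BOM-less UTF-8: the externalization threshold is computed from GetByteCount on the serialized JSON, and a UTF-8 BOM would be three extra bytes that never show up in that count but would be written by a BOM-emitting encoder. A tiny illustration using only the BCL:

using System;
using System.Text;

var noBom = new UTF8Encoding(encoderShouldEmitUTF8Identifier: false);
var withBom = new UTF8Encoding(encoderShouldEmitUTF8Identifier: true);

string json = "{\"value\":\"hello\"}";
int bodyBytes = noBom.GetByteCount(json);          // what the threshold comparison sees
int preambleBytes = withBom.GetPreamble().Length;  // 3-byte BOM that GetByteCount never includes
Console.WriteLine($"{bodyBytes} payload bytes; a BOM-emitting writer would prepend {preambleBytes} more");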
From 66e80d0dfba1dbc8e34ea5eed22ab39d626f01ec Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Tue, 16 Sep 2025 18:20:08 -0700
Subject: [PATCH 29/53] add blob retry
---
.../PayloadStore/BlobPayloadStore.cs | 157 +++++++++++++-----
.../PayloadStore/IPayloadStore.cs | 34 ++++
2 files changed, 153 insertions(+), 38 deletions(-)
create mode 100644 src/Extensions/AzureBlobPayloads/PayloadStore/IPayloadStore.cs
diff --git a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
index c580e227..9b8d39a8 100644
--- a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
@@ -4,6 +4,8 @@
using System.Globalization;
using System.IO.Compression;
using System.Text;
+using Azure;
+using Azure.Core;
using Azure.Storage.Blobs;
using Azure.Storage.Blobs.Models;
using Microsoft.DurableTask.Converters;
@@ -17,6 +19,17 @@ namespace Microsoft.DurableTask;
public sealed class BlobPayloadStore : IPayloadStore
{
const string TokenPrefix = "blob:v1:";
+ const string ContentEncodingGzip = "gzip";
+ const int DefaultCopyBufferSize = 81920;
+ const int MaxRetryAttempts = 8;
+ const int BaseDelayMs = 250;
+ const int MaxDelayMs = 10_000;
+ const int MaxJitterMs = 100;
+ const int NetworkTimeoutMinutes = 2;
+
+ // Jitter RNG for retry backoff
+ static readonly object RandomLock = new object();
+ static readonly Random SharedRandom = new Random();
readonly BlobContainerClient containerClient;
readonly LargePayloadStorageOptions options;
@@ -33,47 +46,64 @@ public BlobPayloadStore(LargePayloadStorageOptions options)
Check.NotNullOrEmpty(options.ConnectionString, nameof(options.ConnectionString));
Check.NotNullOrEmpty(options.ContainerName, nameof(options.ContainerName));
- BlobServiceClient serviceClient = new(options.ConnectionString);
+ BlobClientOptions clientOptions = new()
+ {
+ Retry =
+ {
+ Mode = RetryMode.Exponential,
+ MaxRetries = MaxRetryAttempts,
+ Delay = TimeSpan.FromMilliseconds(BaseDelayMs),
+                MaxDelay = TimeSpan.FromMilliseconds(MaxDelayMs),
+ NetworkTimeout = TimeSpan.FromMinutes(NetworkTimeoutMinutes),
+ },
+ };
+ BlobServiceClient serviceClient = new(options.ConnectionString, clientOptions);
this.containerClient = serviceClient.GetBlobContainerClient(options.ContainerName);
}
///
public async Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
{
- // Ensure container exists
- await this.containerClient.CreateIfNotExistsAsync(PublicAccessType.None, default, default, cancellationToken);
-
- // One blob per payload using GUID-based name for uniqueness
+ // One blob per payload using GUID-based name for uniqueness (stable across retries)
string timestamp = DateTimeOffset.UtcNow.ToString("yyyy/MM/dd/HH/mm/ss", CultureInfo.InvariantCulture);
string blobName = $"{timestamp}/{Guid.NewGuid():N}";
BlobClient blob = this.containerClient.GetBlobClient(blobName);
byte[] payloadBuffer = payloadBytes.ToArray();
- // Upload streaming, optionally compressing and marking ContentEncoding
- if (this.options.CompressPayloads)
+ string token = await WithTransientRetryAsync(
+ async ct =>
{
- BlobOpenWriteOptions writeOptions = new()
+ // Ensure container exists (idempotent)
+ await this.containerClient.CreateIfNotExistsAsync(PublicAccessType.None, default, default, ct);
+
+ if (this.options.CompressPayloads)
{
- HttpHeaders = new BlobHttpHeaders { ContentEncoding = "gzip" },
- };
- using Stream blobStream = await blob.OpenWriteAsync(true, writeOptions, cancellationToken);
- using GZipStream compressedBlobStream = new(blobStream, CompressionLevel.Optimal, leaveOpen: true);
- using MemoryStream payloadStream = new(payloadBuffer, writable: false);
-
- await payloadStream.CopyToAsync(compressedBlobStream, bufferSize: 81920, cancellationToken);
- await compressedBlobStream.FlushAsync(cancellationToken);
- await blobStream.FlushAsync(cancellationToken);
- }
- else
- {
- using Stream blobStream = await blob.OpenWriteAsync(true, default, cancellationToken);
- using MemoryStream payloadStream = new(payloadBuffer, writable: false);
- await payloadStream.CopyToAsync(blobStream, bufferSize: 81920, cancellationToken);
- await blobStream.FlushAsync(cancellationToken);
- }
+ BlobOpenWriteOptions writeOptions = new()
+ {
+ HttpHeaders = new BlobHttpHeaders { ContentEncoding = ContentEncodingGzip },
+ };
+ using Stream blobStream = await blob.OpenWriteAsync(true, writeOptions, ct);
+ using GZipStream compressedBlobStream = new(blobStream, CompressionLevel.Optimal, leaveOpen: true);
+ using MemoryStream payloadStream = new(payloadBuffer, writable: false);
+
+ await payloadStream.CopyToAsync(compressedBlobStream, bufferSize: DefaultCopyBufferSize, ct);
+ await compressedBlobStream.FlushAsync(ct);
+ await blobStream.FlushAsync(ct);
+ }
+ else
+ {
+ using Stream blobStream = await blob.OpenWriteAsync(true, default, ct);
+ using MemoryStream payloadStream = new(payloadBuffer, writable: false);
+ await payloadStream.CopyToAsync(blobStream, bufferSize: DefaultCopyBufferSize, ct);
+ await blobStream.FlushAsync(ct);
+ }
- return EncodeToken(this.containerClient.Name, blobName);
+ return EncodeToken(this.containerClient.Name, blobName);
+ },
+ cancellationToken);
+
+ return token;
}
///
@@ -86,20 +116,26 @@ public async Task DownloadAsync(string token, CancellationToken cancella
}
BlobClient blob = this.containerClient.GetBlobClient(name);
- using BlobDownloadStreamingResult result = await blob.DownloadStreamingAsync(cancellationToken: cancellationToken);
- Stream contentStream = result.Content;
- bool isGzip = string.Equals(
- result.Details.ContentEncoding, "gzip", StringComparison.OrdinalIgnoreCase);
- if (isGzip)
+ return await WithTransientRetryAsync(
+ async ct =>
{
- using GZipStream decompressed = new(contentStream, CompressionMode.Decompress);
- using StreamReader reader = new(decompressed, Encoding.UTF8);
- return await reader.ReadToEndAsync();
- }
+ using BlobDownloadStreamingResult result = await blob.DownloadStreamingAsync(cancellationToken: ct);
+ Stream contentStream = result.Content;
+ bool isGzip = string.Equals(
+ result.Details.ContentEncoding, ContentEncodingGzip, StringComparison.OrdinalIgnoreCase);
- using StreamReader uncompressedReader = new(contentStream, Encoding.UTF8);
- return await uncompressedReader.ReadToEndAsync();
+ if (isGzip)
+ {
+ using GZipStream decompressed = new(contentStream, CompressionMode.Decompress);
+ using StreamReader reader = new(decompressed, Encoding.UTF8);
+ return await reader.ReadToEndAsync();
+ }
+
+ using StreamReader uncompressedReader = new(contentStream, Encoding.UTF8);
+ return await uncompressedReader.ReadToEndAsync();
+ },
+ cancellationToken);
}
///
@@ -131,4 +167,49 @@ public bool IsKnownPayloadToken(string value)
return (rest.Substring(0, sep), rest.Substring(sep + 1));
}
-}
+
+    static async Task<T> WithTransientRetryAsync<T>(Func<CancellationToken, Task<T>> operation, CancellationToken cancellationToken)
+ {
+ const int maxAttempts = MaxRetryAttempts;
+ TimeSpan baseDelay = TimeSpan.FromMilliseconds(BaseDelayMs);
+ int attempt = 0;
+
+ while (true)
+ {
+ cancellationToken.ThrowIfCancellationRequested();
+ try
+ {
+ return await operation(cancellationToken);
+ }
+ catch (RequestFailedException ex) when (IsTransient(ex) && attempt < maxAttempts - 1)
+ {
+ attempt++;
+ TimeSpan delay = ComputeBackoff(baseDelay, attempt);
+ await Task.Delay(delay, cancellationToken);
+ }
+ catch (IOException) when (attempt < maxAttempts - 1)
+ {
+ attempt++;
+ TimeSpan delay = ComputeBackoff(baseDelay, attempt);
+ await Task.Delay(delay, cancellationToken);
+ }
+ }
+ }
+
+ static bool IsTransient(RequestFailedException ex)
+ {
+ return ex.Status == 503 || ex.Status == 502 || ex.Status == 500 || ex.Status == 429;
+ }
+
+ static TimeSpan ComputeBackoff(TimeSpan baseDelay, int attempt)
+ {
+ double factor = Math.Pow(2, Math.Min(attempt, 6));
+ int jitterMs;
+ lock (RandomLock)
+ {
+ jitterMs = SharedRandom.Next(0, MaxJitterMs);
+ }
+
+ return TimeSpan.FromMilliseconds(Math.Min((baseDelay.TotalMilliseconds * factor) + jitterMs, MaxDelayMs));
+ }
+}
\ No newline at end of file
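With BaseDelayMs = 250, MaxDelayMs = 10,000 and the exponent capped at 6, ComputeBackoff yields roughly 0.5 s, 1 s, 2 s, 4 s, 8 s and then saturates at 10 s, plus up to 100 ms of jitter. A standalone loop that reproduces the same schedule for a quick sanity check, reusing the constants above:

using System;

const int BaseDelayMs = 250;
const int MaxDelayMs = 10_000;
const int MaxJitterMs = 100;
var random = new Random();

for (int attempt = 1; attempt <= 8; attempt++)
{
    double factor = Math.Pow(2, Math.Min(attempt, 6));
    int jitterMs = random.Next(0, MaxJitterMs);
    double delayMs = Math.Min((BaseDelayMs * factor) + jitterMs, MaxDelayMs);
    Console.WriteLine($"attempt {attempt}: ~{delayMs / 1000.0:0.##} s");
}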
diff --git a/src/Extensions/AzureBlobPayloads/PayloadStore/IPayloadStore.cs b/src/Extensions/AzureBlobPayloads/PayloadStore/IPayloadStore.cs
new file mode 100644
index 00000000..c226bff0
--- /dev/null
+++ b/src/Extensions/AzureBlobPayloads/PayloadStore/IPayloadStore.cs
@@ -0,0 +1,34 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+namespace Microsoft.DurableTask;
+
+///
+/// Abstraction for storing and retrieving large payloads out-of-band.
+///
+public interface IPayloadStore
+{
+ ///
+ /// Uploads a payload and returns an opaque reference token that can be embedded in orchestration messages.
+ ///
+ /// The payload bytes.
+ /// Cancellation token.
+ /// Opaque reference token.
+    Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken);
+
+ ///
+ /// Downloads the payload referenced by the token.
+ ///
+ /// The opaque reference token.
+ /// Cancellation token.
+ /// Payload string.
+    Task<string> DownloadAsync(string token, CancellationToken cancellationToken);
+
+ ///
+ /// Returns true if the specified value appears to be a token understood by this store.
+ /// Implementations should not throw for unknown tokens.
+ ///
+ /// The value to check.
+ /// true if the value is a token issued by this store; otherwise, false.
+ bool IsKnownPayloadToken(string value);
+}
\ No newline at end of file
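Since IPayloadStore is resolved from dependency injection (the tests later in this series register a fake via Services.AddSingleton), any out-of-band store can be substituted for the blob-backed one. A minimal in-memory sketch in the spirit of the test double that appears below; the token prefix and class name are illustrative:

using System;
using System.Collections.Concurrent;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.DurableTask;

sealed class DictionaryPayloadStore : IPayloadStore
{
    const string TokenPrefix = "blob:v1:test:";
    readonly ConcurrentDictionary<string, string> payloads = new();

    public Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
    {
        string token = TokenPrefix + Guid.NewGuid().ToString("N");
        this.payloads[token] = Encoding.UTF8.GetString(payloadBytes.Span);
        return Task.FromResult(token);
    }

    public Task<string> DownloadAsync(string token, CancellationToken cancellationToken)
        => Task.FromResult(this.payloads[token]);

    public bool IsKnownPayloadToken(string value)
        => value.StartsWith(TokenPrefix, StringComparison.Ordinal);
}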
From aa02fa69ff1cdd6c0a23d078b712f19bc8737ad0 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Tue, 16 Sep 2025 19:21:55 -0700
Subject: [PATCH 30/53] save
---
.../LargePayloadTests.cs | 49 ++++++++++++++-----
1 file changed, 36 insertions(+), 13 deletions(-)
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
index a8a09209..bc7daa2a 100644
--- a/test/Grpc.IntegrationTests/LargePayloadTests.cs
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -1,10 +1,9 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
+using System.Text.Json;
using Microsoft.DurableTask.Client;
-using Microsoft.DurableTask.Client.Entities;
using Microsoft.DurableTask.Converters;
-using Microsoft.DurableTask.Entities;
using Microsoft.DurableTask.Worker;
using Microsoft.Extensions.DependencyInjection;
using Xunit.Abstractions;
@@ -15,10 +14,10 @@ public class LargePayloadTests(ITestOutputHelper output, GrpcSidecarFixture side
{
// Validates client externalizes a large orchestration input and worker resolves it.
[Fact]
- public async Task LargeOrchestrationInput()
+ public async Task LargeOrchestrationInputAndOutputAndCustomStatus()
{
string largeInput = new string('A', 1024 * 1024); // 1MB
- TaskName orchestratorName = nameof(LargeOrchestrationInput);
+ TaskName orchestratorName = nameof(LargeOrchestrationInputAndOutputAndCustomStatus);
InMemoryPayloadStore fakeStore = new InMemoryPayloadStore();
@@ -27,7 +26,11 @@ public async Task LargeOrchestrationInput()
{
worker.AddTasks(tasks => tasks.AddOrchestratorFunc(
orchestratorName,
- (ctx, input) => Task.FromResult(input)));
+ (ctx, input) =>
+ {
+ ctx.SetCustomStatus(largeInput);
+ return Task.FromResult(input + input);
+ }));
// Enable externalization on the worker
worker.UseExternalizedPayloads(opts =>
@@ -62,20 +65,35 @@ public async Task LargeOrchestrationInput()
Assert.Equal(OrchestrationRuntimeStatus.Completed, completed.RuntimeStatus);
// Validate that the input made a roundtrip and was resolved on the worker
+ // validate input
+        string? input = completed.ReadInputAs<string>();
+ Assert.NotNull(input);
+ Assert.Equal(largeInput.Length, input!.Length);
+ Assert.Equal(largeInput, input);
+
string? echoed = completed.ReadOutputAs<string>();
Assert.NotNull(echoed);
- Assert.Equal(largeInput.Length, echoed!.Length);
+ Assert.Equal(largeInput.Length * 2, echoed!.Length);
+ Assert.Equal(largeInput + largeInput, echoed);
+
+        string? customStatus = completed.ReadCustomStatusAs<string>();
+ Assert.NotNull(customStatus);
+ Assert.Equal(largeInput.Length, customStatus!.Length);
+ Assert.Equal(largeInput, customStatus);
// Ensure client externalized the input
Assert.True(fakeStore.UploadCount >= 1);
+ Assert.True(fakeStore.DownloadCount >= 1);
+ Assert.Contains(JsonSerializer.Serialize(largeInput), fakeStore.uploadedPayloads);
+ Assert.Contains(JsonSerializer.Serialize(largeInput + largeInput), fakeStore.uploadedPayloads);
}
// Validates worker externalizes large activity input and delivers resolved payload to activity.
[Fact]
- public async Task LargeActivityInput()
+ public async Task LargeActivityInputAndOutput()
{
string largeParam = new string('P', 700 * 1024); // 700KB
- TaskName orchestratorName = nameof(LargeActivityInput);
+ TaskName orchestratorName = nameof(LargeActivityInputAndOutput);
TaskName activityName = "EchoLength";
InMemoryPayloadStore workerStore = new InMemoryPayloadStore();
@@ -84,10 +102,10 @@ public async Task LargeActivityInput()
worker =>
{
worker.AddTasks(tasks => tasks
- .AddOrchestratorFunc(
+ .AddOrchestratorFunc(
orchestratorName,
- (ctx, _) => ctx.CallActivityAsync(activityName, largeParam))
- .AddActivityFunc(activityName, (ctx, input) => input.Length));
+ (ctx, _) => ctx.CallActivityAsync(activityName, largeParam))
+ .AddActivityFunc(activityName, (ctx, input) => input + input));
worker.UseExternalizedPayloads(opts =>
{
@@ -104,11 +122,14 @@ public async Task LargeActivityInput()
instanceId, getInputsAndOutputs: true, this.TimeoutToken);
Assert.Equal(OrchestrationRuntimeStatus.Completed, completed.RuntimeStatus);
- Assert.Equal(largeParam.Length, completed.ReadOutputAs());
- // Worker externalizes when sending activity input; worker resolves when delivering to activity
+ // validate upload and download count
Assert.True(workerStore.UploadCount >= 1);
Assert.True(workerStore.DownloadCount >= 1);
+
+ // validate hashset contains the input and output
+ Assert.Contains(JsonSerializer.Serialize(largeParam), workerStore.uploadedPayloads);
+ Assert.Contains(JsonSerializer.Serialize(largeParam + largeParam), workerStore.uploadedPayloads);
}
// Validates worker externalizes large activity output which is resolved by the orchestrator.
@@ -362,6 +383,7 @@ class InMemoryPayloadStore : IPayloadStore
{
const string TokenPrefix = "blob:v1:";
readonly Dictionary<string, string> tokenToPayload;
+    public readonly HashSet<string> uploadedPayloads = new();
public InMemoryPayloadStore()
: this(new Dictionary<string, string>())
@@ -384,6 +406,7 @@ public Task UploadAsync(ReadOnlyMemory payloadBytes, CancellationT
string json = System.Text.Encoding.UTF8.GetString(payloadBytes.Span);
string token = $"blob:v1:test:{Guid.NewGuid():N}";
this.tokenToPayload[token] = json;
+ this.uploadedPayloads.Add(json);
return Task.FromResult(token);
}
From a64283f180998f09c9a75104b569452838de7d02 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Tue, 16 Sep 2025 19:33:48 -0700
Subject: [PATCH 31/53] save
---
.../LargePayloadTests.cs | 48 ++-----------------
1 file changed, 5 insertions(+), 43 deletions(-)
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
index bc7daa2a..f609f8e4 100644
--- a/test/Grpc.IntegrationTests/LargePayloadTests.cs
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -127,51 +127,13 @@ public async Task LargeActivityInputAndOutput()
Assert.True(workerStore.UploadCount >= 1);
Assert.True(workerStore.DownloadCount >= 1);
- // validate hashset contains the input and output
- Assert.Contains(JsonSerializer.Serialize(largeParam), workerStore.uploadedPayloads);
- Assert.Contains(JsonSerializer.Serialize(largeParam + largeParam), workerStore.uploadedPayloads);
+ // validate uploaded payloads include the activity input and output forms
+ string expectedActivityInputJson = JsonSerializer.Serialize(new[] { largeParam });
+ string expectedActivityOutputJson = JsonSerializer.Serialize(largeParam + largeParam);
+ Assert.Contains(expectedActivityInputJson, workerStore.uploadedPayloads);
+ Assert.Contains(expectedActivityOutputJson, workerStore.uploadedPayloads);
}
- // Validates worker externalizes large activity output which is resolved by the orchestrator.
- [Fact]
- public async Task LargeActivityOutput()
- {
- string largeResult = new string('R', 850 * 1024); // 850KB
- TaskName orchestratorName = nameof(LargeActivityOutput);
- TaskName activityName = "ProduceLarge";
-
- InMemoryPayloadStore workerStore = new InMemoryPayloadStore();
-
- await using HostTestLifetime server = await this.StartWorkerAsync(
- worker =>
- {
- worker.AddTasks(tasks => tasks
- .AddOrchestratorFunc(
- orchestratorName,
- async (ctx, _) => (await ctx.CallActivityAsync(activityName)).Length)
- .AddActivityFunc(activityName, (ctx) => Task.FromResult(largeResult)));
-
- worker.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024; // force externalization for activity result
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
- worker.Services.AddSingleton(workerStore);
- },
- client => { });
-
- string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orchestratorName);
- OrchestrationMetadata completed = await server.Client.WaitForInstanceCompletionAsync(
- instanceId, getInputsAndOutputs: true, this.TimeoutToken);
-
- Assert.Equal(OrchestrationRuntimeStatus.Completed, completed.RuntimeStatus);
- Assert.Equal(largeResult.Length, completed.ReadOutputAs());
-
- // Worker externalizes activity output and downloads when the orchestrator reads it
- Assert.True(workerStore.UploadCount >= 1);
- Assert.True(workerStore.DownloadCount >= 1);
- }
// Ensures querying a completed instance downloads and resolves an externalized output on the client.
[Fact]
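The reworked assertion reflects that the worker apparently ships the activity invocation's input as a JSON argument array, so the string the store sees is the array form rather than the bare serialized string, while the activity's returned output is stored in the bare form. A quick illustration of the two shapes the test distinguishes:

using System;
using System.Text.Json;

string largeParam = new string('P', 8);
Console.WriteLine(JsonSerializer.Serialize(largeParam));            // "PPPPPPPP"
Console.WriteLine(JsonSerializer.Serialize(new[] { largeParam }));  // ["PPPPPPPP"]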
From dd75b8d11e7aa657eb4ff7ce359b01679926e69a Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Tue, 16 Sep 2025 19:50:27 -0700
Subject: [PATCH 32/53] save
---
.../LargePayloadTests.cs | 268 +++++++++++-------
1 file changed, 171 insertions(+), 97 deletions(-)
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
index f609f8e4..0297c24c 100644
--- a/test/Grpc.IntegrationTests/LargePayloadTests.cs
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -88,75 +88,149 @@ public async Task LargeOrchestrationInputAndOutputAndCustomStatus()
Assert.Contains(JsonSerializer.Serialize(largeInput + largeInput), fakeStore.uploadedPayloads);
}
- // Validates worker externalizes large activity input and delivers resolved payload to activity.
+ // Validates large custom status and ContinueAsNew input are externalized and resolved across iterations.
[Fact]
- public async Task LargeActivityInputAndOutput()
+ public async Task LargeContinueAsNewAndCustomStatus()
{
- string largeParam = new string('P', 700 * 1024); // 700KB
- TaskName orchestratorName = nameof(LargeActivityInputAndOutput);
- TaskName activityName = "EchoLength";
+ string largeStatus = new string('S', 700 * 1024);
+ string largeNextInput = new string('N', 800 * 1024);
+ string largeFinalOutput = new string('F', 750 * 1024);
+ TaskName orch = nameof(LargeContinueAsNewAndCustomStatus);
- InMemoryPayloadStore workerStore = new InMemoryPayloadStore();
+        var shared = new Dictionary<string, string>();
+ InMemoryPayloadStore workerStore = new InMemoryPayloadStore(shared);
await using HostTestLifetime server = await this.StartWorkerAsync(
worker =>
{
- worker.AddTasks(tasks => tasks
- .AddOrchestratorFunc(
- orchestratorName,
- (ctx, _) => ctx.CallActivityAsync(activityName, largeParam))
- .AddActivityFunc(activityName, (ctx, input) => input + input));
+ worker.AddTasks(tasks => tasks.AddOrchestratorFunc(
+ orch,
+ async (ctx, input) =>
+ {
+ if (input == null)
+ {
+ ctx.SetCustomStatus(largeStatus);
+ ctx.ContinueAsNew(largeNextInput);
+ // unreachable
+ return "";
+ }
+ else
+ {
+ // second iteration returns final
+ return largeFinalOutput;
+ }
+ }));
worker.UseExternalizedPayloads(opts =>
{
- opts.ExternalizeThresholdBytes = 1024; // force externalization for activity input
+ opts.ExternalizeThresholdBytes = 1024;
opts.ContainerName = "test";
opts.ConnectionString = "UseDevelopmentStorage=true";
});
worker.Services.AddSingleton(workerStore);
},
- client => { /* client not needed for externalization path here */ });
+ client =>
+ {
+ client.UseExternalizedPayloads(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
+ client.Services.AddSingleton(workerStore);
+ });
- string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orchestratorName);
+ string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orch);
OrchestrationMetadata completed = await server.Client.WaitForInstanceCompletionAsync(
instanceId, getInputsAndOutputs: true, this.TimeoutToken);
Assert.Equal(OrchestrationRuntimeStatus.Completed, completed.RuntimeStatus);
+        Assert.Equal(largeFinalOutput, completed.ReadOutputAs<string>());
+ Assert.Contains(JsonSerializer.Serialize(largeStatus), workerStore.uploadedPayloads);
+ Assert.Contains(JsonSerializer.Serialize(largeNextInput), workerStore.uploadedPayloads);
+ Assert.Contains(JsonSerializer.Serialize(largeFinalOutput), workerStore.uploadedPayloads);
+ }
- // validate upload and download count
+ // Validates large sub-orchestration input and an activity large output in one flow.
+ [Fact]
+ public async Task LargeSubOrchestrationAndActivityOutput()
+ {
+ string largeChildInput = new string('C', 650 * 1024);
+ string largeActivityOutput = new string('A', 820 * 1024);
+ TaskName parent = nameof(LargeSubOrchestrationAndActivityOutput) + "_Parent";
+ TaskName child = nameof(LargeSubOrchestrationAndActivityOutput) + "_Child";
+ TaskName activity = "ProduceBig";
+
+        var shared = new Dictionary<string, string>();
+ InMemoryPayloadStore workerStore = new InMemoryPayloadStore(shared);
+
+ await using HostTestLifetime server = await this.StartWorkerAsync(
+ worker =>
+ {
+ worker.AddTasks(tasks => tasks
+ .AddOrchestratorFunc(
+ parent,
+ async (ctx, _) =>
+ {
+                            string echoed = await ctx.CallSubOrchestratorAsync<string>(child, largeChildInput);
+                            string act = await ctx.CallActivityAsync<string>(activity);
+ return echoed.Length + act.Length;
+ })
+ .AddOrchestratorFunc(child, (ctx, input) => Task.FromResult(input))
+ .AddActivityFunc(activity, (ctx) => Task.FromResult(largeActivityOutput)));
+
+ worker.UseExternalizedPayloads(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
+ worker.Services.AddSingleton(workerStore);
+ },
+ client =>
+ {
+ client.UseExternalizedPayloads(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
+ client.Services.AddSingleton(workerStore);
+ });
+
+ string id = await server.Client.ScheduleNewOrchestrationInstanceAsync(parent);
+ OrchestrationMetadata done = await server.Client.WaitForInstanceCompletionAsync(
+ id, getInputsAndOutputs: true, this.TimeoutToken);
+
+ Assert.Equal(OrchestrationRuntimeStatus.Completed, done.RuntimeStatus);
+        Assert.Equal(largeChildInput.Length + largeActivityOutput.Length, done.ReadOutputAs<int>());
Assert.True(workerStore.UploadCount >= 1);
Assert.True(workerStore.DownloadCount >= 1);
-
- // validate uploaded payloads include the activity input and output forms
- string expectedActivityInputJson = JsonSerializer.Serialize(new[] { largeParam });
- string expectedActivityOutputJson = JsonSerializer.Serialize(largeParam + largeParam);
- Assert.Contains(expectedActivityInputJson, workerStore.uploadedPayloads);
- Assert.Contains(expectedActivityOutputJson, workerStore.uploadedPayloads);
+ Assert.Contains(JsonSerializer.Serialize(largeChildInput), workerStore.uploadedPayloads);
+ Assert.Contains(JsonSerializer.Serialize(largeActivityOutput), workerStore.uploadedPayloads);
}
-
- // Ensures querying a completed instance downloads and resolves an externalized output on the client.
+ // Validates query with fetch I/O resolves large outputs for completed instances.
[Fact]
- public async Task LargeOrchestrationOutput()
+ public async Task LargeQueryFetchInputsAndOutputs()
{
- string largeOutput = new string('Q', 900 * 1024); // 900KB
- string smallInput = "input";
- TaskName orchestratorName = nameof(LargeOrchestrationOutput);
+ string largeIn = new string('I', 750 * 1024);
+ string largeOut = new string('Q', 880 * 1024);
+ TaskName orch = nameof(LargeQueryFetchInputsAndOutputs);
- Dictionary shared = new System.Collections.Generic.Dictionary();
+        var shared = new Dictionary<string, string>();
InMemoryPayloadStore workerStore = new InMemoryPayloadStore(shared);
- InMemoryPayloadStore clientStore = new InMemoryPayloadStore(shared);
await using HostTestLifetime server = await this.StartWorkerAsync(
worker =>
{
worker.AddTasks(tasks => tasks.AddOrchestratorFunc(
- orchestratorName,
- (ctx, _) => Task.FromResult(largeOutput)));
+ orch,
+ (ctx, input) => Task.FromResult(largeOut)));
worker.UseExternalizedPayloads(opts =>
{
- opts.ExternalizeThresholdBytes = 1024; // force externalization on worker
+ opts.ExternalizeThresholdBytes = 1024;
opts.ContainerName = "test";
opts.ConnectionString = "UseDevelopmentStorage=true";
});
@@ -166,28 +240,81 @@ public async Task LargeOrchestrationOutput()
{
client.UseExternalizedPayloads(opts =>
{
- opts.ExternalizeThresholdBytes = 1024; // allow client to resolve on query
+ opts.ExternalizeThresholdBytes = 1024;
opts.ContainerName = "test";
opts.ConnectionString = "UseDevelopmentStorage=true";
});
- client.Services.AddSingleton(clientStore);
+ client.Services.AddSingleton(workerStore);
});
- string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orchestratorName, input: smallInput);
- await server.Client.WaitForInstanceCompletionAsync(instanceId, getInputsAndOutputs: false, this.TimeoutToken);
+ string id = await server.Client.ScheduleNewOrchestrationInstanceAsync(orch);
+ await server.Client.WaitForInstanceCompletionAsync(id, getInputsAndOutputs: false, this.TimeoutToken);
- OrchestrationMetadata? queried = await server.Client.GetInstanceAsync(instanceId, getInputsAndOutputs: true);
+ var page = server.Client.GetAllInstancesAsync(new OrchestrationQuery { FetchInputsAndOutputs = true });
+ OrchestrationMetadata? found = null;
+ await foreach (var item in page)
+ {
+ if (item.Name == orch.Name)
+ {
+ found = item;
+ break;
+ }
+ }
- Assert.NotNull(queried);
- Assert.Equal(OrchestrationRuntimeStatus.Completed, queried!.RuntimeStatus);
- Assert.Equal(smallInput, queried.ReadInputAs());
- Assert.Equal(largeOutput, queried.ReadOutputAs());
+ Assert.NotNull(found);
+        Assert.Equal(largeOut, found!.ReadOutputAs<string>());
+ Assert.True(workerStore.DownloadCount >= 1);
+ Assert.True(workerStore.UploadCount >= 1);
+ Assert.Contains(JsonSerializer.Serialize(largeIn), workerStore.uploadedPayloads);
+ Assert.Contains(JsonSerializer.Serialize(largeOut), workerStore.uploadedPayloads);
+ }
+ // Validates worker externalizes large activity input and delivers resolved payload to activity.
+ [Fact]
+ public async Task LargeActivityInputAndOutput()
+ {
+ string largeParam = new string('P', 700 * 1024); // 700KB
+ TaskName orchestratorName = nameof(LargeActivityInputAndOutput);
+ TaskName activityName = "EchoLength";
+
+ InMemoryPayloadStore workerStore = new InMemoryPayloadStore();
+
+ await using HostTestLifetime server = await this.StartWorkerAsync(
+ worker =>
+ {
+ worker.AddTasks(tasks => tasks
+ .AddOrchestratorFunc(
+ orchestratorName,
+ (ctx, _) => ctx.CallActivityAsync(activityName, largeParam))
+ .AddActivityFunc(activityName, (ctx, input) => input + input));
- Assert.True(workerStore.UploadCount == 0);
- Assert.True(clientStore.DownloadCount == 1);
- Assert.True(clientStore.UploadCount == 1);
+ worker.UseExternalizedPayloads(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024; // force externalization for activity input
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
+ worker.Services.AddSingleton(workerStore);
+ },
+ client => { /* client not needed for externalization path here */ });
+
+ string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orchestratorName);
+ OrchestrationMetadata completed = await server.Client.WaitForInstanceCompletionAsync(
+ instanceId, getInputsAndOutputs: true, this.TimeoutToken);
+
+ Assert.Equal(OrchestrationRuntimeStatus.Completed, completed.RuntimeStatus);
+
+ // validate upload and download count
+ Assert.True(workerStore.UploadCount >= 1);
+ Assert.True(workerStore.DownloadCount >= 1);
+
+ // validate uploaded payloads include the activity input and output forms
+ string expectedActivityInputJson = JsonSerializer.Serialize(new[] { largeParam });
+ string expectedActivityOutputJson = JsonSerializer.Serialize(largeParam + largeParam);
+ Assert.Contains(expectedActivityInputJson, workerStore.uploadedPayloads);
+ Assert.Contains(expectedActivityOutputJson, workerStore.uploadedPayloads);
}
+
// Ensures payloads below the threshold are not externalized by client or worker.
[Fact]
public async Task NoLargePayloads()
@@ -287,59 +414,6 @@ public async Task LargeExternalEvent()
Assert.True(fakeStore.UploadCount >= 1);
}
- // Validates worker externalizes both output and custom status; client resolves them on query.
- [Fact]
- public async Task LargeOutputAndCustomStatus()
- {
- string largeOutput = new string('O', 768 * 1024); // 768KB
- string largeStatus = new string('S', 600 * 1024); // 600KB
- TaskName orchestratorName = nameof(LargeOutputAndCustomStatus);
-
- InMemoryPayloadStore fakeStore = new InMemoryPayloadStore();
-
- await using HostTestLifetime server = await this.StartWorkerAsync(
- worker =>
- {
- worker.AddTasks(tasks => tasks.AddOrchestratorFunc(
- orchestratorName,
- async (ctx, _) =>
- {
- ctx.SetCustomStatus(largeStatus);
- await ctx.CreateTimer(TimeSpan.Zero, CancellationToken.None);
- return largeOutput;
- }));
-
- worker.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024; // ensure externalization for status/output
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
- worker.Services.AddSingleton(fakeStore);
- },
- client =>
- {
- client.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024; // ensure resolution on query
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
- client.Services.AddSingleton(fakeStore);
- });
-
- string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orchestratorName);
-
- OrchestrationMetadata completed = await server.Client.WaitForInstanceCompletionAsync(
- instanceId, getInputsAndOutputs: true, this.TimeoutToken);
-
- Assert.Equal(OrchestrationRuntimeStatus.Completed, completed.RuntimeStatus);
- Assert.Equal(largeOutput, completed.ReadOutputAs());
- Assert.Equal(largeStatus, completed.ReadCustomStatusAs());
-
- // Worker may externalize both status and output
- Assert.True(fakeStore.UploadCount >= 2);
- }
class InMemoryPayloadStore : IPayloadStore
{
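A pattern worth calling out in this revision of the tests: the client-side and worker-side stores are the same InMemoryPayloadStore instance, or two instances over one shared dictionary, so a token uploaded on one side can be downloaded on the other, and both builders opt in to externalization. A condensed sketch of that wiring as it appears in the tests above, with the AddSingleton service type written out explicitly (an assumption; the generic argument is not visible in this dump):

var shared = new Dictionary<string, string>();
InMemoryPayloadStore sharedStore = new InMemoryPayloadStore(shared);

worker.UseExternalizedPayloads(opts =>
{
    opts.ExternalizeThresholdBytes = 1024;
    opts.ContainerName = "test";
    opts.ConnectionString = "UseDevelopmentStorage=true";
});
worker.Services.AddSingleton<IPayloadStore>(sharedStore);

client.UseExternalizedPayloads(opts =>
{
    opts.ExternalizeThresholdBytes = 1024;
    opts.ContainerName = "test";
    opts.ConnectionString = "UseDevelopmentStorage=true";
});
client.Services.AddSingleton<IPayloadStore>(sharedStore);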
From b9d764f11160014c674926fea3f40920d18cccac Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Tue, 16 Sep 2025 20:01:47 -0700
Subject: [PATCH 33/53] save
---
.../LargePayloadTests.cs | 54 +++++++++++++++++++
1 file changed, 54 insertions(+)
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
index 0297c24c..7de74a74 100644
--- a/test/Grpc.IntegrationTests/LargePayloadTests.cs
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -88,6 +88,58 @@ public async Task LargeOrchestrationInputAndOutputAndCustomStatus()
Assert.Contains(JsonSerializer.Serialize(largeInput + largeInput), fakeStore.uploadedPayloads);
}
+ // Validates terminating an instance with a large output payload is externalized by the client.
+ [Fact]
+ public async Task LargeTerminateWithPayload()
+ {
+ string largeOutput = new string('T', 900 * 1024);
+ TaskName orch = nameof(LargeTerminateWithPayload);
+
+ InMemoryPayloadStore store = new InMemoryPayloadStore();
+
+ await using HostTestLifetime server = await this.StartWorkerAsync(
+ worker =>
+ {
+ worker.AddTasks(tasks => tasks.AddOrchestratorFunc(
+ orch,
+ async (ctx, _) =>
+ {
+ await ctx.CreateTimer(TimeSpan.FromSeconds(30), CancellationToken.None);
+ return null;
+ }));
+
+ worker.UseExternalizedPayloads(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
+ worker.Services.AddSingleton(store);
+ },
+ client =>
+ {
+ client.UseExternalizedPayloads(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
+ client.Services.AddSingleton(store);
+ });
+
+ string id = await server.Client.ScheduleNewOrchestrationInstanceAsync(orch);
+ await server.Client.WaitForInstanceStartAsync(id, this.TimeoutToken);
+
+ await server.Client.TerminateInstanceAsync(id, new TerminateInstanceOptions { Output = largeOutput }, this.TimeoutToken);
+
+ await server.Client.WaitForInstanceCompletionAsync(id, this.TimeoutToken);
+ OrchestrationMetadata? status = await server.Client.GetInstanceAsync(id, getInputsAndOutputs: false);
+ Assert.NotNull(status);
+ Assert.Equal(OrchestrationRuntimeStatus.Terminated, status!.RuntimeStatus);
+ Assert.True(store.UploadCount >= 1);
+ Assert.True(store.DownloadCount >= 1);
+ Assert.Contains(JsonSerializer.Serialize(largeOutput), store.uploadedPayloads);
+ }
// Validates large custom status and ContinueAsNew input are externalized and resolved across iterations.
[Fact]
public async Task LargeContinueAsNewAndCustomStatus()
@@ -412,6 +464,8 @@ public async Task LargeExternalEvent()
string? output = completed.ReadOutputAs<string>();
Assert.Equal(largeEvent, output);
Assert.True(fakeStore.UploadCount >= 1);
+ Assert.True(fakeStore.DownloadCount >= 1);
+ Assert.Contains(JsonSerializer.Serialize(largeEvent), fakeStore.uploadedPayloads);
}
From 989c99dd16a55db3fd51021bf04029301b35b132 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Tue, 16 Sep 2025 20:25:54 -0700
Subject: [PATCH 34/53] test
---
test/Grpc.IntegrationTests/LargePayloadTests.cs | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
index 7de74a74..b7f1b446 100644
--- a/test/Grpc.IntegrationTests/LargePayloadTests.cs
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -92,6 +92,7 @@ public async Task LargeOrchestrationInputAndOutputAndCustomStatus()
[Fact]
public async Task LargeTerminateWithPayload()
{
+ string largeInput = new string('I', 900 * 1024);
string largeOutput = new string('T', 900 * 1024);
TaskName orch = nameof(LargeTerminateWithPayload);
@@ -127,7 +128,7 @@ public async Task LargeTerminateWithPayload()
client.Services.AddSingleton(store);
});
- string id = await server.Client.ScheduleNewOrchestrationInstanceAsync(orch);
+ string id = await server.Client.ScheduleNewOrchestrationInstanceAsync(orch, largeInput);
await server.Client.WaitForInstanceStartAsync(id, this.TimeoutToken);
await server.Client.TerminateInstanceAsync(id, new TerminateInstanceOptions { Output = largeOutput }, this.TimeoutToken);
@@ -299,10 +300,10 @@ public async Task LargeQueryFetchInputsAndOutputs()
client.Services.AddSingleton(workerStore);
});
- string id = await server.Client.ScheduleNewOrchestrationInstanceAsync(orch);
+ string id = await server.Client.ScheduleNewOrchestrationInstanceAsync(orch, largeIn);
await server.Client.WaitForInstanceCompletionAsync(id, getInputsAndOutputs: false, this.TimeoutToken);
- var page = server.Client.GetAllInstancesAsync(new OrchestrationQuery { FetchInputsAndOutputs = true });
+ var page = server.Client.GetAllInstancesAsync(new OrchestrationQuery { FetchInputsAndOutputs = true, InstanceIdPrefix = id });
OrchestrationMetadata? found = null;
await foreach (var item in page)
{
From 1c407ddadb9717b523b3e866ad0eef1dcce68a41 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Wed, 17 Sep 2025 08:45:36 -0700
Subject: [PATCH 35/53] resume/suspend
---
.../AzureBlobPayloadsInterceptor.cs | 4 +
.../LargePayloadTests.cs | 75 +++++++++++++++++++
2 files changed, 79 insertions(+)
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
index 4bf9e8ca..33f77436 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
@@ -133,6 +133,10 @@ Task ExternalizeRequestPayloadsAsync(TRequest request, CancellationTok
return this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);
case P.TerminateRequest r:
return this.MaybeExternalizeAsync(v => r.Output = v, r.Output, cancellation);
+ case P.SuspendRequest r:
+ return this.MaybeExternalizeAsync(v => r.Reason = v, r.Reason, cancellation);
+ case P.ResumeRequest r:
+ return this.MaybeExternalizeAsync(v => r.Reason = v, r.Reason, cancellation);
case P.SignalEntityRequest r:
return this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);
case P.ActivityResponse r:
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
index b7f1b446..61b2bad1 100644
--- a/test/Grpc.IntegrationTests/LargePayloadTests.cs
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -88,6 +88,81 @@ public async Task LargeOrchestrationInputAndOutputAndCustomStatus()
Assert.Contains(JsonSerializer.Serialize(largeInput + largeInput), fakeStore.uploadedPayloads);
}
+ // Validates client externalizes large suspend and resume reasons.
+ [Fact]
+ public async Task SuspendAndResume_Reason_IsExternalizedByClient()
+ {
+ string largeReason1 = new string('Z', 700 * 1024); // 700KB
+ string largeReason2 = new string('Y', 650 * 1024); // 650KB
+ TaskName orchestratorName = nameof(SuspendAndResume_Reason_IsExternalizedByClient);
+
+ InMemoryPayloadStore clientStore = new InMemoryPayloadStore();
+
+ await using HostTestLifetime server = await this.StartWorkerAsync(
+ worker =>
+ {
+ // Long-running orchestrator to give time for suspend/resume
+ worker.AddTasks(tasks => tasks.AddOrchestratorFunc(
+ orchestratorName,
+ async (ctx, _) =>
+ {
+ await ctx.CreateTimer(TimeSpan.FromMinutes(5), CancellationToken.None);
+ return "done";
+ }));
+ },
+ client =>
+ {
+ // Enable externalization on the client and use the in-memory store to track uploads
+ client.UseExternalizedPayloads(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024; // 1KB threshold to force externalization
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
+ client.Services.AddSingleton(clientStore);
+ });
+
+ string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orchestratorName);
+ await server.Client.WaitForInstanceStartAsync(instanceId, this.TimeoutToken);
+
+ // Suspend with large reason (should be externalized by client)
+ await server.Client.SuspendInstanceAsync(instanceId, largeReason1, this.TimeoutToken);
+ await server.Client.WaitForInstanceStartAsync(instanceId, this.TimeoutToken);
+
+ // verify it is suspended
+ OrchestrationMetadata? status = await server.Client.GetInstanceAsync(instanceId, getInputsAndOutputs: false, this.TimeoutToken);
+ Assert.NotNull(status);
+ Assert.Equal(OrchestrationRuntimeStatus.Suspended, status!.RuntimeStatus);
+
+ // Resume with large reason (should be externalized by client)
+ await server.Client.ResumeInstanceAsync(instanceId, largeReason2, this.TimeoutToken);
+
+ // verify it is resumed (poll up to 5 seconds)
+ var deadline = DateTime.UtcNow.AddSeconds(5);
+ while (true)
+ {
+ status = await server.Client.GetInstanceAsync(instanceId, getInputsAndOutputs: false, this.TimeoutToken);
+ if (status is not null && status.RuntimeStatus == OrchestrationRuntimeStatus.Running)
+ {
+ break;
+ }
+
+ if (DateTime.UtcNow >= deadline)
+ {
+ Assert.NotNull(status);
+ Assert.Equal(OrchestrationRuntimeStatus.Running, status!.RuntimeStatus);
+ }
+
+ await Task.Delay(TimeSpan.FromSeconds(1), this.TimeoutToken);
+ }
+
+ Assert.True(clientStore.UploadCount >= 2);
+ Assert.Contains(largeReason1, clientStore.uploadedPayloads);
+ Assert.Contains(largeReason2, clientStore.uploadedPayloads);
+ }
+
// Validates terminating an instance with a large output payload is externalized by the client.
[Fact]
public async Task LargeTerminateWithPayload()
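For reference, the InMemoryPayloadStore these tests register stands in for the blob store. A minimal IPayloadStore test double along these lines would satisfy the assertions above (illustrative sketch, not part of this patch; it assumes the single-argument UploadAsync shape of IPayloadStore used by the interceptor at this point in the series, plus the UploadCount/DownloadCount/uploadedPayloads members the tests read):

    using System;
    using System.Collections.Concurrent;
    using System.Text;
    using System.Threading;
    using System.Threading.Tasks;
    using Microsoft.DurableTask.Converters; // IPayloadStore namespace at this point in the series

    sealed class InMemoryPayloadStore : IPayloadStore
    {
        const string TokenPrefix = "inmem:v1:";
        readonly ConcurrentDictionary<string, string> payloads = new();

        public int UploadCount;
        public int DownloadCount;
        public ConcurrentBag<string> uploadedPayloads = new();

        public Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
        {
            // Decode the payload and hand back an opaque, prefixed token.
            string payload = Encoding.UTF8.GetString(payloadBytes.ToArray());
            string token = TokenPrefix + Guid.NewGuid().ToString("N");
            this.payloads[token] = payload;
            this.uploadedPayloads.Add(payload);
            Interlocked.Increment(ref this.UploadCount);
            return Task.FromResult(token);
        }

        public Task<string> DownloadAsync(string token, CancellationToken cancellationToken)
        {
            Interlocked.Increment(ref this.DownloadCount);
            return Task.FromResult(this.payloads[token]);
        }

        public bool IsKnownPayloadToken(string value) =>
            value.StartsWith(TokenPrefix, StringComparison.Ordinal);
    }

Because every token carries the store-specific prefix, IsKnownPayloadToken lets the interceptor skip values that were never externalized.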
From c6fa5431264116ccd766b0e51c3b85c02cb6eeda Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Wed, 17 Sep 2025 08:53:33 -0700
Subject: [PATCH 36/53] historystateevent
---
.../Interceptors/AzureBlobPayloadsInterceptor.cs | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
index 33f77436..0dffe503 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
@@ -417,6 +417,14 @@ async Task ResolveEventPayloadsAsync(P.HistoryEvent e, CancellationToken cancell
await this.MaybeResolveAsync(v => ecomp.Output = v, ecomp.Output, cancellation);
}
break;
+ case P.HistoryEvent.EventTypeOneofCase.HistoryState:
+ if (e.HistoryState is { } hs && hs.OrchestrationState is { } os)
+ {
+ await this.MaybeResolveAsync(v => os.Input = v, os.Input, cancellation);
+ await this.MaybeResolveAsync(v => os.Output = v, os.Output, cancellation);
+ await this.MaybeResolveAsync(v => os.CustomStatus = v, os.CustomStatus, cancellation);
+ }
+ break;
}
}
From bf043634f4d17ab047e052e44928aea46889a172 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Wed, 17 Sep 2025 09:57:53 -0700
Subject: [PATCH 37/53] streaming chunk
---
.../Interceptors/AzureBlobPayloadsInterceptor.cs | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
index 0dffe503..835fa3ae 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
@@ -245,6 +245,12 @@ async Task ResolveResponsePayloadsAsync(TResponse response, Cancellat
await this.MaybeResolveAsync(v => s.Output = v, s.Output, cancellation);
await this.MaybeResolveAsync(v => s.CustomStatus = v, s.CustomStatus, cancellation);
break;
+ case P.HistoryChunk c when c.Events != null:
+ foreach (P.HistoryEvent e in c.Events)
+ {
+ await this.ResolveEventPayloadsAsync(e, cancellation);
+ }
+ break;
case P.QueryInstancesResponse r:
foreach (P.OrchestrationState s in r.OrchestrationState)
{
From fbecfd28bf5ffce3f75c4931fe495652baa0437e Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Wed, 17 Sep 2025 10:48:29 -0700
Subject: [PATCH 38/53] test fix
---
.../LargePayloadTests.cs | 21 ++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
index 61b2bad1..f3b3d81a 100644
--- a/test/Grpc.IntegrationTests/LargePayloadTests.cs
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -129,11 +129,22 @@ public async Task SuspendAndResume_Reason_IsExternalizedByClient()
await server.Client.SuspendInstanceAsync(instanceId, largeReason1, this.TimeoutToken);
await server.Client.WaitForInstanceStartAsync(instanceId, this.TimeoutToken);
- // verify it is suspended
- OrchestrationMetadata? status = await server.Client.GetInstanceAsync(instanceId, getInputsAndOutputs: false, this.TimeoutToken);
- Assert.NotNull(status);
- Assert.Equal(OrchestrationRuntimeStatus.Suspended, status!.RuntimeStatus);
+ // poll up to 5 seconds to verify it is suspended
+ var deadline1 = DateTime.UtcNow.AddSeconds(5);
+ while (true)
+ {
+ OrchestrationMetadata? status1 = await server.Client.GetInstanceAsync(instanceId, getInputsAndOutputs: false, this.TimeoutToken);
+ if (status1 is not null && status1.RuntimeStatus == OrchestrationRuntimeStatus.Suspended)
+ {
+ break;
+ }
+ if (DateTime.UtcNow >= deadline1)
+ {
+ Assert.NotNull(status1);
+ Assert.Equal(OrchestrationRuntimeStatus.Suspended, status1!.RuntimeStatus);
+ }
+ }
// Resume with large reason (should be externalized by client)
await server.Client.ResumeInstanceAsync(instanceId, largeReason2, this.TimeoutToken);
@@ -141,7 +152,7 @@ public async Task SuspendAndResume_Reason_IsExternalizedByClient()
var deadline = DateTime.UtcNow.AddSeconds(5);
while (true)
{
- status = await server.Client.GetInstanceAsync(instanceId, getInputsAndOutputs: false, this.TimeoutToken);
+ OrchestrationMetadata? status = await server.Client.GetInstanceAsync(instanceId, getInputsAndOutputs: false, this.TimeoutToken);
if (status is not null && status.RuntimeStatus == OrchestrationRuntimeStatus.Running)
{
break;
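The suspend-verification loop added in this fix polls GetInstanceAsync in a tight loop; unlike the resume loop further down, it has no Task.Delay between iterations. A small helper on the test class along these lines would remove the duplicated polling and make the back-off explicit (illustrative sketch, not part of the patch; it assumes the DurableTaskClient and OrchestrationMetadata types these tests already use):

    static async Task<OrchestrationMetadata> WaitForRuntimeStatusAsync(
        DurableTaskClient client,
        string instanceId,
        OrchestrationRuntimeStatus expected,
        TimeSpan timeout,
        CancellationToken cancellation)
    {
        DateTime deadline = DateTime.UtcNow + timeout;
        while (true)
        {
            OrchestrationMetadata? status = await client.GetInstanceAsync(
                instanceId, getInputsAndOutputs: false, cancellation);
            if (status is not null && status.RuntimeStatus == expected)
            {
                return status;
            }

            if (DateTime.UtcNow >= deadline)
            {
                // Surface the same failure the inline loops produce today.
                Assert.NotNull(status);
                Assert.Equal(expected, status!.RuntimeStatus);
            }

            await Task.Delay(TimeSpan.FromSeconds(1), cancellation);
        }
    }

Usage would then be, for example: status = await WaitForRuntimeStatusAsync(server.Client, instanceId, OrchestrationRuntimeStatus.Suspended, TimeSpan.FromSeconds(5), this.TimeoutToken);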
From a5b5bd1826f49650ca117861938af860c25c9b52 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Wed, 17 Sep 2025 12:04:45 -0700
Subject: [PATCH 39/53] history event streaming test added
---
.../GrpcSidecar/Grpc/TaskHubGrpcServer.cs | 71 ++++++++++++++++---
.../LargePayloadTests.cs | 58 +++++++++++++++
2 files changed, 121 insertions(+), 8 deletions(-)
diff --git a/test/Grpc.IntegrationTests/GrpcSidecar/Grpc/TaskHubGrpcServer.cs b/test/Grpc.IntegrationTests/GrpcSidecar/Grpc/TaskHubGrpcServer.cs
index e3a320f7..87f0e631 100644
--- a/test/Grpc.IntegrationTests/GrpcSidecar/Grpc/TaskHubGrpcServer.cs
+++ b/test/Grpc.IntegrationTests/GrpcSidecar/Grpc/TaskHubGrpcServer.cs
@@ -31,6 +31,9 @@ public class TaskHubGrpcServer : P.TaskHubSidecarService.TaskHubSidecarServiceBa
readonly TaskHubDispatcherHost dispatcherHost;
readonly IsConnectedSignal isConnectedSignal = new();
readonly SemaphoreSlim sendWorkItemLock = new(initialCount: 1);
+ readonly ConcurrentDictionary> streamingPastEvents = new(StringComparer.OrdinalIgnoreCase);
+
+ volatile bool supportsHistoryStreaming;
// Initialized when a client connects to this service to receive work-item commands.
IServerStreamWriter? workerToClientStream;
@@ -479,6 +482,8 @@ static P.GetInstanceResponse CreateGetInstanceResponse(OrchestrationState state,
public override async Task GetWorkItems(P.GetWorkItemsRequest request, IServerStreamWriter responseStream, ServerCallContext context)
{
+ // Record whether the client supports history streaming
+ this.supportsHistoryStreaming = request.Capabilities.Contains(P.WorkerCapability.HistoryStreaming);
// Use a lock to mitigate the race condition where we signal the dispatch host to start but haven't
// yet saved a reference to the client response stream.
lock (this.isConnectedSignal)
@@ -521,6 +526,35 @@ public override async Task GetWorkItems(P.GetWorkItemsRequest request, IServerSt
}
}
+ public override async Task StreamInstanceHistory(P.StreamInstanceHistoryRequest request, IServerStreamWriter responseStream, ServerCallContext context)
+ {
+ if (this.streamingPastEvents.TryGetValue(request.InstanceId, out List? pastEvents))
+ {
+ const int MaxChunkBytes = 256 * 1024; // 256KB per chunk to simulate chunked streaming
+ int currentSize = 0;
+ P.HistoryChunk chunk = new();
+
+ foreach (P.HistoryEvent e in pastEvents)
+ {
+ int eventSize = e.CalculateSize();
+ if (currentSize > 0 && currentSize + eventSize > MaxChunkBytes)
+ {
+ await responseStream.WriteAsync(chunk);
+ chunk = new P.HistoryChunk();
+ currentSize = 0;
+ }
+
+ chunk.Events.Add(e);
+ currentSize += eventSize;
+ }
+
+ if (chunk.Events.Count > 0)
+ {
+ await responseStream.WriteAsync(chunk);
+ }
+ }
+ }
+
///
/// Invoked by the when a work item is available, proxies the call to execute an orchestrator over a gRPC channel.
///
@@ -547,16 +581,37 @@ async Task ITaskExecutor.ExecuteOrchestrator(
try
{
+ var orkRequest = new P.OrchestratorRequest
+ {
+ InstanceId = instance.InstanceId,
+ ExecutionId = instance.ExecutionId,
+ NewEvents = { newEvents.Select(ProtobufUtils.ToHistoryEventProto) },
+ OrchestrationTraceContext = orchestrationTraceContext,
+ };
+
+ // Decide whether to stream based on the total serialized size of past events; the threshold is small (1 KiB) so tests can trigger streaming even after payloads have been externalized to short tokens
+ List protoPastEvents = pastEvents.Select(ProtobufUtils.ToHistoryEventProto).ToList();
+ int totalBytes = 0;
+ foreach (P.HistoryEvent ev in protoPastEvents)
+ {
+ totalBytes += ev.CalculateSize();
+ }
+
+ if (this.supportsHistoryStreaming && totalBytes > (1024))
+ {
+ orkRequest.RequiresHistoryStreaming = true;
+ // Store past events to serve via StreamInstanceHistory
+ this.streamingPastEvents[instance.InstanceId] = protoPastEvents;
+ }
+ else
+ {
+ // Embed full history in the work item
+ orkRequest.PastEvents.AddRange(protoPastEvents);
+ }
+
await this.SendWorkItemToClientAsync(new P.WorkItem
{
- OrchestratorRequest = new P.OrchestratorRequest
- {
- InstanceId = instance.InstanceId,
- ExecutionId = instance.ExecutionId,
- NewEvents = { newEvents.Select(ProtobufUtils.ToHistoryEventProto) },
- OrchestrationTraceContext = orchestrationTraceContext,
- PastEvents = { pastEvents.Select(ProtobufUtils.ToHistoryEventProto) },
- }
+ OrchestratorRequest = orkRequest,
});
}
catch
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
index f3b3d81a..ff9681f9 100644
--- a/test/Grpc.IntegrationTests/LargePayloadTests.cs
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -88,6 +88,64 @@ public async Task LargeOrchestrationInputAndOutputAndCustomStatus()
Assert.Contains(JsonSerializer.Serialize(largeInput + largeInput), fakeStore.uploadedPayloads);
}
+ // Validates history streaming path resolves externalized inputs/outputs in HistoryChunk.
+ [Fact]
+ public async Task HistoryStreaming_ResolvesPayloads()
+ {
+ // Use large payloads so they are externalized, and emit enough history to exceed the sidecar's streaming threshold
+ string largeInput = new string('H', 2 * 1024 * 1024); // 2 MiB
+ string largeOutput = new string('O', 2 * 1024 * 1024); // 2 MiB
+ TaskName orch = nameof(HistoryStreaming_ResolvesPayloads);
+
+ InMemoryPayloadStore store = new InMemoryPayloadStore();
+
+ await using HostTestLifetime server = await this.StartWorkerAsync(
+ worker =>
+ {
+ worker.AddTasks(tasks => tasks.AddOrchestratorFunc(
+ orch,
+ async (ctx, input) =>
+ {
+ // Emit several events so that the serialized history size grows
+ for (int i = 0; i < 50; i++)
+ {
+ await ctx.CreateTimer(TimeSpan.FromMilliseconds(10), CancellationToken.None);
+ }
+ return largeOutput;
+ }));
+
+ worker.UseExternalizedPayloads(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
+ worker.Services.AddSingleton(store);
+ },
+ client =>
+ {
+ // Enable client to resolve outputs on query
+ client.UseExternalizedPayloads(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
+ client.Services.AddSingleton(store);
+ });
+
+ // Start orchestration with large input to exercise history input resolution
+ string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orch, largeInput);
+ OrchestrationMetadata completed = await server.Client.WaitForInstanceCompletionAsync(
+ instanceId, getInputsAndOutputs: true, this.TimeoutToken);
+
+ Assert.Equal(OrchestrationRuntimeStatus.Completed, completed.RuntimeStatus);
+ Assert.Equal(largeInput, completed.ReadInputAs());
+ Assert.Equal(largeOutput, completed.ReadOutputAs());
+ Assert.True(store.UploadCount >= 2);
+ Assert.True(store.DownloadCount >= 2);
+ }
+
// Validates client externalizes large suspend and resume reasons.
[Fact]
public async Task SuspendAndResume_Reason_IsExternalizedByClient()
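With this change the test sidecar embeds past events in the work item only when the worker did not advertise the HistoryStreaming capability or the serialized history is below the threshold; otherwise it sets RequiresHistoryStreaming and serves the events through StreamInstanceHistory in size-capped chunks. On the consuming side, reassembling the history is a short read loop. A hedged sketch, assuming the generated sidecar client exposes a StreamInstanceHistory call matching the server override above (identifiers such as sidecarClient, orchestratorRequest, and cancellation are placeholders):

    using System.Collections.Generic;
    using System.Threading;
    using Grpc.Core;
    using P = Microsoft.DurableTask.Protobuf;

    // "sidecarClient" stands for the generated TaskHubSidecarService client;
    // "orchestratorRequest" is the received OrchestratorRequest with RequiresHistoryStreaming set.
    var request = new P.StreamInstanceHistoryRequest { InstanceId = orchestratorRequest.InstanceId };
    using AsyncServerStreamingCall<P.HistoryChunk> call = sidecarClient.StreamInstanceHistory(request);

    List<P.HistoryEvent> pastEvents = new();
    await foreach (P.HistoryChunk chunk in call.ResponseStream.ReadAllAsync(cancellation))
    {
        pastEvents.AddRange(chunk.Events);
    }

Because the payload interceptor also wraps server-streaming calls, any externalized tokens inside each HistoryChunk are resolved before this loop sees them (the HistoryChunk case added in the "streaming chunk" patch above).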
From 78465d5ae1dfa4c6d8038e66742c6ca57a902a70 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Fri, 19 Sep 2025 18:21:54 -0700
Subject: [PATCH 40/53] cleanup
---
src/Abstractions/Converters/IPayloadStore.cs | 34 -------------------
.../Options}/LargePayloadStorageOptions.cs | 2 +-
2 files changed, 1 insertion(+), 35 deletions(-)
delete mode 100644 src/Abstractions/Converters/IPayloadStore.cs
rename src/{Abstractions/Converters => Extensions/AzureBlobPayloads/Options}/LargePayloadStorageOptions.cs (97%)
diff --git a/src/Abstractions/Converters/IPayloadStore.cs b/src/Abstractions/Converters/IPayloadStore.cs
deleted file mode 100644
index c2a6ff6c..00000000
--- a/src/Abstractions/Converters/IPayloadStore.cs
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT License.
-
-namespace Microsoft.DurableTask.Converters;
-
-///
-/// Abstraction for storing and retrieving large payloads out-of-band.
-///
-public interface IPayloadStore
-{
- ///
- /// Uploads a payload and returns an opaque reference token that can be embedded in orchestration messages.
- ///
- /// The payload bytes.
- /// Cancellation token.
- /// Opaque reference token.
- Task UploadAsync(ReadOnlyMemory payloadBytes, CancellationToken cancellationToken);
-
- ///
- /// Downloads the payload referenced by the token.
- ///
- /// The opaque reference token.
- /// Cancellation token.
- /// Payload string.
- Task DownloadAsync(string token, CancellationToken cancellationToken);
-
- ///
- /// Returns true if the specified value appears to be a token understood by this store.
- /// Implementations should not throw for unknown tokens.
- ///
- /// The value to check.
- /// true if the value is a token issued by this store; otherwise, false.
- bool IsKnownPayloadToken(string value);
-}
diff --git a/src/Abstractions/Converters/LargePayloadStorageOptions.cs b/src/Extensions/AzureBlobPayloads/Options/LargePayloadStorageOptions.cs
similarity index 97%
rename from src/Abstractions/Converters/LargePayloadStorageOptions.cs
rename to src/Extensions/AzureBlobPayloads/Options/LargePayloadStorageOptions.cs
index af005854..64875b84 100644
--- a/src/Abstractions/Converters/LargePayloadStorageOptions.cs
+++ b/src/Extensions/AzureBlobPayloads/Options/LargePayloadStorageOptions.cs
@@ -2,7 +2,7 @@
// Licensed under the MIT License.
// Intentionally no DataAnnotations to avoid extra package requirements in minimal hosts.
-namespace Microsoft.DurableTask.Converters;
+namespace Microsoft.DurableTask;
///
/// Options for externalized payload storage, used by SDKs to store large payloads out-of-band.
From cf64affd44492c3004dcd95dd31cc3124b0f620f Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Sun, 21 Sep 2025 01:05:26 -0700
Subject: [PATCH 41/53] Interface for supporting azuremanaged
---
samples/LargePayloadConsoleApp/Program.cs | 7 +----
...ientBuilderExtensions.AzureBlobPayloads.cs | 1 -
...rkerBuilderExtensions.AzureBlobPayloads.cs | 8 +++---
.../AzureBlobPayloadCallInvokerFactory.cs | 27 +++++++++++++++++++
.../AzureBlobPayloadsInterceptor.cs | 2 +-
.../PayloadStore/BlobPayloadStore.cs | 2 +-
6 files changed, 33 insertions(+), 14 deletions(-)
create mode 100644 src/Extensions/AzureBlobPayloads/Factories/AzureBlobPayloadCallInvokerFactory.cs
diff --git a/samples/LargePayloadConsoleApp/Program.cs b/samples/LargePayloadConsoleApp/Program.cs
index 8886e17a..6995b50c 100644
--- a/samples/LargePayloadConsoleApp/Program.cs
+++ b/samples/LargePayloadConsoleApp/Program.cs
@@ -135,8 +135,6 @@ await ctx.Entities.CallEntityAsync(
Console.WriteLine($"Deserialized output equals original: {deserializedOutput == largeInput}");
Console.WriteLine($"Deserialized input length: {deserializedInput.Length}");
-
-
// Run entity samples
Console.WriteLine();
Console.WriteLine("Running LargeEntityOperationInput...");
@@ -199,7 +197,4 @@ public void Set(string value)
{
this.State = value;
}
-}
-
-
-
+}
\ No newline at end of file
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
index b284dc2f..6e6e546d 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
@@ -22,7 +22,6 @@ public static class DurableTaskClientBuilderExtensionsAzureBlobPayloads
/// The builder to configure.
/// The callback to configure the storage options.
/// The original builder, for call chaining.
- ///
public static IDurableTaskClientBuilder UseExternalizedPayloads(
this IDurableTaskClientBuilder builder,
Action configure)
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
index da9081dc..4137a40a 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
@@ -1,13 +1,13 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
+using Grpc.Core.Interceptors;
+using Grpc.Net.Client;
using Microsoft.DurableTask.Converters;
using Microsoft.DurableTask.Worker;
+using Microsoft.DurableTask.Worker.Grpc;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Options;
-using Microsoft.DurableTask.Worker.Grpc;
-using Grpc.Net.Client;
-using Grpc.Core.Interceptors;
namespace Microsoft.DurableTask;
@@ -64,5 +64,3 @@ public static IDurableTaskWorkerBuilder UseExternalizedPayloads(
return builder;
}
}
-
-
diff --git a/src/Extensions/AzureBlobPayloads/Factories/AzureBlobPayloadCallInvokerFactory.cs b/src/Extensions/AzureBlobPayloads/Factories/AzureBlobPayloadCallInvokerFactory.cs
new file mode 100644
index 00000000..43cd8acd
--- /dev/null
+++ b/src/Extensions/AzureBlobPayloads/Factories/AzureBlobPayloadCallInvokerFactory.cs
@@ -0,0 +1,27 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+using Grpc.Core;
+using Grpc.Core.Interceptors;
+using Grpc.Net.Client;
+
+namespace Microsoft.DurableTask;
+
+///
+/// Static factory for creating large payload interceptors without exposing internal implementation details.
+///
+public static class AzureBlobPayloadCallInvokerFactory
+{
+ ///
+ /// Creates a CallInvoker with the large payload interceptor applied to the given GrpcChannel.
+ ///
+ /// The gRPC channel to intercept.
+ /// The large payload storage options.
+ /// A CallInvoker with the large payload interceptor applied.
+ public static CallInvoker Create(GrpcChannel channel, LargePayloadStorageOptions options)
+ {
+ IPayloadStore payloadStore = new BlobPayloadStore(options);
+ // return channel.Intercept(new AzureBlobPayloadsInterceptor(payloadStore, options));
+ return channel.CreateCallInvoker().Intercept(new AzureBlobPayloadsInterceptor(payloadStore, options));
+ }
+}
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
index 835fa3ae..71e2f71a 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
@@ -13,7 +13,7 @@ namespace Microsoft.DurableTask;
/// gRPC interceptor that externalizes large payloads to an IPayloadStore on requests
/// and resolves known payload tokens on responses.
///
-sealed class AzureBlobPayloadsInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options) : Interceptor
+internal sealed class AzureBlobPayloadsInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options) : Interceptor
{
readonly IPayloadStore payloadStore = payloadStore;
readonly LargePayloadStorageOptions options = options;
diff --git a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
index 9b8d39a8..634870f3 100644
--- a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
@@ -16,7 +16,7 @@ namespace Microsoft.DurableTask;
/// Azure Blob Storage implementation of .
/// Stores payloads as blobs and returns opaque tokens in the form "blob:v1:<container>:<blobName>".
///
-public sealed class BlobPayloadStore : IPayloadStore
+internal sealed class BlobPayloadStore : IPayloadStore
{
const string TokenPrefix = "blob:v1:";
const string ContentEncodingGzip = "gzip";
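The new factory gives hosts that construct their own gRPC channel a way to opt into payload externalization without referencing the now-internal store or interceptor types. A usage sketch (the endpoint and option values are illustrative):

    using Grpc.Core;
    using Grpc.Net.Client;
    using Microsoft.DurableTask;

    GrpcChannel channel = GrpcChannel.ForAddress("http://localhost:4001");

    var options = new LargePayloadStorageOptions
    {
        ConnectionString = "UseDevelopmentStorage=true",
        ContainerName = "large-payloads",
        ExternalizeThresholdBytes = 512 * 1024, // externalize payloads larger than ~512 KB
    };

    // CallInvoker with the externalization/resolution interceptor applied.
    CallInvoker invoker = AzureBlobPayloadCallInvokerFactory.Create(channel, options);

The resulting invoker can then be assigned to the worker or client gRPC options' CallInvoker, which is what the UseExternalizedPayloads extensions above do internally.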
From 089a1488745d11b423d5a94b4c7f9bb4a9629446 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Sun, 28 Sep 2025 21:29:24 -0700
Subject: [PATCH 42/53] one pkg experiment
---
...ientBuilderExtensions.AzureBlobPayloads.cs | 4 +-
...rkerBuilderExtensions.AzureBlobPayloads.cs | 4 +-
.../AzureBlobPayloadCallInvokerFactory.cs | 3 +-
...bPayloadsAzureManagedBackendInterceptor.cs | 448 ++++++++++++++++++
...=> AzureBlobPayloadsSideCarInterceptor.cs} | 202 ++------
.../Interceptors/BasePayloadInterceptor.cs | 225 +++++++++
.../PayloadStore/BlobPayloadStore.cs | 2 +-
src/Grpc/backend_service.proto | 282 +++++++++++
src/Grpc/orchestrator_service.proto | 9 +-
src/Grpc/refresh-protos.ps1 | 10 +-
src/Grpc/versions.txt | 5 +-
11 files changed, 1010 insertions(+), 184 deletions(-)
create mode 100644 src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsAzureManagedBackendInterceptor.cs
rename src/Extensions/AzureBlobPayloads/Interceptors/{AzureBlobPayloadsInterceptor.cs => AzureBlobPayloadsSideCarInterceptor.cs} (69%)
create mode 100644 src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs
create mode 100644 src/Grpc/backend_service.proto
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
index 6e6e546d..c571a7fc 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
@@ -44,7 +44,7 @@ public static IDurableTaskClientBuilder UseExternalizedPayloads(
LargePayloadStorageOptions opts = monitor.Get(builder.Name);
if (opt.Channel is not null)
{
- Grpc.Core.CallInvoker invoker = opt.Channel.Intercept(new AzureBlobPayloadsInterceptor(store, opts));
+ Grpc.Core.CallInvoker invoker = opt.Channel.Intercept(new AzureBlobPayloadsSideCarInterceptor(store, opts));
opt.CallInvoker = invoker;
// Ensure client uses the intercepted invoker path
@@ -52,7 +52,7 @@ public static IDurableTaskClientBuilder UseExternalizedPayloads(
}
else if (opt.CallInvoker is not null)
{
- opt.CallInvoker = opt.CallInvoker.Intercept(new AzureBlobPayloadsInterceptor(store, opts));
+ opt.CallInvoker = opt.CallInvoker.Intercept(new AzureBlobPayloadsSideCarInterceptor(store, opts));
}
else
{
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
index 4137a40a..c4d13810 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
@@ -44,14 +44,14 @@ public static IDurableTaskWorkerBuilder UseExternalizedPayloads(
LargePayloadStorageOptions opts = monitor.Get(builder.Name);
if (opt.Channel is not null)
{
- var invoker = opt.Channel.Intercept(new AzureBlobPayloadsInterceptor(store, opts));
+ var invoker = opt.Channel.Intercept(new AzureBlobPayloadsSideCarInterceptor(store, opts));
opt.CallInvoker = invoker;
// Ensure worker uses the intercepted invoker path
opt.Channel = null;
}
else if (opt.CallInvoker is not null)
{
- opt.CallInvoker = opt.CallInvoker.Intercept(new AzureBlobPayloadsInterceptor(store, opts));
+ opt.CallInvoker = opt.CallInvoker.Intercept(new AzureBlobPayloadsSideCarInterceptor(store, opts));
}
else
{
diff --git a/src/Extensions/AzureBlobPayloads/Factories/AzureBlobPayloadCallInvokerFactory.cs b/src/Extensions/AzureBlobPayloads/Factories/AzureBlobPayloadCallInvokerFactory.cs
index 43cd8acd..46315599 100644
--- a/src/Extensions/AzureBlobPayloads/Factories/AzureBlobPayloadCallInvokerFactory.cs
+++ b/src/Extensions/AzureBlobPayloads/Factories/AzureBlobPayloadCallInvokerFactory.cs
@@ -21,7 +21,6 @@ public static class AzureBlobPayloadCallInvokerFactory
public static CallInvoker Create(GrpcChannel channel, LargePayloadStorageOptions options)
{
IPayloadStore payloadStore = new BlobPayloadStore(options);
- // return channel.Intercept(new AzureBlobPayloadsInterceptor(payloadStore, options));
- return channel.CreateCallInvoker().Intercept(new AzureBlobPayloadsInterceptor(payloadStore, options));
+ return channel.CreateCallInvoker().Intercept(new AzureBlobPayloadsAzureManagedBackendInterceptor(payloadStore, options));
}
}
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsAzureManagedBackendInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsAzureManagedBackendInterceptor.cs
new file mode 100644
index 00000000..632f702b
--- /dev/null
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsAzureManagedBackendInterceptor.cs
@@ -0,0 +1,448 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+using Grpc.Core.Interceptors;
+
+using P = Microsoft.DurableTask.Protobuf;
+
+namespace Microsoft.DurableTask;
+
+///
+/// gRPC interceptor that externalizes large payloads to an IPayloadStore on requests
+/// and resolves known payload tokens on responses for Azure Managed Backend.
+///
+public sealed class AzureBlobPayloadsAzureManagedBackendInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
+ : BasePayloadInterceptor(payloadStore, options)
+{
+ protected override Task ExternalizeRequestPayloadsAsync(TRequest request, CancellationToken cancellation)
+ {
+ // Azure Managed Backend -> Backend Service
+ // Note: This interceptor is designed for backend_service.proto types, but since those types
+ // are not yet generated with the updated namespace, we'll use the existing orchestrator_service.proto types
+ // for now. The user should run refresh-protos.ps1 to generate the proper types.
+ switch (request)
+ {
+ case P.CreateInstanceRequest r:
+ return this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);
+ case P.RaiseEventRequest r:
+ return this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);
+ case P.TerminateRequest r:
+ return this.MaybeExternalizeAsync(v => r.Output = v, r.Output, cancellation);
+ case P.SuspendRequest r:
+ return this.MaybeExternalizeAsync(v => r.Reason = v, r.Reason, cancellation);
+ case P.ResumeRequest r:
+ return this.MaybeExternalizeAsync(v => r.Reason = v, r.Reason, cancellation);
+ case P.SignalEntityRequest r:
+ return this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);
+ case P.ActivityResponse r:
+ return this.MaybeExternalizeAsync(v => r.Result = v, r.Result, cancellation);
+ case P.OrchestratorResponse r:
+ return this.ExternalizeOrchestratorResponseAsync(r, cancellation);
+ case P.EntityBatchResult r:
+ return this.ExternalizeEntityBatchResultAsync(r, cancellation);
+ case P.EntityBatchRequest r:
+ return this.ExternalizeEntityBatchRequestAsync(r, cancellation);
+ case P.EntityRequest r:
+ return this.MaybeExternalizeAsync(v => r.EntityState = v, r.EntityState, cancellation);
+ }
+
+ return Task.CompletedTask;
+ }
+
+ async Task ExternalizeOrchestratorResponseAsync(P.OrchestratorResponse r, CancellationToken cancellation)
+ {
+ await this.MaybeExternalizeAsync(v => r.CustomStatus = v, r.CustomStatus, cancellation);
+ foreach (P.OrchestratorAction a in r.Actions)
+ {
+ if (a.CompleteOrchestration is { } complete)
+ {
+ await this.MaybeExternalizeAsync(v => complete.Result = v, complete.Result, cancellation);
+ await this.MaybeExternalizeAsync(v => complete.Details = v, complete.Details, cancellation);
+ }
+
+ if (a.TerminateOrchestration is { } term)
+ {
+ await this.MaybeExternalizeAsync(v => term.Reason = v, term.Reason, cancellation);
+ }
+
+ if (a.ScheduleTask is { } schedule)
+ {
+ await this.MaybeExternalizeAsync(v => schedule.Input = v, schedule.Input, cancellation);
+ }
+
+ if (a.CreateSubOrchestration is { } sub)
+ {
+ await this.MaybeExternalizeAsync(v => sub.Input = v, sub.Input, cancellation);
+ }
+
+ if (a.SendEvent is { } sendEvt)
+ {
+ await this.MaybeExternalizeAsync(v => sendEvt.Data = v, sendEvt.Data, cancellation);
+ }
+
+ if (a.SendEntityMessage is { } entityMsg)
+ {
+ if (entityMsg.EntityOperationSignaled is { } sig)
+ {
+ await this.MaybeExternalizeAsync(v => sig.Input = v, sig.Input, cancellation);
+ }
+
+ if (entityMsg.EntityOperationCalled is { } called)
+ {
+ await this.MaybeExternalizeAsync(v => called.Input = v, called.Input, cancellation);
+ }
+ }
+ }
+ }
+
+ async Task ExternalizeEntityBatchResultAsync(P.EntityBatchResult r, CancellationToken cancellation)
+ {
+ await this.MaybeExternalizeAsync(v => r.EntityState = v, r.EntityState, cancellation);
+ if (r.Results != null)
+ {
+ foreach (P.OperationResult result in r.Results)
+ {
+ if (result.Success is { } success)
+ {
+ await this.MaybeExternalizeAsync(v => success.Result = v, success.Result, cancellation);
+ }
+ }
+ }
+
+ if (r.Actions != null)
+ {
+ foreach (P.OperationAction action in r.Actions)
+ {
+ if (action.SendSignal is { } sendSig)
+ {
+ await this.MaybeExternalizeAsync(v => sendSig.Input = v, sendSig.Input, cancellation);
+ }
+
+ if (action.StartNewOrchestration is { } start)
+ {
+ await this.MaybeExternalizeAsync(v => start.Input = v, start.Input, cancellation);
+ }
+ }
+ }
+ }
+
+ async Task ExternalizeEntityBatchRequestAsync(P.EntityBatchRequest r, CancellationToken cancellation)
+ {
+ await this.MaybeExternalizeAsync(v => r.EntityState = v, r.EntityState, cancellation);
+ if (r.Operations != null)
+ {
+ foreach (P.OperationRequest op in r.Operations)
+ {
+ await this.MaybeExternalizeAsync(v => op.Input = v, op.Input, cancellation);
+ }
+ }
+ }
+
+ async Task ExternalizeHistoryEventAsync(P.HistoryEvent e, CancellationToken cancellation)
+ {
+ switch (e.EventTypeCase)
+ {
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionStarted:
+ if (e.ExecutionStarted is { } es)
+ {
+ await this.MaybeExternalizeAsync(v => es.Input = v, es.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionCompleted:
+ if (e.ExecutionCompleted is { } ec)
+ {
+ await this.MaybeExternalizeAsync(v => ec.Result = v, ec.Result, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EventRaised:
+ if (e.EventRaised is { } er)
+ {
+ await this.MaybeExternalizeAsync(v => er.Input = v, er.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.TaskScheduled:
+ if (e.TaskScheduled is { } ts)
+ {
+ await this.MaybeExternalizeAsync(v => ts.Input = v, ts.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.TaskCompleted:
+ if (e.TaskCompleted is { } tc)
+ {
+ await this.MaybeExternalizeAsync(v => tc.Result = v, tc.Result, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.SubOrchestrationInstanceCreated:
+ if (e.SubOrchestrationInstanceCreated is { } soc)
+ {
+ await this.MaybeExternalizeAsync(v => soc.Input = v, soc.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.SubOrchestrationInstanceCompleted:
+ if (e.SubOrchestrationInstanceCompleted is { } sox)
+ {
+ await this.MaybeExternalizeAsync(v => sox.Result = v, sox.Result, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EventSent:
+ if (e.EventSent is { } esent)
+ {
+ await this.MaybeExternalizeAsync(v => esent.Input = v, esent.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.GenericEvent:
+ if (e.GenericEvent is { } ge)
+ {
+ await this.MaybeExternalizeAsync(v => ge.Data = v, ge.Data, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ContinueAsNew:
+ if (e.ContinueAsNew is { } can)
+ {
+ await this.MaybeExternalizeAsync(v => can.Input = v, can.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionTerminated:
+ if (e.ExecutionTerminated is { } et)
+ {
+ await this.MaybeExternalizeAsync(v => et.Input = v, et.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionSuspended:
+ if (e.ExecutionSuspended is { } esus)
+ {
+ await this.MaybeExternalizeAsync(v => esus.Input = v, esus.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionResumed:
+ if (e.ExecutionResumed is { } eres)
+ {
+ await this.MaybeExternalizeAsync(v => eres.Input = v, eres.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EntityOperationSignaled:
+ if (e.EntityOperationSignaled is { } eos)
+ {
+ await this.MaybeExternalizeAsync(v => eos.Input = v, eos.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EntityOperationCalled:
+ if (e.EntityOperationCalled is { } eoc)
+ {
+ await this.MaybeExternalizeAsync(v => eoc.Input = v, eoc.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EntityOperationCompleted:
+ if (e.EntityOperationCompleted is { } ecomp)
+ {
+ await this.MaybeExternalizeAsync(v => ecomp.Output = v, ecomp.Output, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.HistoryState:
+ if (e.HistoryState is { } hs && hs.OrchestrationState is { } os)
+ {
+ await this.MaybeExternalizeAsync(v => os.Input = v, os.Input, cancellation);
+ await this.MaybeExternalizeAsync(v => os.Output = v, os.Output, cancellation);
+ await this.MaybeExternalizeAsync(v => os.CustomStatus = v, os.CustomStatus, cancellation);
+ }
+ break;
+ }
+ }
+
+ protected override async Task ResolveResponsePayloadsAsync(TResponse response, CancellationToken cancellation)
+ {
+ // Backend Service -> Azure Managed Backend
+ // Note: This interceptor is designed for backend_service.proto types, but since those types
+ // are not yet generated with the updated namespace, we'll use the existing orchestrator_service.proto types
+ // for now. The user should run refresh-protos.ps1 to generate the proper types.
+ switch (response)
+ {
+ case P.GetInstanceResponse r when r.OrchestrationState is { } s:
+ await this.MaybeResolveAsync(v => s.Input = v, s.Input, cancellation);
+ await this.MaybeResolveAsync(v => s.Output = v, s.Output, cancellation);
+ await this.MaybeResolveAsync(v => s.CustomStatus = v, s.CustomStatus, cancellation);
+ break;
+ case P.HistoryChunk c when c.Events != null:
+ foreach (P.HistoryEvent e in c.Events)
+ {
+ await this.ResolveEventPayloadsAsync(e, cancellation);
+ }
+ break;
+ case P.QueryInstancesResponse r:
+ foreach (P.OrchestrationState s in r.OrchestrationState)
+ {
+ await this.MaybeResolveAsync(v => s.Input = v, s.Input, cancellation);
+ await this.MaybeResolveAsync(v => s.Output = v, s.Output, cancellation);
+ await this.MaybeResolveAsync(v => s.CustomStatus = v, s.CustomStatus, cancellation);
+ }
+ break;
+ case P.GetEntityResponse r when r.Entity is { } em:
+ await this.MaybeResolveAsync(v => em.SerializedState = v, em.SerializedState, cancellation);
+ break;
+ case P.QueryEntitiesResponse r:
+ foreach (P.EntityMetadata em in r.Entities)
+ {
+ await this.MaybeResolveAsync(v => em.SerializedState = v, em.SerializedState, cancellation);
+ }
+ break;
+ case P.WorkItem wi:
+ // Resolve activity input
+ if (wi.ActivityRequest is { } ar)
+ {
+ await this.MaybeResolveAsync(v => ar.Input = v, ar.Input, cancellation);
+ }
+
+ // Resolve orchestration input embedded in ExecutionStarted event and external events
+ if (wi.OrchestratorRequest is { } or)
+ {
+ foreach (P.HistoryEvent? e in or.PastEvents)
+ {
+ await this.ResolveEventPayloadsAsync(e, cancellation);
+ }
+
+ foreach (P.HistoryEvent? e in or.NewEvents)
+ {
+ await this.ResolveEventPayloadsAsync(e, cancellation);
+ }
+ }
+
+ // Resolve entity V1 batch request (OperationRequest inputs and entity state)
+ if (wi.EntityRequest is { } er1)
+ {
+ await this.MaybeResolveAsync(v => er1.EntityState = v, er1.EntityState, cancellation);
+ if (er1.Operations != null)
+ {
+ foreach (P.OperationRequest op in er1.Operations)
+ {
+ await this.MaybeResolveAsync(v => op.Input = v, op.Input, cancellation);
+ }
+ }
+ }
+
+ // Resolve entity V2 request (history-based operation requests and entity state)
+ if (wi.EntityRequestV2 is { } er2)
+ {
+ await this.MaybeResolveAsync(v => er2.EntityState = v, er2.EntityState, cancellation);
+ if (er2.OperationRequests != null)
+ {
+ foreach (P.HistoryEvent opEvt in er2.OperationRequests)
+ {
+ await this.ResolveEventPayloadsAsync(opEvt, cancellation);
+ }
+ }
+ }
+ break;
+ }
+ }
+
+ async Task ResolveEventPayloadsAsync(P.HistoryEvent e, CancellationToken cancellation)
+ {
+ switch (e.EventTypeCase)
+ {
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionStarted:
+ if (e.ExecutionStarted is { } es)
+ {
+ await this.MaybeResolveAsync(v => es.Input = v, es.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionCompleted:
+ if (e.ExecutionCompleted is { } ec)
+ {
+ await this.MaybeResolveAsync(v => ec.Result = v, ec.Result, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EventRaised:
+ if (e.EventRaised is { } er)
+ {
+ await this.MaybeResolveAsync(v => er.Input = v, er.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.TaskScheduled:
+ if (e.TaskScheduled is { } ts)
+ {
+ await this.MaybeResolveAsync(v => ts.Input = v, ts.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.TaskCompleted:
+ if (e.TaskCompleted is { } tc)
+ {
+ await this.MaybeResolveAsync(v => tc.Result = v, tc.Result, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.SubOrchestrationInstanceCreated:
+ if (e.SubOrchestrationInstanceCreated is { } soc)
+ {
+ await this.MaybeResolveAsync(v => soc.Input = v, soc.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.SubOrchestrationInstanceCompleted:
+ if (e.SubOrchestrationInstanceCompleted is { } sox)
+ {
+ await this.MaybeResolveAsync(v => sox.Result = v, sox.Result, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EventSent:
+ if (e.EventSent is { } esent)
+ {
+ await this.MaybeResolveAsync(v => esent.Input = v, esent.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.GenericEvent:
+ if (e.GenericEvent is { } ge)
+ {
+ await this.MaybeResolveAsync(v => ge.Data = v, ge.Data, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ContinueAsNew:
+ if (e.ContinueAsNew is { } can)
+ {
+ await this.MaybeResolveAsync(v => can.Input = v, can.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionTerminated:
+ if (e.ExecutionTerminated is { } et)
+ {
+ await this.MaybeResolveAsync(v => et.Input = v, et.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionSuspended:
+ if (e.ExecutionSuspended is { } esus)
+ {
+ await this.MaybeResolveAsync(v => esus.Input = v, esus.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.ExecutionResumed:
+ if (e.ExecutionResumed is { } eres)
+ {
+ await this.MaybeResolveAsync(v => eres.Input = v, eres.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EntityOperationSignaled:
+ if (e.EntityOperationSignaled is { } eos)
+ {
+ await this.MaybeResolveAsync(v => eos.Input = v, eos.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EntityOperationCalled:
+ if (e.EntityOperationCalled is { } eoc)
+ {
+ await this.MaybeResolveAsync(v => eoc.Input = v, eoc.Input, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.EntityOperationCompleted:
+ if (e.EntityOperationCompleted is { } ecomp)
+ {
+ await this.MaybeResolveAsync(v => ecomp.Output = v, ecomp.Output, cancellation);
+ }
+ break;
+ case P.HistoryEvent.EventTypeOneofCase.HistoryState:
+ if (e.HistoryState is { } hs && hs.OrchestrationState is { } os)
+ {
+ await this.MaybeResolveAsync(v => os.Input = v, os.Input, cancellation);
+ await this.MaybeResolveAsync(v => os.Output = v, os.Output, cancellation);
+ await this.MaybeResolveAsync(v => os.CustomStatus = v, os.CustomStatus, cancellation);
+ }
+ break;
+ }
+ }
+}
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs
similarity index 69%
rename from src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
rename to src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs
index 71e2f71a..77082c26 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs
@@ -1,128 +1,20 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
-using System.Text;
-using Grpc.Core;
using Grpc.Core.Interceptors;
-using Microsoft.DurableTask.Converters;
+
using P = Microsoft.DurableTask.Protobuf;
namespace Microsoft.DurableTask;
///
/// gRPC interceptor that externalizes large payloads to an IPayloadStore on requests
-/// and resolves known payload tokens on responses.
+/// and resolves known payload tokens on responses for SideCar.
///
-internal sealed class AzureBlobPayloadsInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options) : Interceptor
+public sealed class AzureBlobPayloadsSideCarInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
+ : BasePayloadInterceptor(payloadStore, options)
{
- readonly IPayloadStore payloadStore = payloadStore;
- readonly LargePayloadStorageOptions options = options;
-
- // Unary: externalize on request, resolve on response
-
- ///
- public override AsyncUnaryCall AsyncUnaryCall(
- TRequest request,
- ClientInterceptorContext context,
- AsyncUnaryCallContinuation continuation)
- {
- // Build the underlying call lazily after async externalization
- Task> startCallTask = Task.Run(async () =>
- {
- // Externalize first; if this fails, do not proceed to send the gRPC call
- await this.ExternalizeRequestPayloadsAsync(request, context.Options.CancellationToken);
- // Only if externalization succeeds, proceed with the continuation
- return continuation(request, context);
- });
-
- async Task ResponseAsync()
- {
- AsyncUnaryCall innerCall = await startCallTask;
- TResponse response = await innerCall.ResponseAsync;
- await this.ResolveResponsePayloadsAsync(response, context.Options.CancellationToken);
- return response;
- }
-
- async Task ResponseHeadersAsync()
- {
- AsyncUnaryCall innerCall = await startCallTask;
- return await innerCall.ResponseHeadersAsync;
- }
-
- Status GetStatus()
- {
- if (startCallTask.IsCanceled)
- {
- return new Status(StatusCode.Cancelled, "Call was cancelled.");
- }
-
- if (startCallTask.IsFaulted)
- {
- return new Status(StatusCode.Internal, startCallTask.Exception?.Message ?? "Unknown error");
- }
- if (startCallTask.Status == TaskStatus.RanToCompletion)
- {
- return startCallTask.Result.GetStatus();
- }
-
- // Not started yet; unknown
- return new Status(StatusCode.Unknown, string.Empty);
- }
-
- Metadata GetTrailers()
- {
- return startCallTask.Status == TaskStatus.RanToCompletion ? startCallTask.Result.GetTrailers() : [];
- }
-
- void Dispose()
- {
- _ = startCallTask.ContinueWith(
- t =>
- {
- if (t.Status == TaskStatus.RanToCompletion)
- {
- t.Result.Dispose();
- }
- },
- CancellationToken.None,
- TaskContinuationOptions.ExecuteSynchronously,
- TaskScheduler.Default);
- }
-
- return new AsyncUnaryCall(
- ResponseAsync(),
- ResponseHeadersAsync(),
- GetStatus,
- GetTrailers,
- Dispose);
- }
-
- // Server streaming: resolve payloads in streamed responses (e.g., GetWorkItems)
-
- ///
- public override AsyncServerStreamingCall AsyncServerStreamingCall(
- TRequest request,
- ClientInterceptorContext context,
- AsyncServerStreamingCallContinuation continuation)
- {
- // For streaming, request externalization is not needed currently
- AsyncServerStreamingCall call = continuation(request, context);
-
- IAsyncStreamReader wrapped = new TransformingStreamReader(call.ResponseStream, async (msg, ct) =>
- {
- await this.ResolveResponsePayloadsAsync(msg, ct);
- return msg;
- });
-
- return new AsyncServerStreamingCall(
- wrapped,
- call.ResponseHeadersAsync,
- call.GetStatus,
- call.GetTrailers,
- call.Dispose);
- }
-
- Task ExternalizeRequestPayloadsAsync(TRequest request, CancellationToken cancellation)
+ protected override Task ExternalizeRequestPayloadsAsync(TRequest request, CancellationToken cancellation)
{
// Client -> sidecar
switch (request)
@@ -164,28 +56,34 @@ async Task ExternalizeOrchestratorResponseAsync(P.OrchestratorResponse r, Cancel
await this.MaybeExternalizeAsync(v => complete.Result = v, complete.Result, cancellation);
await this.MaybeExternalizeAsync(v => complete.Details = v, complete.Details, cancellation);
}
+
if (a.TerminateOrchestration is { } term)
{
await this.MaybeExternalizeAsync(v => term.Reason = v, term.Reason, cancellation);
}
+
if (a.ScheduleTask is { } schedule)
{
await this.MaybeExternalizeAsync(v => schedule.Input = v, schedule.Input, cancellation);
}
+
if (a.CreateSubOrchestration is { } sub)
{
await this.MaybeExternalizeAsync(v => sub.Input = v, sub.Input, cancellation);
}
+
if (a.SendEvent is { } sendEvt)
{
await this.MaybeExternalizeAsync(v => sendEvt.Data = v, sendEvt.Data, cancellation);
}
+
if (a.SendEntityMessage is { } entityMsg)
{
if (entityMsg.EntityOperationSignaled is { } sig)
{
await this.MaybeExternalizeAsync(v => sig.Input = v, sig.Input, cancellation);
}
+
if (entityMsg.EntityOperationCalled is { } called)
{
await this.MaybeExternalizeAsync(v => called.Input = v, called.Input, cancellation);
@@ -207,6 +105,7 @@ async Task ExternalizeEntityBatchResultAsync(P.EntityBatchResult r, Cancellation
}
}
}
+
if (r.Actions != null)
{
foreach (P.OperationAction action in r.Actions)
@@ -215,6 +114,7 @@ async Task ExternalizeEntityBatchResultAsync(P.EntityBatchResult r, Cancellation
{
await this.MaybeExternalizeAsync(v => sendSig.Input = v, sendSig.Input, cancellation);
}
+
if (action.StartNewOrchestration is { } start)
{
await this.MaybeExternalizeAsync(v => start.Input = v, start.Input, cancellation);
@@ -235,7 +135,7 @@ async Task ExternalizeEntityBatchRequestAsync(P.EntityBatchRequest r, Cancellati
}
}
- async Task ResolveResponsePayloadsAsync(TResponse response, CancellationToken cancellation)
+ protected override async Task ResolveResponsePayloadsAsync(TResponse response, CancellationToken cancellation)
{
// Sidecar -> client/worker
switch (response)
@@ -250,6 +150,7 @@ async Task ResolveResponsePayloadsAsync(TResponse response, Cancellat
{
await this.ResolveEventPayloadsAsync(e, cancellation);
}
+
break;
case P.QueryInstancesResponse r:
foreach (P.OrchestrationState s in r.OrchestrationState)
@@ -268,6 +169,7 @@ async Task ResolveResponsePayloadsAsync(TResponse response, Cancellat
{
await this.MaybeResolveAsync(v => em.SerializedState = v, em.SerializedState, cancellation);
}
+
break;
case P.WorkItem wi:
// Resolve activity input
@@ -350,78 +252,91 @@ async Task ResolveEventPayloadsAsync(P.HistoryEvent e, CancellationToken cancell
{
await this.MaybeResolveAsync(v => ts.Input = v, ts.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.TaskCompleted:
if (e.TaskCompleted is { } tc)
{
await this.MaybeResolveAsync(v => tc.Result = v, tc.Result, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.SubOrchestrationInstanceCreated:
if (e.SubOrchestrationInstanceCreated is { } soc)
{
await this.MaybeResolveAsync(v => soc.Input = v, soc.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.SubOrchestrationInstanceCompleted:
if (e.SubOrchestrationInstanceCompleted is { } sox)
{
await this.MaybeResolveAsync(v => sox.Result = v, sox.Result, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.EventSent:
if (e.EventSent is { } esent)
{
await this.MaybeResolveAsync(v => esent.Input = v, esent.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.GenericEvent:
if (e.GenericEvent is { } ge)
{
await this.MaybeResolveAsync(v => ge.Data = v, ge.Data, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.ContinueAsNew:
if (e.ContinueAsNew is { } can)
{
await this.MaybeResolveAsync(v => can.Input = v, can.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionTerminated:
if (e.ExecutionTerminated is { } et)
{
await this.MaybeResolveAsync(v => et.Input = v, et.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionSuspended:
if (e.ExecutionSuspended is { } esus)
{
await this.MaybeResolveAsync(v => esus.Input = v, esus.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionResumed:
if (e.ExecutionResumed is { } eres)
{
await this.MaybeResolveAsync(v => eres.Input = v, eres.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.EntityOperationSignaled:
if (e.EntityOperationSignaled is { } eos)
{
await this.MaybeResolveAsync(v => eos.Input = v, eos.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.EntityOperationCalled:
if (e.EntityOperationCalled is { } eoc)
{
await this.MaybeResolveAsync(v => eoc.Input = v, eoc.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.EntityOperationCompleted:
if (e.EntityOperationCompleted is { } ecomp)
{
await this.MaybeResolveAsync(v => ecomp.Output = v, ecomp.Output, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.HistoryState:
if (e.HistoryState is { } hs && hs.OrchestrationState is { } os)
@@ -430,66 +345,9 @@ async Task ResolveEventPayloadsAsync(P.HistoryEvent e, CancellationToken cancell
await this.MaybeResolveAsync(v => os.Output = v, os.Output, cancellation);
await this.MaybeResolveAsync(v => os.CustomStatus = v, os.CustomStatus, cancellation);
}
- break;
- }
- }
-
- Task MaybeExternalizeAsync(Action assign, string? value, CancellationToken cancellation)
- {
- if (string.IsNullOrEmpty(value))
- {
- return Task.CompletedTask;
- }
- int size = Encoding.UTF8.GetByteCount(value);
- if (size < this.options.ExternalizeThresholdBytes)
- {
- return Task.CompletedTask;
- }
-
- return UploadAsync();
-
- async Task UploadAsync()
- {
- string token = await this.payloadStore.UploadAsync(Encoding.UTF8.GetBytes(value), cancellation);
- assign(token);
- }
- }
-
- async Task MaybeResolveAsync(Action assign, string? value, CancellationToken cancellation)
- {
- if (string.IsNullOrEmpty(value) || !this.payloadStore.IsKnownPayloadToken(value))
- {
- return;
+ break;
}
-
- string resolved = await this.payloadStore.DownloadAsync(value, cancellation);
- assign(resolved);
}
- sealed class TransformingStreamReader : IAsyncStreamReader
- {
- readonly IAsyncStreamReader inner;
- readonly Func> transform;
-
- public TransformingStreamReader(IAsyncStreamReader inner, Func> transform)
- {
- this.inner = inner;
- this.transform = transform;
- }
-
- public T Current { get; private set; } = default!;
-
- public async Task MoveNext(CancellationToken cancellationToken)
- {
- bool hasNext = await this.inner.MoveNext(cancellationToken);
- if (!hasNext)
- {
- return false;
- }
-
- this.Current = await this.transform(this.inner.Current, cancellationToken);
- return true;
- }
- }
}
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs
new file mode 100644
index 00000000..f5278bcc
--- /dev/null
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs
@@ -0,0 +1,225 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+using System.Text;
+using Grpc.Core;
+using Grpc.Core.Interceptors;
+
+namespace Microsoft.DurableTask;
+
+///
+/// Base class for gRPC interceptors that externalize large payloads to an IPayloadStore on requests
+/// and resolve known payload tokens on responses.
+///
+/// The namespace for request message types.
+/// The namespace for response message types.
+public abstract class BasePayloadInterceptor : Interceptor
+ where TRequestNamespace : class
+ where TResponseNamespace : class
+{
+ readonly IPayloadStore payloadStore;
+ readonly LargePayloadStorageOptions options;
+
+ protected BasePayloadInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
+ {
+ this.payloadStore = payloadStore;
+ this.options = options;
+ }
+
+ // Unary: externalize on request, resolve on response
+
+ ///
+ public override AsyncUnaryCall AsyncUnaryCall(
+ TRequest request,
+ ClientInterceptorContext context,
+ AsyncUnaryCallContinuation continuation)
+ {
+ // Build the underlying call lazily after async externalization
+ Task> startCallTask = Task.Run(async () =>
+ {
+ // Externalize first; if this fails, do not proceed to send the gRPC call
+ await this.ExternalizeRequestPayloadsAsync(request, context.Options.CancellationToken);
+
+ // Only if externalization succeeds, proceed with the continuation
+ return continuation(request, context);
+ });
+
+ async Task ResponseAsync()
+ {
+ AsyncUnaryCall innerCall = await startCallTask;
+ TResponse response = await innerCall.ResponseAsync;
+ await this.ResolveResponsePayloadsAsync(response, context.Options.CancellationToken);
+ return response;
+ }
+
+ async Task ResponseHeadersAsync()
+ {
+ AsyncUnaryCall innerCall = await startCallTask;
+ return await innerCall.ResponseHeadersAsync;
+ }
+
+ Status GetStatus()
+ {
+ if (startCallTask.IsCanceled)
+ {
+ return new Status(StatusCode.Cancelled, "Call was cancelled.");
+ }
+
+ if (startCallTask.IsFaulted)
+ {
+ return new Status(StatusCode.Internal, startCallTask.Exception?.Message ?? "Unknown error");
+ }
+
+ if (startCallTask.Status == TaskStatus.RanToCompletion)
+ {
+ return startCallTask.Result.GetStatus();
+ }
+
+ // Not started yet; unknown
+ return new Status(StatusCode.Unknown, string.Empty);
+ }
+
+ Metadata GetTrailers()
+ {
+ return startCallTask.Status == TaskStatus.RanToCompletion ? startCallTask.Result.GetTrailers() : [];
+ }
+
+ void Dispose()
+ {
+ _ = startCallTask.ContinueWith(
+ t =>
+ {
+ if (t.Status == TaskStatus.RanToCompletion)
+ {
+ t.Result.Dispose();
+ }
+ },
+ CancellationToken.None,
+ TaskContinuationOptions.ExecuteSynchronously,
+ TaskScheduler.Default);
+ }
+
+ return new AsyncUnaryCall(
+ ResponseAsync(),
+ ResponseHeadersAsync(),
+ GetStatus,
+ GetTrailers,
+ Dispose);
+ }
+
+ // Server streaming: resolve payloads in streamed responses (e.g., GetWorkItems)
+
+ ///
+ public override AsyncServerStreamingCall AsyncServerStreamingCall(
+ TRequest request,
+ ClientInterceptorContext context,
+ AsyncServerStreamingCallContinuation continuation)
+ {
+ // For streaming, request externalization is not needed currently
+ AsyncServerStreamingCall call = continuation(request, context);
+
+ IAsyncStreamReader wrapped = new TransformingStreamReader(call.ResponseStream, async (msg, ct) =>
+ {
+ await this.ResolveResponsePayloadsAsync(msg, ct);
+ return msg;
+ });
+
+ return new AsyncServerStreamingCall(
+ wrapped,
+ call.ResponseHeadersAsync,
+ call.GetStatus,
+ call.GetTrailers,
+ call.Dispose);
+ }
+
+ ///
+ /// Externalizes large payloads in request messages.
+ ///
+ /// The request type.
+ /// The request to process.
+ /// Cancellation token.
+ /// A task representing the async operation.
+ protected abstract Task ExternalizeRequestPayloadsAsync(TRequest request, CancellationToken cancellation);
+
+ ///
+ /// Resolves payload tokens in response messages.
+ ///
+ /// The response type.
+ /// The response to process.
+ /// Cancellation token.
+ /// A task representing the async operation.
+ protected abstract Task ResolveResponsePayloadsAsync(TResponse response, CancellationToken cancellation);
+
+ ///
+ /// Externalizes a payload if it exceeds the threshold.
+ ///
+ /// Action to assign the externalized token.
+ /// The value to potentially externalize.
+ /// Cancellation token.
+ /// A task representing the async operation.
+ protected Task MaybeExternalizeAsync(Action assign, string? value, CancellationToken cancellation)
+ {
+ if (string.IsNullOrEmpty(value))
+ {
+ return Task.CompletedTask;
+ }
+
+ int size = Encoding.UTF8.GetByteCount(value);
+ if (size < this.options.ExternalizeThresholdBytes)
+ {
+ return Task.CompletedTask;
+ }
+
+ return UploadAsync();
+
+ async Task UploadAsync()
+ {
+ string token = await this.payloadStore.UploadAsync(Encoding.UTF8.GetBytes(value), cancellation);
+ assign(token);
+ }
+ }
+
+ /// <summary>
+ /// Resolves a payload token if it's known to the store.
+ /// </summary>
+ /// <param name="assign">Action to assign the resolved value.</param>
+ /// <param name="value">The value to potentially resolve.</param>
+ /// <param name="cancellation">Cancellation token.</param>
+ /// <returns>A task representing the async operation.</returns>
+ protected async Task MaybeResolveAsync(Action<string> assign, string? value, CancellationToken cancellation)
+ {
+ if (string.IsNullOrEmpty(value) || !this.payloadStore.IsKnownPayloadToken(value))
+ {
+ return;
+ }
+
+ string resolved = await this.payloadStore.DownloadAsync(value, cancellation);
+ assign(resolved);
+ }
+
+ sealed class TransformingStreamReader<T> : IAsyncStreamReader<T>
+ {
+ readonly IAsyncStreamReader<T> inner;
+ readonly Func<T, CancellationToken, Task<T>> transform;
+
+ public TransformingStreamReader(IAsyncStreamReader<T> inner, Func<T, CancellationToken, Task<T>> transform)
+ {
+ this.inner = inner;
+ this.transform = transform;
+ }
+
+ public T Current { get; private set; } = default!;
+
+ public async Task<bool> MoveNext(CancellationToken cancellationToken)
+ {
+ bool hasNext = await this.inner.MoveNext(cancellationToken);
+ if (!hasNext)
+ {
+ return false;
+ }
+
+ this.Current = await this.transform(this.inner.Current, cancellationToken);
+ return true;
+ }
+ }
+}
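The two abstract hooks above are implemented by the concrete interceptors later in this series, which pattern-match on individual protobuf messages and delegate to MaybeExternalizeAsync/MaybeResolveAsync. A minimal sketch of such an override (the field accesses mirror the sidecar interceptor further down; the class name itself is hypothetical):

    // Sketch only; assumes: using P = Microsoft.DurableTask.Protobuf;
    sealed class ExamplePayloadInterceptor(IPayloadStore store, LargePayloadStorageOptions options)
        : BasePayloadInterceptor(store, options)
    {
        protected override Task ExternalizeRequestPayloadsAsync<TRequest>(TRequest request, CancellationToken cancellation) =>
            request is P.CreateInstanceRequest r
                ? this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation)   // large input -> token
                : Task.CompletedTask;

        protected override Task ResolveResponsePayloadsAsync<TResponse>(TResponse response, CancellationToken cancellation) =>
            response is P.GetInstanceResponse g && g.OrchestrationState is { } s
                ? this.MaybeResolveAsync(v => s.Output = v, s.Output, cancellation)     // token -> original payload
                : Task.CompletedTask;
    }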
diff --git a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
index 634870f3..16a83641 100644
--- a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
@@ -84,7 +84,7 @@ public async Task UploadAsync(ReadOnlyMemory payloadBytes, Cancell
HttpHeaders = new BlobHttpHeaders { ContentEncoding = ContentEncodingGzip },
};
using Stream blobStream = await blob.OpenWriteAsync(true, writeOptions, ct);
- using GZipStream compressedBlobStream = new(blobStream, CompressionLevel.Optimal, leaveOpen: true);
+ using GZipStream compressedBlobStream = new(blobStream, System.IO.Compression.CompressionLevel.Optimal, leaveOpen: true);
using MemoryStream payloadStream = new(payloadBuffer, writable: false);
await payloadStream.CopyToAsync(compressedBlobStream, bufferSize: DefaultCopyBufferSize, ct);
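The upload path now compresses with gzip and stamps ContentEncoding accordingly, so the download path has to decompress symmetrically. The store's DownloadAsync is not shown in this hunk; the sketch below only illustrates the assumed read side:

    // Sketch, not the actual BlobPayloadStore.DownloadAsync.
    // Assumes: using Azure.Storage.Blobs; using System.IO.Compression; using System.Text;
    static async Task<string> ReadCompressedPayloadAsync(BlobClient blob, CancellationToken ct)
    {
        using Stream blobStream = await blob.OpenReadAsync(cancellationToken: ct);
        using GZipStream gzip = new(blobStream, CompressionMode.Decompress);
        using StreamReader reader = new(gzip, Encoding.UTF8);
        return await reader.ReadToEndAsync();
    }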
diff --git a/src/Grpc/backend_service.proto b/src/Grpc/backend_service.proto
new file mode 100644
index 00000000..8ac980cf
--- /dev/null
+++ b/src/Grpc/backend_service.proto
@@ -0,0 +1,282 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+syntax = "proto3";
+
+package durabletask.protos.backend.v1;
+
+option csharp_namespace = "Microsoft.DurableTask.AzureManagedBackend.Protobuf";
+option java_package = "com.microsoft.durabletask.implementation.protobuf";
+option go_package = "github.com/microsoft/durabletask-protobuf/internal/protos";
+
+import "orchestrator_service.proto";
+
+
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+import "google/protobuf/empty.proto";
+
+// gRPC service used by Durable Task Framework (DTFx) backend implementations.
+// The RPCs in this service are used by DTFx backends to manage orchestration state.
+service BackendService {
+ // Creates a new orchestration instance.
+ rpc CreateInstance (CreateInstanceRequest) returns (CreateInstanceResponse);
+
+ // Sends an event to an orchestration instance. This RPC is used for raising external events to orchestrations
+ // and for sending orchestration lifecycle events, such as terminate, suspend, resume, etc.
+ rpc AddEvent (AddEventRequest) returns (AddEventResponse);
+
+ // Returns metadata about an orchestration instance.
+ rpc GetInstance (GetInstanceRequest) returns (GetInstanceResponse);
+
+ // Returns metadata about an entity instance.
+ rpc GetEntity (GetEntityRequest) returns (GetEntityResponse);
+
+ // Returns metadata about multiple orchestration instances using a query.
+ rpc QueryInstances (QueryInstancesRequest) returns (QueryInstancesResponse);
+
+ // Returns metadata for multiple entities using a query.
+ rpc QueryEntities(QueryEntitiesRequest) returns (QueryEntitiesResponse);
+
+ // Waits for an orchestration to reach a terminal state and then returns metadata for that orchestration.
+ rpc WaitForInstance (WaitForInstanceRequest) returns (WaitForInstanceResponse);
+
+ // Purges the state of one or more orchestration instances.
+ rpc PurgeInstances (PurgeInstancesRequest) returns (PurgeInstancesResponse);
+
+ // Restarts an orchestration instance with the option to use a new instance ID.
+ rpc RestartInstance (RestartInstanceRequest) returns (RestartInstanceResponse);
+
+ // Cleans entity storage.
+ rpc CleanEntityStorage(CleanEntityStorageRequest) returns (CleanEntityStorageResponse);
+
+ // Starts a server stream for receiving work items
+ rpc GetWorkItems (GetWorkItemsRequest) returns (stream WorkItem);
+
+ // Gets orchestration runtime state (history, etc.) for a given orchestration instance.
+ rpc GetOrchestrationRuntimeState (GetOrchestrationRuntimeStateRequest) returns (GetOrchestrationRuntimeStateResponse);
+
+ // Gets the history of an orchestration instance as a stream of events.
+ rpc StreamInstanceHistory(StreamInstanceHistoryRequest) returns (stream HistoryChunk);
+
+ // Completes an outstanding activity work item and adds a new event to the target orchestration's inbox.
+ rpc CompleteActivityWorkItem (CompleteActivityWorkItemRequest) returns (CompleteActivityWorkItemResponse);
+
+ // Abandons an outstanding activity work item. Abandoned work items will be delivered again after some delay.
+ rpc AbandonActivityWorkItem (AbandonActivityWorkItemRequest) returns (AbandonActivityWorkItemResponse);
+
+ // Completes an outstanding orchestrator work item, and adds a new event to the target orchestration's inbox.
+ rpc CompleteOrchestrationWorkItem (CompleteOrchestrationWorkItemRequest) returns (CompleteOrchestrationWorkItemResponse);
+
+ // Abandons an outstanding orchestrator work item. Abandoned work items will be delivered again after some delay.
+ rpc AbandonOrchestrationWorkItem (AbandonOrchestrationWorkItemRequest) returns (AbandonOrchestrationWorkItemResponse);
+
+ // Completes an outstanding entity work item.
+ rpc CompleteEntityWorkItem (CompleteEntityWorkItemRequest) returns (CompleteEntityWorkItemResponse);
+
+ // Abandons an outstanding entity work item. Abandoned work items will be delivered again after some delay.
+ rpc AbandonEntityWorkItem (AbandonEntityWorkItemRequest) returns (AbandonEntityWorkItemResponse);
+
+ // Sends a health check ping to the backend service.
+ rpc Ping (PingRequest) returns (PingResponse);
+
+ // Returns the current metrics for the backend service.
+ rpc GetMetrics (GetMetricsRequest) returns (GetMetricsResponse);
+
+ // "Skip" graceful termination of orchestrations by immediately changing their status in storage to "terminated".
+ // Note that a maximum of 500 orchestrations can be terminated at a time using this method.
+ rpc SkipGracefulOrchestrationTerminations(SkipGracefulOrchestrationTerminationsRequest) returns (SkipGracefulOrchestrationTerminationsResponse);
+}
+
+// Request payload for adding new orchestration events.
+message AddEventRequest {
+ // The ID of the orchestration to send an event to.
+ OrchestrationInstance instance = 1;
+ // The event to send to the orchestration.
+ HistoryEvent event = 2;
+}
+
+// Response payload for adding new orchestration events.
+message AddEventResponse {
+ // No fields
+}
+
+// Request payload for waiting for instance completion.
+message WaitForInstanceRequest {
+ string instanceId = 1;
+ bool getInputsAndOutputs = 2;
+}
+
+// Response payload for waiting for instance completion.
+message WaitForInstanceResponse {
+ bool exists = 1;
+ OrchestrationState orchestrationState = 2;
+}
+
+// Request parameters for fetching orchestration runtime state.
+message GetOrchestrationRuntimeStateRequest {
+ // The ID of the target orchestration instance.
+ OrchestrationInstance instance = 1;
+}
+
+// Response payload returned when fetching orchestration runtime state.
+message GetOrchestrationRuntimeStateResponse {
+ // The existing history events for the target orchestration instance.
+ repeated HistoryEvent history = 1;
+}
+
+// Request payload for completing an activity work item.
+message CompleteActivityWorkItemRequest {
+ // The completion token that was provided when the work item was fetched.
+ string completionToken = 1;
+
+ // The response event that will be sent to the orchestrator.
+ // This must be either a TaskCompleted event or a TaskFailed event.
+ HistoryEvent responseEvent = 2;
+}
+
+// Response payload for completing an activity work item.
+message CompleteActivityWorkItemResponse {
+ // No fields
+}
+
+// Request payload for abandoning an activity work item.
+message AbandonActivityWorkItemRequest {
+ // The completion token that was provided when the work item was fetched.
+ string completionToken = 1;
+}
+
+// Response payload for abandoning an activity work item.
+message AbandonActivityWorkItemResponse {
+ // No fields
+}
+
+// Request payload for completing an orchestration work item.
+message CompleteOrchestrationWorkItemRequest {
+ // The completion token that was provided when the work item was fetched.
+ string completionToken = 1;
+ OrchestrationInstance instance = 2;
+ OrchestrationStatus runtimeStatus = 3;
+ google.protobuf.StringValue customStatus = 4;
+ repeated HistoryEvent newHistory = 5;
+ repeated HistoryEvent newTasks = 6;
+ repeated HistoryEvent newTimers = 7;
+ repeated OrchestratorMessage newMessages = 8;
+
+ // The number of work item events that were processed by the orchestrator.
+ // This field is optional. If not set, the service should assume that the orchestrator processed all events.
+ google.protobuf.Int32Value numEventsProcessed = 9;
+
+ OrchestrationTraceContext orchestrationTraceContext = 10;
+}
+
+// Response payload for completing an orchestration work item.
+message CompleteOrchestrationWorkItemResponse {
+ // No fields
+}
+
+// A message to be delivered to an orchestration by the backend.
+message OrchestratorMessage {
+ // The ID of the orchestration instance to receive the message.
+ OrchestrationInstance instance = 1;
+ // The event payload to be received by the target orchestration.
+ HistoryEvent event = 2;
+}
+
+// Request payload for abandoning an orchestration work item.
+message AbandonOrchestrationWorkItemRequest {
+ // The completion token that was provided when the work item was fetched.
+ string completionToken = 1;
+}
+
+// Response payload for abandoning an orchestration work item.
+message AbandonOrchestrationWorkItemResponse {
+ // No fields
+}
+
+// Request payload for completing an entity work item.
+message CompleteEntityWorkItemRequest {
+ // The completion token that was provided when the work item was fetched.
+ string completionToken = 1;
+
+ // The execution id of the scheduler.
+ string executionId = 2;
+
+ // The number of requests that were executed.
+ // If this is smaller than the number of operations in the work item,
+ // any left-over operations will be sent again with the next work item.
+ int32 numberOperationsExecuted = 3;
+
+ // The state of the entity after the executed operations, or null if none
+ google.protobuf.StringValue entityState = 4;
+
+ // The messages that were sent by the executed operations. This must
+ // include any responses to the operation calls.
+ repeated OrchestratorMessage messages = 5;
+}
+
+// Response payload for completing an entity work item.
+message CompleteEntityWorkItemResponse {
+ // No fields
+}
+
+// Request payload for abandoning an entity work item.
+message AbandonEntityWorkItemRequest {
+ // The completion token that was provided when the work item was fetched.
+ string completionToken = 1;
+ string reason = 2;
+}
+
+// Response payload for abandoning an entity work item.
+message AbandonEntityWorkItemResponse {
+ // No fields
+}
+
+// Request payload for ping operations.
+message PingRequest {
+ // No fields
+}
+
+// Response payload for ping operations.
+message PingResponse {
+ // No fields
+}
+
+// Request payload for fetching backend metrics.
+message GetMetricsRequest {
+ // No fields
+}
+
+// Response payload for fetching backend metrics
+message GetMetricsResponse {
+ // The current metrics for the backend service.
+ BackendMetrics metrics = 1;
+}
+
+// Metrics for the backend service.
+message BackendMetrics {
+ // Activity work item metrics
+ WorkItemMetrics activityWorkItems = 1 [json_name="activityWorkItems"];
+ // Orchestrator work item metrics
+ WorkItemMetrics orchestratorWorkItems = 2 [json_name="orchestratorWorkItems"];
+ // Entity work item metrics
+ WorkItemMetrics entityWorkItems = 3 [json_name="entityWorkItems"];
+ // Metrics related to workers currently connected to the backend
+ ConnectedWorkerMetrics connectedWorkers = 4 [json_name="connectedWorkers"];
+}
+
+// Metrics related to work items
+message WorkItemMetrics {
+ // Number of work items that are queued and waiting to be processed
+ int32 pending = 1 [json_name="pending"];
+ // Number of work items that are currently being processed
+ int32 active = 2 [json_name="active"];
+ // Age of the oldest work item in the queue, in seconds
+ int32 oldestAgeInSeconds = 3 [json_name="oldestAgeInSeconds"];
+}
+
+// Metrics related to workers currently connected to the backend
+message ConnectedWorkerMetrics {
+ // Number of worker instances that are currently connected to the backend
+ int32 count = 1 [json_name="count"];
+}
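For orientation, a caller of this service built with the standard Grpc.Tools C# codegen (service BackendService generates a BackendService.BackendServiceClient type in the csharp_namespace declared above) might look roughly like this; the endpoint and channel setup are placeholders, not part of this change:

    // Sketch using top-level statements; assumes Grpc.Net.Client is referenced.
    using Grpc.Net.Client;
    using P = Microsoft.DurableTask.AzureManagedBackend.Protobuf; // per the csharp_namespace option above

    using GrpcChannel channel = GrpcChannel.ForAddress("http://localhost:5000");
    var backend = new P.BackendService.BackendServiceClient(channel);

    await backend.PingAsync(new P.PingRequest());

    P.GetMetricsResponse metrics = await backend.GetMetricsAsync(new P.GetMetricsRequest());
    Console.WriteLine($"Pending activity work items: {metrics.Metrics.ActivityWorkItems.Pending}");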
diff --git a/src/Grpc/orchestrator_service.proto b/src/Grpc/orchestrator_service.proto
index df5143bc..52ca10d3 100644
--- a/src/Grpc/orchestrator_service.proto
+++ b/src/Grpc/orchestrator_service.proto
@@ -469,6 +469,7 @@ message PurgeInstancesRequest {
oneof request {
string instanceId = 1;
PurgeInstanceFilter purgeInstanceFilter = 2;
+ InstanceBatch instanceBatch = 4;
}
bool recursive = 3;
}
@@ -681,8 +682,7 @@ message AbandonEntityTaskResponse {
}
message SkipGracefulOrchestrationTerminationsRequest {
- // A maximum of 500 instance IDs can be provided in this list.
- repeated string instanceIds = 1;
+ InstanceBatch instanceBatch = 1;
google.protobuf.StringValue reason = 2;
}
@@ -818,4 +818,9 @@ message StreamInstanceHistoryRequest {
message HistoryChunk {
repeated HistoryEvent events = 1;
+}
+
+message InstanceBatch {
+ // A maximum of 500 instance IDs can be provided in this list.
+ repeated string instanceIds = 1;
}
\ No newline at end of file
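The new InstanceBatch message lets both PurgeInstancesRequest and SkipGracefulOrchestrationTerminationsRequest share the same capped list of instance IDs. Assuming the usual proto3 C# codegen for these messages, constructing such a request would look roughly like:

    // Sketch: terminating a capped batch of instances via the new request shape.
    // Assumes: using P = Microsoft.DurableTask.Protobuf;
    var request = new P.SkipGracefulOrchestrationTerminationsRequest
    {
        InstanceBatch = new P.InstanceBatch { InstanceIds = { "instance-1", "instance-2" } },
        Reason = "bulk cleanup", // google.protobuf.StringValue maps to a nullable string property
    };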
diff --git a/src/Grpc/refresh-protos.ps1 b/src/Grpc/refresh-protos.ps1
index a91393a4..eb8d5b28 100644
--- a/src/Grpc/refresh-protos.ps1
+++ b/src/Grpc/refresh-protos.ps1
@@ -18,7 +18,7 @@ $commitId = $commitDetails.sha
# These are the proto files we need to download from the durabletask-protobuf repository.
$protoFileNames = @(
- "orchestrator_service.proto"
+ "orchestrator_service.proto", "backend_service.proto"
)
# Download each proto file to the local directory using the above commit ID
@@ -37,6 +37,14 @@ foreach ($protoFileName in $protoFileNames) {
Write-Output "Downloaded $url to $outputFile"
}
+# Post-process backend_service.proto to update the namespace
+$backendServiceFile = "$PSScriptRoot\backend_service.proto"
+if (Test-Path $backendServiceFile) {
+ $content = Get-Content $backendServiceFile -Raw
+ $content = $content -replace 'option csharp_namespace = "Microsoft\.DurableTask\.Protobuf";', 'option csharp_namespace = "Microsoft.DurableTask.AzureManagedBackend.Protobuf";'
+ Set-Content -Path $backendServiceFile -Value $content -NoNewline
+}
+
# Log the commit ID and the URLs of the downloaded proto files to a versions file.
# Overwrite the file if it already exists.
$versionsFile = "$PSScriptRoot\versions.txt"
diff --git a/src/Grpc/versions.txt b/src/Grpc/versions.txt
index 3e4d1b21..51497576 100644
--- a/src/Grpc/versions.txt
+++ b/src/Grpc/versions.txt
@@ -1,2 +1,3 @@
-# The following files were downloaded from branch main at 2025-09-17 01:45:58 UTC
-https://raw.githubusercontent.com/microsoft/durabletask-protobuf/f5745e0d83f608d77871c1894d9260ceaae08967/protos/orchestrator_service.proto
+# The following files were downloaded from branch main at 2025-09-29 04:31:40 UTC
+https://raw.githubusercontent.com/microsoft/durabletask-protobuf/a4e448066e3d85e676839a8bd23036a36b3c5f88/protos/orchestrator_service.proto
+https://raw.githubusercontent.com/microsoft/durabletask-protobuf/a4e448066e3d85e676839a8bd23036a36b3c5f88/protos/backend_service.proto
From 863fc2dc4138da12662adaeccae4bd4d2202390a Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Tue, 30 Sep 2025 10:14:19 -0700
Subject: [PATCH 43/53] rename managedbackendinterceptor
---
.../Factories/AzureBlobPayloadCallInvokerFactory.cs | 2 +-
...rceptor.cs => AzureBlobPayloadsManagedBackendInterceptor.cs} | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
rename src/Extensions/AzureBlobPayloads/Interceptors/{AzureBlobPayloadsAzureManagedBackendInterceptor.cs => AzureBlobPayloadsManagedBackendInterceptor.cs} (99%)
diff --git a/src/Extensions/AzureBlobPayloads/Factories/AzureBlobPayloadCallInvokerFactory.cs b/src/Extensions/AzureBlobPayloads/Factories/AzureBlobPayloadCallInvokerFactory.cs
index 46315599..9a9bbd35 100644
--- a/src/Extensions/AzureBlobPayloads/Factories/AzureBlobPayloadCallInvokerFactory.cs
+++ b/src/Extensions/AzureBlobPayloads/Factories/AzureBlobPayloadCallInvokerFactory.cs
@@ -21,6 +21,6 @@ public static class AzureBlobPayloadCallInvokerFactory
public static CallInvoker Create(GrpcChannel channel, LargePayloadStorageOptions options)
{
IPayloadStore payloadStore = new BlobPayloadStore(options);
- return channel.CreateCallInvoker().Intercept(new AzureBlobPayloadsAzureManagedBackendInterceptor(payloadStore, options));
+ return channel.CreateCallInvoker().Intercept(new AzureBlobPayloadsManagedBackendInterceptor(payloadStore, options));
}
}
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsAzureManagedBackendInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
similarity index 99%
rename from src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsAzureManagedBackendInterceptor.cs
rename to src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
index 632f702b..45f3b4d3 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsAzureManagedBackendInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
@@ -11,7 +11,7 @@ namespace Microsoft.DurableTask;
/// gRPC interceptor that externalizes large payloads to an <see cref="IPayloadStore"/> on requests
/// and resolves known payload tokens on responses for Azure Managed Backend.
///
-public sealed class AzureBlobPayloadsAzureManagedBackendInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
+public sealed class AzureBlobPayloadsManagedBackendInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
: BasePayloadInterceptor(payloadStore, options)
{
protected override Task ExternalizeRequestPayloadsAsync(TRequest request, CancellationToken cancellation)
From f8dfbdbd19e507bd1b1230afde819859a9ae8e39 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Wed, 1 Oct 2025 10:37:17 -0700
Subject: [PATCH 44/53] one option configure
---
samples/LargePayloadConsoleApp/Program.cs | 40 ++++++++--------
...ientBuilderExtensions.AzureBlobPayloads.cs | 18 +++++++
...rkerBuilderExtensions.AzureBlobPayloads.cs | 22 ++++++++-
...eCollectionExtensions.AzureBlobPayloads.cs | 45 ++++++++++++++++++
.../Examples/SharedPayloadStoreExample.cs | Bin 0 -> 3374 bytes
...reBlobPayloadsManagedBackendInterceptor.cs | 2 +-
6 files changed, 103 insertions(+), 24 deletions(-)
create mode 100644 src/Extensions/AzureBlobPayloads/DependencyInjection/ServiceCollectionExtensions.AzureBlobPayloads.cs
create mode 100644 src/Extensions/AzureBlobPayloads/Examples/SharedPayloadStoreExample.cs
diff --git a/samples/LargePayloadConsoleApp/Program.cs b/samples/LargePayloadConsoleApp/Program.cs
index 6995b50c..9e956180 100644
--- a/samples/LargePayloadConsoleApp/Program.cs
+++ b/samples/LargePayloadConsoleApp/Program.cs
@@ -21,25 +21,30 @@
string schedulerConnectionString = builder.Configuration.GetValue("DURABLE_TASK_SCHEDULER_CONNECTION_STRING")
?? throw new InvalidOperationException("Missing required configuration 'DURABLE_TASK_SCHEDULER_CONNECTION_STRING'");
-// Configure Durable Task client with Durable Task Scheduler and externalized payloads
+// 1) Register shared payload store ONCE
+builder.Services.AddExternalizedPayloadStore(opts =>
+{
+ // Keep threshold small to force externalization for demo purposes
+ opts.ExternalizeThresholdBytes = 1024; // 1KB
+ opts.ConnectionString = builder.Configuration.GetValue<string>("DURABLETASK_STORAGE") ?? "UseDevelopmentStorage=true";
+ opts.ContainerName = builder.Configuration.GetValue<string>("DURABLETASK_PAYLOAD_CONTAINER");
+});
+
+// 2) Configure Durable Task client
builder.Services.AddDurableTaskClient(b =>
{
b.UseDurableTaskScheduler(schedulerConnectionString);
- // Ensure entity APIs are enabled for the client
b.Configure(o => o.EnableEntitySupport = true);
- b.UseExternalizedPayloads(opts =>
- {
- // Keep threshold small to force externalization for demo purposes
- opts.ExternalizeThresholdBytes = 1024; // 1KB
- opts.ConnectionString = builder.Configuration.GetValue("DURABLETASK_STORAGE") ?? "UseDevelopmentStorage=true";
- opts.ContainerName = builder.Configuration.GetValue("DURABLETASK_PAYLOAD_CONTAINER");
- });
+
+ // Use shared store (no duplication of options)
+ b.UseExternalizedPayloads();
});
-// Configure Durable Task worker with tasks and externalized payloads
+// 3) Configure Durable Task worker
builder.Services.AddDurableTaskWorker(b =>
{
b.UseDurableTaskScheduler(schedulerConnectionString);
+
b.AddTasks(tasks =>
{
// Orchestrator: call activity first, return its output (should equal original input)
@@ -57,7 +62,6 @@
return string.Empty;
}
- // If we ever see a token in the activity, externalization is not being resolved correctly.
if (value.StartsWith("blob:v1:", StringComparison.Ordinal))
{
throw new InvalidOperationException("Activity received a payload token instead of raw input.");
@@ -67,7 +71,6 @@
});
// Entity samples
- // 1) Large entity operation input (worker externalizes input; entity receives resolved payload)
tasks.AddOrchestratorFunc(
"LargeEntityOperationInput",
(ctx, _) => ctx.Entities.CallEntityAsync(
@@ -76,7 +79,6 @@
input: new string('E', 700 * 1024)));
tasks.AddEntity(nameof(EchoLengthEntity));
- // 2) Large entity operation output (worker externalizes output; orchestrator reads resolved payload)
tasks.AddOrchestratorFunc(
"LargeEntityOperationOutput",
async (ctx, _) => (await ctx.Entities.CallEntityAsync(
@@ -85,7 +87,6 @@
input: 850 * 1024)).Length);
tasks.AddEntity(nameof(LargeResultEntity));
- // 3) Large entity state (worker externalizes state; client resolves on query)
tasks.AddOrchestratorFunc(
"LargeEntityState",
async (ctx, _) =>
@@ -98,13 +99,10 @@ await ctx.Entities.CallEntityAsync(
});
tasks.AddEntity(nameof(StateEntity));
});
- b.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024; // mirror client
- opts.ConnectionString = builder.Configuration.GetValue("DURABLETASK_STORAGE") ?? "UseDevelopmentStorage=true";
- opts.ContainerName = builder.Configuration.GetValue("DURABLETASK_PAYLOAD_CONTAINER");
- });
- // Ensure entity APIs are enabled for the worker
+
+ // Use shared store (no duplication of options)
+ b.UseExternalizedPayloads();
+
b.Configure(o => o.EnableEntitySupport = true);
});
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
index c571a7fc..57c0d502 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
@@ -36,6 +36,24 @@ public static IDurableTaskClientBuilder UseExternalizedPayloads(
return new BlobPayloadStore(opts);
});
+ return UseExternalizedPayloadsCore(builder);
+ }
+
+ /// <summary>
+ /// Enables externalized payload storage using a pre-configured shared payload store.
+ /// This overload helps ensure client and worker use the same configuration.
+ /// </summary>
+ /// <param name="builder">The builder to configure.</param>
+ /// <returns>The original builder, for call chaining.</returns>
+ public static IDurableTaskClientBuilder UseExternalizedPayloads(
+ this IDurableTaskClientBuilder builder)
+ {
+ Check.NotNull(builder);
+ return UseExternalizedPayloadsCore(builder);
+ }
+
+ static IDurableTaskClientBuilder UseExternalizedPayloadsCore(IDurableTaskClientBuilder builder)
+ {
// Wrap the gRPC CallInvoker with our interceptor when using the gRPC client
builder.Services
.AddOptions(builder.Name)
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
index c4d13810..03d8ae21 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
@@ -36,6 +36,24 @@ public static IDurableTaskWorkerBuilder UseExternalizedPayloads(
return new BlobPayloadStore(opts);
});
+ return UseExternalizedPayloadsCore(builder);
+ }
+
+ /// <summary>
+ /// Enables externalized payload storage using a pre-configured shared payload store.
+ /// This overload helps ensure client and worker use the same configuration.
+ /// </summary>
+ /// <param name="builder">The builder to configure.</param>
+ /// <returns>The original builder, for call chaining.</returns>
+ public static IDurableTaskWorkerBuilder UseExternalizedPayloads(
+ this IDurableTaskWorkerBuilder builder)
+ {
+ Check.NotNull(builder);
+ return UseExternalizedPayloadsCore(builder);
+ }
+
+ static IDurableTaskWorkerBuilder UseExternalizedPayloadsCore(IDurableTaskWorkerBuilder builder)
+ {
// Wrap the gRPC CallInvoker with our interceptor when using the gRPC worker
builder.Services
.AddOptions(builder.Name)
@@ -46,6 +64,7 @@ public static IDurableTaskWorkerBuilder UseExternalizedPayloads(
{
var invoker = opt.Channel.Intercept(new AzureBlobPayloadsSideCarInterceptor(store, opts));
opt.CallInvoker = invoker;
+
// Ensure worker uses the intercepted invoker path
opt.Channel = null;
}
@@ -56,8 +75,7 @@ public static IDurableTaskWorkerBuilder UseExternalizedPayloads(
else
{
throw new ArgumentException(
- "Channel or CallInvoker must be provided to use Azure Blob Payload Externalization feature"
- );
+ "Channel or CallInvoker must be provided to use Azure Blob Payload Externalization feature");
}
});
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/ServiceCollectionExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/ServiceCollectionExtensions.AzureBlobPayloads.cs
new file mode 100644
index 00000000..b3895e31
--- /dev/null
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/ServiceCollectionExtensions.AzureBlobPayloads.cs
@@ -0,0 +1,45 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Options;
+
+namespace Microsoft.DurableTask;
+
+///
+/// DI extensions for configuring a shared Azure Blob payload store used by both client and worker.
+///
+public static class ServiceCollectionExtensionsAzureBlobPayloads
+{
+ /// <summary>
+ /// Registers a shared Azure Blob-based externalized payload store and its options.
+ /// The provided options apply to all named Durable Task builders (client/worker),
+ /// so UseExternalizedPayloads() can be called without repeating configuration.
+ /// </summary>
+ /// <param name="services">The service collection.</param>
+ /// <param name="configure">The configuration callback for the payload store.</param>
+ /// <returns>The original service collection.</returns>
+ public static IServiceCollection AddExternalizedPayloadStore(
+ this IServiceCollection services,
+ Action<LargePayloadStorageOptions> configure)
+ {
+ Check.NotNull(services);
+ Check.NotNull(configure);
+
+ // Apply once to ALL names (IConfigureOptions hits every named options instance),
+ // so monitor.Get(builder.Name) in the client/worker extensions will see the same config.
+ services.Configure<LargePayloadStorageOptions>(configure);
+
+ // Provide a single shared IPayloadStore instance built from the default options.
+ services.AddSingleton<IPayloadStore>(sp =>
+ {
+ IOptionsMonitor<LargePayloadStorageOptions> monitor =
+ sp.GetRequiredService<IOptionsMonitor<LargePayloadStorageOptions>>();
+
+ LargePayloadStorageOptions opts = monitor.Get(Options.DefaultName);
+ return new BlobPayloadStore(opts);
+ });
+
+ return services;
+ }
+}
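This registration pairs with the parameterless UseExternalizedPayloads() overloads added above; the sketch below mirrors the sample app wiring (threshold, connection string, and container name are example values only):

    // Register the shared store once, then opt both builders in without repeating options.
    builder.Services.AddExternalizedPayloadStore(opts =>
    {
        opts.ExternalizeThresholdBytes = 64 * 1024;           // externalize payloads larger than 64 KB
        opts.ConnectionString = "UseDevelopmentStorage=true"; // local storage emulator
        opts.ContainerName = "durabletask-payloads";
    });

    builder.Services.AddDurableTaskClient(b => b.UseExternalizedPayloads());
    builder.Services.AddDurableTaskWorker(b => b.UseExternalizedPayloads());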
diff --git a/src/Extensions/AzureBlobPayloads/Examples/SharedPayloadStoreExample.cs b/src/Extensions/AzureBlobPayloads/Examples/SharedPayloadStoreExample.cs
new file mode 100644
index 0000000000000000000000000000000000000000..427d1e53c91fa2c870e4fd007a0b00266f77a6d6
GIT binary patch
literal 3374
(base85-encoded binary literal truncated in this excerpt)
Date: Wed, 1 Oct 2025 12:05:55 -0700
Subject: [PATCH 45/53] update test
---
...ientBuilderExtensions.AzureBlobPayloads.cs | 23 ----
...reBlobPayloadsManagedBackendInterceptor.cs | 2 +-
.../LargePayloadTests.cs | 128 +++---------------
3 files changed, 22 insertions(+), 131 deletions(-)
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
index 57c0d502..06cbdb1a 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
@@ -16,29 +16,6 @@ namespace Microsoft.DurableTask;
///
public static class DurableTaskClientBuilderExtensionsAzureBlobPayloads
{
- ///
- /// Enables externalized payload storage using Azure Blob Storage for the specified client builder.
- ///
- /// The builder to configure.
- /// The callback to configure the storage options.
- /// The original builder, for call chaining.
- public static IDurableTaskClientBuilder UseExternalizedPayloads(
- this IDurableTaskClientBuilder builder,
- Action configure)
- {
- Check.NotNull(builder);
- Check.NotNull(configure);
-
- builder.Services.Configure(builder.Name, configure);
- builder.Services.AddSingleton(sp =>
- {
- LargePayloadStorageOptions opts = sp.GetRequiredService>().Get(builder.Name);
- return new BlobPayloadStore(opts);
- });
-
- return UseExternalizedPayloadsCore(builder);
- }
-
///
/// Enables externalized payload storage using a pre-configured shared payload store.
/// This overload helps ensure client and worker use the same configuration.
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
index 8ac716a6..45f3b4d3 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
@@ -3,7 +3,7 @@
using Grpc.Core.Interceptors;
-using P = Microsoft.DurableTask.AzureManagedBackend.Protobuf;
+using P = Microsoft.DurableTask.Protobuf;
namespace Microsoft.DurableTask;
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
index ff9681f9..61c2c4d3 100644
--- a/test/Grpc.IntegrationTests/LargePayloadTests.cs
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -3,6 +3,7 @@
using System.Text.Json;
using Microsoft.DurableTask.Client;
+using Microsoft.DurableTask;
using Microsoft.DurableTask.Converters;
using Microsoft.DurableTask.Worker;
using Microsoft.Extensions.DependencyInjection;
@@ -32,26 +33,20 @@ public async Task LargeOrchestrationInputAndOutputAndCustomStatus()
return Task.FromResult(input + input);
}));
- // Enable externalization on the worker
- worker.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024; // small threshold to force externalization for test data
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ worker.UseExternalizedPayloads();
- // Override store with in-memory test double
worker.Services.AddSingleton(fakeStore);
},
client =>
{
- // Enable externalization on the client
- client.UseExternalizedPayloads(opts =>
+ // Enable externalization on the client (shared store configuration)
+ client.Services.AddExternalizedPayloadStore(opts =>
{
opts.ExternalizeThresholdBytes = 1024;
opts.ContainerName = "test";
opts.ConnectionString = "UseDevelopmentStorage=true";
});
+ client.UseExternalizedPayloads();
// Override store with in-memory test double
client.Services.AddSingleton(fakeStore);
@@ -114,23 +109,12 @@ public async Task HistoryStreaming_ResolvesPayloads()
return largeOutput;
}));
- worker.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024;
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ worker.UseExternalizedPayloads();
worker.Services.AddSingleton(store);
},
client =>
{
- // Enable client to resolve outputs on query
- client.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024;
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ client.UseExternalizedPayloads();
client.Services.AddSingleton(store);
});
@@ -171,12 +155,7 @@ public async Task SuspendAndResume_Reason_IsExternalizedByClient()
client =>
{
// Enable externalization on the client and use the in-memory store to track uploads
- client.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024; // 1KB threshold to force externalization
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ client.UseExternalizedPayloads();
client.Services.AddSingleton(clientStore);
});
@@ -253,22 +232,12 @@ public async Task LargeTerminateWithPayload()
return null;
}));
- worker.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024;
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ worker.UseExternalizedPayloads();
worker.Services.AddSingleton(store);
},
client =>
{
- client.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024;
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ client.UseExternalizedPayloads();
client.Services.AddSingleton(store);
});
@@ -318,22 +287,12 @@ public async Task LargeContinueAsNewAndCustomStatus()
}
}));
- worker.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024;
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ worker.UseExternalizedPayloads();
worker.Services.AddSingleton(workerStore);
},
client =>
{
- client.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024;
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ client.UseExternalizedPayloads();
client.Services.AddSingleton(workerStore);
});
@@ -376,22 +335,12 @@ public async Task LargeSubOrchestrationAndActivityOutput()
.AddOrchestratorFunc(child, (ctx, input) => Task.FromResult(input))
.AddActivityFunc(activity, (ctx) => Task.FromResult(largeActivityOutput)));
- worker.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024;
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ worker.UseExternalizedPayloads();
worker.Services.AddSingleton(workerStore);
},
client =>
{
- client.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024;
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ client.UseExternalizedPayloads();
client.Services.AddSingleton(workerStore);
});
@@ -425,22 +374,12 @@ public async Task LargeQueryFetchInputsAndOutputs()
orch,
(ctx, input) => Task.FromResult(largeOut)));
- worker.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024;
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ worker.UseExternalizedPayloads();
worker.Services.AddSingleton(workerStore);
},
client =>
{
- client.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024;
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ client.UseExternalizedPayloads();
client.Services.AddSingleton(workerStore);
});
@@ -484,12 +423,7 @@ public async Task LargeActivityInputAndOutput()
(ctx, _) => ctx.CallActivityAsync(activityName, largeParam))
.AddActivityFunc(activityName, (ctx, input) => input + input));
- worker.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024; // force externalization for activity input
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ worker.UseExternalizedPayloads();
worker.Services.AddSingleton(workerStore);
},
client => { /* client not needed for externalization path here */ });
@@ -529,22 +463,12 @@ public async Task NoLargePayloads()
orchestratorName,
(ctx, input) => Task.FromResult(input)));
- worker.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 2 * 1024 * 1024; // 2MB, higher than payload
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ worker.UseExternalizedPayloads();
worker.Services.AddSingleton(workerStore);
},
client =>
{
- client.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 2 * 1024 * 1024; // 2MB, higher than payload
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ client.UseExternalizedPayloads();
client.Services.AddSingleton(clientStore);
});
@@ -578,22 +502,12 @@ public async Task LargeExternalEvent()
orchestratorName,
async ctx => await ctx.WaitForExternalEvent(EventName)));
- worker.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024; // force externalization
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ worker.UseExternalizedPayloads();
worker.Services.AddSingleton(fakeStore);
},
client =>
{
- client.UseExternalizedPayloads(opts =>
- {
- opts.ExternalizeThresholdBytes = 1024; // force externalization
- opts.ContainerName = "test";
- opts.ConnectionString = "UseDevelopmentStorage=true";
- });
+ client.UseExternalizedPayloads();
client.Services.AddSingleton(fakeStore);
});
From e7f48ec15cfc28c0ff159824e7f1370a66bbde87 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Wed, 1 Oct 2025 12:41:15 -0700
Subject: [PATCH 46/53] test fix
---
.../AzureBlobPayloadsSideCarInterceptor.cs | 178 +++++++++---------
.../Interceptors/BasePayloadInterceptor.cs | 5 +
.../IntegrationTestBase.cs | 13 +-
.../LargePayloadTests.cs | 92 ++++++++-
4 files changed, 188 insertions(+), 100 deletions(-)
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs
index 77082c26..dc41d986 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs
@@ -11,9 +11,10 @@ namespace Microsoft.DurableTask;
/// gRPC interceptor that externalizes large payloads to an <see cref="IPayloadStore"/> on requests
/// and resolves known payload tokens on responses for SideCar.
///
-public sealed class AzureBlobPayloadsSideCarInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
+public sealed class AzureBlobPayloadsSideCarInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
: BasePayloadInterceptor(payloadStore, options)
{
+ /// <inheritdoc/>
protected override Task ExternalizeRequestPayloadsAsync(TRequest request, CancellationToken cancellation)
{
// Client -> sidecar
@@ -46,6 +47,94 @@ protected override Task ExternalizeRequestPayloadsAsync(TRequest reque
return Task.CompletedTask;
}
+ /// <inheritdoc/>
+ protected override async Task ResolveResponsePayloadsAsync<TResponse>(TResponse response, CancellationToken cancellation)
+ {
+ // Sidecar -> client/worker
+ switch (response)
+ {
+ case P.GetInstanceResponse r when r.OrchestrationState is { } s:
+ await this.MaybeResolveAsync(v => s.Input = v, s.Input, cancellation);
+ await this.MaybeResolveAsync(v => s.Output = v, s.Output, cancellation);
+ await this.MaybeResolveAsync(v => s.CustomStatus = v, s.CustomStatus, cancellation);
+ break;
+ case P.HistoryChunk c when c.Events != null:
+ foreach (P.HistoryEvent e in c.Events)
+ {
+ await this.ResolveEventPayloadsAsync(e, cancellation);
+ }
+
+ break;
+ case P.QueryInstancesResponse r:
+ foreach (P.OrchestrationState s in r.OrchestrationState)
+ {
+ await this.MaybeResolveAsync(v => s.Input = v, s.Input, cancellation);
+ await this.MaybeResolveAsync(v => s.Output = v, s.Output, cancellation);
+ await this.MaybeResolveAsync(v => s.CustomStatus = v, s.CustomStatus, cancellation);
+ }
+
+ break;
+ case P.GetEntityResponse r when r.Entity is { } em:
+ await this.MaybeResolveAsync(v => em.SerializedState = v, em.SerializedState, cancellation);
+ break;
+ case P.QueryEntitiesResponse r:
+ foreach (P.EntityMetadata em in r.Entities)
+ {
+ await this.MaybeResolveAsync(v => em.SerializedState = v, em.SerializedState, cancellation);
+ }
+
+ break;
+ case P.WorkItem wi:
+ // Resolve activity input
+ if (wi.ActivityRequest is { } ar)
+ {
+ await this.MaybeResolveAsync(v => ar.Input = v, ar.Input, cancellation);
+ }
+
+ // Resolve orchestration input embedded in ExecutionStarted event and external events
+ if (wi.OrchestratorRequest is { } or)
+ {
+ foreach (P.HistoryEvent? e in or.PastEvents)
+ {
+ await this.ResolveEventPayloadsAsync(e, cancellation);
+ }
+
+ foreach (P.HistoryEvent? e in or.NewEvents)
+ {
+ await this.ResolveEventPayloadsAsync(e, cancellation);
+ }
+ }
+
+ // Resolve entity V1 batch request (OperationRequest inputs and entity state)
+ if (wi.EntityRequest is { } er1)
+ {
+ await this.MaybeResolveAsync(v => er1.EntityState = v, er1.EntityState, cancellation);
+ if (er1.Operations != null)
+ {
+ foreach (P.OperationRequest op in er1.Operations)
+ {
+ await this.MaybeResolveAsync(v => op.Input = v, op.Input, cancellation);
+ }
+ }
+ }
+
+ // Resolve entity V2 request (history-based operation requests and entity state)
+ if (wi.EntityRequestV2 is { } er2)
+ {
+ await this.MaybeResolveAsync(v => er2.EntityState = v, er2.EntityState, cancellation);
+ if (er2.OperationRequests != null)
+ {
+ foreach (P.HistoryEvent opEvt in er2.OperationRequests)
+ {
+ await this.ResolveEventPayloadsAsync(opEvt, cancellation);
+ }
+ }
+ }
+
+ break;
+ }
+ }
+
async Task ExternalizeOrchestratorResponseAsync(P.OrchestratorResponse r, CancellationToken cancellation)
{
await this.MaybeExternalizeAsync(v => r.CustomStatus = v, r.CustomStatus, cancellation);
@@ -135,93 +224,6 @@ async Task ExternalizeEntityBatchRequestAsync(P.EntityBatchRequest r, Cancellati
}
}
- protected override async Task ResolveResponsePayloadsAsync(TResponse response, CancellationToken cancellation)
- {
- // Sidecar -> client/worker
- switch (response)
- {
- case P.GetInstanceResponse r when r.OrchestrationState is { } s:
- await this.MaybeResolveAsync(v => s.Input = v, s.Input, cancellation);
- await this.MaybeResolveAsync(v => s.Output = v, s.Output, cancellation);
- await this.MaybeResolveAsync(v => s.CustomStatus = v, s.CustomStatus, cancellation);
- break;
- case P.HistoryChunk c when c.Events != null:
- foreach (P.HistoryEvent e in c.Events)
- {
- await this.ResolveEventPayloadsAsync(e, cancellation);
- }
-
- break;
- case P.QueryInstancesResponse r:
- foreach (P.OrchestrationState s in r.OrchestrationState)
- {
- await this.MaybeResolveAsync(v => s.Input = v, s.Input, cancellation);
- await this.MaybeResolveAsync(v => s.Output = v, s.Output, cancellation);
- await this.MaybeResolveAsync(v => s.CustomStatus = v, s.CustomStatus, cancellation);
- }
-
- break;
- case P.GetEntityResponse r when r.Entity is { } em:
- await this.MaybeResolveAsync(v => em.SerializedState = v, em.SerializedState, cancellation);
- break;
- case P.QueryEntitiesResponse r:
- foreach (P.EntityMetadata em in r.Entities)
- {
- await this.MaybeResolveAsync(v => em.SerializedState = v, em.SerializedState, cancellation);
- }
-
- break;
- case P.WorkItem wi:
- // Resolve activity input
- if (wi.ActivityRequest is { } ar)
- {
- await this.MaybeResolveAsync(v => ar.Input = v, ar.Input, cancellation);
- }
-
- // Resolve orchestration input embedded in ExecutionStarted event and external events
- if (wi.OrchestratorRequest is { } or)
- {
- foreach (P.HistoryEvent? e in or.PastEvents)
- {
- await this.ResolveEventPayloadsAsync(e, cancellation);
- }
-
- foreach (P.HistoryEvent? e in or.NewEvents)
- {
- await this.ResolveEventPayloadsAsync(e, cancellation);
- }
- }
-
- // Resolve entity V1 batch request (OperationRequest inputs and entity state)
- if (wi.EntityRequest is { } er1)
- {
- await this.MaybeResolveAsync(v => er1.EntityState = v, er1.EntityState, cancellation);
- if (er1.Operations != null)
- {
- foreach (P.OperationRequest op in er1.Operations)
- {
- await this.MaybeResolveAsync(v => op.Input = v, op.Input, cancellation);
- }
- }
- }
-
- // Resolve entity V2 request (history-based operation requests and entity state)
- if (wi.EntityRequestV2 is { } er2)
- {
- await this.MaybeResolveAsync(v => er2.EntityState = v, er2.EntityState, cancellation);
- if (er2.OperationRequests != null)
- {
- foreach (P.HistoryEvent opEvt in er2.OperationRequests)
- {
- await this.ResolveEventPayloadsAsync(opEvt, cancellation);
- }
- }
- }
-
- break;
- }
- }
-
async Task ResolveEventPayloadsAsync(P.HistoryEvent e, CancellationToken cancellation)
{
switch (e.EventTypeCase)
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs
index f5278bcc..d064d42f 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs
@@ -20,6 +20,11 @@ public abstract class BasePayloadInterceptor
+ /// <summary>
+ /// Initializes a new instance of the <see cref="BasePayloadInterceptor"/> class.
+ /// </summary>
+ /// <param name="payloadStore">The payload store.</param>
+ /// <param name="options">The options.</param>
protected BasePayloadInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
{
this.payloadStore = payloadStore;
diff --git a/test/Grpc.IntegrationTests/IntegrationTestBase.cs b/test/Grpc.IntegrationTests/IntegrationTestBase.cs
index 642107ef..13d92a7a 100644
--- a/test/Grpc.IntegrationTests/IntegrationTestBase.cs
+++ b/test/Grpc.IntegrationTests/IntegrationTestBase.cs
@@ -45,9 +45,12 @@ void IDisposable.Dispose()
GC.SuppressFinalize(this);
}
- protected async Task StartWorkerAsync(Action workerConfigure, Action? clientConfigure = null)
+ protected async Task StartWorkerAsync(
+ Action workerConfigure,
+ Action? clientConfigure = null,
+ Action? servicesConfigure = null)
{
- IHost host = this.CreateHostBuilder(workerConfigure, clientConfigure).Build();
+ IHost host = this.CreateHostBuilder(workerConfigure, clientConfigure, servicesConfigure).Build();
await host.StartAsync(this.TimeoutToken);
return new HostTestLifetime(host, this.TimeoutToken);
}
@@ -57,7 +60,10 @@ protected async Task StartWorkerAsync(Action
/// Configures the durable task worker builder.
/// Configures the durable task client builder.
- protected IHostBuilder CreateHostBuilder(Action workerConfigure, Action? clientConfigure)
+ protected IHostBuilder CreateHostBuilder(
+ Action workerConfigure,
+ Action? clientConfigure,
+ Action? servicesConfigure)
{
var host = Host.CreateDefaultBuilder()
.ConfigureLogging(b =>
@@ -69,6 +75,7 @@ protected IHostBuilder CreateHostBuilder(Action worke
})
.ConfigureServices((context, services) =>
{
+ servicesConfigure?.Invoke(services);
services.AddDurableTaskWorker(b =>
{
b.UseGrpc(this.sidecarFixture.Channel);
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
index 61c2c4d3..21b3b55a 100644
--- a/test/Grpc.IntegrationTests/LargePayloadTests.cs
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -39,17 +39,19 @@ public async Task LargeOrchestrationInputAndOutputAndCustomStatus()
},
client =>
{
- // Enable externalization on the client (shared store configuration)
- client.Services.AddExternalizedPayloadStore(opts =>
+ client.UseExternalizedPayloads();
+
+ // Override store with in-memory test double
+ client.Services.AddSingleton(fakeStore);
+ },
+ services =>
+ {
+ services.AddExternalizedPayloadStore(opts =>
{
opts.ExternalizeThresholdBytes = 1024;
opts.ContainerName = "test";
opts.ConnectionString = "UseDevelopmentStorage=true";
});
- client.UseExternalizedPayloads();
-
- // Override store with in-memory test double
- client.Services.AddSingleton(fakeStore);
});
string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orchestratorName, input: largeInput);
@@ -116,6 +118,15 @@ public async Task HistoryStreaming_ResolvesPayloads()
{
client.UseExternalizedPayloads();
client.Services.AddSingleton(store);
+ },
+ services =>
+ {
+ services.AddExternalizedPayloadStore(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
});
// Start orchestration with large input to exercise history input resolution
@@ -157,6 +168,15 @@ public async Task SuspendAndResume_Reason_IsExternalizedByClient()
// Enable externalization on the client and use the in-memory store to track uploads
client.UseExternalizedPayloads();
client.Services.AddSingleton(clientStore);
+ },
+ services =>
+ {
+ services.AddExternalizedPayloadStore(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
});
string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orchestratorName);
@@ -239,6 +259,15 @@ public async Task LargeTerminateWithPayload()
{
client.UseExternalizedPayloads();
client.Services.AddSingleton(store);
+ },
+ services =>
+ {
+ services.AddExternalizedPayloadStore(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
});
string id = await server.Client.ScheduleNewOrchestrationInstanceAsync(orch, largeInput);
@@ -294,6 +323,15 @@ public async Task LargeContinueAsNewAndCustomStatus()
{
client.UseExternalizedPayloads();
client.Services.AddSingleton(workerStore);
+ },
+ services =>
+ {
+ services.AddExternalizedPayloadStore(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
});
string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orch);
@@ -342,6 +380,15 @@ public async Task LargeSubOrchestrationAndActivityOutput()
{
client.UseExternalizedPayloads();
client.Services.AddSingleton(workerStore);
+ },
+ services =>
+ {
+ services.AddExternalizedPayloadStore(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
});
string id = await server.Client.ScheduleNewOrchestrationInstanceAsync(parent);
@@ -381,6 +428,15 @@ public async Task LargeQueryFetchInputsAndOutputs()
{
client.UseExternalizedPayloads();
client.Services.AddSingleton(workerStore);
+ },
+ services =>
+ {
+ services.AddExternalizedPayloadStore(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
});
string id = await server.Client.ScheduleNewOrchestrationInstanceAsync(orch, largeIn);
@@ -426,7 +482,16 @@ public async Task LargeActivityInputAndOutput()
worker.UseExternalizedPayloads();
worker.Services.AddSingleton(workerStore);
},
- client => { /* client not needed for externalization path here */ });
+ client => { /* client not needed for externalization path here */ },
+ services =>
+ {
+ services.AddExternalizedPayloadStore(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
+ });
string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orchestratorName);
OrchestrationMetadata completed = await server.Client.WaitForInstanceCompletionAsync(
@@ -502,13 +567,22 @@ public async Task LargeExternalEvent()
orchestratorName,
async ctx => await ctx.WaitForExternalEvent(EventName)));
- worker.UseExternalizedPayloads();
worker.Services.AddSingleton(fakeStore);
+ worker.UseExternalizedPayloads();
},
client =>
{
- client.UseExternalizedPayloads();
client.Services.AddSingleton(fakeStore);
+ client.UseExternalizedPayloads();
+ },
+ services =>
+ {
+ services.AddExternalizedPayloadStore(opts =>
+ {
+ opts.ExternalizeThresholdBytes = 1024;
+ opts.ContainerName = "test";
+ opts.ConnectionString = "UseDevelopmentStorage=true";
+ });
});
string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orchestratorName);
From 069201f13df8e3308af55330f8cbfa67aa6eeb3f Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Wed, 1 Oct 2025 14:32:05 -0700
Subject: [PATCH 47/53] update proto
---
src/Grpc/backend_service.proto | 2 +-
src/Grpc/refresh-protos.ps1 | 10 +---------
2 files changed, 2 insertions(+), 10 deletions(-)
diff --git a/src/Grpc/backend_service.proto b/src/Grpc/backend_service.proto
index 8ac980cf..b7cb601b 100644
--- a/src/Grpc/backend_service.proto
+++ b/src/Grpc/backend_service.proto
@@ -5,7 +5,7 @@ syntax = "proto3";
package durabletask.protos.backend.v1;
-option csharp_namespace = "Microsoft.DurableTask.AzureManagedBackend.Protobuf";
+option csharp_namespace = "Microsoft.DurableTask.Protobuf";
option java_package = "com.microsoft.durabletask.implementation.protobuf";
option go_package = "github.com/microsoft/durabletask-protobuf/internal/protos";
diff --git a/src/Grpc/refresh-protos.ps1 b/src/Grpc/refresh-protos.ps1
index eb8d5b28..6a095ba7 100644
--- a/src/Grpc/refresh-protos.ps1
+++ b/src/Grpc/refresh-protos.ps1
@@ -18,7 +18,7 @@ $commitId = $commitDetails.sha
# These are the proto files we need to download from the durabletask-protobuf repository.
$protoFileNames = @(
- "orchestrator_service.proto", "backend_service.proto"
+ "orchestrator_service.proto","backend_service.proto"
)
# Download each proto file to the local directory using the above commit ID
@@ -37,14 +37,6 @@ foreach ($protoFileName in $protoFileNames) {
Write-Output "Downloaded $url to $outputFile"
}
-# Post-process backend_service.proto to update the namespace
-$backendServiceFile = "$PSScriptRoot\backend_service.proto"
-if (Test-Path $backendServiceFile) {
- $content = Get-Content $backendServiceFile -Raw
- $content = $content -replace 'option csharp_namespace = "Microsoft\.DurableTask\.Protobuf";', 'option csharp_namespace = "Microsoft.DurableTask.AzureManagedBackend.Protobuf";'
- Set-Content -Path $backendServiceFile -Value $content -NoNewline
-}
-
# Log the commit ID and the URLs of the downloaded proto files to a versions file.
# Overwrite the file if it already exists.
$versionsFile = "$PSScriptRoot\versions.txt"
From 8ce977fbf3c154173ccceb64a12beea7d7e38c92 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Wed, 1 Oct 2025 15:26:45 -0700
Subject: [PATCH 48/53] one package finalize
---
.../AzureBlobPayloads.csproj | 2 +-
...reBlobPayloadsManagedBackendInterceptor.cs | 46 +-
.../Grpc.AzureManagedBackend.csproj | 31 +
src/Grpc.AzureManagedBackend/README.md | 24 +
.../backend_service.proto | 2 +-
.../orchestrator_service.proto | 826 ++++++++++++++++++
.../refresh-protos.ps1 | 66 ++
src/Grpc.AzureManagedBackend/versions.txt | 3 +
src/Grpc/Grpc.csproj | 5 +-
src/Grpc/refresh-protos.ps1 | 2 +-
10 files changed, 999 insertions(+), 8 deletions(-)
create mode 100644 src/Grpc.AzureManagedBackend/Grpc.AzureManagedBackend.csproj
create mode 100644 src/Grpc.AzureManagedBackend/README.md
rename src/{Grpc => Grpc.AzureManagedBackend}/backend_service.proto (99%)
create mode 100644 src/Grpc.AzureManagedBackend/orchestrator_service.proto
create mode 100644 src/Grpc.AzureManagedBackend/refresh-protos.ps1
create mode 100644 src/Grpc.AzureManagedBackend/versions.txt
diff --git a/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj b/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj
index 9be33cac..8462b0f1 100644
--- a/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj
+++ b/src/Extensions/AzureBlobPayloads/AzureBlobPayloads.csproj
@@ -21,10 +21,10 @@
+
-
\ No newline at end of file
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
index 45f3b4d3..0b1df512 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
@@ -1,9 +1,7 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
-using Grpc.Core.Interceptors;
-
-using P = Microsoft.DurableTask.Protobuf;
+using P = Microsoft.DurableTask.AzureManagedBackend.Protobuf;
namespace Microsoft.DurableTask;
@@ -11,9 +9,10 @@ namespace Microsoft.DurableTask;
/// gRPC interceptor that externalizes large payloads to an IPayloadStore on requests
/// and resolves known payload tokens on responses for Azure Managed Backend.
///
-public sealed class AzureBlobPayloadsManagedBackendInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
+public sealed class AzureBlobPayloadsManagedBackendInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
: BasePayloadInterceptor(payloadStore, options)
{
+ ///
protected override Task ExternalizeRequestPayloadsAsync(TRequest request, CancellationToken cancellation)
{
// Azure Managed Backend -> Backend Service
@@ -147,96 +146,112 @@ async Task ExternalizeHistoryEventAsync(P.HistoryEvent e, CancellationToken canc
{
await this.MaybeExternalizeAsync(v => es.Input = v, es.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionCompleted:
if (e.ExecutionCompleted is { } ec)
{
await this.MaybeExternalizeAsync(v => ec.Result = v, ec.Result, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.EventRaised:
if (e.EventRaised is { } er)
{
await this.MaybeExternalizeAsync(v => er.Input = v, er.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.TaskScheduled:
if (e.TaskScheduled is { } ts)
{
await this.MaybeExternalizeAsync(v => ts.Input = v, ts.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.TaskCompleted:
if (e.TaskCompleted is { } tc)
{
await this.MaybeExternalizeAsync(v => tc.Result = v, tc.Result, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.SubOrchestrationInstanceCreated:
if (e.SubOrchestrationInstanceCreated is { } soc)
{
await this.MaybeExternalizeAsync(v => soc.Input = v, soc.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.SubOrchestrationInstanceCompleted:
if (e.SubOrchestrationInstanceCompleted is { } sox)
{
await this.MaybeExternalizeAsync(v => sox.Result = v, sox.Result, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.EventSent:
if (e.EventSent is { } esent)
{
await this.MaybeExternalizeAsync(v => esent.Input = v, esent.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.GenericEvent:
if (e.GenericEvent is { } ge)
{
await this.MaybeExternalizeAsync(v => ge.Data = v, ge.Data, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.ContinueAsNew:
if (e.ContinueAsNew is { } can)
{
await this.MaybeExternalizeAsync(v => can.Input = v, can.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionTerminated:
if (e.ExecutionTerminated is { } et)
{
await this.MaybeExternalizeAsync(v => et.Input = v, et.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionSuspended:
if (e.ExecutionSuspended is { } esus)
{
await this.MaybeExternalizeAsync(v => esus.Input = v, esus.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionResumed:
if (e.ExecutionResumed is { } eres)
{
await this.MaybeExternalizeAsync(v => eres.Input = v, eres.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.EntityOperationSignaled:
if (e.EntityOperationSignaled is { } eos)
{
await this.MaybeExternalizeAsync(v => eos.Input = v, eos.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.EntityOperationCalled:
if (e.EntityOperationCalled is { } eoc)
{
await this.MaybeExternalizeAsync(v => eoc.Input = v, eoc.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.EntityOperationCompleted:
if (e.EntityOperationCompleted is { } ecomp)
{
await this.MaybeExternalizeAsync(v => ecomp.Output = v, ecomp.Output, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.HistoryState:
if (e.HistoryState is { } hs && hs.OrchestrationState is { } os)
@@ -245,10 +260,12 @@ async Task ExternalizeHistoryEventAsync(P.HistoryEvent e, CancellationToken canc
await this.MaybeExternalizeAsync(v => os.Output = v, os.Output, cancellation);
await this.MaybeExternalizeAsync(v => os.CustomStatus = v, os.CustomStatus, cancellation);
}
+
break;
}
}
+ ///
protected override async Task ResolveResponsePayloadsAsync(TResponse response, CancellationToken cancellation)
{
// Backend Service -> Azure Managed Backend
@@ -267,6 +284,7 @@ protected override async Task ResolveResponsePayloadsAsync(TResponse
{
await this.ResolveEventPayloadsAsync(e, cancellation);
}
+
break;
case P.QueryInstancesResponse r:
foreach (P.OrchestrationState s in r.OrchestrationState)
@@ -275,6 +293,7 @@ protected override async Task ResolveResponsePayloadsAsync(TResponse
await this.MaybeResolveAsync(v => s.Output = v, s.Output, cancellation);
await this.MaybeResolveAsync(v => s.CustomStatus = v, s.CustomStatus, cancellation);
}
+
break;
case P.GetEntityResponse r when r.Entity is { } em:
await this.MaybeResolveAsync(v => em.SerializedState = v, em.SerializedState, cancellation);
@@ -284,6 +303,7 @@ protected override async Task ResolveResponsePayloadsAsync(TResponse
{
await this.MaybeResolveAsync(v => em.SerializedState = v, em.SerializedState, cancellation);
}
+
break;
case P.WorkItem wi:
// Resolve activity input
@@ -331,6 +351,7 @@ protected override async Task ResolveResponsePayloadsAsync(TResponse
}
}
}
+
break;
}
}
@@ -344,96 +365,112 @@ async Task ResolveEventPayloadsAsync(P.HistoryEvent e, CancellationToken cancell
{
await this.MaybeResolveAsync(v => es.Input = v, es.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionCompleted:
if (e.ExecutionCompleted is { } ec)
{
await this.MaybeResolveAsync(v => ec.Result = v, ec.Result, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.EventRaised:
if (e.EventRaised is { } er)
{
await this.MaybeResolveAsync(v => er.Input = v, er.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.TaskScheduled:
if (e.TaskScheduled is { } ts)
{
await this.MaybeResolveAsync(v => ts.Input = v, ts.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.TaskCompleted:
if (e.TaskCompleted is { } tc)
{
await this.MaybeResolveAsync(v => tc.Result = v, tc.Result, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.SubOrchestrationInstanceCreated:
if (e.SubOrchestrationInstanceCreated is { } soc)
{
await this.MaybeResolveAsync(v => soc.Input = v, soc.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.SubOrchestrationInstanceCompleted:
if (e.SubOrchestrationInstanceCompleted is { } sox)
{
await this.MaybeResolveAsync(v => sox.Result = v, sox.Result, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.EventSent:
if (e.EventSent is { } esent)
{
await this.MaybeResolveAsync(v => esent.Input = v, esent.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.GenericEvent:
if (e.GenericEvent is { } ge)
{
await this.MaybeResolveAsync(v => ge.Data = v, ge.Data, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.ContinueAsNew:
if (e.ContinueAsNew is { } can)
{
await this.MaybeResolveAsync(v => can.Input = v, can.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionTerminated:
if (e.ExecutionTerminated is { } et)
{
await this.MaybeResolveAsync(v => et.Input = v, et.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionSuspended:
if (e.ExecutionSuspended is { } esus)
{
await this.MaybeResolveAsync(v => esus.Input = v, esus.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionResumed:
if (e.ExecutionResumed is { } eres)
{
await this.MaybeResolveAsync(v => eres.Input = v, eres.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.EntityOperationSignaled:
if (e.EntityOperationSignaled is { } eos)
{
await this.MaybeResolveAsync(v => eos.Input = v, eos.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.EntityOperationCalled:
if (e.EntityOperationCalled is { } eoc)
{
await this.MaybeResolveAsync(v => eoc.Input = v, eoc.Input, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.EntityOperationCompleted:
if (e.EntityOperationCompleted is { } ecomp)
{
await this.MaybeResolveAsync(v => ecomp.Output = v, ecomp.Output, cancellation);
}
+
break;
case P.HistoryEvent.EventTypeOneofCase.HistoryState:
if (e.HistoryState is { } hs && hs.OrchestrationState is { } os)
@@ -442,6 +479,7 @@ async Task ResolveEventPayloadsAsync(P.HistoryEvent e, CancellationToken cancell
await this.MaybeResolveAsync(v => os.Output = v, os.Output, cancellation);
await this.MaybeResolveAsync(v => os.CustomStatus = v, os.CustomStatus, cancellation);
}
+
break;
}
}
diff --git a/src/Grpc.AzureManagedBackend/Grpc.AzureManagedBackend.csproj b/src/Grpc.AzureManagedBackend/Grpc.AzureManagedBackend.csproj
new file mode 100644
index 00000000..dcb538b5
--- /dev/null
+++ b/src/Grpc.AzureManagedBackend/Grpc.AzureManagedBackend.csproj
@@ -0,0 +1,31 @@
+
+
+
+ netstandard2.0;net6.0
+ The gRPC Protobuf .NET services for the Durable Task Framework.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/Grpc.AzureManagedBackend/README.md b/src/Grpc.AzureManagedBackend/README.md
new file mode 100644
index 00000000..a4f5e97b
--- /dev/null
+++ b/src/Grpc.AzureManagedBackend/README.md
@@ -0,0 +1,24 @@
+# Durable Task Protobuf
+
+This directory contains the protobuf definitions for the Durable Task SDK, which are used to generate the C# source code for the gRPC service contracts. The official protobuf definitions are maintained in the [Durable Task Protobuf repository](https://github.com/microsoft/durabletask-protobuf).
+
+## Updating the Protobuf Definitions
+
+To update the protobuf definitions in this directory, follow these steps:
+
+1. Make sure you have [PowerShell](https://learn.microsoft.com/powershell/scripting/install/installing-powershell) installed on your machine.
+2. Run the following command to download the latest protobuf definitions from the durabletask-protobuf repository:
+
+```powershell
+.\refresh-protos.ps1
+```
+
+This script will download the latest protobuf definitions from the `https://github.com/microsoft/durabletask-protobuf` repository and copy them to this directory.
+
+By default, the latest versions of the protobufs are downloaded from the `main` branch. To specify an alternative branch, use the `-branch` parameter:
+
+```powershell
+.\refresh-protos.ps1 -branch <branch-name>
+```
+
+The `versions.txt` file in this directory contains the list of protobuf files and their commit hashes that were last downloaded. It is updated automatically by the `refresh-protos.ps1` script.
diff --git a/src/Grpc/backend_service.proto b/src/Grpc.AzureManagedBackend/backend_service.proto
similarity index 99%
rename from src/Grpc/backend_service.proto
rename to src/Grpc.AzureManagedBackend/backend_service.proto
index b7cb601b..8ac980cf 100644
--- a/src/Grpc/backend_service.proto
+++ b/src/Grpc.AzureManagedBackend/backend_service.proto
@@ -5,7 +5,7 @@ syntax = "proto3";
package durabletask.protos.backend.v1;
-option csharp_namespace = "Microsoft.DurableTask.Protobuf";
+option csharp_namespace = "Microsoft.DurableTask.AzureManagedBackend.Protobuf";
option java_package = "com.microsoft.durabletask.implementation.protobuf";
option go_package = "github.com/microsoft/durabletask-protobuf/internal/protos";
diff --git a/src/Grpc.AzureManagedBackend/orchestrator_service.proto b/src/Grpc.AzureManagedBackend/orchestrator_service.proto
new file mode 100644
index 00000000..ce458d4e
--- /dev/null
+++ b/src/Grpc.AzureManagedBackend/orchestrator_service.proto
@@ -0,0 +1,826 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+syntax = "proto3";
+
+option csharp_namespace = "Microsoft.DurableTask.AzureManagedBackend.Protobuf";
+option java_package = "com.microsoft.durabletask.implementation.protobuf";
+option go_package = "/internal/protos";
+
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/wrappers.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/struct.proto";
+
+message OrchestrationInstance {
+ string instanceId = 1;
+ google.protobuf.StringValue executionId = 2;
+}
+
+message ActivityRequest {
+ string name = 1;
+ google.protobuf.StringValue version = 2;
+ google.protobuf.StringValue input = 3;
+ OrchestrationInstance orchestrationInstance = 4;
+ int32 taskId = 5;
+ TraceContext parentTraceContext = 6;
+}
+
+message ActivityResponse {
+ string instanceId = 1;
+ int32 taskId = 2;
+ google.protobuf.StringValue result = 3;
+ TaskFailureDetails failureDetails = 4;
+ string completionToken = 5;
+}
+
+message TaskFailureDetails {
+ string errorType = 1;
+ string errorMessage = 2;
+ google.protobuf.StringValue stackTrace = 3;
+ TaskFailureDetails innerFailure = 4;
+ bool isNonRetriable = 5;
+}
+
+enum OrchestrationStatus {
+ ORCHESTRATION_STATUS_RUNNING = 0;
+ ORCHESTRATION_STATUS_COMPLETED = 1;
+ ORCHESTRATION_STATUS_CONTINUED_AS_NEW = 2;
+ ORCHESTRATION_STATUS_FAILED = 3;
+ ORCHESTRATION_STATUS_CANCELED = 4;
+ ORCHESTRATION_STATUS_TERMINATED = 5;
+ ORCHESTRATION_STATUS_PENDING = 6;
+ ORCHESTRATION_STATUS_SUSPENDED = 7;
+}
+
+message ParentInstanceInfo {
+ int32 taskScheduledId = 1;
+ google.protobuf.StringValue name = 2;
+ google.protobuf.StringValue version = 3;
+ OrchestrationInstance orchestrationInstance = 4;
+}
+
+message TraceContext {
+ string traceParent = 1;
+ string spanID = 2 [deprecated=true];
+ google.protobuf.StringValue traceState = 3;
+}
+
+message ExecutionStartedEvent {
+ string name = 1;
+ google.protobuf.StringValue version = 2;
+ google.protobuf.StringValue input = 3;
+ OrchestrationInstance orchestrationInstance = 4;
+ ParentInstanceInfo parentInstance = 5;
+ google.protobuf.Timestamp scheduledStartTimestamp = 6;
+ TraceContext parentTraceContext = 7;
+ google.protobuf.StringValue orchestrationSpanID = 8;
+ map<string, string> tags = 9;
+}
+
+message ExecutionCompletedEvent {
+ OrchestrationStatus orchestrationStatus = 1;
+ google.protobuf.StringValue result = 2;
+ TaskFailureDetails failureDetails = 3;
+}
+
+message ExecutionTerminatedEvent {
+ google.protobuf.StringValue input = 1;
+ bool recurse = 2;
+}
+
+message TaskScheduledEvent {
+ string name = 1;
+ google.protobuf.StringValue version = 2;
+ google.protobuf.StringValue input = 3;
+ TraceContext parentTraceContext = 4;
+ map<string, string> tags = 5;
+}
+
+message TaskCompletedEvent {
+ int32 taskScheduledId = 1;
+ google.protobuf.StringValue result = 2;
+}
+
+message TaskFailedEvent {
+ int32 taskScheduledId = 1;
+ TaskFailureDetails failureDetails = 2;
+}
+
+message SubOrchestrationInstanceCreatedEvent {
+ string instanceId = 1;
+ string name = 2;
+ google.protobuf.StringValue version = 3;
+ google.protobuf.StringValue input = 4;
+ TraceContext parentTraceContext = 5;
+}
+
+message SubOrchestrationInstanceCompletedEvent {
+ int32 taskScheduledId = 1;
+ google.protobuf.StringValue result = 2;
+}
+
+message SubOrchestrationInstanceFailedEvent {
+ int32 taskScheduledId = 1;
+ TaskFailureDetails failureDetails = 2;
+}
+
+message TimerCreatedEvent {
+ google.protobuf.Timestamp fireAt = 1;
+}
+
+message TimerFiredEvent {
+ google.protobuf.Timestamp fireAt = 1;
+ int32 timerId = 2;
+}
+
+message OrchestratorStartedEvent {
+ // No payload data
+}
+
+message OrchestratorCompletedEvent {
+ // No payload data
+}
+
+message EventSentEvent {
+ string instanceId = 1;
+ string name = 2;
+ google.protobuf.StringValue input = 3;
+}
+
+message EventRaisedEvent {
+ string name = 1;
+ google.protobuf.StringValue input = 2;
+}
+
+message GenericEvent {
+ google.protobuf.StringValue data = 1;
+}
+
+message HistoryStateEvent {
+ OrchestrationState orchestrationState = 1;
+}
+
+message ContinueAsNewEvent {
+ google.protobuf.StringValue input = 1;
+}
+
+message ExecutionSuspendedEvent {
+ google.protobuf.StringValue input = 1;
+}
+
+message ExecutionResumedEvent {
+ google.protobuf.StringValue input = 1;
+}
+
+message EntityOperationSignaledEvent {
+ string requestId = 1;
+ string operation = 2;
+ google.protobuf.Timestamp scheduledTime = 3;
+ google.protobuf.StringValue input = 4;
+ google.protobuf.StringValue targetInstanceId = 5; // used only within histories, null in messages
+}
+
+message EntityOperationCalledEvent {
+ string requestId = 1;
+ string operation = 2;
+ google.protobuf.Timestamp scheduledTime = 3;
+ google.protobuf.StringValue input = 4;
+ google.protobuf.StringValue parentInstanceId = 5; // used only within messages, null in histories
+ google.protobuf.StringValue parentExecutionId = 6; // used only within messages, null in histories
+ google.protobuf.StringValue targetInstanceId = 7; // used only within histories, null in messages
+}
+
+message EntityLockRequestedEvent {
+ string criticalSectionId = 1;
+ repeated string lockSet = 2;
+ int32 position = 3;
+ google.protobuf.StringValue parentInstanceId = 4; // used only within messages, null in histories
+}
+
+message EntityOperationCompletedEvent {
+ string requestId = 1;
+ google.protobuf.StringValue output = 2;
+}
+
+message EntityOperationFailedEvent {
+ string requestId = 1;
+ TaskFailureDetails failureDetails = 2;
+}
+
+message EntityUnlockSentEvent {
+ string criticalSectionId = 1;
+ google.protobuf.StringValue parentInstanceId = 2; // used only within messages, null in histories
+ google.protobuf.StringValue targetInstanceId = 3; // used only within histories, null in messages
+}
+
+message EntityLockGrantedEvent {
+ string criticalSectionId = 1;
+}
+
+message HistoryEvent {
+ int32 eventId = 1;
+ google.protobuf.Timestamp timestamp = 2;
+ oneof eventType {
+ ExecutionStartedEvent executionStarted = 3;
+ ExecutionCompletedEvent executionCompleted = 4;
+ ExecutionTerminatedEvent executionTerminated = 5;
+ TaskScheduledEvent taskScheduled = 6;
+ TaskCompletedEvent taskCompleted = 7;
+ TaskFailedEvent taskFailed = 8;
+ SubOrchestrationInstanceCreatedEvent subOrchestrationInstanceCreated = 9;
+ SubOrchestrationInstanceCompletedEvent subOrchestrationInstanceCompleted = 10;
+ SubOrchestrationInstanceFailedEvent subOrchestrationInstanceFailed = 11;
+ TimerCreatedEvent timerCreated = 12;
+ TimerFiredEvent timerFired = 13;
+ OrchestratorStartedEvent orchestratorStarted = 14;
+ OrchestratorCompletedEvent orchestratorCompleted = 15;
+ EventSentEvent eventSent = 16;
+ EventRaisedEvent eventRaised = 17;
+ GenericEvent genericEvent = 18;
+ HistoryStateEvent historyState = 19;
+ ContinueAsNewEvent continueAsNew = 20;
+ ExecutionSuspendedEvent executionSuspended = 21;
+ ExecutionResumedEvent executionResumed = 22;
+ EntityOperationSignaledEvent entityOperationSignaled = 23;
+ EntityOperationCalledEvent entityOperationCalled = 24;
+ EntityOperationCompletedEvent entityOperationCompleted = 25;
+ EntityOperationFailedEvent entityOperationFailed = 26;
+ EntityLockRequestedEvent entityLockRequested = 27;
+ EntityLockGrantedEvent entityLockGranted = 28;
+ EntityUnlockSentEvent entityUnlockSent = 29;
+ }
+}
+
+message ScheduleTaskAction {
+ string name = 1;
+ google.protobuf.StringValue version = 2;
+ google.protobuf.StringValue input = 3;
+ map<string, string> tags = 4;
+ TraceContext parentTraceContext = 5;
+}
+
+message CreateSubOrchestrationAction {
+ string instanceId = 1;
+ string name = 2;
+ google.protobuf.StringValue version = 3;
+ google.protobuf.StringValue input = 4;
+ TraceContext parentTraceContext = 5;
+}
+
+message CreateTimerAction {
+ google.protobuf.Timestamp fireAt = 1;
+}
+
+message SendEventAction {
+ OrchestrationInstance instance = 1;
+ string name = 2;
+ google.protobuf.StringValue data = 3;
+}
+
+message CompleteOrchestrationAction {
+ OrchestrationStatus orchestrationStatus = 1;
+ google.protobuf.StringValue result = 2;
+ google.protobuf.StringValue details = 3;
+ google.protobuf.StringValue newVersion = 4;
+ repeated HistoryEvent carryoverEvents = 5;
+ TaskFailureDetails failureDetails = 6;
+}
+
+message TerminateOrchestrationAction {
+ string instanceId = 1;
+ google.protobuf.StringValue reason = 2;
+ bool recurse = 3;
+}
+
+message SendEntityMessageAction {
+ oneof EntityMessageType {
+ EntityOperationSignaledEvent entityOperationSignaled = 1;
+ EntityOperationCalledEvent entityOperationCalled = 2;
+ EntityLockRequestedEvent entityLockRequested = 3;
+ EntityUnlockSentEvent entityUnlockSent = 4;
+ }
+}
+
+message OrchestratorAction {
+ int32 id = 1;
+ oneof orchestratorActionType {
+ ScheduleTaskAction scheduleTask = 2;
+ CreateSubOrchestrationAction createSubOrchestration = 3;
+ CreateTimerAction createTimer = 4;
+ SendEventAction sendEvent = 5;
+ CompleteOrchestrationAction completeOrchestration = 6;
+ TerminateOrchestrationAction terminateOrchestration = 7;
+ SendEntityMessageAction sendEntityMessage = 8;
+ }
+}
+
+message OrchestrationTraceContext {
+ google.protobuf.StringValue spanID = 1;
+ google.protobuf.Timestamp spanStartTime = 2;
+}
+
+message OrchestratorRequest {
+ string instanceId = 1;
+ google.protobuf.StringValue executionId = 2;
+ repeated HistoryEvent pastEvents = 3;
+ repeated HistoryEvent newEvents = 4;
+ OrchestratorEntityParameters entityParameters = 5;
+ bool requiresHistoryStreaming = 6;
+ map<string, google.protobuf.Value> properties = 7;
+
+ OrchestrationTraceContext orchestrationTraceContext = 8;
+}
+
+message OrchestratorResponse {
+ string instanceId = 1;
+ repeated OrchestratorAction actions = 2;
+ google.protobuf.StringValue customStatus = 3;
+ string completionToken = 4;
+
+ // The number of work item events that were processed by the orchestrator.
+ // This field is optional. If not set, the service should assume that the orchestrator processed all events.
+ google.protobuf.Int32Value numEventsProcessed = 5;
+ OrchestrationTraceContext orchestrationTraceContext = 6;
+
+ // Whether or not a history is required to complete the original OrchestratorRequest and none was provided.
+ bool requiresHistory = 7;
+}
+
+message CreateInstanceRequest {
+ string instanceId = 1;
+ string name = 2;
+ google.protobuf.StringValue version = 3;
+ google.protobuf.StringValue input = 4;
+ google.protobuf.Timestamp scheduledStartTimestamp = 5;
+ OrchestrationIdReusePolicy orchestrationIdReusePolicy = 6;
+ google.protobuf.StringValue executionId = 7;
+ map<string, string> tags = 8;
+ TraceContext parentTraceContext = 9;
+ google.protobuf.Timestamp requestTime = 10;
+}
+
+message OrchestrationIdReusePolicy {
+ repeated OrchestrationStatus replaceableStatus = 1;
+ reserved 2;
+}
+
+message CreateInstanceResponse {
+ string instanceId = 1;
+}
+
+message GetInstanceRequest {
+ string instanceId = 1;
+ bool getInputsAndOutputs = 2;
+}
+
+message GetInstanceResponse {
+ bool exists = 1;
+ OrchestrationState orchestrationState = 2;
+}
+
+message RewindInstanceRequest {
+ string instanceId = 1;
+ google.protobuf.StringValue reason = 2;
+}
+
+message RewindInstanceResponse {
+ // Empty for now. Using an explicit type in case we want to add content later.
+}
+
+message OrchestrationState {
+ string instanceId = 1;
+ string name = 2;
+ google.protobuf.StringValue version = 3;
+ OrchestrationStatus orchestrationStatus = 4;
+ google.protobuf.Timestamp scheduledStartTimestamp = 5;
+ google.protobuf.Timestamp createdTimestamp = 6;
+ google.protobuf.Timestamp lastUpdatedTimestamp = 7;
+ google.protobuf.StringValue input = 8;
+ google.protobuf.StringValue output = 9;
+ google.protobuf.StringValue customStatus = 10;
+ TaskFailureDetails failureDetails = 11;
+ google.protobuf.StringValue executionId = 12;
+ google.protobuf.Timestamp completedTimestamp = 13;
+ google.protobuf.StringValue parentInstanceId = 14;
+ map<string, string> tags = 15;
+}
+
+message RaiseEventRequest {
+ string instanceId = 1;
+ string name = 2;
+ google.protobuf.StringValue input = 3;
+}
+
+message RaiseEventResponse {
+ // No payload
+}
+
+message TerminateRequest {
+ string instanceId = 1;
+ google.protobuf.StringValue output = 2;
+ bool recursive = 3;
+}
+
+message TerminateResponse {
+ // No payload
+}
+
+message SuspendRequest {
+ string instanceId = 1;
+ google.protobuf.StringValue reason = 2;
+}
+
+message SuspendResponse {
+ // No payload
+}
+
+message ResumeRequest {
+ string instanceId = 1;
+ google.protobuf.StringValue reason = 2;
+}
+
+message ResumeResponse {
+ // No payload
+}
+
+message QueryInstancesRequest {
+ InstanceQuery query = 1;
+}
+
+message InstanceQuery{
+ repeated OrchestrationStatus runtimeStatus = 1;
+ google.protobuf.Timestamp createdTimeFrom = 2;
+ google.protobuf.Timestamp createdTimeTo = 3;
+ repeated google.protobuf.StringValue taskHubNames = 4;
+ int32 maxInstanceCount = 5;
+ google.protobuf.StringValue continuationToken = 6;
+ google.protobuf.StringValue instanceIdPrefix = 7;
+ bool fetchInputsAndOutputs = 8;
+}
+
+message QueryInstancesResponse {
+ repeated OrchestrationState orchestrationState = 1;
+ google.protobuf.StringValue continuationToken = 2;
+}
+
+message PurgeInstancesRequest {
+ oneof request {
+ string instanceId = 1;
+ PurgeInstanceFilter purgeInstanceFilter = 2;
+ InstanceBatch instanceBatch = 4;
+ }
+ bool recursive = 3;
+}
+
+message PurgeInstanceFilter {
+ google.protobuf.Timestamp createdTimeFrom = 1;
+ google.protobuf.Timestamp createdTimeTo = 2;
+ repeated OrchestrationStatus runtimeStatus = 3;
+}
+
+message PurgeInstancesResponse {
+ int32 deletedInstanceCount = 1;
+ google.protobuf.BoolValue isComplete = 2;
+}
+
+message RestartInstanceRequest {
+ string instanceId = 1;
+ bool restartWithNewInstanceId = 2;
+}
+
+message RestartInstanceResponse {
+ string instanceId = 1;
+}
+
+message CreateTaskHubRequest {
+ bool recreateIfExists = 1;
+}
+
+message CreateTaskHubResponse {
+ // No payload
+}
+
+message DeleteTaskHubRequest {
+ // No payload
+}
+
+message DeleteTaskHubResponse {
+ // No payload
+}
+
+message SignalEntityRequest {
+ string instanceId = 1;
+ string name = 2;
+ google.protobuf.StringValue input = 3;
+ string requestId = 4;
+ google.protobuf.Timestamp scheduledTime = 5;
+ TraceContext parentTraceContext = 6;
+ google.protobuf.Timestamp requestTime = 7;
+}
+
+message SignalEntityResponse {
+ // no payload
+}
+
+message GetEntityRequest {
+ string instanceId = 1;
+ bool includeState = 2;
+}
+
+message GetEntityResponse {
+ bool exists = 1;
+ EntityMetadata entity = 2;
+}
+
+message EntityQuery {
+ google.protobuf.StringValue instanceIdStartsWith = 1;
+ google.protobuf.Timestamp lastModifiedFrom = 2;
+ google.protobuf.Timestamp lastModifiedTo = 3;
+ bool includeState = 4;
+ bool includeTransient = 5;
+ google.protobuf.Int32Value pageSize = 6;
+ google.protobuf.StringValue continuationToken = 7;
+}
+
+message QueryEntitiesRequest {
+ EntityQuery query = 1;
+}
+
+message QueryEntitiesResponse {
+ repeated EntityMetadata entities = 1;
+ google.protobuf.StringValue continuationToken = 2;
+}
+
+message EntityMetadata {
+ string instanceId = 1;
+ google.protobuf.Timestamp lastModifiedTime = 2;
+ int32 backlogQueueSize = 3;
+ google.protobuf.StringValue lockedBy = 4;
+ google.protobuf.StringValue serializedState = 5;
+}
+
+message CleanEntityStorageRequest {
+ google.protobuf.StringValue continuationToken = 1;
+ bool removeEmptyEntities = 2;
+ bool releaseOrphanedLocks = 3;
+}
+
+message CleanEntityStorageResponse {
+ google.protobuf.StringValue continuationToken = 1;
+ int32 emptyEntitiesRemoved = 2;
+ int32 orphanedLocksReleased = 3;
+}
+
+message OrchestratorEntityParameters {
+ google.protobuf.Duration entityMessageReorderWindow = 1;
+}
+
+message EntityBatchRequest {
+ string instanceId = 1;
+ google.protobuf.StringValue entityState = 2;
+ repeated OperationRequest operations = 3;
+}
+
+message EntityBatchResult {
+ repeated OperationResult results = 1;
+ repeated OperationAction actions = 2;
+ google.protobuf.StringValue entityState = 3;
+ TaskFailureDetails failureDetails = 4;
+ string completionToken = 5;
+ repeated OperationInfo operationInfos = 6; // used only with DTS
+}
+
+message EntityRequest {
+ string instanceId = 1;
+ string executionId = 2;
+ google.protobuf.StringValue entityState = 3; // null if entity does not exist
+ repeated HistoryEvent operationRequests = 4;
+}
+
+message OperationRequest {
+ string operation = 1;
+ string requestId = 2;
+ google.protobuf.StringValue input = 3;
+ TraceContext traceContext = 4;
+}
+
+message OperationResult {
+ oneof resultType {
+ OperationResultSuccess success = 1;
+ OperationResultFailure failure = 2;
+ }
+}
+
+message OperationInfo {
+ string requestId = 1;
+ OrchestrationInstance responseDestination = 2; // null for signals
+}
+
+message OperationResultSuccess {
+ google.protobuf.StringValue result = 1;
+ google.protobuf.Timestamp startTimeUtc = 2;
+ google.protobuf.Timestamp endTimeUtc = 3;
+}
+
+message OperationResultFailure {
+ TaskFailureDetails failureDetails = 1;
+ google.protobuf.Timestamp startTimeUtc = 2;
+ google.protobuf.Timestamp endTimeUtc = 3;
+}
+
+message OperationAction {
+ int32 id = 1;
+ oneof operationActionType {
+ SendSignalAction sendSignal = 2;
+ StartNewOrchestrationAction startNewOrchestration = 3;
+ }
+}
+
+message SendSignalAction {
+ string instanceId = 1;
+ string name = 2;
+ google.protobuf.StringValue input = 3;
+ google.protobuf.Timestamp scheduledTime = 4;
+ google.protobuf.Timestamp requestTime = 5;
+ TraceContext parentTraceContext = 6;
+}
+
+message StartNewOrchestrationAction {
+ string instanceId = 1;
+ string name = 2;
+ google.protobuf.StringValue version = 3;
+ google.protobuf.StringValue input = 4;
+ google.protobuf.Timestamp scheduledTime = 5;
+ google.protobuf.Timestamp requestTime = 6;
+ TraceContext parentTraceContext = 7;
+}
+
+message AbandonActivityTaskRequest {
+ string completionToken = 1;
+}
+
+message AbandonActivityTaskResponse {
+ // Empty.
+}
+
+message AbandonOrchestrationTaskRequest {
+ string completionToken = 1;
+}
+
+message AbandonOrchestrationTaskResponse {
+ // Empty.
+}
+
+message AbandonEntityTaskRequest {
+ string completionToken = 1;
+}
+
+message AbandonEntityTaskResponse {
+ // Empty.
+}
+
+message SkipGracefulOrchestrationTerminationsRequest {
+ InstanceBatch instanceBatch = 1;
+ google.protobuf.StringValue reason = 2;
+}
+
+message SkipGracefulOrchestrationTerminationsResponse {
+ // The instances that could not be terminated because they had locked entities at the time of this termination call,
+ // were already in a terminal state (completed, failed, terminated, etc.), are not orchestrations, or do not exist (i.e. have been purged).
+ repeated string unterminatedInstanceIds = 1;
+}
+
+service TaskHubSidecarService {
+ // Sends a hello request to the sidecar service.
+ rpc Hello(google.protobuf.Empty) returns (google.protobuf.Empty);
+
+ // Starts a new orchestration instance.
+ rpc StartInstance(CreateInstanceRequest) returns (CreateInstanceResponse);
+
+ // Gets the status of an existing orchestration instance.
+ rpc GetInstance(GetInstanceRequest) returns (GetInstanceResponse);
+
+ // Rewinds an orchestration instance to last known good state and replays from there.
+ rpc RewindInstance(RewindInstanceRequest) returns (RewindInstanceResponse);
+
+ // Restarts an orchestration instance.
+ rpc RestartInstance(RestartInstanceRequest) returns (RestartInstanceResponse);
+
+ // Waits for an orchestration instance to reach a running or completion state.
+ rpc WaitForInstanceStart(GetInstanceRequest) returns (GetInstanceResponse);
+
+ // Waits for an orchestration instance to reach a completion state (completed, failed, terminated, etc.).
+ rpc WaitForInstanceCompletion(GetInstanceRequest) returns (GetInstanceResponse);
+
+ // Raises an event to a running orchestration instance.
+ rpc RaiseEvent(RaiseEventRequest) returns (RaiseEventResponse);
+
+ // Terminates a running orchestration instance.
+ rpc TerminateInstance(TerminateRequest) returns (TerminateResponse);
+
+ // Suspends a running orchestration instance.
+ rpc SuspendInstance(SuspendRequest) returns (SuspendResponse);
+
+ // Resumes a suspended orchestration instance.
+ rpc ResumeInstance(ResumeRequest) returns (ResumeResponse);
+
+ // rpc DeleteInstance(DeleteInstanceRequest) returns (DeleteInstanceResponse);
+
+ rpc QueryInstances(QueryInstancesRequest) returns (QueryInstancesResponse);
+ rpc PurgeInstances(PurgeInstancesRequest) returns (PurgeInstancesResponse);
+
+ rpc GetWorkItems(GetWorkItemsRequest) returns (stream WorkItem);
+ rpc CompleteActivityTask(ActivityResponse) returns (CompleteTaskResponse);
+ rpc CompleteOrchestratorTask(OrchestratorResponse) returns (CompleteTaskResponse);
+ rpc CompleteEntityTask(EntityBatchResult) returns (CompleteTaskResponse);
+
+ // Gets the history of an orchestration instance as a stream of events.
+ rpc StreamInstanceHistory(StreamInstanceHistoryRequest) returns (stream HistoryChunk);
+
+ // Deletes and Creates the necessary resources for the orchestration service and the instance store
+ rpc CreateTaskHub(CreateTaskHubRequest) returns (CreateTaskHubResponse);
+
+ // Deletes the resources for the orchestration service and optionally the instance store
+ rpc DeleteTaskHub(DeleteTaskHubRequest) returns (DeleteTaskHubResponse);
+
+ // sends a signal to an entity
+ rpc SignalEntity(SignalEntityRequest) returns (SignalEntityResponse);
+
+ // get information about a specific entity
+ rpc GetEntity(GetEntityRequest) returns (GetEntityResponse);
+
+ // query entities
+ rpc QueryEntities(QueryEntitiesRequest) returns (QueryEntitiesResponse);
+
+ // clean entity storage
+ rpc CleanEntityStorage(CleanEntityStorageRequest) returns (CleanEntityStorageResponse);
+
+ // Abandons a single work item
+ rpc AbandonTaskActivityWorkItem(AbandonActivityTaskRequest) returns (AbandonActivityTaskResponse);
+
+ // Abandon an orchestration work item
+ rpc AbandonTaskOrchestratorWorkItem(AbandonOrchestrationTaskRequest) returns (AbandonOrchestrationTaskResponse);
+
+ // Abandon an entity work item
+ rpc AbandonTaskEntityWorkItem(AbandonEntityTaskRequest) returns (AbandonEntityTaskResponse);
+
+ // "Skip" graceful termination of orchestrations by immediately changing their status in storage to "terminated".
+ // Note that a maximum of 500 orchestrations can be terminated at a time using this method.
+ rpc SkipGracefulOrchestrationTerminations(SkipGracefulOrchestrationTerminationsRequest) returns (SkipGracefulOrchestrationTerminationsResponse);
+}
+
+message GetWorkItemsRequest {
+ int32 maxConcurrentOrchestrationWorkItems = 1;
+ int32 maxConcurrentActivityWorkItems = 2;
+ int32 maxConcurrentEntityWorkItems = 3;
+
+ repeated WorkerCapability capabilities = 10;
+}
+
+enum WorkerCapability {
+ WORKER_CAPABILITY_UNSPECIFIED = 0;
+
+ // Indicates that the worker is capable of streaming instance history as a more optimized
+ // alternative to receiving the full history embedded in the orchestrator work-item.
+ // When set, the service may return work items without any history events as an optimization.
+ // It is strongly recommended that all SDKs support this capability.
+ WORKER_CAPABILITY_HISTORY_STREAMING = 1;
+}
+
+message WorkItem {
+ oneof request {
+ OrchestratorRequest orchestratorRequest = 1;
+ ActivityRequest activityRequest = 2;
+ EntityBatchRequest entityRequest = 3; // (older) used by orchestration services implementations
+ HealthPing healthPing = 4;
+ EntityRequest entityRequestV2 = 5; // (newer) used by backend service implementations
+ }
+ string completionToken = 10;
+}
+
+message CompleteTaskResponse {
+ // No payload
+}
+
+message HealthPing {
+ // No payload
+}
+
+message StreamInstanceHistoryRequest {
+ string instanceId = 1;
+ google.protobuf.StringValue executionId = 2;
+
+ // When set to true, the service may return a more optimized response suitable for workers.
+ bool forWorkItemProcessing = 3;
+}
+
+message HistoryChunk {
+ repeated HistoryEvent events = 1;
+}
+
+message InstanceBatch {
+ // A maximum of 500 instance IDs can be provided in this list.
+ repeated string instanceIds = 1;
+}
\ No newline at end of file
diff --git a/src/Grpc.AzureManagedBackend/refresh-protos.ps1 b/src/Grpc.AzureManagedBackend/refresh-protos.ps1
new file mode 100644
index 00000000..a70e3cb6
--- /dev/null
+++ b/src/Grpc.AzureManagedBackend/refresh-protos.ps1
@@ -0,0 +1,66 @@
+#!/usr/bin/env pwsh
+param(
+ [string]$branch = "main"
+)
+
+# Fail with an error if the PowerShell version is less than 7.0
+if ($PSVersionTable.PSVersion -lt [Version]"7.0") {
+ Write-Error "This script requires PowerShell 7.0 or later."
+ exit 1
+}
+
+# Get the commit ID of the latest commit in the durabletask-protobuf repository.
+# We need this to download the proto files from the correct commit, avoiding race conditions
+# in rare cases where the proto files are updated between the time we download the commit ID
+# and the time we download the proto files.
+$commitDetails = Invoke-RestMethod -Uri "https://api.github.com/repos/microsoft/durabletask-protobuf/commits/$branch"
+$commitId = $commitDetails.sha
+
+# These are the proto files we need to download from the durabletask-protobuf repository.
+$protoFileNames = @(
+ "orchestrator_service.proto",
+ "backend_service.proto"
+)
+
+# Download each proto file to the local directory using the above commit ID
+foreach ($protoFileName in $protoFileNames) {
+ $url = "https://raw.githubusercontent.com/microsoft/durabletask-protobuf/$commitId/protos/$protoFileName"
+ $outputFile = "$PSScriptRoot\$protoFileName"
+
+ try {
+ Invoke-WebRequest -Uri $url -OutFile $outputFile
+ }
+ catch {
+ Write-Error "Failed to download $url to ${outputFile}: $_"
+ exit 1
+ }
+
+ Write-Output "Downloaded $url to $outputFile"
+}
+
+# Post-process all downloaded proto files to update the namespace
+foreach ($protoFileName in $protoFileNames) {
+ $protoFilePath = "$PSScriptRoot\$protoFileName"
+ if (Test-Path $protoFilePath) {
+ $content = Get-Content $protoFilePath -Raw
+ $content = $content -replace 'option csharp_namespace = "Microsoft\.DurableTask\.Protobuf";', 'option csharp_namespace = "Microsoft.DurableTask.AzureManagedBackend.Protobuf";'
+ Set-Content -Path $protoFilePath -Value $content -NoNewline
+ }
+}
+
+# Log the commit ID and the URLs of the downloaded proto files to a versions file.
+# Overwrite the file if it already exists.
+$versionsFile = "$PSScriptRoot\versions.txt"
+Remove-Item -Path $versionsFile -ErrorAction SilentlyContinue
+
+Add-Content `
+ -Path $versionsFile `
+ -Value "# The following files were downloaded from branch $branch at $(Get-Date -Format "yyyy-MM-dd HH:mm:ss" -AsUTC) UTC"
+
+foreach ($protoFileName in $protoFileNames) {
+ Add-Content `
+ -Path $versionsFile `
+ -Value "https://raw.githubusercontent.com/microsoft/durabletask-protobuf/$commitId/protos/$protoFileName"
+}
+
+Write-Host "Wrote commit ID $commitId to $versionsFile" -ForegroundColor Green
diff --git a/src/Grpc.AzureManagedBackend/versions.txt b/src/Grpc.AzureManagedBackend/versions.txt
new file mode 100644
index 00000000..d5d60da9
--- /dev/null
+++ b/src/Grpc.AzureManagedBackend/versions.txt
@@ -0,0 +1,3 @@
+# The following files were downloaded from branch main at 2025-10-01 21:40:16 UTC
+https://raw.githubusercontent.com/microsoft/durabletask-protobuf/a4e448066e3d85e676839a8bd23036a36b3c5f88/protos/orchestrator_service.proto
+https://raw.githubusercontent.com/microsoft/durabletask-protobuf/a4e448066e3d85e676839a8bd23036a36b3c5f88/protos/backend_service.proto
diff --git a/src/Grpc/Grpc.csproj b/src/Grpc/Grpc.csproj
index 4839af20..2c489063 100644
--- a/src/Grpc/Grpc.csproj
+++ b/src/Grpc/Grpc.csproj
@@ -18,7 +18,10 @@
-
+
diff --git a/src/Grpc/refresh-protos.ps1 b/src/Grpc/refresh-protos.ps1
index 6a095ba7..a91393a4 100644
--- a/src/Grpc/refresh-protos.ps1
+++ b/src/Grpc/refresh-protos.ps1
@@ -18,7 +18,7 @@ $commitId = $commitDetails.sha
# These are the proto files we need to download from the durabletask-protobuf repository.
$protoFileNames = @(
- "orchestrator_service.proto","backend_service.proto"
+ "orchestrator_service.proto"
)
# Download each proto file to the local directory using the above commit ID
From a46b930e41ece98d4c914e8b4c2f9f5fbefe6db5 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Wed, 1 Oct 2025 16:22:40 -0700
Subject: [PATCH 49/53] abstract payloadstore
---
.../AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs | 6 +++---
.../AzureBlobPayloads/PayloadStore/IPayloadStore.cs | 8 ++++----
test/Grpc.IntegrationTests/LargePayloadTests.cs | 6 +++---
3 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
index 16a83641..ff34dc91 100644
--- a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
@@ -62,7 +62,7 @@ public BlobPayloadStore(LargePayloadStorageOptions options)
}
///
- public async Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
+ public override async Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
{
// One blob per payload using GUID-based name for uniqueness (stable across retries)
string timestamp = DateTimeOffset.UtcNow.ToString("yyyy/MM/dd/HH/mm/ss", CultureInfo.InvariantCulture);
@@ -107,7 +107,7 @@ public async Task UploadAsync(ReadOnlyMemory payloadBytes, Cancell
}
///
- public async Task<string> DownloadAsync(string token, CancellationToken cancellationToken)
+ public override async Task<string> DownloadAsync(string token, CancellationToken cancellationToken)
{
(string container, string name) = DecodeToken(token);
if (!string.Equals(container, this.containerClient.Name, StringComparison.Ordinal))
@@ -139,7 +139,7 @@ public async Task DownloadAsync(string token, CancellationToken cancella
}
///
- public bool IsKnownPayloadToken(string value)
+ public override bool IsKnownPayloadToken(string value)
{
if (string.IsNullOrEmpty(value))
{
diff --git a/src/Extensions/AzureBlobPayloads/PayloadStore/IPayloadStore.cs b/src/Extensions/AzureBlobPayloads/PayloadStore/IPayloadStore.cs
index c226bff0..385088e5 100644
--- a/src/Extensions/AzureBlobPayloads/PayloadStore/IPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/PayloadStore/IPayloadStore.cs
@@ -6,7 +6,7 @@ namespace Microsoft.DurableTask;
///
/// Abstraction for storing and retrieving large payloads out-of-band.
///
-public interface IPayloadStore
+public abstract class IPayloadStore
{
///
/// Uploads a payload and returns an opaque reference token that can be embedded in orchestration messages.
@@ -14,7 +14,7 @@ public interface IPayloadStore
/// The payload bytes.
/// Cancellation token.
/// Opaque reference token.
- Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken);
+ public abstract Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken);
///
/// Downloads the payload referenced by the token.
@@ -22,7 +22,7 @@ public interface IPayloadStore
/// The opaque reference token.
/// Cancellation token.
/// Payload string.
- Task<string> DownloadAsync(string token, CancellationToken cancellationToken);
+ public abstract Task<string> DownloadAsync(string token, CancellationToken cancellationToken);
///
/// Returns true if the specified value appears to be a token understood by this store.
@@ -30,5 +30,5 @@ public interface IPayloadStore
///
/// The value to check.
/// true if the value is a token issued by this store; otherwise, false.
- bool IsKnownPayloadToken(string value);
+ public abstract bool IsKnownPayloadToken(string value);
}
\ No newline at end of file
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
index 21b3b55a..8ec29582 100644
--- a/test/Grpc.IntegrationTests/LargePayloadTests.cs
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -623,7 +623,7 @@ public InMemoryPayloadStore(Dictionary shared)
int downloadCount;
public int DownloadCount => this.downloadCount;
- public Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
+ public override Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
{
Interlocked.Increment(ref this.uploadCount);
string json = System.Text.Encoding.UTF8.GetString(payloadBytes.Span);
@@ -633,13 +633,13 @@ public Task UploadAsync(ReadOnlyMemory payloadBytes, CancellationT
return Task.FromResult(token);
}
- public Task<string> DownloadAsync(string token, CancellationToken cancellationToken)
+ public override Task<string> DownloadAsync(string token, CancellationToken cancellationToken)
{
Interlocked.Increment(ref this.downloadCount);
return Task.FromResult(this.tokenToPayload[token]);
}
- public bool IsKnownPayloadToken(string value)
+ public override bool IsKnownPayloadToken(string value)
{
return value.StartsWith(TokenPrefix, StringComparison.Ordinal);
}
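
With IPayloadStore converted to an abstract base class in this patch, a custom store derives from it and overrides the three members. The following is a minimal sketch under the signatures implied by the diff above (Task<string> UploadAsync(ReadOnlyMemory<byte>, CancellationToken), Task<string> DownloadAsync(string, CancellationToken), and bool IsKnownPayloadToken(string)); the class name and token prefix below are hypothetical, not part of the patch.

```csharp
// Minimal sketch of a custom payload store against the abstract IPayloadStore.
// Assumes the member signatures shown in the diff; everything else is illustrative.
using System;
using System.Collections.Concurrent;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.DurableTask;

sealed class DictionaryPayloadStore : IPayloadStore
{
    const string TokenPrefix = "dict:v1:";
    readonly ConcurrentDictionary<string, string> payloads = new();

    public override Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
    {
        // Store the UTF-8 payload under a GUID-based token and return the token.
        string token = TokenPrefix + Guid.NewGuid().ToString("N");
        this.payloads[token] = Encoding.UTF8.GetString(payloadBytes.ToArray());
        return Task.FromResult(token);
    }

    public override Task<string> DownloadAsync(string token, CancellationToken cancellationToken)
        => Task.FromResult(this.payloads[token]);

    public override bool IsKnownPayloadToken(string value)
        => value != null && value.StartsWith(TokenPrefix, StringComparison.Ordinal);
}
```

Such a store would be registered the same way the in-memory test store above is, e.g. services.AddSingleton<IPayloadStore>(new DictionaryPayloadStore()).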
From d0954438bad4cfc93d4c3c793a56ef7b23f76a91 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Wed, 1 Oct 2025 22:39:06 -0700
Subject: [PATCH 50/53] remove retry
---
.../PayloadStore/BlobPayloadStore.cs | 139 +++++-------------
1 file changed, 38 insertions(+), 101 deletions(-)
diff --git a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
index ff34dc91..c6f038db 100644
--- a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
@@ -16,7 +16,7 @@ namespace Microsoft.DurableTask;
/// Azure Blob Storage implementation of IPayloadStore.
/// Stores payloads as blobs and returns opaque tokens in the form "blob:v1:<container>:<blobName>".
///
-internal sealed class BlobPayloadStore : IPayloadStore
+sealed class BlobPayloadStore : IPayloadStore
{
const string TokenPrefix = "blob:v1:";
const string ContentEncodingGzip = "gzip";
@@ -24,12 +24,7 @@ internal sealed class BlobPayloadStore : IPayloadStore
const int MaxRetryAttempts = 8;
const int BaseDelayMs = 250;
const int MaxDelayMs = 10_000;
- const int MaxJitterMs = 100;
const int NetworkTimeoutMinutes = 2;
-
- // Jitter RNG for retry backoff
- static readonly object RandomLock = new object();
- static readonly Random SharedRandom = new Random();
readonly BlobContainerClient containerClient;
readonly LargePayloadStorageOptions options;
@@ -65,45 +60,37 @@ public BlobPayloadStore(LargePayloadStorageOptions options)
public override async Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
{
// One blob per payload using GUID-based name for uniqueness (stable across retries)
- string timestamp = DateTimeOffset.UtcNow.ToString("yyyy/MM/dd/HH/mm/ss", CultureInfo.InvariantCulture);
- string blobName = $"{timestamp}/{Guid.NewGuid():N}";
+ string blobName = $"{Guid.NewGuid():N}";
BlobClient blob = this.containerClient.GetBlobClient(blobName);
byte[] payloadBuffer = payloadBytes.ToArray();
- string token = await WithTransientRetryAsync(
- async ct =>
- {
- // Ensure container exists (idempotent)
- await this.containerClient.CreateIfNotExistsAsync(PublicAccessType.None, default, default, ct);
+ // Ensure container exists (idempotent)
+ await this.containerClient.CreateIfNotExistsAsync(PublicAccessType.None, default, default, cancellationToken);
- if (this.options.CompressPayloads)
- {
- BlobOpenWriteOptions writeOptions = new()
- {
- HttpHeaders = new BlobHttpHeaders { ContentEncoding = ContentEncodingGzip },
- };
- using Stream blobStream = await blob.OpenWriteAsync(true, writeOptions, ct);
- using GZipStream compressedBlobStream = new(blobStream, System.IO.Compression.CompressionLevel.Optimal, leaveOpen: true);
- using MemoryStream payloadStream = new(payloadBuffer, writable: false);
-
- await payloadStream.CopyToAsync(compressedBlobStream, bufferSize: DefaultCopyBufferSize, ct);
- await compressedBlobStream.FlushAsync(ct);
- await blobStream.FlushAsync(ct);
- }
- else
+ if (this.options.CompressPayloads)
+ {
+ BlobOpenWriteOptions writeOptions = new()
{
- using Stream blobStream = await blob.OpenWriteAsync(true, default, ct);
- using MemoryStream payloadStream = new(payloadBuffer, writable: false);
- await payloadStream.CopyToAsync(blobStream, bufferSize: DefaultCopyBufferSize, ct);
- await blobStream.FlushAsync(ct);
- }
-
- return EncodeToken(this.containerClient.Name, blobName);
- },
- cancellationToken);
+ HttpHeaders = new BlobHttpHeaders { ContentEncoding = ContentEncodingGzip },
+ };
+ using Stream blobStream = await blob.OpenWriteAsync(true, writeOptions, cancellationToken);
+ using GZipStream compressedBlobStream = new(blobStream, System.IO.Compression.CompressionLevel.Optimal, leaveOpen: true);
+ using MemoryStream payloadStream = new(payloadBuffer, writable: false);
+
+ await payloadStream.CopyToAsync(compressedBlobStream, bufferSize: DefaultCopyBufferSize, cancellationToken);
+ await compressedBlobStream.FlushAsync(cancellationToken);
+ await blobStream.FlushAsync(cancellationToken);
+ }
+ else
+ {
+ using Stream blobStream = await blob.OpenWriteAsync(true, default, cancellationToken);
+ using MemoryStream payloadStream = new(payloadBuffer, writable: false);
+ await payloadStream.CopyToAsync(blobStream, bufferSize: DefaultCopyBufferSize, cancellationToken);
+ await blobStream.FlushAsync(cancellationToken);
+ }
- return token;
+ return EncodeToken(this.containerClient.Name, blobName);
}
///
@@ -117,25 +104,20 @@ public override async Task DownloadAsync(string token, CancellationToken
BlobClient blob = this.containerClient.GetBlobClient(name);
- return await WithTransientRetryAsync(
- async ct =>
+ using BlobDownloadStreamingResult result = await blob.DownloadStreamingAsync(cancellationToken: cancellationToken);
+ Stream contentStream = result.Content;
+ bool isGzip = string.Equals(
+ result.Details.ContentEncoding, ContentEncodingGzip, StringComparison.OrdinalIgnoreCase);
+
+ if (isGzip)
{
- using BlobDownloadStreamingResult result = await blob.DownloadStreamingAsync(cancellationToken: ct);
- Stream contentStream = result.Content;
- bool isGzip = string.Equals(
- result.Details.ContentEncoding, ContentEncodingGzip, StringComparison.OrdinalIgnoreCase);
+ using GZipStream decompressed = new(contentStream, CompressionMode.Decompress);
+ using StreamReader reader = new(decompressed, Encoding.UTF8);
+ return await reader.ReadToEndAsync();
+ }
- if (isGzip)
- {
- using GZipStream decompressed = new(contentStream, CompressionMode.Decompress);
- using StreamReader reader = new(decompressed, Encoding.UTF8);
- return await reader.ReadToEndAsync();
- }
-
- using StreamReader uncompressedReader = new(contentStream, Encoding.UTF8);
- return await uncompressedReader.ReadToEndAsync();
- },
- cancellationToken);
+ using StreamReader uncompressedReader = new(contentStream, Encoding.UTF8);
+ return await uncompressedReader.ReadToEndAsync();
}
///
@@ -167,49 +149,4 @@ public override bool IsKnownPayloadToken(string value)
return (rest.Substring(0, sep), rest.Substring(sep + 1));
}
-
- static async Task WithTransientRetryAsync(Func> operation, CancellationToken cancellationToken)
- {
- const int maxAttempts = MaxRetryAttempts;
- TimeSpan baseDelay = TimeSpan.FromMilliseconds(BaseDelayMs);
- int attempt = 0;
-
- while (true)
- {
- cancellationToken.ThrowIfCancellationRequested();
- try
- {
- return await operation(cancellationToken);
- }
- catch (RequestFailedException ex) when (IsTransient(ex) && attempt < maxAttempts - 1)
- {
- attempt++;
- TimeSpan delay = ComputeBackoff(baseDelay, attempt);
- await Task.Delay(delay, cancellationToken);
- }
- catch (IOException) when (attempt < maxAttempts - 1)
- {
- attempt++;
- TimeSpan delay = ComputeBackoff(baseDelay, attempt);
- await Task.Delay(delay, cancellationToken);
- }
- }
- }
-
- static bool IsTransient(RequestFailedException ex)
- {
- return ex.Status == 503 || ex.Status == 502 || ex.Status == 500 || ex.Status == 429;
- }
-
- static TimeSpan ComputeBackoff(TimeSpan baseDelay, int attempt)
- {
- double factor = Math.Pow(2, Math.Min(attempt, 6));
- int jitterMs;
- lock (RandomLock)
- {
- jitterMs = SharedRandom.Next(0, MaxJitterMs);
- }
-
- return TimeSpan.FromMilliseconds(Math.Min((baseDelay.TotalMilliseconds * factor) + jitterMs, MaxDelayMs));
- }
-}
\ No newline at end of file
+}
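
This patch removes the hand-rolled retry/backoff from BlobPayloadStore while keeping the MaxRetryAttempts, BaseDelayMs, MaxDelayMs, and NetworkTimeoutMinutes constants. One plausible way to retain equivalent behavior is to rely on the Azure SDK's built-in retry pipeline when constructing the BlobServiceClient; the sketch below is an assumption about intent, not code from the patch.

```csharp
// Hedged sketch: wiring the retained constants into the Azure SDK retry pipeline.
// BlobClientFactory and its literal values are illustrative; only the Azure SDK
// types and properties used here (BlobClientOptions.Retry, RetryMode) are real.
using System;
using Azure.Core;
using Azure.Storage.Blobs;

static class BlobClientFactory
{
    public static BlobServiceClient Create(string connectionString)
    {
        BlobClientOptions clientOptions = new();
        clientOptions.Retry.Mode = RetryMode.Exponential;                  // exponential backoff with jitter
        clientOptions.Retry.MaxRetries = 8;                                // MaxRetryAttempts
        clientOptions.Retry.Delay = TimeSpan.FromMilliseconds(250);        // BaseDelayMs
        clientOptions.Retry.MaxDelay = TimeSpan.FromMilliseconds(10_000);  // MaxDelayMs
        clientOptions.Retry.NetworkTimeout = TimeSpan.FromMinutes(2);      // NetworkTimeoutMinutes
        return new BlobServiceClient(connectionString, clientOptions);
    }
}
```

BlobPayloadStore could then pass these options where it currently calls new BlobServiceClient(options.ConnectionString).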
From eaff7532d5d6117db9d962c70b720966d3b8df5e Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Wed, 1 Oct 2025 22:56:44 -0700
Subject: [PATCH 51/53] refactor
---
...reBlobPayloadsManagedBackendInterceptor.cs | 105 ++++++++++--------
.../AzureBlobPayloadsSideCarInterceptor.cs | 68 +++++++-----
.../Interceptors/BasePayloadInterceptor.cs | 17 +--
3 files changed, 100 insertions(+), 90 deletions(-)
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
index 0b1df512..699461b2 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
@@ -13,7 +13,7 @@ public sealed class AzureBlobPayloadsManagedBackendInterceptor(IPayloadStore pay
: BasePayloadInterceptor(payloadStore, options)
{
///
- protected override Task ExternalizeRequestPayloadsAsync(TRequest request, CancellationToken cancellation)
+ protected override async Task ExternalizeRequestPayloadsAsync(TRequest request, CancellationToken cancellation)
{
// Azure Managed Backend -> Backend Service
// Note: This interceptor is designed for backend_service.proto types, but since those types
@@ -22,73 +22,82 @@ protected override Task ExternalizeRequestPayloadsAsync(TRequest reque
switch (request)
{
case P.CreateInstanceRequest r:
- return this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);
+ r.Input = await this.MaybeExternalizeAsync(r.Input, cancellation);
+ break;
case P.RaiseEventRequest r:
- return this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);
+ r.Input = await this.MaybeExternalizeAsync(r.Input, cancellation);
+ break;
case P.TerminateRequest r:
- return this.MaybeExternalizeAsync(v => r.Output = v, r.Output, cancellation);
+ r.Output = await this.MaybeExternalizeAsync(r.Output, cancellation);
+ break;
case P.SuspendRequest r:
- return this.MaybeExternalizeAsync(v => r.Reason = v, r.Reason, cancellation);
+ r.Reason = await this.MaybeExternalizeAsync(r.Reason, cancellation);
+ break;
case P.ResumeRequest r:
- return this.MaybeExternalizeAsync(v => r.Reason = v, r.Reason, cancellation);
+ r.Reason = await this.MaybeExternalizeAsync(r.Reason, cancellation);
+ break;
case P.SignalEntityRequest r:
- return this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);
+ r.Input = await this.MaybeExternalizeAsync(r.Input, cancellation);
+ break;
case P.ActivityResponse r:
- return this.MaybeExternalizeAsync(v => r.Result = v, r.Result, cancellation);
+ r.Result = await this.MaybeExternalizeAsync(r.Result, cancellation);
+ break;
case P.OrchestratorResponse r:
- return this.ExternalizeOrchestratorResponseAsync(r, cancellation);
+ await this.ExternalizeOrchestratorResponseAsync(r, cancellation);
+ break;
case P.EntityBatchResult r:
- return this.ExternalizeEntityBatchResultAsync(r, cancellation);
+ await this.ExternalizeEntityBatchResultAsync(r, cancellation);
+ break;
case P.EntityBatchRequest r:
- return this.ExternalizeEntityBatchRequestAsync(r, cancellation);
+ await this.ExternalizeEntityBatchRequestAsync(r, cancellation);
+ break;
case P.EntityRequest r:
- return this.MaybeExternalizeAsync(v => r.EntityState = v, r.EntityState, cancellation);
+ r.EntityState = await this.MaybeExternalizeAsync(r.EntityState, cancellation);
+ break;
}
-
- return Task.CompletedTask;
}
async Task ExternalizeOrchestratorResponseAsync(P.OrchestratorResponse r, CancellationToken cancellation)
{
- await this.MaybeExternalizeAsync(v => r.CustomStatus = v, r.CustomStatus, cancellation);
+ r.CustomStatus = await this.MaybeExternalizeAsync(r.CustomStatus, cancellation);
foreach (P.OrchestratorAction a in r.Actions)
{
if (a.CompleteOrchestration is { } complete)
{
- await this.MaybeExternalizeAsync(v => complete.Result = v, complete.Result, cancellation);
- await this.MaybeExternalizeAsync(v => complete.Details = v, complete.Details, cancellation);
+ complete.Result = await this.MaybeExternalizeAsync(complete.Result, cancellation);
+ complete.Details = await this.MaybeExternalizeAsync(complete.Details, cancellation);
}
if (a.TerminateOrchestration is { } term)
{
- await this.MaybeExternalizeAsync(v => term.Reason = v, term.Reason, cancellation);
+ term.Reason = await this.MaybeExternalizeAsync(term.Reason, cancellation);
}
if (a.ScheduleTask is { } schedule)
{
- await this.MaybeExternalizeAsync(v => schedule.Input = v, schedule.Input, cancellation);
+ schedule.Input = await this.MaybeExternalizeAsync(schedule.Input, cancellation);
}
if (a.CreateSubOrchestration is { } sub)
{
- await this.MaybeExternalizeAsync(v => sub.Input = v, sub.Input, cancellation);
+ sub.Input = await this.MaybeExternalizeAsync(sub.Input, cancellation);
}
if (a.SendEvent is { } sendEvt)
{
- await this.MaybeExternalizeAsync(v => sendEvt.Data = v, sendEvt.Data, cancellation);
+ sendEvt.Data = await this.MaybeExternalizeAsync(sendEvt.Data, cancellation);
}
if (a.SendEntityMessage is { } entityMsg)
{
if (entityMsg.EntityOperationSignaled is { } sig)
{
- await this.MaybeExternalizeAsync(v => sig.Input = v, sig.Input, cancellation);
+ sig.Input = await this.MaybeExternalizeAsync(sig.Input, cancellation);
}
if (entityMsg.EntityOperationCalled is { } called)
{
- await this.MaybeExternalizeAsync(v => called.Input = v, called.Input, cancellation);
+ called.Input = await this.MaybeExternalizeAsync(called.Input, cancellation);
}
}
}
@@ -96,14 +105,14 @@ async Task ExternalizeOrchestratorResponseAsync(P.OrchestratorResponse r, Cancel
async Task ExternalizeEntityBatchResultAsync(P.EntityBatchResult r, CancellationToken cancellation)
{
- await this.MaybeExternalizeAsync(v => r.EntityState = v, r.EntityState, cancellation);
+ r.EntityState = await this.MaybeExternalizeAsync(r.EntityState, cancellation);
if (r.Results != null)
{
foreach (P.OperationResult result in r.Results)
{
if (result.Success is { } success)
{
- await this.MaybeExternalizeAsync(v => success.Result = v, success.Result, cancellation);
+ success.Result = await this.MaybeExternalizeAsync(success.Result, cancellation);
}
}
}
@@ -114,12 +123,12 @@ async Task ExternalizeEntityBatchResultAsync(P.EntityBatchResult r, Cancellation
{
if (action.SendSignal is { } sendSig)
{
- await this.MaybeExternalizeAsync(v => sendSig.Input = v, sendSig.Input, cancellation);
+ sendSig.Input = await this.MaybeExternalizeAsync(sendSig.Input, cancellation);
}
if (action.StartNewOrchestration is { } start)
{
- await this.MaybeExternalizeAsync(v => start.Input = v, start.Input, cancellation);
+ start.Input = await this.MaybeExternalizeAsync(start.Input, cancellation);
}
}
}
@@ -127,12 +136,12 @@ async Task ExternalizeEntityBatchResultAsync(P.EntityBatchResult r, Cancellation
async Task ExternalizeEntityBatchRequestAsync(P.EntityBatchRequest r, CancellationToken cancellation)
{
- await this.MaybeExternalizeAsync(v => r.EntityState = v, r.EntityState, cancellation);
+ r.EntityState = await this.MaybeExternalizeAsync(r.EntityState, cancellation);
if (r.Operations != null)
{
foreach (P.OperationRequest op in r.Operations)
{
- await this.MaybeExternalizeAsync(v => op.Input = v, op.Input, cancellation);
+ op.Input = await this.MaybeExternalizeAsync(op.Input, cancellation);
}
}
}
@@ -144,121 +153,121 @@ async Task ExternalizeHistoryEventAsync(P.HistoryEvent e, CancellationToken canc
case P.HistoryEvent.EventTypeOneofCase.ExecutionStarted:
if (e.ExecutionStarted is { } es)
{
- await this.MaybeExternalizeAsync(v => es.Input = v, es.Input, cancellation);
+ es.Input = await this.MaybeExternalizeAsync(es.Input, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionCompleted:
if (e.ExecutionCompleted is { } ec)
{
- await this.MaybeExternalizeAsync(v => ec.Result = v, ec.Result, cancellation);
+ ec.Result = await this.MaybeExternalizeAsync(ec.Result, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.EventRaised:
if (e.EventRaised is { } er)
{
- await this.MaybeExternalizeAsync(v => er.Input = v, er.Input, cancellation);
+ er.Input = await this.MaybeExternalizeAsync(er.Input, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.TaskScheduled:
if (e.TaskScheduled is { } ts)
{
- await this.MaybeExternalizeAsync(v => ts.Input = v, ts.Input, cancellation);
+ ts.Input = await this.MaybeExternalizeAsync(ts.Input, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.TaskCompleted:
if (e.TaskCompleted is { } tc)
{
- await this.MaybeExternalizeAsync(v => tc.Result = v, tc.Result, cancellation);
+ tc.Result = await this.MaybeExternalizeAsync(tc.Result, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.SubOrchestrationInstanceCreated:
if (e.SubOrchestrationInstanceCreated is { } soc)
{
- await this.MaybeExternalizeAsync(v => soc.Input = v, soc.Input, cancellation);
+ soc.Input = await this.MaybeExternalizeAsync(soc.Input, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.SubOrchestrationInstanceCompleted:
if (e.SubOrchestrationInstanceCompleted is { } sox)
{
- await this.MaybeExternalizeAsync(v => sox.Result = v, sox.Result, cancellation);
+ sox.Result = await this.MaybeExternalizeAsync(sox.Result, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.EventSent:
if (e.EventSent is { } esent)
{
- await this.MaybeExternalizeAsync(v => esent.Input = v, esent.Input, cancellation);
+ esent.Input = await this.MaybeExternalizeAsync(esent.Input, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.GenericEvent:
if (e.GenericEvent is { } ge)
{
- await this.MaybeExternalizeAsync(v => ge.Data = v, ge.Data, cancellation);
+ ge.Data = await this.MaybeExternalizeAsync(ge.Data, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.ContinueAsNew:
if (e.ContinueAsNew is { } can)
{
- await this.MaybeExternalizeAsync(v => can.Input = v, can.Input, cancellation);
+ can.Input = await this.MaybeExternalizeAsync(can.Input, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionTerminated:
if (e.ExecutionTerminated is { } et)
{
- await this.MaybeExternalizeAsync(v => et.Input = v, et.Input, cancellation);
+ et.Input = await this.MaybeExternalizeAsync(et.Input, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionSuspended:
if (e.ExecutionSuspended is { } esus)
{
- await this.MaybeExternalizeAsync(v => esus.Input = v, esus.Input, cancellation);
+ esus.Input = await this.MaybeExternalizeAsync(esus.Input, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.ExecutionResumed:
if (e.ExecutionResumed is { } eres)
{
- await this.MaybeExternalizeAsync(v => eres.Input = v, eres.Input, cancellation);
+ eres.Input = await this.MaybeExternalizeAsync(eres.Input, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.EntityOperationSignaled:
if (e.EntityOperationSignaled is { } eos)
{
- await this.MaybeExternalizeAsync(v => eos.Input = v, eos.Input, cancellation);
+ eos.Input = await this.MaybeExternalizeAsync(eos.Input, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.EntityOperationCalled:
if (e.EntityOperationCalled is { } eoc)
{
- await this.MaybeExternalizeAsync(v => eoc.Input = v, eoc.Input, cancellation);
+ eoc.Input = await this.MaybeExternalizeAsync(eoc.Input, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.EntityOperationCompleted:
if (e.EntityOperationCompleted is { } ecomp)
{
- await this.MaybeExternalizeAsync(v => ecomp.Output = v, ecomp.Output, cancellation);
+ ecomp.Output = await this.MaybeExternalizeAsync(ecomp.Output, cancellation);
}
break;
case P.HistoryEvent.EventTypeOneofCase.HistoryState:
if (e.HistoryState is { } hs && hs.OrchestrationState is { } os)
{
- await this.MaybeExternalizeAsync(v => os.Input = v, os.Input, cancellation);
- await this.MaybeExternalizeAsync(v => os.Output = v, os.Output, cancellation);
- await this.MaybeExternalizeAsync(v => os.CustomStatus = v, os.CustomStatus, cancellation);
+ os.Input = await this.MaybeExternalizeAsync(os.Input, cancellation);
+ os.Output = await this.MaybeExternalizeAsync(os.Output, cancellation);
+ os.CustomStatus = await this.MaybeExternalizeAsync(os.CustomStatus, cancellation);
}
break;
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs
index dc41d986..99fa8903 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs
@@ -15,36 +15,45 @@ public sealed class AzureBlobPayloadsSideCarInterceptor(IPayloadStore payloadSto
: BasePayloadInterceptor(payloadStore, options)
{
///
- protected override Task ExternalizeRequestPayloadsAsync(TRequest request, CancellationToken cancellation)
+ protected override async Task ExternalizeRequestPayloadsAsync(TRequest request, CancellationToken cancellation)
{
// Client -> sidecar
switch (request)
{
case P.CreateInstanceRequest r:
- return this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);
+ r.Input = await this.MaybeExternalizeAsync(r.Input, cancellation);
+ break;
case P.RaiseEventRequest r:
- return this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);
+ r.Input = await this.MaybeExternalizeAsync(r.Input, cancellation);
+ break;
case P.TerminateRequest r:
- return this.MaybeExternalizeAsync(v => r.Output = v, r.Output, cancellation);
+ r.Output = await this.MaybeExternalizeAsync(r.Output, cancellation);
+ break;
case P.SuspendRequest r:
- return this.MaybeExternalizeAsync(v => r.Reason = v, r.Reason, cancellation);
+ r.Reason = await this.MaybeExternalizeAsync(r.Reason, cancellation);
+ break;
case P.ResumeRequest r:
- return this.MaybeExternalizeAsync(v => r.Reason = v, r.Reason, cancellation);
+ r.Reason = await this.MaybeExternalizeAsync(r.Reason, cancellation);
+ break;
case P.SignalEntityRequest r:
- return this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);
+ r.Input = await this.MaybeExternalizeAsync(r.Input, cancellation);
+ break;
case P.ActivityResponse r:
- return this.MaybeExternalizeAsync(v => r.Result = v, r.Result, cancellation);
+ r.Result = await this.MaybeExternalizeAsync(r.Result, cancellation);
+ break;
case P.OrchestratorResponse r:
- return this.ExternalizeOrchestratorResponseAsync(r, cancellation);
+ await this.ExternalizeOrchestratorResponseAsync(r, cancellation);
+ break;
case P.EntityBatchResult r:
- return this.ExternalizeEntityBatchResultAsync(r, cancellation);
+ await this.ExternalizeEntityBatchResultAsync(r, cancellation);
+ break;
case P.EntityBatchRequest r:
- return this.ExternalizeEntityBatchRequestAsync(r, cancellation);
+ await this.ExternalizeEntityBatchRequestAsync(r, cancellation);
+ break;
case P.EntityRequest r:
- return this.MaybeExternalizeAsync(v => r.EntityState = v, r.EntityState, cancellation);
+ r.EntityState = await this.MaybeExternalizeAsync(r.EntityState, cancellation);
+ break;
}
-
- return Task.CompletedTask;
}
///
@@ -137,45 +146,45 @@ protected override async Task ResolveResponsePayloadsAsync(TResponse
async Task ExternalizeOrchestratorResponseAsync(P.OrchestratorResponse r, CancellationToken cancellation)
{
- await this.MaybeExternalizeAsync(v => r.CustomStatus = v, r.CustomStatus, cancellation);
+ r.CustomStatus = await this.MaybeExternalizeAsync(r.CustomStatus, cancellation);
foreach (P.OrchestratorAction a in r.Actions)
{
if (a.CompleteOrchestration is { } complete)
{
- await this.MaybeExternalizeAsync(v => complete.Result = v, complete.Result, cancellation);
- await this.MaybeExternalizeAsync(v => complete.Details = v, complete.Details, cancellation);
+ complete.Result = await this.MaybeExternalizeAsync(complete.Result, cancellation);
+ complete.Details = await this.MaybeExternalizeAsync(complete.Details, cancellation);
}
if (a.TerminateOrchestration is { } term)
{
- await this.MaybeExternalizeAsync(v => term.Reason = v, term.Reason, cancellation);
+ term.Reason = await this.MaybeExternalizeAsync(term.Reason, cancellation);
}
if (a.ScheduleTask is { } schedule)
{
- await this.MaybeExternalizeAsync(v => schedule.Input = v, schedule.Input, cancellation);
+ schedule.Input = await this.MaybeExternalizeAsync(schedule.Input, cancellation);
}
if (a.CreateSubOrchestration is { } sub)
{
- await this.MaybeExternalizeAsync(v => sub.Input = v, sub.Input, cancellation);
+ sub.Input = await this.MaybeExternalizeAsync(sub.Input, cancellation);
}
if (a.SendEvent is { } sendEvt)
{
- await this.MaybeExternalizeAsync(v => sendEvt.Data = v, sendEvt.Data, cancellation);
+ sendEvt.Data = await this.MaybeExternalizeAsync(sendEvt.Data, cancellation);
}
if (a.SendEntityMessage is { } entityMsg)
{
if (entityMsg.EntityOperationSignaled is { } sig)
{
- await this.MaybeExternalizeAsync(v => sig.Input = v, sig.Input, cancellation);
+ sig.Input = await this.MaybeExternalizeAsync(sig.Input, cancellation);
}
if (entityMsg.EntityOperationCalled is { } called)
{
- await this.MaybeExternalizeAsync(v => called.Input = v, called.Input, cancellation);
+ called.Input = await this.MaybeExternalizeAsync(called.Input, cancellation);
}
}
}
@@ -183,14 +192,14 @@ async Task ExternalizeOrchestratorResponseAsync(P.OrchestratorResponse r, Cancel
async Task ExternalizeEntityBatchResultAsync(P.EntityBatchResult r, CancellationToken cancellation)
{
- await this.MaybeExternalizeAsync(v => r.EntityState = v, r.EntityState, cancellation);
+ r.EntityState = await this.MaybeExternalizeAsync(r.EntityState, cancellation);
if (r.Results != null)
{
foreach (P.OperationResult result in r.Results)
{
if (result.Success is { } success)
{
- await this.MaybeExternalizeAsync(v => success.Result = v, success.Result, cancellation);
+ success.Result = await this.MaybeExternalizeAsync(success.Result, cancellation);
}
}
}
@@ -201,12 +210,12 @@ async Task ExternalizeEntityBatchResultAsync(P.EntityBatchResult r, Cancellation
{
if (action.SendSignal is { } sendSig)
{
- await this.MaybeExternalizeAsync(v => sendSig.Input = v, sendSig.Input, cancellation);
+ sendSig.Input = await this.MaybeExternalizeAsync(sendSig.Input, cancellation);
}
if (action.StartNewOrchestration is { } start)
{
- await this.MaybeExternalizeAsync(v => start.Input = v, start.Input, cancellation);
+ start.Input = await this.MaybeExternalizeAsync(start.Input, cancellation);
}
}
}
@@ -214,12 +223,12 @@ async Task ExternalizeEntityBatchResultAsync(P.EntityBatchResult r, Cancellation
async Task ExternalizeEntityBatchRequestAsync(P.EntityBatchRequest r, CancellationToken cancellation)
{
- await this.MaybeExternalizeAsync(v => r.EntityState = v, r.EntityState, cancellation);
+ r.EntityState = await this.MaybeExternalizeAsync(r.EntityState, cancellation);
if (r.Operations != null)
{
foreach (P.OperationRequest op in r.Operations)
{
- await this.MaybeExternalizeAsync(v => op.Input = v, op.Input, cancellation);
+ op.Input = await this.MaybeExternalizeAsync(op.Input, cancellation);
}
}
}
@@ -351,5 +360,4 @@ async Task ResolveEventPayloadsAsync(P.HistoryEvent e, CancellationToken cancell
break;
}
}
-
}
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs
index d064d42f..d0e414bb 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs
@@ -158,30 +158,23 @@ public override AsyncServerStreamingCall AsyncServerStreamingCall
/// Externalizes a payload if it exceeds the threshold.
///
- /// Action to assign the externalized token.
/// The value to potentially externalize.
/// Cancellation token.
- /// A task representing the async operation.
- protected Task MaybeExternalizeAsync(Action<string> assign, string? value, CancellationToken cancellation)
+ /// A task that returns the externalized token or the original value.
+ protected async Task<string?> MaybeExternalizeAsync(string? value, CancellationToken cancellation)
{
if (string.IsNullOrEmpty(value))
{
- return Task.CompletedTask;
+ return value;
}
int size = Encoding.UTF8.GetByteCount(value);
if (size < this.options.ExternalizeThresholdBytes)
{
- return Task.CompletedTask;
+ return value;
}
- return UploadAsync();
-
- async Task UploadAsync()
- {
- string token = await this.payloadStore.UploadAsync(Encoding.UTF8.GetBytes(value), cancellation);
- assign(token);
- }
+ return await this.payloadStore.UploadAsync(Encoding.UTF8.GetBytes(value), cancellation);
}
///
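The call-site shape this refactor settles on, as a minimal sketch (the request field is illustrative, taken from the cases above): MaybeExternalizeAsync no longer receives an assignment delegate; callers assign its return value, which is either the original string or the uploaded payload token.

// Before this refactor: hand the helper a setter so it can assign the token itself.
// await this.MaybeExternalizeAsync(v => r.Input = v, r.Input, cancellation);

// After: the helper returns the value to keep, so call sites assign it directly.
r.Input = await this.MaybeExternalizeAsync(r.Input, cancellation);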
From 8da17bd87cb575fb727a5b15975f250d9a56559a Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Wed, 1 Oct 2025 23:17:41 -0700
Subject: [PATCH 52/53] msi support
---
.../Options/LargePayloadStorageOptions.cs | 45 ++++++++++++++++++-
.../PayloadStore/BlobPayloadStore.cs | 21 +++++++--
2 files changed, 61 insertions(+), 5 deletions(-)
diff --git a/src/Extensions/AzureBlobPayloads/Options/LargePayloadStorageOptions.cs b/src/Extensions/AzureBlobPayloads/Options/LargePayloadStorageOptions.cs
index 64875b84..115a9b08 100644
--- a/src/Extensions/AzureBlobPayloads/Options/LargePayloadStorageOptions.cs
+++ b/src/Extensions/AzureBlobPayloads/Options/LargePayloadStorageOptions.cs
@@ -1,11 +1,28 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
+using Azure.Core;
+
// Intentionally no DataAnnotations to avoid extra package requirements in minimal hosts.
namespace Microsoft.DurableTask;
///
/// Options for externalized payload storage, used by SDKs to store large payloads out-of-band.
+/// Supports both connection string and identity-based authentication.
+///
+///
+/// Connection string authentication:
+///
+/// var options = new LargePayloadStorageOptions("DefaultEndpointsProtocol=https;AccountName=mystorageaccount;AccountKey=...");
+///
+///
+/// Identity-based authentication:
+///
+/// var options = new LargePayloadStorageOptions(
+/// new Uri("https://mystorageaccount.blob.core.windows.net"),
+/// new DefaultAzureCredential());
+///
+///
///
public sealed class LargePayloadStorageOptions
{
@@ -27,16 +44,42 @@ public LargePayloadStorageOptions(string connectionString)
this.ConnectionString = connectionString;
}
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ /// The Azure Storage account URI.
+ /// The credential to use for authentication.
+ public LargePayloadStorageOptions(Uri accountUri, TokenCredential credential)
+ {
+ Check.NotNull(accountUri, nameof(accountUri));
+ Check.NotNull(credential, nameof(credential));
+ this.AccountUri = accountUri;
+ this.Credential = credential;
+ }
+
///
/// Gets or sets the threshold in bytes at which payloads are externalized. Default is 900_000 bytes.
///
public int ExternalizeThresholdBytes { get; set; } = 900_000; // leave headroom below 1MB
///
- /// Gets or sets the Azure Storage connection string to the customer's storage account. Required.
+ /// Gets or sets the Azure Storage connection string to the customer's storage account.
+/// Either this or AccountUri and Credential must be set.
///
public string ConnectionString { get; set; } = string.Empty;
+ ///
+ /// Gets or sets the Azure Storage account URI.
+/// Either this and Credential, or ConnectionString, must be set.
+ ///
+ public Uri? AccountUri { get; set; }
+
+ ///
+ /// Gets or sets the credential to use for authentication.
+/// Either this and AccountUri, or ConnectionString, must be set.
+ ///
+ public TokenCredential? Credential { get; set; }
+
///
/// Gets or sets the blob container name to use for payloads. Defaults to "durabletask-payloads".
///
diff --git a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
index c6f038db..f91ce0ea 100644
--- a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
@@ -33,14 +33,23 @@ sealed class BlobPayloadStore : IPayloadStore
///
/// The options for the blob payload store.
/// Thrown when is null.
- /// Thrown when is null or empty.
+ /// Thrown when neither a connection string nor an account URI/credential pair is provided.
public BlobPayloadStore(LargePayloadStorageOptions options)
{
this.options = options ?? throw new ArgumentNullException(nameof(options));
-
- Check.NotNullOrEmpty(options.ConnectionString, nameof(options.ConnectionString));
Check.NotNullOrEmpty(options.ContainerName, nameof(options.ContainerName));
+ // Validate that either a connection string or an account URI/credential pair is provided
+ bool hasConnectionString = !string.IsNullOrEmpty(options.ConnectionString);
+ bool hasIdentityAuth = options.AccountUri != null && options.Credential != null;
+
+ if (!hasConnectionString && !hasIdentityAuth)
+ {
+ throw new ArgumentException(
+ "Either ConnectionString or AccountUri and Credential must be provided.",
+ nameof(options));
+ }
+
BlobClientOptions clientOptions = new()
{
Retry =
@@ -52,7 +61,11 @@ public BlobPayloadStore(LargePayloadStorageOptions options)
NetworkTimeout = TimeSpan.FromMinutes(NetworkTimeoutMinutes),
},
};
- BlobServiceClient serviceClient = new(options.ConnectionString, clientOptions);
+
+ BlobServiceClient serviceClient = hasIdentityAuth
+ ? new BlobServiceClient(options.AccountUri, options.Credential, clientOptions)
+ : new BlobServiceClient(options.ConnectionString, clientOptions);
+
this.containerClient = serviceClient.GetBlobContainerClient(options.ContainerName);
}
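A minimal wiring sketch for the identity-based path added here; it assumes the Azure.Identity package and the UseExternalizedPayloads(configure) worker-builder overload from this extension, and the storage account URI and namespaces are placeholders rather than confirmed names.

using System;
using Azure.Identity;
using Microsoft.DurableTask;
using Microsoft.Extensions.DependencyInjection;

ServiceCollection services = new();
services.AddDurableTaskWorker(builder =>
{
    builder.UseExternalizedPayloads(opts =>
    {
        // Identity-based auth: no connection string required.
        opts.AccountUri = new Uri("https://mystorageaccount.blob.core.windows.net");
        opts.Credential = new DefaultAzureCredential();
        opts.ContainerName = "durabletask-payloads";
    });
});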
From ad3024240597d2bba19976f00bc77eb8357d94b8 Mon Sep 17 00:00:00 2001
From: peterstone2017 <12449837+YunchuWang@users.noreply.github.com>
Date: Mon, 6 Oct 2025 12:14:54 -0700
Subject: [PATCH 53/53] feedback
---
...ientBuilderExtensions.AzureBlobPayloads.cs | 2 +-
...rkerBuilderExtensions.AzureBlobPayloads.cs | 4 +-
...eCollectionExtensions.AzureBlobPayloads.cs | 4 +-
.../Examples/SharedPayloadStoreExample.cs | Bin 3374 -> 0 bytes
.../AzureBlobPayloadCallInvokerFactory.cs | 2 +-
...reBlobPayloadsManagedBackendInterceptor.cs | 4 +-
.../AzureBlobPayloadsSideCarInterceptor.cs | 4 +-
.../Interceptors/BasePayloadInterceptor.cs | 8 ++--
.../PayloadStore/BlobPayloadStore.cs | 23 ++++-----
.../PayloadStore/IPayloadStore.cs | 6 +--
.../LargePayloadTests.cs | 45 +++++++++---------
11 files changed, 51 insertions(+), 51 deletions(-)
delete mode 100644 src/Extensions/AzureBlobPayloads/Examples/SharedPayloadStoreExample.cs
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
index 06cbdb1a..0817bcea 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskClientBuilderExtensions.AzureBlobPayloads.cs
@@ -34,7 +34,7 @@ static IDurableTaskClientBuilder UseExternalizedPayloadsCore(IDurableTaskClientB
// Wrap the gRPC CallInvoker with our interceptor when using the gRPC client
builder.Services
.AddOptions(builder.Name)
- .PostConfigure>((opt, store, monitor) =>
+ .PostConfigure>((opt, store, monitor) =>
{
LargePayloadStorageOptions opts = monitor.Get(builder.Name);
if (opt.Channel is not null)
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
index 03d8ae21..d65e30b8 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/DurableTaskWorkerBuilderExtensions.AzureBlobPayloads.cs
@@ -30,7 +30,7 @@ public static IDurableTaskWorkerBuilder UseExternalizedPayloads(
Check.NotNull(configure);
builder.Services.Configure(builder.Name, configure);
- builder.Services.AddSingleton<IPayloadStore>(sp =>
+ builder.Services.AddSingleton<PayloadStore>(sp =>
{
LargePayloadStorageOptions opts = sp.GetRequiredService>().Get(builder.Name);
return new BlobPayloadStore(opts);
@@ -57,7 +57,7 @@ static IDurableTaskWorkerBuilder UseExternalizedPayloadsCore(IDurableTaskWorkerB
// Wrap the gRPC CallInvoker with our interceptor when using the gRPC worker
builder.Services
.AddOptions(builder.Name)
- .PostConfigure>((opt, store, monitor) =>
+ .PostConfigure>((opt, store, monitor) =>
{
LargePayloadStorageOptions opts = monitor.Get(builder.Name);
if (opt.Channel is not null)
diff --git a/src/Extensions/AzureBlobPayloads/DependencyInjection/ServiceCollectionExtensions.AzureBlobPayloads.cs b/src/Extensions/AzureBlobPayloads/DependencyInjection/ServiceCollectionExtensions.AzureBlobPayloads.cs
index b3895e31..787888b6 100644
--- a/src/Extensions/AzureBlobPayloads/DependencyInjection/ServiceCollectionExtensions.AzureBlobPayloads.cs
+++ b/src/Extensions/AzureBlobPayloads/DependencyInjection/ServiceCollectionExtensions.AzureBlobPayloads.cs
@@ -30,8 +30,8 @@ public static IServiceCollection AddExternalizedPayloadStore(
// so monitor.Get(builder.Name) in the client/worker extensions will see the same config.
services.Configure(configure);
- // Provide a single shared IPayloadStore instance built from the default options.
- services.AddSingleton<IPayloadStore>(sp =>
+ // Provide a single shared PayloadStore instance built from the default options.
+ services.AddSingleton<PayloadStore>(sp =>
{
IOptionsMonitor monitor =
sp.GetRequiredService>();
diff --git a/src/Extensions/AzureBlobPayloads/Examples/SharedPayloadStoreExample.cs b/src/Extensions/AzureBlobPayloads/Examples/SharedPayloadStoreExample.cs
deleted file mode 100644
index 427d1e53c91fa2c870e4fd007a0b00266f77a6d6..0000000000000000000000000000000000000000
GIT binary patch (3374-byte literal for the deleted example file omitted)
/// A CallInvoker with the large payload interceptor applied.
public static CallInvoker Create(GrpcChannel channel, LargePayloadStorageOptions options)
{
- IPayloadStore payloadStore = new BlobPayloadStore(options);
+ PayloadStore payloadStore = new BlobPayloadStore(options);
return channel.CreateCallInvoker().Intercept(new AzureBlobPayloadsManagedBackendInterceptor(payloadStore, options));
}
}
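A hypothetical usage sketch for the factory above; it assumes the factory is publicly reachable and that the types live in the Microsoft.DurableTask namespace, and the channel address and connection string are placeholders (the latter targets the local storage emulator).

using Grpc.Core;
using Grpc.Net.Client;
using Microsoft.DurableTask;

GrpcChannel channel = GrpcChannel.ForAddress("https://localhost:5001");
LargePayloadStorageOptions options = new("UseDevelopmentStorage=true");
CallInvoker invoker = AzureBlobPayloadCallInvokerFactory.Create(channel, options);
// The resulting invoker can be used in place of the raw channel wherever a CallInvoker is accepted.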
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
index 699461b2..5e2f1acd 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsManagedBackendInterceptor.cs
@@ -6,10 +6,10 @@
namespace Microsoft.DurableTask;
///
-/// gRPC interceptor that externalizes large payloads to an IPayloadStore on requests
+/// gRPC interceptor that externalizes large payloads to a PayloadStore on requests
/// and resolves known payload tokens on responses for Azure Managed Backend.
///
-public sealed class AzureBlobPayloadsManagedBackendInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
+public sealed class AzureBlobPayloadsManagedBackendInterceptor(PayloadStore payloadStore, LargePayloadStorageOptions options)
: BasePayloadInterceptor(payloadStore, options)
{
///
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs
index 99fa8903..60db027e 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/AzureBlobPayloadsSideCarInterceptor.cs
@@ -8,10 +8,10 @@
namespace Microsoft.DurableTask;
///
-/// gRPC interceptor that externalizes large payloads to an IPayloadStore on requests
+/// gRPC interceptor that externalizes large payloads to a PayloadStore on requests
/// and resolves known payload tokens on responses for SideCar.
///
-public sealed class AzureBlobPayloadsSideCarInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
+public sealed class AzureBlobPayloadsSideCarInterceptor(PayloadStore payloadStore, LargePayloadStorageOptions options)
: BasePayloadInterceptor(payloadStore, options)
{
///
diff --git a/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs b/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs
index d0e414bb..80e90f88 100644
--- a/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs
+++ b/src/Extensions/AzureBlobPayloads/Interceptors/BasePayloadInterceptor.cs
@@ -8,7 +8,7 @@
namespace Microsoft.DurableTask;
///
-/// Base class for gRPC interceptors that externalize large payloads to an IPayloadStore on requests
+/// Base class for gRPC interceptors that externalize large payloads to a PayloadStore on requests
/// and resolves known payload tokens on responses.
///
/// The namespace for request message types.
@@ -17,7 +17,7 @@ public abstract class BasePayloadInterceptor
@@ -25,7 +25,7 @@ public abstract class BasePayloadInterceptor
/// The payload store.
/// The options.
- protected BasePayloadInterceptor(IPayloadStore payloadStore, LargePayloadStorageOptions options)
+ protected BasePayloadInterceptor(PayloadStore payloadStore, LargePayloadStorageOptions options)
{
this.payloadStore = payloadStore;
this.options = options;
@@ -174,7 +174,7 @@ public override AsyncServerStreamingCall AsyncServerStreamingCall
diff --git a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
index f91ce0ea..5d0757a4 100644
--- a/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/PayloadStore/BlobPayloadStore.cs
@@ -1,22 +1,19 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
-using System.Globalization;
using System.IO.Compression;
using System.Text;
-using Azure;
using Azure.Core;
using Azure.Storage.Blobs;
using Azure.Storage.Blobs.Models;
-using Microsoft.DurableTask.Converters;
namespace Microsoft.DurableTask;
///
-/// Azure Blob Storage implementation of IPayloadStore.
+/// Azure Blob Storage implementation of PayloadStore.
/// Stores payloads as blobs and returns opaque tokens in the form "blob:v1:<container>:<blobName>".
///
-sealed class BlobPayloadStore : IPayloadStore
+sealed class BlobPayloadStore : PayloadStore
{
const string TokenPrefix = "blob:v1:";
const string ContentEncodingGzip = "gzip";
@@ -70,13 +67,13 @@ public BlobPayloadStore(LargePayloadStorageOptions options)
}
///
- public override async Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
+ public override async Task<string> UploadAsync(string payLoad, CancellationToken cancellationToken)
{
// One blob per payload using GUID-based name for uniqueness (stable across retries)
string blobName = $"{Guid.NewGuid():N}";
BlobClient blob = this.containerClient.GetBlobClient(blobName);
- byte[] payloadBuffer = payloadBytes.ToArray();
+ byte[] payloadBuffer = Encoding.UTF8.GetBytes(payLoad);
// Ensure container exists (idempotent)
await this.containerClient.CreateIfNotExistsAsync(PublicAccessType.None, default, default, cancellationToken);
@@ -89,17 +86,21 @@ public override async Task UploadAsync(ReadOnlyMemory payloadBytes
};
using Stream blobStream = await blob.OpenWriteAsync(true, writeOptions, cancellationToken);
using GZipStream compressedBlobStream = new(blobStream, System.IO.Compression.CompressionLevel.Optimal, leaveOpen: true);
- using MemoryStream payloadStream = new(payloadBuffer, writable: false);
- await payloadStream.CopyToAsync(compressedBlobStream, bufferSize: DefaultCopyBufferSize, cancellationToken);
+ await compressedBlobStream.WriteAsync(payloadBuffer, 0, payloadBuffer.Length, cancellationToken);
await compressedBlobStream.FlushAsync(cancellationToken);
await blobStream.FlushAsync(cancellationToken);
}
else
{
using Stream blobStream = await blob.OpenWriteAsync(true, default, cancellationToken);
- using MemoryStream payloadStream = new(payloadBuffer, writable: false);
- await payloadStream.CopyToAsync(blobStream, bufferSize: DefaultCopyBufferSize, cancellationToken);
+ await blobStream.WriteAsync(payloadBuffer, 0, payloadBuffer.Length, cancellationToken);
await blobStream.FlushAsync(cancellationToken);
}
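For readability, the compressed write path above reduced to plain streams; the helper name and the stand-in destination stream are illustrative, with the destination playing the role of the stream returned by BlobClient.OpenWriteAsync.

using System.IO;
using System.IO.Compression;
using System.Text;
using System.Threading;
using System.Threading.Tasks;

static class GzipWriteSketch
{
    public static async Task WriteGzipAsync(Stream destination, string payload, CancellationToken ct)
    {
        byte[] buffer = Encoding.UTF8.GetBytes(payload);
        await using (GZipStream gzip = new(destination, CompressionLevel.Optimal, leaveOpen: true))
        {
            // Write the UTF-8 payload through the gzip stream into the destination.
            await gzip.WriteAsync(buffer, 0, buffer.Length, ct);
        }

        // Disposing the GZipStream writes the gzip footer; flush the underlying stream afterwards.
        await destination.FlushAsync(ct);
    }
}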
diff --git a/src/Extensions/AzureBlobPayloads/PayloadStore/IPayloadStore.cs b/src/Extensions/AzureBlobPayloads/PayloadStore/IPayloadStore.cs
index 385088e5..6d6896d6 100644
--- a/src/Extensions/AzureBlobPayloads/PayloadStore/IPayloadStore.cs
+++ b/src/Extensions/AzureBlobPayloads/PayloadStore/IPayloadStore.cs
@@ -6,15 +6,15 @@ namespace Microsoft.DurableTask;
///
/// Abstraction for storing and retrieving large payloads out-of-band.
///
-public abstract class IPayloadStore
+public abstract class PayloadStore
{
///
/// Uploads a payload and returns an opaque reference token that can be embedded in orchestration messages.
///
- /// The payload bytes.
+ /// The payload.
/// Cancellation token.
/// Opaque reference token.
- public abstract Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken);
+ public abstract Task<string> UploadAsync(string payLoad, CancellationToken cancellationToken);
///
/// Downloads the payload referenced by the token.
diff --git a/test/Grpc.IntegrationTests/LargePayloadTests.cs b/test/Grpc.IntegrationTests/LargePayloadTests.cs
index 8ec29582..6e73f877 100644
--- a/test/Grpc.IntegrationTests/LargePayloadTests.cs
+++ b/test/Grpc.IntegrationTests/LargePayloadTests.cs
@@ -35,14 +35,14 @@ public async Task LargeOrchestrationInputAndOutputAndCustomStatus()
worker.UseExternalizedPayloads();
- worker.Services.AddSingleton<IPayloadStore>(fakeStore);
+ worker.Services.AddSingleton<PayloadStore>(fakeStore);
},
client =>
{
client.UseExternalizedPayloads();
// Override store with in-memory test double
- client.Services.AddSingleton<IPayloadStore>(fakeStore);
+ client.Services.AddSingleton<PayloadStore>(fakeStore);
},
services =>
{
@@ -112,12 +112,12 @@ public async Task HistoryStreaming_ResolvesPayloads()
}));
worker.UseExternalizedPayloads();
- worker.Services.AddSingleton<IPayloadStore>(store);
+ worker.Services.AddSingleton<PayloadStore>(store);
},
client =>
{
client.UseExternalizedPayloads();
- client.Services.AddSingleton<IPayloadStore>(store);
+ client.Services.AddSingleton<PayloadStore>(store);
},
services =>
{
@@ -167,7 +167,7 @@ public async Task SuspendAndResume_Reason_IsExternalizedByClient()
{
// Enable externalization on the client and use the in-memory store to track uploads
client.UseExternalizedPayloads();
- client.Services.AddSingleton<IPayloadStore>(clientStore);
+ client.Services.AddSingleton<PayloadStore>(clientStore);
},
services =>
{
@@ -253,12 +253,12 @@ public async Task LargeTerminateWithPayload()
}));
worker.UseExternalizedPayloads();
- worker.Services.AddSingleton<IPayloadStore>(store);
+ worker.Services.AddSingleton<PayloadStore>(store);
},
client =>
{
client.UseExternalizedPayloads();
- client.Services.AddSingleton<IPayloadStore>(store);
+ client.Services.AddSingleton<PayloadStore>(store);
},
services =>
{
@@ -317,12 +317,12 @@ public async Task LargeContinueAsNewAndCustomStatus()
}));
worker.UseExternalizedPayloads();
- worker.Services.AddSingleton<IPayloadStore>(workerStore);
+ worker.Services.AddSingleton<PayloadStore>(workerStore);
},
client =>
{
client.UseExternalizedPayloads();
- client.Services.AddSingleton<IPayloadStore>(workerStore);
+ client.Services.AddSingleton<PayloadStore>(workerStore);
},
services =>
{
@@ -374,12 +374,12 @@ public async Task LargeSubOrchestrationAndActivityOutput()
.AddActivityFunc(activity, (ctx) => Task.FromResult(largeActivityOutput)));
worker.UseExternalizedPayloads();
- worker.Services.AddSingleton<IPayloadStore>(workerStore);
+ worker.Services.AddSingleton<PayloadStore>(workerStore);
},
client =>
{
client.UseExternalizedPayloads();
- client.Services.AddSingleton<IPayloadStore>(workerStore);
+ client.Services.AddSingleton<PayloadStore>(workerStore);
},
services =>
{
@@ -422,12 +422,12 @@ public async Task LargeQueryFetchInputsAndOutputs()
(ctx, input) => Task.FromResult(largeOut)));
worker.UseExternalizedPayloads();
- worker.Services.AddSingleton<IPayloadStore>(workerStore);
+ worker.Services.AddSingleton<PayloadStore>(workerStore);
},
client =>
{
client.UseExternalizedPayloads();
- client.Services.AddSingleton<IPayloadStore>(workerStore);
+ client.Services.AddSingleton<PayloadStore>(workerStore);
},
services =>
{
@@ -480,7 +480,7 @@ public async Task LargeActivityInputAndOutput()
.AddActivityFunc(activityName, (ctx, input) => input + input));
worker.UseExternalizedPayloads();
- worker.Services.AddSingleton<IPayloadStore>(workerStore);
+ worker.Services.AddSingleton<PayloadStore>(workerStore);
},
client => { /* client not needed for externalization path here */ },
services =>
@@ -529,12 +529,12 @@ public async Task NoLargePayloads()
(ctx, input) => Task.FromResult(input)));
worker.UseExternalizedPayloads();
- worker.Services.AddSingleton<IPayloadStore>(workerStore);
+ worker.Services.AddSingleton<PayloadStore>(workerStore);
},
client =>
{
client.UseExternalizedPayloads();
- client.Services.AddSingleton<IPayloadStore>(clientStore);
+ client.Services.AddSingleton<PayloadStore>(clientStore);
});
string instanceId = await server.Client.ScheduleNewOrchestrationInstanceAsync(orchestratorName, input: smallPayload);
@@ -567,12 +567,12 @@ public async Task LargeExternalEvent()
orchestratorName,
async ctx => await ctx.WaitForExternalEvent(EventName)));
- worker.Services.AddSingleton<IPayloadStore>(fakeStore);
+ worker.Services.AddSingleton<PayloadStore>(fakeStore);
worker.UseExternalizedPayloads();
},
client =>
{
- client.Services.AddSingleton<IPayloadStore>(fakeStore);
+ client.Services.AddSingleton<PayloadStore>(fakeStore);
client.UseExternalizedPayloads();
},
services =>
@@ -602,7 +602,7 @@ public async Task LargeExternalEvent()
}
- class InMemoryPayloadStore : IPayloadStore
+ class InMemoryPayloadStore : PayloadStore
{
const string TokenPrefix = "blob:v1:";
readonly Dictionary tokenToPayload;
@@ -623,13 +623,12 @@ public InMemoryPayloadStore(Dictionary shared)
int downloadCount;
public int DownloadCount => this.downloadCount;
- public override Task<string> UploadAsync(ReadOnlyMemory<byte> payloadBytes, CancellationToken cancellationToken)
+ public override Task<string> UploadAsync(string payLoad, CancellationToken cancellationToken)
{
Interlocked.Increment(ref this.uploadCount);
- string json = System.Text.Encoding.UTF8.GetString(payloadBytes.Span);
string token = $"blob:v1:test:{Guid.NewGuid():N}";
- this.tokenToPayload[token] = json;
- this.uploadedPayloads.Add(json);
+ this.tokenToPayload[token] = payLoad;
+ this.uploadedPayloads.Add(payLoad);
return Task.FromResult(token);
}