Skip to content

Commit 8c81e28

Browse files
authored
Merge branch 'main' into dapr-state-store-clickhouse
2 parents 3e2b6b0 + 26808c9 commit 8c81e28

File tree

5 files changed

+65
-64
lines changed

5 files changed

+65
-64
lines changed
File renamed without changes.

common/component/azure/eventhubs/eventhubs.go

Lines changed: 47 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -247,12 +247,6 @@ func (aeh *AzureEventHubs) Subscribe(subscribeCtx context.Context, config Subscr
247247
}
248248
topic := config.Topic
249249

250-
// Get the processor client
251-
processor, err := aeh.getProcessorForTopic(subscribeCtx, topic)
252-
if err != nil {
253-
return fmt.Errorf("error trying to establish a connection: %w", err)
254-
}
255-
256250
// This component has built-in retries because Event Hubs doesn't support N/ACK for messages
257251
retryHandler := func(ctx context.Context, events []*azeventhubs.ReceivedEventData) ([]HandlerResponseItem, error) {
258252
b := aeh.backOffConfig.NewBackOffWithContext(ctx)
@@ -282,51 +276,58 @@ func (aeh *AzureEventHubs) Subscribe(subscribeCtx context.Context, config Subscr
282276

283277
subscriptionLoopFinished := make(chan bool, 1)
284278

285-
// Process all partition clients as they come in
286-
subscriberLoop := func() {
287-
for {
288-
// This will block until a new partition client is available
289-
// It returns nil if processor.Run terminates or if the context is canceled
290-
partitionClient := processor.NextPartitionClient(subscribeCtx)
291-
if partitionClient == nil {
292-
subscriptionLoopFinished <- true
293-
return
294-
}
295-
aeh.logger.Debugf("Received client for partition %s", partitionClient.PartitionID())
296-
297-
// Once we get a partition client, process the events in a separate goroutine
298-
go func() {
299-
processErr := aeh.processEvents(subscribeCtx, partitionClient, retryConfig)
300-
// Do not log context.Canceled which happens at shutdown
301-
if processErr != nil && !errors.Is(processErr, context.Canceled) {
302-
aeh.logger.Errorf("Error processing events from partition client: %v", processErr)
303-
}
304-
}()
305-
}
306-
}
307-
308-
// Start the processor
279+
// Start the subscribe + processor loop
309280
go func() {
310281
for {
311-
go subscriberLoop()
312-
// This is a blocking call that runs until the context is canceled
313-
err = processor.Run(subscribeCtx)
314-
// Exit if the context is canceled
315-
if err != nil && errors.Is(err, context.Canceled) {
316-
return
317-
}
282+
// Get the processor client
283+
processor, err := aeh.getProcessorForTopic(subscribeCtx, topic)
318284
if err != nil {
319-
aeh.logger.Errorf("Error from event processor: %v", err)
285+
aeh.logger.Errorf("error trying to establish a connection: %w", err)
320286
} else {
321-
aeh.logger.Debugf("Event processor terminated without error")
322-
}
323-
// wait for subscription loop finished signal
324-
select {
325-
case <-subscribeCtx.Done():
326-
return
327-
case <-subscriptionLoopFinished:
328-
// noop
287+
// Process all partition clients as they come in
288+
subscriberLoop := func() {
289+
for {
290+
// This will block until a new partition client is available
291+
// It returns nil if processor.Run terminates or if the context is canceled
292+
partitionClient := processor.NextPartitionClient(subscribeCtx)
293+
if partitionClient == nil {
294+
subscriptionLoopFinished <- true
295+
return
296+
}
297+
aeh.logger.Debugf("Received client for partition %s", partitionClient.PartitionID())
298+
299+
// Once we get a partition client, process the events in a separate goroutine
300+
go func() {
301+
processErr := aeh.processEvents(subscribeCtx, partitionClient, retryConfig)
302+
// Do not log context.Canceled which happens at shutdown
303+
if processErr != nil && !errors.Is(processErr, context.Canceled) {
304+
aeh.logger.Errorf("Error processing events from partition client: %v", processErr)
305+
}
306+
}()
307+
}
308+
}
309+
310+
go subscriberLoop()
311+
// This is a blocking call that runs until the context is canceled or a non-recoverable error is returned.
312+
err = processor.Run(subscribeCtx)
313+
// Exit if the context is canceled
314+
if err != nil && errors.Is(err, context.Canceled) {
315+
return
316+
}
317+
if err != nil {
318+
aeh.logger.Errorf("Error from event processor: %v", err)
319+
} else {
320+
aeh.logger.Debugf("Event processor terminated without error")
321+
}
322+
// wait for subscription loop finished signal
323+
select {
324+
case <-subscribeCtx.Done():
325+
return
326+
case <-subscriptionLoopFinished:
327+
// noop
328+
}
329329
}
330+
330331
// Waiting here is not strictly necessary, however, we will wait for a short time to increase the likelihood of transient errors having disappeared
331332
select {
332333
case <-subscribeCtx.Done():

common/component/redis/redis.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -214,21 +214,21 @@ func ParseClientFromProperties(properties map[string]string, componentType metad
214214
// start the token refresh goroutine
215215

216216
if settings.UseEntraID {
217-
StartEntraIDTokenRefreshBackgroundRoutine(c, settings.Username, *tokenExpires, tokenCredential, ctx, logger)
217+
StartEntraIDTokenRefreshBackgroundRoutine(c, settings.Username, *tokenExpires, tokenCredential, logger)
218218
}
219219
return c, &settings, nil
220220
}
221221

222-
func StartEntraIDTokenRefreshBackgroundRoutine(client RedisClient, username string, nextExpiration time.Time, cred *azcore.TokenCredential, parentCtx context.Context, logger *kitlogger.Logger) {
222+
func StartEntraIDTokenRefreshBackgroundRoutine(client RedisClient, username string, nextExpiration time.Time, cred *azcore.TokenCredential, logger *kitlogger.Logger) {
223223
go func(cred *azcore.TokenCredential, username string, logger *kitlogger.Logger) {
224-
ctx, cancel := context.WithCancel(parentCtx)
224+
ctx, cancel := context.WithCancel(context.Background())
225225
defer cancel()
226226
backoffConfig := kitretry.DefaultConfig()
227227
backoffConfig.MaxRetries = 3
228228
backoffConfig.Policy = kitretry.PolicyExponential
229229

230230
var backoffManager backoff.BackOff
231-
const refreshGracePeriod = 2 * time.Minute
231+
const refreshGracePeriod = 5 * time.Minute
232232
tokenRefreshDuration := time.Until(nextExpiration.Add(-refreshGracePeriod))
233233

234234
(*logger).Debugf("redis client: starting entraID token refresh loop")

tests/conformance/workflows/workflows.go

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -15,12 +15,12 @@ package workflows
1515

1616
import (
1717
"context"
18-
"encoding/json"
1918
"testing"
2019
"time"
2120

2221
"github.com/stretchr/testify/assert"
2322
"github.com/stretchr/testify/require"
23+
"google.golang.org/protobuf/types/known/wrapperspb"
2424

2525
"github.com/dapr/kit/logger"
2626

@@ -61,14 +61,12 @@ func ConformanceTests(t *testing.T, props map[string]string, workflowItem workfl
6161
t.Run("start", func(t *testing.T) {
6262
testLogger.Info("Start test running...")
6363

64-
inputBytes, _ := json.Marshal(10) // Time that the activity within the workflow runs for
65-
6664
testInstanceID := "TestID"
6765
t.Run("start", func(t *testing.T) {
6866
req := &workflows.StartRequest{
69-
InstanceID: testInstanceID,
67+
InstanceID: &testInstanceID,
7068
WorkflowName: "TestWorkflow",
71-
WorkflowInput: inputBytes,
69+
WorkflowInput: wrapperspb.String("10"),
7270
Options: map[string]string{
7371
"task_queue": "TestTaskQueue",
7472
},

workflows/requests.go

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,13 @@
11
package workflows
22

3+
import "google.golang.org/protobuf/types/known/wrapperspb"
4+
35
// StartRequest is the struct describing a start workflow request.
46
type StartRequest struct {
5-
InstanceID string `json:"instanceID"`
6-
Options map[string]string `json:"options"`
7-
WorkflowName string `json:"workflowName"`
8-
WorkflowInput []byte `json:"workflowInput"`
7+
InstanceID *string `json:"instanceID"`
8+
Options map[string]string `json:"options"`
9+
WorkflowName string `json:"workflowName"`
10+
WorkflowInput *wrapperspb.StringValue `json:"workflowInput"`
911
}
1012

1113
// GetRequest is the struct describing a get workflow state request.
@@ -16,14 +18,14 @@ type GetRequest struct {
1618
// TerminateRequest is the struct describing a terminate workflow request.
1719
type TerminateRequest struct {
1820
InstanceID string `json:"instanceID"`
19-
Recursive bool `json:"recursive"`
21+
Recursive *bool `json:"recursive"`
2022
}
2123

2224
// RaiseEventRequest is the struct describing a raise workflow event request.
2325
type RaiseEventRequest struct {
24-
InstanceID string `json:"instanceID"`
25-
EventName string `json:"name"`
26-
EventData []byte `json:"data"`
26+
InstanceID string `json:"instanceID"`
27+
EventName string `json:"name"`
28+
EventData *wrapperspb.StringValue `json:"data"`
2729
}
2830

2931
// PauseRequest is the struct describing a pause workflow request.
@@ -39,5 +41,5 @@ type ResumeRequest struct {
3941
// PurgeRequest is the object describing a Purge request.
4042
type PurgeRequest struct {
4143
InstanceID string `json:"instanceID"`
42-
Recursive bool `json:"recursive"`
44+
Recursive *bool `json:"recursive"`
4345
}

0 commit comments

Comments (0)