12 changes: 12 additions & 0 deletions common/dynamicconfig/dynamicproperties/constants.go
@@ -2688,6 +2688,12 @@ const (
// Default value: 1s (1*time.Second)
// Allowed filters: N/A
TimerProcessorMaxTimeShift
// TimerProcessorInMemoryQueueMaxTimeShift is the max time shift the timer processor's in-memory queue can have. If set to 0, the in-memory queue is disabled.
// KeyName: history.timerProcessorInMemoryQueueMaxTimeShift
// Value type: Duration
// Default value: 0
// Allowed filters: ShardID
TimerProcessorInMemoryQueueMaxTimeShift
// TransferProcessorFailoverMaxStartJitterInterval is the max jitter interval for starting transfer
// failover queue processing. The actual jitter interval used will be a random duration between
// 0 and the max interval so that timer failover queue across different shards won't start at
@@ -5226,6 +5232,12 @@ var DurationKeys = map[DurationKey]DynamicDuration{
Description: "TimerProcessorMaxTimeShift is the max shift timer processor can have",
DefaultValue: time.Second,
},
TimerProcessorInMemoryQueueMaxTimeShift: {
KeyName: "history.timerProcessorInMemoryQueueMaxTimeShift",
Filters: []Filter{ShardID},
Description: "TimerProcessorInMemoryQueueMaxTimeShift is the max shift timer processor in memory queue can have. If set to 0, in memory queue is disabled.",
DefaultValue: 0,
},
TransferProcessorFailoverMaxStartJitterInterval: {
KeyName: "history.transferProcessorFailoverMaxStartJitterInterval",
Description: "TransferProcessorFailoverMaxStartJitterInterval is the max jitter interval for starting transfer failover queue processing. The actual jitter interval used will be a random duration between 0 and the max interval so that timer failover queue across different shards won't start at the same time",
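The new key is registered with a ShardID filter and a zero default, so the in-memory queue stays off until the value is overridden. Below is a minimal sketch of how a per-shard value might be resolved; it is not part of this PR, the helper name is made up, and the cadence import paths and the shard-ID call signature of the returned property function are assumptions.

package example

import (
	"github.com/uber/cadence/common/dynamicconfig"
	"github.com/uber/cadence/common/dynamicconfig/dynamicproperties"
)

// inMemoryQueueEnabled is a hypothetical helper, not part of this PR. It resolves the
// ShardID-filtered duration property; the default of 0 leaves the in-memory queue disabled.
func inMemoryQueueEnabled(dc *dynamicconfig.Collection, shardID int) bool {
	maxShiftFn := dc.GetDurationPropertyFilteredByShardID(dynamicproperties.TimerProcessorInMemoryQueueMaxTimeShift)
	return maxShiftFn(shardID) > 0
}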
6 changes: 6 additions & 0 deletions service/history/common/type.go
@@ -36,5 +36,11 @@ type (
Activities map[int64]*persistence.ActivityInfo
History events.PersistedBlobs
PersistenceError bool

// ScheduleInMemory controls whether the tasks are scheduled in memory for the current execution;
// if false, they will only be scheduled after the next DB scan. A notification is sometimes passed with a
// fake timer whose sole purpose is to reset the next scheduled DB read, which is why scheduling the
// task in memory is sometimes avoided.
ScheduleInMemory bool
}
)
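As a rough illustration of the two cases that comment distinguishes, the sketch below (not part of this PR; task construction is elided and the helper name is hypothetical) builds one notification whose tasks should be executed from memory and one fake-timer notification that should only move the next DB read earlier.

package example

import (
	"github.com/uber/cadence/common/persistence"
	hcommon "github.com/uber/cadence/service/history/common"
)

// buildNotifications is a hypothetical helper showing the two notification shapes.
func buildNotifications(persistedTimers, fakeTimer []persistence.Task) (inMemory, dbOnly *hcommon.NotifyTaskInfo) {
	// Freshly persisted timer tasks: schedule them in memory so they can fire
	// before the next DB scan picks them up.
	inMemory = &hcommon.NotifyTaskInfo{
		Tasks:            persistedTimers,
		ScheduleInMemory: true,
	}
	// A synthetic timer whose only purpose is to pull the next scheduled DB read
	// earlier; it must not be executed from memory.
	dbOnly = &hcommon.NotifyTaskInfo{
		Tasks:            fakeTimer,
		ScheduleInMemory: false,
	}
	return inMemory, dbOnly
}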
2 changes: 2 additions & 0 deletions service/history/config/config.go
@@ -154,6 +154,7 @@ type Config struct {
TimerProcessorSplitQueueIntervalJitterCoefficient dynamicproperties.FloatPropertyFn
TimerProcessorMaxRedispatchQueueSize dynamicproperties.IntPropertyFn
TimerProcessorMaxTimeShift dynamicproperties.DurationPropertyFn
TimerProcessorInMemoryQueueMaxTimeShift dynamicproperties.DurationPropertyFnWithShardIDFilter
TimerProcessorHistoryArchivalSizeLimit dynamicproperties.IntPropertyFn
TimerProcessorArchivalTimeLimit dynamicproperties.DurationPropertyFn
DisableTimerFailoverQueue dynamicproperties.BoolPropertyFn
@@ -453,6 +454,7 @@ func New(dc *dynamicconfig.Collection, numberOfShards int, maxMessageSize int, i
TimerProcessorSplitQueueIntervalJitterCoefficient: dc.GetFloat64Property(dynamicproperties.TimerProcessorSplitQueueIntervalJitterCoefficient),
TimerProcessorMaxRedispatchQueueSize: dc.GetIntProperty(dynamicproperties.TimerProcessorMaxRedispatchQueueSize),
TimerProcessorMaxTimeShift: dc.GetDurationProperty(dynamicproperties.TimerProcessorMaxTimeShift),
TimerProcessorInMemoryQueueMaxTimeShift: dc.GetDurationPropertyFilteredByShardID(dynamicproperties.TimerProcessorInMemoryQueueMaxTimeShift),
TimerProcessorHistoryArchivalSizeLimit: dc.GetIntProperty(dynamicproperties.TimerProcessorHistoryArchivalSizeLimit),
TimerProcessorArchivalTimeLimit: dc.GetDurationProperty(dynamicproperties.TimerProcessorArchivalTimeLimit),
DisableTimerFailoverQueue: dc.GetBoolProperty(dynamicproperties.DisableTimerFailoverQueue),
1 change: 1 addition & 0 deletions service/history/config/config_test.go
@@ -139,6 +139,7 @@ func TestNewConfig(t *testing.T) {
"TimerProcessorSplitQueueIntervalJitterCoefficient": {dynamicproperties.TimerProcessorSplitQueueIntervalJitterCoefficient, 4.0},
"TimerProcessorMaxRedispatchQueueSize": {dynamicproperties.TimerProcessorMaxRedispatchQueueSize, 45},
"TimerProcessorMaxTimeShift": {dynamicproperties.TimerProcessorMaxTimeShift, time.Second},
"TimerProcessorInMemoryQueueMaxTimeShift": {dynamicproperties.TimerProcessorInMemoryQueueMaxTimeShift, time.Duration(0)},
"TimerProcessorHistoryArchivalSizeLimit": {dynamicproperties.TimerProcessorHistoryArchivalSizeLimit, 46},
"TimerProcessorArchivalTimeLimit": {dynamicproperties.TimerProcessorArchivalTimeLimit, time.Second},
"TransferTaskBatchSize": {dynamicproperties.TransferTaskBatchSize, 47},
1 change: 1 addition & 0 deletions service/history/execution/context.go
@@ -992,6 +992,7 @@ func notifyTasks(
ExecutionInfo: executionInfo,
Tasks: tasksByCategory[persistence.HistoryTaskCategoryTimer],
PersistenceError: persistenceError,
ScheduleInMemory: true,
}
replicationTaskInfo := &hcommon.NotifyTaskInfo{
ExecutionInfo: executionInfo,
2 changes: 2 additions & 0 deletions service/history/execution/context_test.go
@@ -289,6 +289,7 @@ func TestNotifyTasksFromWorkflowSnapshot(t *testing.T) {
},
},
PersistenceError: true,
ScheduleInMemory: true,
})
mockEngine.EXPECT().NotifyNewReplicationTasks(&hcommon.NotifyTaskInfo{
ExecutionInfo: &persistence.WorkflowExecutionInfo{
@@ -419,6 +420,7 @@ func TestNotifyTasksFromWorkflowMutation(t *testing.T) {
},
},
PersistenceError: true,
ScheduleInMemory: true,
})
mockEngine.EXPECT().NotifyNewReplicationTasks(&hcommon.NotifyTaskInfo{
ExecutionInfo: &persistence.WorkflowExecutionInfo{
8 changes: 8 additions & 0 deletions service/history/queuev2/queue_base.go
@@ -274,6 +274,14 @@ func (q *queueBase) processNewTasks() bool {
return true
}

func (q *queueBase) insertSingleTask(task task.Task) bool {
return q.virtualQueueManager.InsertSingleTask(task)
}

func (q *queueBase) resetProgress(key persistence.HistoryTaskKey) {
q.virtualQueueManager.ResetProgress(key)
}

func (q *queueBase) updateQueueState(ctx context.Context) {
q.metricsScope.IncCounter(metrics.AckLevelUpdateCounter)
queueState := &QueueState{
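These two wrappers delegate to the virtual queue manager. The sketch below shows only the surface they assume; it is illustrative, the real queuev2 interface may differ, and the import paths are assumptions.

package example

import (
	"github.com/uber/cadence/common/persistence"
	"github.com/uber/cadence/service/history/task"
)

// virtualQueueManagerSketch mirrors only the two methods the new queueBase wrappers rely on.
type virtualQueueManagerSketch interface {
	// InsertSingleTask tries to place the task directly into an in-memory virtual queue;
	// false means the task was not accepted and must be picked up by the next DB read.
	InsertSingleTask(task.Task) bool

	// ResetProgress rewinds read progress so the next DB read starts at (or before) the given key.
	ResetProgress(key persistence.HistoryTaskKey)
}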
39 changes: 33 additions & 6 deletions service/history/queuev2/queue_scheduled.go
@@ -143,15 +143,42 @@ func (q *scheduledQueue) NotifyNewTask(clusterName string, info *hcommon.NotifyT
return
}

nextTime := info.Tasks[0].GetVisibilityTimestamp()
for i := 1; i < numTasks; i++ {
ts := info.Tasks[i].GetVisibilityTimestamp()
if ts.Before(nextTime) {
nextTime = ts
q.base.logger.Debug(
"New timer task notification received",
tag.Dynamic("numTasks", numTasks),
tag.Dynamic("scheduleInMemory", info.ScheduleInMemory),
tag.Dynamic("persistenceError", info.PersistenceError),
tag.Dynamic("shardId", q.base.shard.GetShardID()),
)

tasksToBeReadFromDB := make([]persistence.Task, 0)

if info.ScheduleInMemory && !info.PersistenceError {
for _, task := range info.Tasks {
ts := task.GetVisibilityTimestamp()
q.base.logger.Debug("Submitting task to an in-memory queue", tag.Dynamic("scheduledTime", ts), tag.Dynamic("shardId", q.base.shard.GetShardID()))

if !q.base.insertSingleTask(q.base.taskInitializer(task)) {
tasksToBeReadFromDB = append(tasksToBeReadFromDB, task)
}
}
} else {
tasksToBeReadFromDB = info.Tasks
}

var nextReadTime time.Time
for _, task := range tasksToBeReadFromDB {
ts := task.GetVisibilityTimestamp()
if nextReadTime.IsZero() || ts.Before(nextReadTime) {
nextReadTime = ts
}
}

if !nextReadTime.IsZero() {
q.base.resetProgress(persistence.NewHistoryTaskKey(nextReadTime, 0))
q.notify(nextReadTime)
}

q.notify(nextTime)
q.base.metricsScope.AddCounter(metrics.NewHistoryTaskCounter, int64(numTasks))
}

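The fallback computation can be read in isolation: among the tasks that could not be placed in memory, the earliest visibility timestamp becomes the point the DB read progress is reset to. A self-contained restatement follows; the function name is made up and this is only a sketch of the logic in the hunk above.

package example

import (
	"time"

	"github.com/uber/cadence/common/persistence"
)

// earliestFallbackReadTime returns the earliest visibility timestamp among tasks that
// must be read back from the DB; a zero time means no DB re-read is needed.
func earliestFallbackReadTime(tasksToBeReadFromDB []persistence.Task) time.Time {
	var nextReadTime time.Time
	for _, t := range tasksToBeReadFromDB {
		ts := t.GetVisibilityTimestamp()
		if nextReadTime.IsZero() || ts.Before(nextReadTime) {
			nextReadTime = ts
		}
	}
	return nextReadTime
}

When the result is non-zero, NotifyNewTask resets the queue's progress to a history task key at that time and wakes the reader, matching the resetProgress and notify calls in the diff.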