@@ -155,7 +155,7 @@ def __init__(
         topic_create_timeout: float = 60,
         processing_guarantee: ProcessingGuarantee = "at-least-once",
         max_partition_buffer_size: int = 10000,
-        heartbeat_interval: float = 0.0,
+        wall_clock_interval: float = 0.0,
     ):
         """
         :param broker_address: Connection settings for Kafka.
@@ -224,11 +224,11 @@ def __init__(
             It is a soft limit, and the actual number of buffered messages can be up to x2 higher.
             Lower value decreases the memory use, but increases the latency.
             Default - `10000`.
-        :param heartbeat_interval: the interval (seconds) at which to send heartbeat messages.
-            The heartbeat timing starts counting from application start.
-            TODO: Save and respect last heartbeat timestamp.
-            The heartbeat is sent for every partition of every topic with registered heartbeat streams.
-            If the value is 0, no heartbeat messages will be sent.
+        :param wall_clock_interval: the interval (seconds) at which to invoke
+            the registered wall clock logic.
+            The wall clock timing starts counting from application start.
+            TODO: Save and respect last wall clock timestamp.
+            If the value is 0, no wall clock logic will be invoked.
             Default - `0.0`.

         <br><br>***Error Handlers***<br>
@@ -381,9 +381,9 @@ def __init__(
             recovery_manager=recovery_manager,
         )

-        self._heartbeat_active = heartbeat_interval > 0
-        self._heartbeat_interval = heartbeat_interval
-        self._heartbeat_last_sent = datetime.now().timestamp()
+        self._wall_clock_active = wall_clock_interval > 0
+        self._wall_clock_interval = wall_clock_interval
+        self._wall_clock_last_sent = datetime.now().timestamp()

         self._source_manager = SourceManager()
         self._sink_manager = SinkManager()
@@ -914,7 +914,7 @@ def _run_dataframe(self, sink: Optional[VoidExecutor] = None):
         processing_context = self._processing_context
         source_manager = self._source_manager
         process_message = self._process_message
-        process_heartbeat = self._process_heartbeat
+        process_wall_clock = self._process_wall_clock
         printer = self._processing_context.printer
         run_tracker = self._run_tracker
         consumer = self._consumer
@@ -927,9 +927,9 @@ def _run_dataframe(self, sink: Optional[VoidExecutor] = None):
         )

         dataframes_composed = self._dataframe_registry.compose_all(sink=sink)
-        heartbeats_composed = self._dataframe_registry.compose_heartbeats()
-        if not heartbeats_composed:
-            self._heartbeat_active = False
+        wall_clock_executors = self._dataframe_registry.compose_wall_clock()
+        if not wall_clock_executors:
+            self._wall_clock_active = False

         processing_context.init_checkpoint()
         run_tracker.set_as_running()
@@ -941,7 +941,7 @@ def _run_dataframe(self, sink: Optional[VoidExecutor] = None):
                 run_tracker.timeout_refresh()
             else:
                 process_message(dataframes_composed)
-                process_heartbeat(heartbeats_composed)
+                process_wall_clock(wall_clock_executors)
                 processing_context.commit_checkpoint()
                 consumer.resume_backpressured()
                 source_manager.raise_for_error()
@@ -1025,18 +1025,18 @@ def _process_message(self, dataframe_composed):
         if self._on_message_processed is not None:
             self._on_message_processed(topic_name, partition, offset)

-    def _process_heartbeat(self, heartbeats_composed):
-        if not self._heartbeat_active:
+    def _process_wall_clock(self, wall_clock_executors):
+        if not self._wall_clock_active:
             return

         now = datetime.now().timestamp()
-        if self._heartbeat_last_sent > now - self._heartbeat_interval:
+        if self._wall_clock_last_sent > now - self._wall_clock_interval:
             return

         value, key, timestamp, headers = None, None, int(now * 1000), {}

         for tp in self._consumer.assignment():
-            if executor := heartbeats_composed.get(tp.topic):
+            if executor := wall_clock_executors.get(tp.topic):
                 row = Row(
                     value=value,
                     key=key,
@@ -1058,7 +1058,7 @@ def _process_heartbeat(self, heartbeats_composed):
                 if not to_suppress:
                     raise

-        self._heartbeat_last_sent = now
+        self._wall_clock_last_sent = now

     def _on_assign(self, _, topic_partitions: List[TopicPartition]):
         """
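For reference, the renamed `_process_wall_clock` above gates execution on elapsed wall-clock time rather than on message arrival: it fires at most once per `wall_clock_interval` seconds, with the clock starting at application start (see the TODO about persisting the last timestamp). Below is a minimal standalone sketch of that gating pattern; the class and variable names are illustrative and not part of this diff.

```python
from datetime import datetime


class WallClockTrigger:
    """Fires at most once per `interval` seconds of wall-clock time,
    mirroring the last-sent/now comparison in _process_wall_clock."""

    def __init__(self, interval: float):
        # An interval of 0 disables the trigger entirely.
        self.active = interval > 0
        self.interval = interval
        # Counting starts at construction time (application start),
        # matching the current behavior noted in the TODO above.
        self.last_sent = datetime.now().timestamp()

    def should_fire(self) -> bool:
        if not self.active:
            return False
        now = datetime.now().timestamp()
        # Skip if the interval has not fully elapsed since the last fire.
        if self.last_sent > now - self.interval:
            return False
        self.last_sent = now
        return True


trigger = WallClockTrigger(interval=5.0)
if trigger.should_fire():
    print("invoke the registered wall clock executors for assigned partitions")
```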