@@ -199,18 +199,25 @@ def upload_metrics(workflow_metrics, metrics_userid, api_key):
199
199
metrics_userid: The userid to use for the upload.
200
200
api_key: The API key to use for the upload.
201
201
"""
202
+
203
+ if len (workflow_metrics ) == 0 :
204
+ print ("No metrics found to upload." , file = sys .stdout )
205
+ return
206
+
202
207
metrics_batch = []
203
208
for workflow_metric in workflow_metrics :
204
209
if isinstance (workflow_metric , GaugeMetric ):
205
210
name = workflow_metric .name .lower ().replace (" " , "_" )
206
211
metrics_batch .append (
207
212
f"{ name } value={ workflow_metric .value } { workflow_metric .time_ns } "
208
213
)
209
- else :
214
+ elif isinstance ( workflow_metric , JobMetrics ) :
210
215
name = workflow_metric .job_name .lower ().replace (" " , "_" )
211
216
metrics_batch .append (
212
217
f"{ name } queue_time={ workflow_metric .queue_time } ,run_time={ workflow_metric .run_time } ,status={ workflow_metric .status } { workflow_metric .created_at_ns } "
213
218
)
219
+ else :
220
+ raise ValueError (f"Unsupported object type { type (workflow_metric )} : { str (workflow_metric )} " )
214
221
215
222
request_data = "\n " .join (metrics_batch )
216
223
response = requests .post (
@@ -244,8 +251,8 @@ def main():
244
251
while True :
245
252
current_metrics = get_per_workflow_metrics (github_repo , workflows_to_track )
246
253
current_metrics += get_sampled_workflow_metrics (github_repo )
247
- if len ( current_metrics ) == 0 :
248
- print ( "No metrics found to upload. " , file = sys . stdout )
254
+ # Always send a heartbeat metric so we can monitor if this container is still able to log to Grafana.
255
+ current_metrics . append ( GaugeMetric ( "metrics_container_heartbeat " , 1 , time . time_ns ()) )
249
256
250
257
upload_metrics (current_metrics , grafana_metrics_userid , grafana_api_key )
251
258
print (f"Uploaded { len (current_metrics )} metrics" , file = sys .stdout )
0 commit comments