@@ -170,7 +170,7 @@ async def _start_indexing_pipeline(index_name: str):
         "type": "index",
     })

-    reporter = LoggerSingleton().get_instance()
+    logger = LoggerSingleton().get_instance()
     pipelinejob = PipelineJob()
     pipeline_job = pipelinejob.load_item(sanitized_index_name)
     sanitized_storage_name = pipeline_job.sanitized_storage_name
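The rename only touches the local variable; the shared object still comes from `LoggerSingleton().get_instance()`. A minimal sketch of the pattern that call implies, with the stored logger's interface inferred from the `on_error()` call sites later in this patch (both class bodies are assumptions, not the repository's actual implementation):

    import logging
    from typing import Any


    class _PipelineLogger:
        # Assumed interface, inferred from the on_error() call sites below.
        def on_error(
            self,
            message: str,
            cause: BaseException | None = None,
            stack: str | None = None,
            details: dict[str, Any] | None = None,
        ) -> None:
            logging.getLogger("indexing-pipeline").error(
                "%s (cause=%r, details=%r)\n%s", message, cause, details, stack or ""
            )


    class LoggerSingleton:
        # Every LoggerSingleton() call is cheap; get_instance() always
        # returns the same shared logger object for the process.
        _instance: _PipelineLogger | None = None

        def get_instance(self) -> _PipelineLogger:
            if LoggerSingleton._instance is None:
                LoggerSingleton._instance = _PipelineLogger()
            return LoggerSingleton._instance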
@@ -229,19 +229,19 @@ async def _start_indexing_pipeline(index_name: str):
     for workflow in pipeline_config.workflows:
         pipeline_job.all_workflows.append(workflow.name)

-    # create new reporters/callbacks just for this job
-    reporters = []
-    reporter_names = os.getenv("REPORTERS", Reporters.CONSOLE.name.upper()).split(",")
-    for reporter_name in reporter_names:
+    # create new loggers/callbacks just for this job
+    loggers = []
+    logger_names = os.getenv("REPORTERS", Reporters.CONSOLE.name.upper()).split(",")
+    for logger_name in logger_names:
         try:
-            reporters.append(Reporters[reporter_name.upper()])
+            loggers.append(Reporters[logger_name.upper()])
         except KeyError:
-            raise ValueError(f"Unknown reporter type: {reporter_name}")
+            raise ValueError(f"Unknown logger type: {logger_name}")
     workflow_callbacks = load_pipeline_logger(
         index_name=index_name,
         num_workflow_steps=len(pipeline_job.all_workflows),
         reporting_dir=sanitized_index_name,
-        reporters=reporters,
+        reporters=loggers,
     )

     # add pipeline job callback to the callback manager
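The loop maps comma-separated names from the REPORTERS environment variable onto `Reporters` enum members via name lookup, so an unrecognized name fails fast with a ValueError instead of being silently ignored. A standalone sketch of the same lookup pattern (the enum member names here are assumptions for illustration):

    import os
    from enum import Enum


    class Reporters(Enum):
        # Member names are assumptions for illustration.
        BLOB = 1
        CONSOLE = 2
        APP_INSIGHTS = 3


    loggers = []
    logger_names = os.getenv("REPORTERS", Reporters.CONSOLE.name).split(",")
    for logger_name in logger_names:
        try:
            # Enum[...] looks up members by name and raises KeyError on a miss.
            loggers.append(Reporters[logger_name.strip().upper()])
        except KeyError:
            raise ValueError(f"Unknown logger type: {logger_name}")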
@@ -305,7 +305,7 @@ async def _start_indexing_pipeline(index_name: str):
             details=error_details,
         )
         # log error in global index directory logs
-        reporter.on_error(
+        logger.on_error(
             message=f"Indexing pipeline failed for index '{index_name}'.",
             cause=e,
             stack=traceback.format_exc(),
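The `stack=traceback.format_exc()` argument only produces a useful value inside an `except` block, where the exception being handled is still active. A small standalone illustration:

    import traceback

    try:
        raise RuntimeError("pipeline step failed")
    except RuntimeError as e:
        # format_exc() renders the traceback of the exception currently being
        # handled; called outside an except block it returns "NoneType: None".
        stack = traceback.format_exc()
        print(f"cause={e!r}")
        print(stack)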
@@ -337,8 +337,8 @@ async def get_all_indexes():
             if item["type"] == "index":
                 items.append(item["human_readable_name"])
     except Exception:
-        reporter = LoggerSingleton().get_instance()
-        reporter.on_error("Error retrieving index names")
+        logger = LoggerSingleton().get_instance()
+        logger.on_error("Error retrieving index names")
     return IndexNameList(index_name=items)


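Note that `get_all_indexes` deliberately degrades to an empty list when the metadata store is unreachable, logging instead of raising. A minimal sketch of that fallback shape with the container query stubbed out (helper name and data are hypothetical):

    def get_index_names(items_source) -> list[str]:
        # Hypothetical helper mirroring the fallback shape above: log and
        # return whatever was collected instead of surfacing storage errors.
        items: list[str] = []
        try:
            for item in items_source:
                if item["type"] == "index":
                    items.append(item["human_readable_name"])
        except Exception:
            print("Error retrieving index names")  # stand-in for logger.on_error
        return items


    print(get_index_names([
        {"type": "index", "human_readable_name": "wikipedia"},
        {"type": "job", "human_readable_name": "nightly-run"},
    ]))  # -> ['wikipedia']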
@@ -363,13 +363,13 @@ def _delete_k8s_job(job_name: str, namespace: str) -> None:
     # function should only work when running in AKS
     if not os.getenv("KUBERNETES_SERVICE_HOST"):
         return None
-    reporter = LoggerSingleton().get_instance()
+    logger = LoggerSingleton().get_instance()
     kubernetes_config.load_incluster_config()
     try:
         batch_v1 = kubernetes_client.BatchV1Api()
         batch_v1.delete_namespaced_job(name=job_name, namespace=namespace)
     except Exception:
-        logger.on_error(
+        logger.on_error(
             message=f"Error deleting k8s job {job_name}.",
             details={"container": job_name},
         )
@@ -380,7 +380,7 @@ def _delete_k8s_job(job_name: str, namespace: str) -> None:
         if job_pod:
             core_v1.delete_namespaced_pod(job_pod, namespace=namespace)
     except Exception:
-        reporter.on_error(
+        logger.on_error(
             message=f"Error deleting k8s pod for job {job_name}.",
             details={"container": job_name},
         )
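`_delete_k8s_job` deletes the Job and then its pod in two steps, because `delete_namespaced_job` does not cascade to pods unless a propagation policy is set. A sketch of an equivalent cleanup with the official `kubernetes` client; the propagation policy and label selector are the client's standard mechanisms, though using them here is my assumption about intent:

    from kubernetes import client, config


    def delete_job_and_pods(job_name: str, namespace: str) -> None:
        # Assumes this code runs inside the cluster (e.g. an AKS pod).
        config.load_incluster_config()

        # Foreground propagation makes Kubernetes delete the Job's pods too,
        # which would remove the need for the separate pod lookup in the patch.
        client.BatchV1Api().delete_namespaced_job(
            name=job_name,
            namespace=namespace,
            propagation_policy="Foreground",
        )

        # Alternatively, find leftover pods via the job-name label that the
        # Job controller stamps on every pod it creates.
        core_v1 = client.CoreV1Api()
        pods = core_v1.list_namespaced_pod(
            namespace, label_selector=f"job-name={job_name}"
        )
        for pod in pods.items:
            core_v1.delete_namespaced_pod(pod.metadata.name, namespace)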
@@ -442,8 +442,8 @@ async def delete_index(index_name: str):
         index_client.delete_index(ai_search_index_name)

     except Exception:
-        reporter = LoggerSingleton().get_instance()
-        reporter.on_error(
+        logger = LoggerSingleton().get_instance()
+        logger.on_error(
             message=f"Error encountered while deleting all data for index {index_name}.",
             stack=None,
             details={"container": index_name},
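The `index_client.delete_index(ai_search_index_name)` call matches the `azure-search-documents` SDK, whose `delete_index` raises on failure rather than returning an error, hence the surrounding try/except. A hedged sketch of constructing such a client (the endpoint value and credential wiring are assumptions; in the service they would come from configuration):

    from azure.identity import DefaultAzureCredential
    from azure.search.documents.indexes import SearchIndexClient

    # Endpoint is a placeholder assumption for illustration.
    index_client = SearchIndexClient(
        endpoint="https://<search-service>.search.windows.net",
        credential=DefaultAzureCredential(),
    )
    # Raises azure.core.exceptions.HttpResponseError on failure, which the
    # patch above reports through logger.on_error.
    index_client.delete_index("my-index")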