@@ -276,8 +276,7 @@ def test_tracks_bedrock_metrics_with_error(client: LDClient):
276276 assert tracker.get_summary().usage == TokenUsage(330, 220, 110)
277277
278278
279- @pytest.mark.asyncio
280- async def test_tracks_openai_metrics(client: LDClient):
279+ def test_tracks_openai_metrics(client: LDClient):
281280 context = Context.create("user-key")
282281 tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context)
283282
@@ -293,10 +292,10 @@ def to_dict(self):
293292 "completion_tokens": 110,
294293 }
295294
296- async def get_result():
295+ def get_result():
297296 return Result()
298297
299- await tracker.track_openai_metrics(get_result)
298+ tracker.track_openai_metrics(get_result)
300299
300299
301300 calls = [
302301 call(
@@ -330,16 +329,15 @@ async def get_result():
330329 assert tracker.get_summary().usage == TokenUsage(330, 220, 110)
331330
332331
333- @pytest.mark.asyncio
334- async def test_tracks_openai_metrics_with_exception(client: LDClient):
332+ def test_tracks_openai_metrics_with_exception(client: LDClient):
335333 context = Context.create("user-key")
336334 tracker = LDAIConfigTracker(client, "variation-key", "config-key", 3, "fakeModel", "fakeProvider", context)
337335
338- async def raise_exception():
336+ def raise_exception():
339337 raise ValueError("Something went wrong")
340338
341339 try:
342- await tracker.track_openai_metrics(raise_exception)
340+ tracker.track_openai_metrics(raise_exception)
343341 assert False, "Should have thrown an exception"
344342 except ValueError:
345343 pass
0 commit comments