1+ import asyncio
12from time import sleep
23from typing import Optional
34
1011from tracely import get_current_span
1112
# Module-level OpenAI clients shared by every demo function below:
# `client` for the synchronous calls, `async_cl` for the awaitable ones.
# NOTE(review): both presumably pick up credentials from the environment
# (e.g. OPENAI_API_KEY) — confirm; nothing in this file configures them.
client = openai.Client()
async_cl = openai.AsyncClient()
1315
1416
1517@trace_event ()
@@ -27,6 +29,22 @@ def call_openai(input: str) -> Optional[str]:
2729 return None
2830
2931
@trace_event()
async def async_call_openai(input: str) -> Optional[str]:
    """Ask the Responses API asynchronously and return the first text answer.

    Token usage (input/output counts) is attached to the current tracing
    span when one is active. Returns ``None`` when the API yields a falsy
    response.
    """
    response = await async_cl.responses.create(model="gpt-4.1", input=input)
    span = get_current_span()
    # Guard clause: nothing to report or return without a response.
    if not response:
        return None
    if span:
        usage = response.usage
        span.update_usage(
            tokens={
                "input": usage.input_tokens,
                "output": usage.output_tokens,
            },
        )
    return response.output[0].content[0].text
46+
47+
3048@trace_event ()
3149def call_openai_with_helper (input : str ):
3250 response = client .responses .create (model = "gpt-4.1" , input = input )
@@ -69,14 +87,23 @@ def call_with_user_id_explicit(input: str):
6987 return second_answer
7088
7189
72- if __name__ == "__main__" :
async def main():
    """Initialise tracing with per-token pricing, then run the async demo.

    Prices are expressed per token (USD per 1M tokens divided out), so the
    tracer can turn the recorded usage counts into a cost estimate.
    """
    pricing = UsageDetails(
        cost_per_token={
            "input": 2.0 / 1_000_000,
            "cached_input": 2.0 / 1_000_000,
            "output": 8.0 / 1_000_000,
        }
    )
    init_tracing(default_usage_details=pricing)
    answer = await async_call_openai("What is LLM?")
    print(answer)
7897
79- print (call_openai ("What is LLM?" ))
98+
if __name__ == "__main__":
    # Tracing setup now lives inside main(), so nothing to configure here.
    # init_tracing(
    #     default_usage_details=UsageDetails(
    #         cost_per_token={"input": 2.0 / 1_000_000, "cached_input": 2.0 / 1_000_000, "output": 8.0 / 1_000_000}
    #     )
    # )
    asyncio.run(main())
    # Synchronous demo variants — enable one at a time:
    # print(call_openai("What is LLM?"))
    # print(call_openai_with_helper("What is LLM?"))
    # print(call_openai_with_context("What is LLM?"))
    # print(multiple_calls_openai("What is LLM?"))
0 commit comments