@@ -24,6 +24,7 @@
24 | 24 | add_token_count_to_events, |
25 | 25 | disabled_ai_monitoring_record_content_settings, |
26 | 26 | events_sans_content, |
| 27 | + events_with_context_attrs, |
27 | 28 | llm_token_count_callback, |
28 | 29 | set_trace_info, |
29 | 30 | ) |
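
For reference, `events_with_context_attrs` is a shared test helper: it returns a copy of the expected events with the context attributes merged in, so `validate_custom_events` matches what the instrumentation records inside a `WithLlmCustomAttributes` block. A minimal sketch of such a helper, assuming the expected events are `(intrinsics, attributes)` pairs and that the agent stores the context under `llm.`-prefixed keys (the real implementation lives in the shared test utilities):

```python
import copy


def events_with_context_attrs(expected_events, context_attrs=None):
    # Hypothetical re-implementation for illustration only: merge the
    # llm.-prefixed context attributes into each expected event's
    # attribute dict, leaving the original expected events untouched.
    context_attrs = context_attrs or {"llm.context": "attr"}
    events = copy.deepcopy(expected_events)
    for _intrinsics, attributes in events:
        attributes.update(context_attrs)
    return events
```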
@@ -118,18 +119,19 @@
118 | 119 | rollup_metrics=[("Llm/completion/OpenAI/create", 1)], |
119 | 120 | background_task=True, |
120 | 121 | ) |
121 | | -@validate_custom_events(expected_events_on_no_model_error) |
| 122 | +@validate_custom_events(events_with_context_attrs(expected_events_on_no_model_error)) |
122 | 123 | @validate_custom_event_count(count=3) |
123 | 124 | @background_task() |
124 | 125 | def test_chat_completion_invalid_request_error_no_model(set_trace_info, sync_openai_client): |
125 | 126 | with pytest.raises(TypeError): |
126 | 127 | set_trace_info() |
127 | 128 | add_custom_attribute("llm.conversation_id", "my-awesome-id") |
128 | | - generator = sync_openai_client.chat.completions.create( |
129 | | - messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100, stream=True |
130 | | - ) |
131 | | - for resp in generator: |
132 | | - assert resp |
| 129 | + with WithLlmCustomAttributes({"context": "attr"}): |
| 130 | + generator = sync_openai_client.chat.completions.create( |
| 131 | + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100, stream=True |
| 132 | + ) |
| 133 | + for resp in generator: |
| 134 | + assert resp |
133 | 135 |
134 | 136 |
135 | 137 | @dt_enabled |
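
The pattern under test: `WithLlmCustomAttributes` (from `newrelic.api.llm_custom_attributes`) attaches caller-supplied context to every LLM event recorded while the block is active, and these tests assert it still applies when the call fails (omitting the `model` argument raises `TypeError`). A condensed usage sketch outside the test harness; the model name and client setup are assumptions:

```python
from newrelic.api.background_task import background_task
from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes


@background_task()
def chat_with_context(client, messages):
    # Each key is recorded on the LLM events with an "llm." prefix,
    # so {"context": "attr"} shows up as llm.context = "attr".
    with WithLlmCustomAttributes({"context": "attr"}):
        return client.chat.completions.create(
            model="gpt-3.5-turbo",  # assumed model, for illustration
            messages=messages,
            temperature=0.7,
            max_tokens=100,
        )
```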
@@ -189,22 +191,23 @@ def test_chat_completion_invalid_request_error_no_model_no_content(set_trace_inf |
189 | 191 | rollup_metrics=[("Llm/completion/OpenAI/create", 1)], |
190 | 192 | background_task=True, |
191 | 193 | ) |
192 | | -@validate_custom_events(expected_events_on_no_model_error) |
| 194 | +@validate_custom_events(events_with_context_attrs(expected_events_on_no_model_error)) |
193 | 195 | @validate_custom_event_count(count=3) |
194 | 196 | @background_task() |
195 | 197 | def test_chat_completion_invalid_request_error_no_model_async(loop, set_trace_info, async_openai_client): |
196 | 198 | with pytest.raises(TypeError): |
197 | 199 | set_trace_info() |
198 | 200 | add_custom_attribute("llm.conversation_id", "my-awesome-id") |
| 201 | + with WithLlmCustomAttributes({"context": "attr"}): |
199 | 202 |
200 | | - async def consumer(): |
201 | | - generator = await async_openai_client.chat.completions.create( |
202 | | - messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100, stream=True |
203 | | - ) |
204 | | - async for resp in generator: |
205 | | - assert resp |
| 203 | + async def consumer(): |
| 204 | + generator = await async_openai_client.chat.completions.create( |
| 205 | + messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100, stream=True |
| 206 | + ) |
| 207 | + async for resp in generator: |
| 208 | + assert resp |
206 | 209 |
207 | | - loop.run_until_complete(consumer()) |
| 210 | + loop.run_until_complete(consumer()) |
208 | 211 |
209 | 212 |
210 | 213 | @dt_enabled |
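
One nuance the async variant exercises: `WithLlmCustomAttributes` is a plain synchronous context manager that stores the attributes on the current transaction, so it can bracket an entire event-loop run and still tag events produced by `async` code inside it. A sketch under the same assumptions as above:

```python
import asyncio

from newrelic.api.background_task import background_task
from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes


@background_task()
def run_streaming_chat(async_client, messages):
    async def consume():
        stream = await async_client.chat.completions.create(
            model="gpt-3.5-turbo",  # assumed model, for illustration
            messages=messages,
            stream=True,
        )
        async for chunk in stream:
            assert chunk

    # The with block wraps the whole loop run, so every LLM event
    # recorded inside consume() picks up llm.context = "attr".
    with WithLlmCustomAttributes({"context": "attr"}):
        asyncio.run(consume())
```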