@@ -251,3 +251,111 @@ def test_message_conversion():
251251
252252 assert result [5 ]["role" ] == "assistant"
253253 assert result [5 ]["content" ] == "One plus one is two"
254+
255+
@patch("ddtrace.appsec.ai_guard._api_client.AIGuardClient._execute_request")
def test_streamed_chat_sync_allow(mock_execute_request, langchain_openai, openai_url):
    """A sync streamed chat completion runs to the end when AI Guard returns ALLOW."""
    mock_execute_request.return_value = mock_evaluate_response("ALLOW")

    chat_model = langchain_openai.ChatOpenAI(base_url=openai_url)

    # Drain the whole stream; no AIGuardAbortError is expected.
    list(chat_model.stream(input="how can langsmith help with testing?"))

    # The guard must have been consulted exactly once for the prompt.
    mock_execute_request.assert_called_once()
266+
267+
@pytest.mark.parametrize("decision", ["DENY", "ABORT"], ids=["deny", "abort"])
@patch("ddtrace.appsec.ai_guard._api_client.AIGuardClient._execute_request")
def test_streamed_chat_sync_block(mock_execute_request, langchain_openai, openai_url, decision):
    """A sync streamed chat completion raises AIGuardAbortError on DENY/ABORT."""
    mock_execute_request.return_value = mock_evaluate_response(decision)

    chat_model = langchain_openai.ChatOpenAI(base_url=openai_url)

    # Consuming the stream should be interrupted by the guard decision.
    with pytest.raises(AIGuardAbortError):
        for _chunk in chat_model.stream(input="how can langsmith help with testing?"):
            pass

    mock_execute_request.assert_called_once()
280+
281+
@pytest.mark.asyncio
@patch("ddtrace.appsec.ai_guard._api_client.AIGuardClient._execute_request")
async def test_streamed_chat_async_allow(mock_execute_request, langchain_openai, openai_url):
    """An async streamed chat completion runs to the end when AI Guard returns ALLOW."""
    mock_execute_request.return_value = mock_evaluate_response("ALLOW")

    chat_model = langchain_openai.ChatOpenAI(base_url=openai_url)

    # Drain the async stream; no AIGuardAbortError is expected.
    async for _chunk in chat_model.astream(input="how can langsmith help with testing?"):
        pass

    mock_execute_request.assert_called_once()
293+
294+
@pytest.mark.asyncio
@pytest.mark.parametrize("decision", ["DENY", "ABORT"], ids=["deny", "abort"])
@patch("ddtrace.appsec.ai_guard._api_client.AIGuardClient._execute_request")
async def test_streamed_chat_async_block(mock_execute_request, langchain_openai, openai_url, decision):
    """An async streamed chat completion raises AIGuardAbortError on DENY/ABORT."""
    mock_execute_request.return_value = mock_evaluate_response(decision)

    chat_model = langchain_openai.ChatOpenAI(base_url=openai_url)

    # Consuming the async stream should be interrupted by the guard decision.
    with pytest.raises(AIGuardAbortError):
        async for _chunk in chat_model.astream(input="how can langsmith help with testing?"):
            pass

    mock_execute_request.assert_called_once()
308+
309+
@patch("ddtrace.appsec.ai_guard._api_client.AIGuardClient._execute_request")
def test_streamed_llm_sync_allow(mock_execute_request, langchain_openai, openai_url):
    """A sync streamed LLM completion runs to the end when AI Guard returns ALLOW."""
    mock_execute_request.return_value = mock_evaluate_response("ALLOW")

    completion_llm = langchain_openai.OpenAI(base_url=openai_url)

    # Drain the whole stream; no AIGuardAbortError is expected.
    list(completion_llm.stream(input="How do I write technical documentation?"))

    # The guard must have been consulted exactly once for the prompt.
    mock_execute_request.assert_called_once()
320+
321+
@pytest.mark.parametrize("decision", ["DENY", "ABORT"], ids=["deny", "abort"])
@patch("ddtrace.appsec.ai_guard._api_client.AIGuardClient._execute_request")
def test_streamed_llm_sync_block(mock_execute_request, langchain_openai, openai_url, decision):
    """A sync streamed LLM completion raises AIGuardAbortError on DENY/ABORT.

    This is the synchronous variant, so it must be a plain ``def`` (matching
    ``test_streamed_chat_sync_block``).  The original was declared
    ``async def`` with a synchronous body and no ``@pytest.mark.asyncio``
    marker, which pytest-asyncio skips (with a warning) instead of running —
    the assertion never executed.
    """
    mock_execute_request.return_value = mock_evaluate_response(decision)

    llm = langchain_openai.OpenAI(base_url=openai_url)

    # Consuming the stream should be interrupted by the guard decision.
    with pytest.raises(AIGuardAbortError):
        for _ in llm.stream(input="How do I write technical documentation?"):
            pass

    mock_execute_request.assert_called_once()
334+
335+
@pytest.mark.asyncio
@patch("ddtrace.appsec.ai_guard._api_client.AIGuardClient._execute_request")
async def test_streamed_llm_async_allow(mock_execute_request, langchain_openai, openai_url):
    """An async streamed LLM completion runs to the end when AI Guard returns ALLOW."""
    mock_execute_request.return_value = mock_evaluate_response("ALLOW")

    completion_llm = langchain_openai.OpenAI(base_url=openai_url)

    # Drain the async stream; no AIGuardAbortError is expected.
    async for _chunk in completion_llm.astream(input="How do I write technical documentation?"):
        pass

    mock_execute_request.assert_called_once()
347+
348+
@pytest.mark.asyncio
@pytest.mark.parametrize("decision", ["DENY", "ABORT"], ids=["deny", "abort"])
@patch("ddtrace.appsec.ai_guard._api_client.AIGuardClient._execute_request")
async def test_streamed_llm_async_block(mock_execute_request, langchain_openai, openai_url, decision):
    """An async streamed LLM completion raises AIGuardAbortError on DENY/ABORT."""
    mock_execute_request.return_value = mock_evaluate_response(decision)

    completion_llm = langchain_openai.OpenAI(base_url=openai_url)

    # Consuming the async stream should be interrupted by the guard decision.
    with pytest.raises(AIGuardAbortError):
        async for _chunk in completion_llm.astream(input="How do I write technical documentation?"):
            pass

    mock_execute_request.assert_called_once()
0 commit comments