 )

 from .mock_llm_outputs import (
+    MockLiteLLMCallableOther,
     MockLiteLLMCallable,
     entity_extraction,
     lists_object,
@@ -173,10 +174,10 @@ def test_entity_extraction_with_reask(
     )

     content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
-    guard = guard_initializer(rail, prompt)
+    guard = guard_initializer(rail, messages=[{"role": "user", "content": prompt}])

     final_output: ValidationOutcome = guard(
-        llm_api=openai.completions.create,
+        model="gpt-3.5-turbo",
         prompt_params={"document": content[:6000]},
         num_reasks=1,
         max_tokens=2000,
@@ -259,7 +260,7 @@ def test_entity_extraction_with_noop(mocker, rail, prompt):
     mocker.patch("guardrails.llm_providers.LiteLLMCallable", new=MockLiteLLMCallable)

     content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
-    guard = guard_initializer(rail, prompt)
+    guard = guard_initializer(rail, messages=[{"role": "user", "content": prompt}])
     final_output = guard(
         llm_api=openai.completions.create,
         prompt_params={"document": content[:6000]},
@@ -305,7 +306,7 @@ def test_entity_extraction_with_filter(mocker, rail, prompt):
     mocker.patch("guardrails.llm_providers.LiteLLMCallable", new=MockLiteLLMCallable)

     content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
-    guard = guard_initializer(rail, prompt)
+    guard = guard_initializer(rail, messages=[{"role": "user", "content": prompt}])
     final_output = guard(
         llm_api=openai.completions.create,
         prompt_params={"document": content[:6000]},
@@ -340,7 +341,7 @@ def test_entity_extraction_with_fix(mocker, rail, prompt):
     mocker.patch("guardrails.llm_providers.LiteLLMCallable", new=MockLiteLLMCallable)

     content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
-    guard = guard_initializer(rail, prompt)
+    guard = guard_initializer(rail, messages=[{"role": "user", "content": prompt}])
     final_output = guard(
         llm_api=openai.completions.create,
         prompt_params={"document": content[:6000]},
@@ -376,7 +377,7 @@ def test_entity_extraction_with_refrain(mocker, rail, prompt):
     mocker.patch("guardrails.llm_providers.LiteLLMCallable", new=MockLiteLLMCallable)

     content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
-    guard = guard_initializer(rail, prompt)
+    guard = guard_initializer(rail, messages=[{"role": "user", "content": prompt}])
     final_output = guard(
         llm_api=openai.completions.create,
         prompt_params={"document": content[:6000]},
@@ -857,11 +858,12 @@ def test_in_memory_validator_log_is_not_duplicated(mocker):
     try:
         content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
         guard = guard_initializer(
-            entity_extraction.PYDANTIC_RAIL_WITH_NOOP, entity_extraction.PYDANTIC_PROMPT
+            entity_extraction.PYDANTIC_RAIL_WITH_NOOP,
+            messages=[{"role": "user", "content": entity_extraction.PYDANTIC_PROMPT}],
         )

         guard(
-            llm_api=openai.completions.create,
+            model="gpt-3.5-turbo",
             prompt_params={"document": content[:6000]},
             num_reasks=1,
         )
@@ -942,11 +944,13 @@ def test_guard_with_top_level_list_return_type(mocker, rail, prompt):
     # Create a Guard with a top level list return type

     # Mock the LLM
-    mocker.patch("guardrails.llm_providers.LiteLLMCallable", new=MockLiteLLMCallable)
+    mocker.patch(
+        "guardrails.llm_providers.LiteLLMCallable", new=MockLiteLLMCallableOther
+    )

-    guard = guard_initializer(rail, prompt=prompt)
+    guard = guard_initializer(rail, messages=[{"role": "user", "content": prompt}])

-    output = guard(llm_api=openai.completions.create)
+    output = guard(model="gpt-3.5-turbo")

     # Validate the output
     assert output.validated_output == [
@@ -1002,7 +1006,7 @@ def test_string_output(mocker):

     guard = gd.Guard.from_rail_string(string.RAIL_SPEC_FOR_STRING)
     final_output = guard(
-        llm_api=openai.completions.create,
+        model="gpt-3.5-turbo",
         prompt_params={"ingredients": "tomato, cheese, sour cream"},
         num_reasks=1,
     )
@@ -1015,7 +1019,7 @@ def test_string_output(mocker):
     assert call.iterations.length == 1

     # For original prompt and output
-    assert call.compiled_prompt == string.COMPILED_PROMPT
+    assert call.compiled_messages[1]["content"]._source == string.COMPILED_PROMPT
     assert call.raw_outputs.last == string.LLM_OUTPUT
     assert mock_invoke_llm.call_count == 1
     mock_invoke_llm = None
@@ -1138,7 +1142,7 @@ def test_string_reask(mocker):

     guard = gd.Guard.from_rail_string(string.RAIL_SPEC_FOR_STRING_REASK)
     final_output = guard(
-        llm_api=openai.completions.create,
+        model="gpt-3.5-turbo",
         prompt_params={"ingredients": "tomato, cheese, sour cream"},
         num_reasks=1,
         max_tokens=100,
@@ -1152,15 +1156,18 @@ def test_string_reask(mocker):
     assert call.iterations.length == 2

     # For original prompt and output
-    assert call.compiled_instructions == string.COMPILED_INSTRUCTIONS
-    assert call.compiled_prompt == string.COMPILED_PROMPT
+    assert call.compiled_messages[0]["content"]._source == string.COMPILED_INSTRUCTIONS
+    assert call.compiled_messages[1]["content"]._source == string.COMPILED_PROMPT
     assert call.iterations.first.raw_output == string.LLM_OUTPUT
     assert call.iterations.first.validation_response == string.VALIDATED_OUTPUT_REASK

     # For re-asked prompt and output
-    assert call.iterations.last.inputs.prompt == gd.Prompt(string.COMPILED_PROMPT_REASK)
+    assert (
+        call.iterations.last.inputs.messages[1]["content"]
+        == string.COMPILED_PROMPT_REASK
+    )
     # Same thing as above
-    assert call.reask_prompts.last == string.COMPILED_PROMPT_REASK
+    assert call.reask_messages[0][1]["content"] == string.COMPILED_PROMPT_REASK

     assert call.raw_outputs.last == string.LLM_OUTPUT_REASK
     assert call.guarded_output == string.LLM_OUTPUT_REASK
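
For reference, the calling convention these hunks migrate toward looks roughly like the sketch below. It is a minimal, hypothetical example patterned only on what the diff itself shows (Guard.from_rail_string, model= in place of llm_api=, prompt_params templating, and the compiled_messages history accessor); RAIL_SPEC and the history lookup are illustrative stand-ins, not values from the real test suite.

# Minimal sketch of the messages-based convention, assuming the guardrails
# API as it appears in the hunks above. RAIL_SPEC is a hypothetical
# placeholder for specs like string.RAIL_SPEC_FOR_STRING; actually running
# this would also need an OpenAI key for the gpt-3.5-turbo call.
import guardrails as gd

RAIL_SPEC = """
<rail version="0.1">
<output type="string" description="A recipe." />
<prompt>
Suggest a recipe using: ${ingredients}
</prompt>
</rail>
"""  # hypothetical spec; the tests load theirs from module constants

guard = gd.Guard.from_rail_string(RAIL_SPEC)

# Old style: guard(llm_api=openai.completions.create, ...)
# New style: name the model and let guardrails route the LLM call.
final_output = guard(
    model="gpt-3.5-turbo",
    prompt_params={"ingredients": "tomato, cheese, sour cream"},
    num_reasks=1,
)

# History is now inspected through chat-style compiled messages rather than
# compiled_prompt/compiled_instructions (indexing assumed from the
# assertions above: [0] instructions, [1] user prompt).
call = guard.history.first
user_prompt = call.compiled_messages[1]["content"]._source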