Skip to content
Merged
Show file tree
Hide file tree
Changes from 18 commits
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
b0baed1
reapply litellm updates to support only messages llm kwarg
dtam Sep 19, 2024
5e0eb15
tests run and make progress on rewrite, most of unit_tests_passing
dtam Sep 20, 2024
a11c4ee
migrate more tests partially
dtam Sep 24, 2024
487a06a
Merge remote-tracking branch 'origin/main' into feature/litellm_cleanup
dtam Sep 24, 2024
b97caa2
some progress
dtam Sep 24, 2024
e684739
more progress
dtam Sep 24, 2024
18591b7
fix some more tests
dtam Sep 25, 2024
a1f7893
fix some more tests
dtam Sep 25, 2024
816ef5f
Merge remote-tracking branch 'origin/main' into feature/litellm_cleanup
dtam Sep 25, 2024
67feff4
more progress
dtam Sep 25, 2024
da2dc63
more tests
dtam Sep 26, 2024
66a568b
more tests
dtam Sep 26, 2024
bc7ff07
tests passing
dtam Sep 27, 2024
e8eb7f5
typing and lint
dtam Sep 27, 2024
f4c4827
lint
dtam Sep 27, 2024
1228310
typing
dtam Sep 27, 2024
1d0490f
Merge remote-tracking branch 'origin/main' into feature/litellm_cleanup
dtam Sep 27, 2024
4e333b9
fix bad merge
dtam Sep 27, 2024
0611c7b
minor fixes
dtam Oct 10, 2024
c4b7b4b
Merge remote-tracking branch 'origin/main' into feature/litellm_cleanup
dtam Oct 10, 2024
8c5f9c5
notebooks
dtam Oct 10, 2024
6dfc337
last few notebooks
dtam Oct 10, 2024
955622c
last notebooks
dtam Oct 11, 2024
27248aa
update docs for messages
dtam Oct 12, 2024
d2df838
last of docs
dtam Oct 14, 2024
95e2767
update more docs and start migration guide
dtam Oct 17, 2024
ef2863b
fix tests and format
dtam Oct 17, 2024
10a1dde
Merge remote-tracking branch 'origin/main' into feature/litellm_cleanup
dtam Oct 18, 2024
58d16ec
update some tests
dtam Oct 18, 2024
e167d83
Merge remote-tracking branch 'origin/main' into feature/litellm_cleanup
dtam Oct 21, 2024
e5ccff3
re-enable history by default
dtam Oct 21, 2024
e167a6d
expose messages to prompt helper and finish docs for it
dtam Oct 21, 2024
7898e76
indentation
dtam Oct 21, 2024
d8c0404
Merge remote-tracking branch 'origin/0.6.0-dev' into feature/litellm_…
dtam Oct 21, 2024
60605fa
update api client to point to its alpha
dtam Oct 21, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
79 changes: 36 additions & 43 deletions guardrails/actions/reask.py
Original file line number Diff line number Diff line change
Expand Up @@ -247,7 +247,7 @@ def get_reask_setup_for_string(
validation_response: Optional[Union[str, List, Dict, ReAsk]] = None,
prompt_params: Optional[Dict[str, Any]] = None,
exec_options: Optional[GuardExecutionOptions] = None,
) -> Tuple[Dict[str, Any], Prompt, Instructions]:
) -> Tuple[Dict[str, Any], Messages]:
prompt_params = prompt_params or {}
exec_options = exec_options or GuardExecutionOptions()

Expand All @@ -259,13 +259,11 @@ def get_reask_setup_for_string(
)

reask_prompt_template = None
if exec_options.reask_prompt:
reask_prompt_template = Prompt(exec_options.reask_prompt)
else:
reask_prompt_template = Prompt(
constants["high_level_string_reask_prompt"]
+ constants["complete_string_suffix"]
)

reask_prompt_template = Prompt(
constants["high_level_string_reask_prompt"]
+ constants["complete_string_suffix"]
)

error_messages = "\n".join(
[
Expand All @@ -286,10 +284,7 @@ def get_reask_setup_for_string(
)

instructions = None
if exec_options.reask_instructions:
instructions = Instructions(exec_options.reask_instructions)
if instructions is None:
instructions = Instructions("You are a helpful assistant.")
instructions = Instructions("You are a helpful assistant.")
instructions = instructions.format(
output_schema=schema_prompt_content,
xml_output_schema=xml_output_schema,
Expand All @@ -298,9 +293,13 @@ def get_reask_setup_for_string(
messages = None
if exec_options.reask_messages:
messages = Messages(exec_options.reask_messages)

if messages is None:
messages = Messages(
[{"role": "system", "content": "You are a helpful assistant."}]
[
{"role": "system", "content": instructions},
{"role": "user", "content": prompt},
]
)

messages = messages.format(
Expand All @@ -309,21 +308,21 @@ def get_reask_setup_for_string(
**prompt_params,
)

return output_schema, prompt, instructions
return output_schema, messages


def get_original_prompt(exec_options: Optional[GuardExecutionOptions] = None) -> str:
exec_options = exec_options or GuardExecutionOptions()
original_msg_history = exec_options.msg_history or []
msg_history_prompt = next(
original_messages = exec_options.messages or []
messages_prompt = next(
(
h.get("content")
for h in original_msg_history
for h in original_messages
if isinstance(h, dict) and h.get("role") == "user"
),
"",
)
original_prompt = exec_options.prompt or msg_history_prompt or ""
original_prompt = messages_prompt or ""
return original_prompt


Expand All @@ -338,7 +337,7 @@ def get_reask_setup_for_json(
use_full_schema: Optional[bool] = False,
prompt_params: Optional[Dict[str, Any]] = None,
exec_options: Optional[GuardExecutionOptions] = None,
) -> Tuple[Dict[str, Any], Prompt, Instructions]:
) -> Tuple[Dict[str, Any], Messages]:
reask_schema = output_schema
is_skeleton_reask = not any(isinstance(reask, FieldReAsk) for reask in reasks)
is_nonparseable_reask = any(
Expand All @@ -351,8 +350,6 @@ def get_reask_setup_for_json(
use_xml = prompt_uses_xml(original_prompt)

reask_prompt_template = None
if exec_options.reask_prompt:
reask_prompt_template = Prompt(exec_options.reask_prompt)

if is_nonparseable_reask:
if reask_prompt_template is None:
Expand Down Expand Up @@ -462,30 +459,26 @@ def reask_decoder(obj: ReAsk):
)

instructions = None
if exec_options.reask_instructions:
instructions = Instructions(exec_options.reask_instructions)
else:
instructions_const = (
constants["high_level_xml_instructions"]
if use_xml
else constants["high_level_json_instructions"]
)
instructions = Instructions(instructions_const)
instructions_const = (
constants["high_level_xml_instructions"]
if use_xml
else constants["high_level_json_instructions"]
)
instructions = Instructions(instructions_const)
instructions = instructions.format(**prompt_params)

# TODO: enable this in 0.6.0
# messages = None
# if exec_options.reask_messages:
# messages = Messages(exec_options.reask_messages)
# else:
# messages = Messages(
# [
# {"role": "system", "content": instructions},
# {"role": "user", "content": prompt},
# ]
# )
messages = None
if exec_options.reask_messages:
messages = Messages(exec_options.reask_messages)
else:
messages = Messages(
[
{"role": "system", "content": instructions},
{"role": "user", "content": prompt},
]
)

return reask_schema, prompt, instructions
return reask_schema, messages


def get_reask_setup(
Expand All @@ -499,7 +492,7 @@ def get_reask_setup(
use_full_schema: Optional[bool] = False,
prompt_params: Optional[Dict[str, Any]] = None,
exec_options: Optional[GuardExecutionOptions] = None,
) -> Tuple[Dict[str, Any], Prompt, Instructions]:
) -> Tuple[Dict[str, Any], Messages]:
prompt_params = prompt_params or {}
exec_options = exec_options or GuardExecutionOptions()

Expand Down
18 changes: 14 additions & 4 deletions guardrails/applications/text2sql.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,12 @@ def __init__(
rail_spec: Optional[str] = None,
rail_params: Optional[Dict] = None,
example_formatter: Callable = example_formatter,
reask_prompt: str = REASK_PROMPT,
reask_messages: list[Dict[str, str]] = [
{
"role": "user",
"content": REASK_PROMPT,
}
],
llm_api: Optional[Callable] = None,
llm_api_kwargs: Optional[Dict] = None,
num_relevant_examples: int = 2,
Expand Down Expand Up @@ -108,7 +113,7 @@ def __init__(
schema_file,
rail_spec,
rail_params,
reask_prompt,
reask_messages,
)

# Initialize the document store.
Expand All @@ -122,7 +127,12 @@ def _init_guard(
schema_file: Optional[str] = None,
rail_spec: Optional[str] = None,
rail_params: Optional[Dict] = None,
reask_prompt: str = REASK_PROMPT,
reask_messages: list[Dict[str, str]] = [
{
"role": "user",
"content": REASK_PROMPT,
}
],
Comment on lines +130 to +135
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Pre-initializing lists in _init functions can have odd behaviors sometimes.

def foo(my_list=[]):
  my_list.append(len(my_list))
  return my_list

>>> foo()
[0]
>>> foo()
[0, 1]
>>> foo()
[0, 1, 2]

If we're modifying and appending to reask_messages internally, we should instead default this to None and check inside the method if it's None.

):
# Initialize the Guard class
if rail_spec is None:
Expand All @@ -140,7 +150,7 @@ def _init_guard(
rail_spec_str = Template(rail_spec_str).safe_substitute(**rail_params)

guard = Guard.for_rail_string(rail_spec_str)
guard._exec_opts.reask_prompt = reask_prompt
guard._exec_opts.reask_messages = reask_messages

return guard

Expand Down
Loading
Loading