Commit e167a6d

expose messages to prompt helper and finish docs for it

1 parent: e5ccff3

9 files changed: +73 −40 lines changed

docs/how_to_guides/using_llms.md

Lines changed: 3 additions & 4 deletions

```diff
@@ -306,24 +306,23 @@ guard = Guard().use(ProfanityFree())
 # Function that takes the prompt as a string and returns the LLM output as string
 def my_llm_api(
     *,
-    messages: Optional[list[dict]] = None,
     **kwargs
 ) -> str:
     """Custom LLM API wrapper.
 
     At least one of messages should be provided.
 
     Args:
-        messages (list[dict]): The message history to be passed to the LLM API
         **kwargs: Any additional arguments to be passed to the LLM API
 
     Returns:
         str: The output of the LLM API
     """
-
+    messages = kwargs.pop("messages", [])
+    updated_messages = some_message_processing(messages)
     # Call your LLM API here
     # What you pass to the llm will depend on what arguments it accepts.
-    llm_output = some_llm(messages, **kwargs)
+    llm_output = some_llm(updated_messages, **kwargs)
 
     return llm_output
 
```
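
For context on how the updated wrapper gets called, here is a minimal sketch continuing from the `my_llm_api` definition in the diff above; the `Guard` call shape is assumed from the surrounding guide, and `some_message_processing`/`some_llm` remain the doc's placeholder names:

```py
from guardrails import Guard
from guardrails.hub import ProfanityFree  # hub install assumed, as in the guide

guard = Guard().use(ProfanityFree())

# Guardrails now hands conversation history to the callable via the
# `messages` kwarg, which my_llm_api pops out of **kwargs rather than
# receiving a positional prompt string.
outcome = guard(
    my_llm_api,
    messages=[{"role": "user", "content": "Tell me about guardrails."}],
)
print(outcome.validated_output)
```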

docs/migration_guides/0-6-migration.md

Lines changed: 21 additions & 1 deletion

```diff
@@ -90,4 +90,24 @@ guard(
 These callables are being removed in favor of support through passing no callable and setting the appropriate api key and model argument. See LINK TO DOCS for more info.
 
 ### Prompt no longer a required positional argument on custom callables
-Custom callables will no longer throw an error if the prompt arg is missing in their declaration and guardrails will no longer pass prompt as the first argument. They need to be updated to the messages kwarg to get text input. If a custom callables underlying llm only accepts a single string a helper exists that can compose messages into one otherwise some code to adapt them will be required.
+Custom callables will no longer throw an error if the prompt arg is missing in their declaration and guardrails will no longer pass prompt as the first argument. They need to be updated to the messages kwarg to get text input. If a custom callables underlying llm only accepts a single string a helper exists that can compose messages into one otherwise some code to adapt them will be required.
+
+```py
+from guardrails import messages_to_prompt_string
+
+class CustomCallableCallable(PromptCallableBase):
+    def llm_api(
+        self,
+        *args,
+        **kwargs,
+    ) -> str:
+        messages = kwargs.pop("messages", [])
+        prompt = messages_to_prompt_string(messages)
+
+        llm_string_output = some_llm_call_requiring_prompt(
+            prompt,
+            *args,
+            **kwargs,
+        )
+        return llm_string_output
+````
```
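
As a rough illustration of what that helper produces (the message values here are invented for the example): it walks the list and concatenates each message's `content` into one string, ignoring roles and inserting no separator, per the implementation added in `guardrails/utils/prompt_utils.py` further down.

```py
from guardrails import messages_to_prompt_string

messages = [
    {"role": "system", "content": "You are a terse assistant. "},
    {"role": "user", "content": "Summarize Hamlet in one line."},
]

# Roles are dropped and contents are joined back-to-back, so any
# spacing between turns has to live in the content strings themselves.
prompt = messages_to_prompt_string(messages)
print(prompt)
# You are a terse assistant. Summarize Hamlet in one line.
```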

guardrails/__init__.py

Lines changed: 4 additions & 1 deletion

```diff
@@ -4,13 +4,14 @@
 from guardrails.async_guard import AsyncGuard
 from guardrails.llm_providers import PromptCallableBase
 from guardrails.logging_utils import configure_logging
-from guardrails.prompt import Instructions, Prompt
+from guardrails.prompt import Instructions, Prompt, Messages
 from guardrails.utils import constants, docs_utils
 from guardrails.types.on_fail import OnFailAction
 from guardrails.validator_base import Validator, register_validator
 from guardrails.settings import settings
 from guardrails.hub.install import install
 from guardrails.classes.validation_outcome import ValidationOutcome
+from guardrails.utils.prompt_utils import messages_to_prompt_string
 
 __all__ = [
     "Guard",
@@ -22,8 +23,10 @@
     "constants",
     "docs_utils",
     "configure_logging",
+    "messages_to_prompt_string",
     "Prompt",
     "Instructions",
+    "Messages",
     "settings",
     "install",
     "ValidationOutcome",
```

guardrails/llm_providers.py

Lines changed: 3 additions & 18 deletions

```diff
@@ -31,22 +31,7 @@
 from guardrails.utils.safe_get import safe_get
 from guardrails.telemetry import trace_llm_call, trace_operation
 
-
-# todo fix circular import
-def messages_string(
-    messages: Union[list[dict[str, Union[str, Prompt, Instructions]]], MessageHistory],
-) -> str:
-    messages_copy = ""
-    for msg in messages:
-        content = (
-            msg["content"].source  # type: ignore
-            if isinstance(msg["content"], Prompt)
-            or isinstance(msg["content"], Instructions)  # type: ignore
-            else msg["content"]  # type: ignore
-        )
-        messages_copy += content
-    return messages_copy
-
+from guardrails.utils.prompt_utils import messages_to_prompt_string
 
 ###
 # Synchronous wrappers
@@ -296,7 +281,7 @@ def _invoke_llm(
                 "The `torch` package is not installed. "
                 "Install with `pip install torch`"
             )
-        prompt = messages_string(messages)
+        prompt = messages_to_prompt_string(messages)
         tokenizer = kwargs.pop("tokenizer")
         if not tokenizer:
             raise UserFacingException(
@@ -408,7 +393,7 @@ def _invoke_llm(
         temperature = kwargs.pop("temperature", None)
         if temperature == 0:
             temperature = None
-        prompt = messages_string(messages)
+        prompt = messages_to_prompt_string(messages)
         trace_operation(
             input_mime_type="application/json",
             input_value={
```

guardrails/prompt/__init__.py

Lines changed: 2 additions & 0 deletions

```diff
@@ -1,7 +1,9 @@
 from .instructions import Instructions
 from .prompt import Prompt
+from .messages import Messages
 
 __all__ = [
     "Prompt",
     "Instructions",
+    "Messages",
 ]
```

guardrails/run/__init__.py

Lines changed: 1 addition & 2 deletions

```diff
@@ -2,13 +2,12 @@
 from guardrails.run.runner import Runner
 from guardrails.run.stream_runner import StreamRunner
 from guardrails.run.async_stream_runner import AsyncStreamRunner
-from guardrails.run.utils import messages_source, messages_string
+from guardrails.run.utils import messages_source
 
 __all__ = [
     "Runner",
     "AsyncRunner",
     "StreamRunner",
     "AsyncStreamRunner",
     "messages_source",
-    "messages_string",
 ]
```

guardrails/run/utils.py

Lines changed: 0 additions & 12 deletions

```diff
@@ -28,18 +28,6 @@ def messages_source(messages: MessageHistory) -> MessageHistory:
     return messages_copy
 
 
-def messages_string(messages: MessageHistory) -> str:
-    messages_copy = ""
-    for msg in messages:
-        content = (
-            msg["content"].source
-            if isinstance(msg["content"], Prompt)
-            else msg["content"]
-        )
-        messages_copy += content
-    return messages_copy
-
-
 def preprocess_prompt_for_string_output(
     prompt_callable: PromptCallableBase,
     instructions: Optional[Instructions],
```

guardrails/utils/docs_utils.py

Lines changed: 20 additions & 1 deletion

```diff
@@ -1,6 +1,8 @@
 import typing as t
 
-from guardrails.prompt import Prompt
+from guardrails.prompt import Prompt, Instructions
+
+from guardrails.types.inputs import MessageHistory
 
 try:
     import tiktoken
@@ -19,6 +21,23 @@
     nltk.download("punkt")
 
 
+def messages_to_prompt_string(
+    messages: t.Union[
+        list[dict[str, t.Union[str, Prompt, Instructions]]], MessageHistory
+    ],
+) -> str:
+    messages_copy = ""
+    for msg in messages:
+        content = (
+            msg["content"].source  # type: ignore
+            if isinstance(msg["content"], Prompt)
+            or isinstance(msg["content"], Instructions)  # type: ignore
+            else msg["content"]  # type: ignore
+        )
+        messages_copy += content
+    return messages_copy
+
+
 class TextSplitter:
     """Split the docs into chunks with token boundaries."""
 
```

guardrails/utils/prompt_utils.py

Lines changed: 19 additions & 1 deletion

```diff
@@ -1,10 +1,13 @@
 import json
 import re
-from typing import Any, Dict
+from typing import Any, Dict, Union
 
 from guardrails.classes.output_type import OutputTypes
 
 from guardrails.types.validator import ValidatorMap
+from guardrails.prompt.prompt import Prompt
+from guardrails.prompt.instructions import Instructions
+from guardrails.types.inputs import MessageHistory
 
 
 def prompt_uses_xml(prompt: str) -> bool:
@@ -47,3 +50,18 @@ def prompt_content_for_schema(
     if output_type == OutputTypes.STRING:
         return prompt_content_for_string_schema(output_schema, validator_map, json_path)
     return json.dumps(output_schema)
+
+
+def messages_to_prompt_string(
+    messages: Union[list[dict[str, Union[str, Prompt, Instructions]]], MessageHistory],
+) -> str:
+    messages_copy = ""
+    for msg in messages:
+        content = (
+            msg["content"].source  # type: ignore
+            if isinstance(msg["content"], Prompt)
+            or isinstance(msg["content"], Instructions)  # type: ignore
+            else msg["content"]  # type: ignore
+        )
+        messages_copy += content
+    return messages_copy
```
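
One detail worth calling out in this shared version: content entries may be plain strings or wrapped `Prompt`/`Instructions` objects, which the helper unwraps via `.source` before concatenating. A sketch of the mixed case (constructor arguments assumed; both classes are instantiated here with just their source text):

```py
from guardrails.prompt import Instructions, Prompt
from guardrails.utils.prompt_utils import messages_to_prompt_string

messages = [
    {"role": "system", "content": Instructions("Answer in one sentence. ")},
    {"role": "user", "content": Prompt("What causes tides?")},
]

# Wrapped contents are reduced to their .source text, plain strings
# pass through unchanged, and everything is concatenated in order.
print(messages_to_prompt_string(messages))
# Answer in one sentence. What causes tides?
```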
