31 | 31 | from guardrails.utils.safe_get import safe_get |
32 | 32 | from guardrails.telemetry import trace_llm_call, trace_operation |
33 | 33 |
34 | | - |
35 | | -# todo fix circular import |
36 | | -def messages_string( |
37 | | - messages: Union[list[dict[str, Union[str, Prompt, Instructions]]], MessageHistory], |
38 | | -) -> str: |
39 | | - messages_copy = "" |
40 | | - for msg in messages: |
41 | | - content = ( |
42 | | - msg["content"].source # type: ignore |
43 | | - if isinstance(msg["content"], Prompt) |
44 | | - or isinstance(msg["content"], Instructions) # type: ignore |
45 | | - else msg["content"] # type: ignore |
46 | | - ) |
47 | | - messages_copy += content |
48 | | - return messages_copy |
49 | | - |
| 34 | +from guardrails.utils.prompt_utils import messages_to_prompt_string |
50 | 35 |
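Note on this hunk: the inline messages_string helper above, flagged with a "todo fix circular import", is deleted and replaced by an import of messages_to_prompt_string from guardrails.utils.prompt_utils. A minimal sketch of the relocated helper, assuming it keeps the removed function's behavior; the import path for Prompt and Instructions is an assumption, and the MessageHistory overload is omitted for brevity:

    from typing import Union

    from guardrails.prompt import Instructions, Prompt  # assumed import path

    def messages_to_prompt_string(
        messages: list[dict[str, Union[str, Prompt, Instructions]]],
    ) -> str:
        # Flatten chat-style messages into one prompt string. Prompt and
        # Instructions wrap their text in a `.source` attribute; plain
        # strings pass through. As in the removed implementation, contents
        # are concatenated with no separator between messages.
        parts: list[str] = []
        for msg in messages:
            content = msg["content"]
            if isinstance(content, (Prompt, Instructions)):
                content = content.source
            parts.append(content)
        return "".join(parts)

Housing the helper in guardrails.utils.prompt_utils is presumably what resolves the circular import noted in the removed todo, since callers no longer need to import this module to flatten messages.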
51 | 36 | ### |
52 | 37 | # Synchronous wrappers |
@@ -296,7 +281,7 @@ def _invoke_llm( |
296 | 281 | "The `torch` package is not installed. " |
297 | 282 | "Install with `pip install torch`" |
298 | 283 | ) |
299 | | - prompt = messages_string(messages) |
| 284 | + prompt = messages_to_prompt_string(messages) |
300 | 285 | tokenizer = kwargs.pop("tokenizer") |
301 | 286 | if not tokenizer: |
302 | 287 | raise UserFacingException( |
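For context at this call site: the torch/HuggingFace branch flattens the chat messages into a single prompt string before handing it to the tokenizer popped from kwargs. A hedged sketch of the downstream usage under standard transformers conventions; `model` and the surrounding names are assumptions, not this file's actual code:

    # Hypothetical continuation after the guard clauses above:
    prompt = messages_to_prompt_string(messages)
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(**inputs)
    text = tokenizer.decode(output_ids[0], skip_special_tokens=True)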
@@ -408,7 +393,7 @@ def _invoke_llm( |
408 | 393 | temperature = kwargs.pop("temperature", None) |
409 | 394 | if temperature == 0: |
410 | 395 | temperature = None |
411 | | - prompt = messages_string(messages) |
| 396 | + prompt = messages_to_prompt_string(messages) |
412 | 397 | trace_operation( |
413 | 398 | input_mime_type="application/json", |
414 | 399 | input_value={ |
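The same one-line substitution applies at this second call site, where the flattened prompt feeds the tracing payload. Because the helper joins message contents with no separator (matching the removed implementation), the traced value is the raw concatenation, e.g.:

    # Illustration of the flattening behavior, assuming the relocated
    # helper matches the removed implementation:
    messages = [
        {"role": "system", "content": "You are terse."},
        {"role": "user", "content": "Hi."},
    ]
    messages_to_prompt_string(messages)  # -> "You are terse.Hi."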