
Commit 27248aa

update docs for messages
1 parent 955622c commit 27248aa

15 files changed, with 7171 additions and 397 deletions

docs/concepts/async_streaming.ipynb

Lines changed: 6674 additions & 16 deletions
Large diffs are not rendered by default.

docs/concepts/error_remediation.md

Lines changed: 0 additions & 1 deletion
@@ -18,7 +18,6 @@ Note that this list is not exhaustive of the possible errors that could occur.
 ```log
 The callable `fn` passed to `Guard(fn, ...)` failed with the following error:
 {Root error message here!}.
-Make sure that `fn` can be called as a function that takes in a single prompt string and returns a string.
 ```
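The removed line described the old contract, where `fn` took a single prompt string. A hypothetical sketch of the scenario this error covers, updated to the messages style used elsewhere in this commit (the callable name and its `messages` keyword are assumptions, not confirmed by the diff):

```py
from guardrails import Guard

# Hypothetical custom LLM callable; the exact signature guardrails expects after
# this change is not shown in the diff -- the `messages` keyword is an assumption.
def my_llm(messages=None, **kwargs) -> str:
    # If this raises, guardrails surfaces it with the log message shown above.
    raise RuntimeError("Root error message here!")

guard = Guard()
guard(my_llm, messages=[{"role": "user", "content": "Tell me a joke"}])
```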

docs/concepts/logs.md

Lines changed: 4 additions & 16 deletions
@@ -33,17 +33,17 @@ docs/html/single-step-history.html

 ## Calls
 ### Initial Input
-Inital inputs like prompt and instructions from a call are available on each call.
+Initial inputs like messages from a call are available on each call.

 ```py
 first_call = my_guard.history.first
-print("prompt\n-----")
-print(first_call.prompt)
+print("message\n-----")
+print(first_call.messages[0]["content"])
 print("prompt params\n------------- ")
 print(first_call.prompt_params)
 ```
 ```log
-prompt
+message
 -----

 You are a human in an enchanted forest. You come across opponents of different types. You should fight smaller opponents, run away from bigger ones, and freeze if the opponent is a bear.
@@ -67,18 +67,6 @@ prompt params
 {'opp_type': 'grizzly'}
 ```

-Note: Input messages and msg_history currently can be accessed through iterations
-```py
-print(guard.history.last.iterations.last.inputs.msg_history)
-```
-```log
-[
-{"role":"system","content":"You are a helpful assistant."},
-{"role":"user","content":"Tell me a joke"}
-]
-```
-
-
 ### Final Output
 Final output of call is accessible on a call.
 ```py
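Taken together with the streaming notebook changes below, the updated access pattern can be sketched end to end; the model name, the joke prompt, and the validator-free guard are illustrative assumptions rather than values from this commit:

```py
from guardrails import Guard

guard = Guard()

# Illustrative call in the new messages style (no prompt/instructions kwargs).
guard(
    model="gpt-4o",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me a joke"},
    ],
)

first_call = guard.history.first
# Initial inputs now live on `messages` instead of `prompt`/`msg_history`.
print(first_call.messages[0]["content"])  # the system message
print(first_call.prompt_params)           # any template params passed in
# Final output of the call (attribute name assumed; the hunk above is truncated
# before the "Final Output" code sample).
print(guard.history.last.validated_output)
```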

docs/concepts/streaming.ipynb

Lines changed: 36 additions & 8 deletions
@@ -19,7 +19,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -39,7 +39,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 2,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -51,12 +51,41 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 3,
 "metadata": {},
-"outputs": [],
+"outputs": [
+{
+"data": {
+"text/html": ["(styled <pre> rendering of the same ValidationOutcome shown in text/plain below)"],
+"text/plain": [
+"ValidationOutcome(\n",
+"    call_id='14148119808',\n",
+"    raw_llm_output='.',\n",
+"    validation_summaries=[],\n",
+"    validated_output='.',\n",
+"    reask=None,\n",
+"    validation_passed=True,\n",
+"    error=None\n",
+")\n"
+]
+},
+"metadata": {},
+"output_type": "display_data"
+}
+],
 "source": [
 "fragment_generator = guard(\n",
-" litellm.completion,\n",
 " model=\"gpt-4o\",\n",
 " messages=[\n",
 " {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
@@ -116,7 +145,6 @@
 "guard = gd.Guard()\n",
 "\n",
 "fragment_generator = await guard(\n",
-" litellm.completion,\n",
 " model=\"gpt-3.5-turbo\",\n",
 " messages=[\n",
 " {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
@@ -137,7 +165,7 @@
 ],
 "metadata": {
 "kernelspec": {
-"display_name": ".venv",
+"display_name": "litellm",
 "language": "python",
 "name": "python3"
 },
@@ -151,7 +179,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.8"
+"version": "3.12.3"
 }
 },
 "nbformat": 4,
