Skip to content

Commit 88deba9

Browse files
Merge branch 'main' into feat/validation-summary
2 parents 938b447 + b40b514 commit 88deba9

File tree

78 files changed

+1831
-844
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

78 files changed

+1831
-844
lines changed

docs/faq.md

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,21 @@
11
# Frequently Asked Questions
22

3+
## I get an "Unauthorized" error when installing validators from the Guardrails Hub. What should I do?
4+
5+
If you see an "Unauthorized" error when installing validators from the Guardrails Hub, it means that the API key you are using is not authorized to access the Guardrails Hub. It may be unset or expired.
6+
7+
To fix this, first generate a new API key from the [Guardrails Hub](https://hub.guardrailsai.com/keys). Then, configure the Guardrails CLI with the new API key.
8+
9+
```bash
10+
guardrails configure
11+
```
12+
13+
There is also a headless option to configure the CLI by passing the token directly.
14+
15+
```bash
16+
guardrails configure --token <your_token>
17+
```
18+
319
## I'm seeing a PromptCallableException when invoking my Guard. What should I do?
420

521
If you see an exception that looks like this

docs/getting_started/guardrails_server.md

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,13 @@ This document will overview a few of the key features of the Guardrails Server,
1313

1414
# Walkthrough
1515

16+
## 0. Configure Guardrails
17+
First, get a free auth key from [Guardrails Hub](https://hub.guardrailsai.com/keys). Then, configure the Guardrails CLI with the auth key.
18+
19+
```bash
20+
guardrails configure
21+
```
22+
1623
## 1. Install the Guardrails Server
1724
This is done by simply installing the `guardrails-ai` package. See the [installation guide](./quickstart.md) for more information.
1825

docs/getting_started/quickstart.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,8 @@ pip install guardrails-ai
1717
```
1818

1919
### Configure the Guardrails CLI (required)
20+
21+
First, get a free auth key from [Guardrails Hub](https://hub.guardrailsai.com/keys). Then, configure the Guardrails CLI with the auth key.
2022

2123
```bash
2224
guardrails configure
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
# AWS - Deploying Guardrails Server
22

3-
This doc has moved to [continuous integration and deployment - aws](docs/how_to_guides/continuous_integration_continuous_deployment).
3+
This doc has moved to [continuous integration and deployment - aws](/docs/how_to_guides/continuous_integration_continuous_deployment).

docs/how_to_guides/using_llms.md

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -289,3 +289,49 @@ for chunk in stream_chunk_generator
289289
## Other LLMs
290290

291291
See LiteLLM’s documentation [here](https://docs.litellm.ai/docs/providers) for details on many other LLMs.
292+
293+
## Custom LLM Wrappers
294+
In case you're using an LLM that isn't natively supported by Guardrails and you don't want to use LiteLLM, you can build a custom LLM API wrapper. In order to use a custom LLM, create a function that accepts a positional argument for the prompt as a string and any other arguments that you want to pass to the LLM API as keyword args. The function should return the output of the LLM API as a string.
295+
296+
```python
297+
from guardrails import Guard
298+
from guardrails.hub import ProfanityFree
299+
300+
# Create a Guard class
301+
guard = Guard().use(ProfanityFree())
302+
303+
# Function that takes the prompt as a string and returns the LLM output as string
304+
def my_llm_api(
305+
prompt: Optional[str] = None,
306+
*,
307+
instructions: Optional[str] = None,
308+
msg_history: Optional[list[dict]] = None,
309+
**kwargs
310+
) -> str:
311+
"""Custom LLM API wrapper.
312+
313+
At least one of prompt, instruction or msg_history should be provided.
314+
315+
Args:
316+
prompt (str): The prompt to be passed to the LLM API
317+
instruction (str): The instruction to be passed to the LLM API
318+
msg_history (list[dict]): The message history to be passed to the LLM API
319+
**kwargs: Any additional arguments to be passed to the LLM API
320+
321+
Returns:
322+
str: The output of the LLM API
323+
"""
324+
325+
# Call your LLM API here
326+
# What you pass to the llm will depend on what arguments it accepts.
327+
llm_output = some_llm(prompt, instructions, msg_history, **kwargs)
328+
329+
return llm_output
330+
331+
# Wrap your LLM API call
332+
validated_response = guard(
333+
my_llm_api,
334+
prompt="Can you generate a list of 10 things that are not food?",
335+
**kwargs,
336+
)
337+
```

guardrails/api_client.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import json
22
import os
3-
from typing import Any, Iterable, Optional
3+
from typing import Any, Iterator, Optional
44

55
import requests
66
from guardrails_api_client.configuration import Configuration
@@ -80,7 +80,7 @@ def stream_validate(
8080
guard: Guard,
8181
payload: ValidatePayload,
8282
openai_api_key: Optional[str] = None,
83-
) -> Iterable[Any]:
83+
) -> Iterator[Any]:
8484
_openai_api_key = (
8585
openai_api_key
8686
if openai_api_key is not None

guardrails/applications/text2sql.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,14 @@
11
import asyncio
22
import json
33
import os
4+
import openai
45
from string import Template
56
from typing import Callable, Dict, Optional, Type, cast
67

78
from guardrails.classes import ValidationOutcome
89
from guardrails.document_store import DocumentStoreBase, EphemeralDocumentStore
910
from guardrails.embedding import EmbeddingBase, OpenAIEmbedding
1011
from guardrails.guard import Guard
11-
from guardrails.utils.openai_utils import get_static_openai_create_func
1212
from guardrails.utils.sql_utils import create_sql_driver
1313
from guardrails.vectordb import Faiss, VectorDBBase
1414

@@ -89,7 +89,7 @@ def __init__(
8989
reask_prompt: Prompt to use for reasking. Defaults to REASK_PROMPT.
9090
"""
9191
if llm_api is None:
92-
llm_api = get_static_openai_create_func()
92+
llm_api = openai.completions.create
9393

9494
self.example_formatter = example_formatter
9595
self.llm_api = llm_api

guardrails/async_guard.py

Lines changed: 10 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
from opentelemetry import context as otel_context
55
from typing import (
66
Any,
7-
AsyncIterable,
7+
AsyncIterator,
88
Awaitable,
99
Callable,
1010
Dict,
@@ -37,6 +37,7 @@
3737
set_tracer,
3838
set_tracer_context,
3939
)
40+
from guardrails.hub_telemetry.hub_tracing import async_trace
4041
from guardrails.types.pydantic import ModelOrListOfModels
4142
from guardrails.types.validator import UseManyValidatorSpec, UseValidatorSpec
4243
from guardrails.telemetry import trace_async_guard_execution, wrap_with_otel_context
@@ -187,7 +188,7 @@ async def _execute(
187188
) -> Union[
188189
ValidationOutcome[OT],
189190
Awaitable[ValidationOutcome[OT]],
190-
AsyncIterable[ValidationOutcome[OT]],
191+
AsyncIterator[ValidationOutcome[OT]],
191192
]:
192193
self._fill_validator_map()
193194
self._fill_validators()
@@ -219,49 +220,13 @@ async def __exec(
219220
) -> Union[
220221
ValidationOutcome[OT],
221222
Awaitable[ValidationOutcome[OT]],
222-
AsyncIterable[ValidationOutcome[OT]],
223+
AsyncIterator[ValidationOutcome[OT]],
223224
]:
224225
prompt_params = prompt_params or {}
225226
metadata = metadata or {}
226227
if full_schema_reask is None:
227228
full_schema_reask = self._base_model is not None
228229

229-
if self._allow_metrics_collection:
230-
llm_api_str = ""
231-
if llm_api:
232-
llm_api_module_name = (
233-
llm_api.__module__ if hasattr(llm_api, "__module__") else ""
234-
)
235-
llm_api_name = (
236-
llm_api.__name__
237-
if hasattr(llm_api, "__name__")
238-
else type(llm_api).__name__
239-
)
240-
llm_api_str = f"{llm_api_module_name}.{llm_api_name}"
241-
# Create a new span for this guard call
242-
self._hub_telemetry.create_new_span(
243-
span_name="/guard_call",
244-
attributes=[
245-
("guard_id", self.id),
246-
("user_id", self._user_id),
247-
("llm_api", llm_api_str),
248-
(
249-
"custom_reask_prompt",
250-
self._exec_opts.reask_prompt is not None,
251-
),
252-
(
253-
"custom_reask_instructions",
254-
self._exec_opts.reask_instructions is not None,
255-
),
256-
(
257-
"custom_reask_messages",
258-
self._exec_opts.reask_messages is not None,
259-
),
260-
],
261-
is_parent=True, # It will have children
262-
has_parent=False, # Has no parents
263-
)
264-
265230
set_call_kwargs(kwargs)
266231
set_tracer(self._tracer)
267232
set_tracer_context(self._tracer_context)
@@ -369,7 +334,7 @@ async def _exec(
369334
) -> Union[
370335
ValidationOutcome[OT],
371336
Awaitable[ValidationOutcome[OT]],
372-
AsyncIterable[ValidationOutcome[OT]],
337+
AsyncIterator[ValidationOutcome[OT]],
373338
]:
374339
"""Call the LLM asynchronously and validate the output.
375340
@@ -435,6 +400,7 @@ async def _exec(
435400
)
436401
return ValidationOutcome[OT].from_guard_history(call)
437402

403+
@async_trace(name="/guard_call", origin="AsyncGuard.__call__")
438404
async def __call__(
439405
self,
440406
llm_api: Optional[Callable[..., Awaitable[Any]]] = None,
@@ -450,7 +416,7 @@ async def __call__(
450416
) -> Union[
451417
ValidationOutcome[OT],
452418
Awaitable[ValidationOutcome[OT]],
453-
AsyncIterable[ValidationOutcome[OT]],
419+
AsyncIterator[ValidationOutcome[OT]],
454420
]:
455421
"""Call the LLM and validate the output. Pass an async LLM API to
456422
return a coroutine.
@@ -501,6 +467,7 @@ async def __call__(
501467
**kwargs,
502468
)
503469

470+
@async_trace(name="/guard_call", origin="AsyncGuard.parse")
504471
async def parse(
505472
self,
506473
llm_output: str,
@@ -567,7 +534,7 @@ async def parse(
567534

568535
async def _stream_server_call(
569536
self, *, payload: Dict[str, Any]
570-
) -> AsyncIterable[ValidationOutcome[OT]]:
537+
) -> AsyncIterator[ValidationOutcome[OT]]:
571538
# TODO: Once server side supports async streaming, this function will need to
572539
# yield async generators, not generators
573540
if self._api_client:
@@ -609,6 +576,7 @@ async def _stream_server_call(
609576
else:
610577
raise ValueError("AsyncGuard does not have an api client!")
611578

579+
@async_trace(name="/guard_call", origin="AsyncGuard.validate")
612580
async def validate(
613581
self, llm_output: str, *args, **kwargs
614582
) -> Awaitable[ValidationOutcome[OT]]:

guardrails/classes/__init__.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
1-
from guardrails.classes.credentials import Credentials
1+
from guardrails.classes.credentials import Credentials # type: ignore
2+
from guardrails.classes.rc import RC
23
from guardrails.classes.input_type import InputType
34
from guardrails.classes.output_type import OT
45
from guardrails.classes.validation.validation_result import (
@@ -10,7 +11,8 @@
1011
from guardrails.classes.validation_outcome import ValidationOutcome
1112

1213
__all__ = [
13-
"Credentials",
14+
"Credentials", # type: ignore
15+
"RC",
1416
"ErrorSpan",
1517
"InputType",
1618
"OT",

guardrails/classes/credentials.py

Lines changed: 22 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -1,21 +1,24 @@
11
import logging
2-
import os
32
from dataclasses import dataclass
4-
from os.path import expanduser
53
from typing import Optional
4+
from typing_extensions import deprecated
65

7-
from guardrails.classes.generic.serializeable import Serializeable
6+
from guardrails.classes.generic.serializeable import SerializeableJSONEncoder
7+
from guardrails.classes.rc import RC
88

99
BOOL_CONFIGS = set(["no_metrics", "enable_metrics", "use_remote_inferencing"])
1010

1111

12+
@deprecated(
13+
(
14+
"The `Credentials` class is deprecated and will be removed in version 0.6.x."
15+
" Use the `RC` class instead."
16+
),
17+
category=DeprecationWarning,
18+
)
1219
@dataclass
13-
class Credentials(Serializeable):
14-
id: Optional[str] = None
15-
token: Optional[str] = None
20+
class Credentials(RC):
1621
no_metrics: Optional[bool] = False
17-
enable_metrics: Optional[bool] = True
18-
use_remote_inferencing: Optional[bool] = True
1922

2023
@staticmethod
2124
def _to_bool(value: str) -> Optional[bool]:
@@ -27,51 +30,16 @@ def _to_bool(value: str) -> Optional[bool]:
2730

2831
@staticmethod
2932
def has_rc_file() -> bool:
30-
home = expanduser("~")
31-
guardrails_rc = os.path.join(home, ".guardrailsrc")
32-
return os.path.exists(guardrails_rc)
33+
return RC.exists()
3334

3435
@staticmethod
35-
def from_rc_file(logger: Optional[logging.Logger] = None) -> "Credentials":
36-
try:
37-
if not logger:
38-
logger = logging.getLogger()
39-
home = expanduser("~")
40-
guardrails_rc = os.path.join(home, ".guardrailsrc")
41-
with open(guardrails_rc, encoding="utf-8") as rc_file:
42-
lines = rc_file.readlines()
43-
filtered_lines = list(filter(lambda l: l.strip(), lines))
44-
creds = {}
45-
for line in filtered_lines:
46-
line_content = line.split("=", 1)
47-
if len(line_content) != 2:
48-
logger.warning(
49-
"""
50-
Invalid line found in .guardrailsrc file!
51-
All lines in this file should follow the format: key=value
52-
Ignoring line contents...
53-
"""
54-
)
55-
logger.debug(f".guardrailsrc file location: {guardrails_rc}")
56-
else:
57-
key, value = line_content
58-
key = key.strip()
59-
value = value.strip()
60-
if key in BOOL_CONFIGS:
61-
value = Credentials._to_bool(value)
62-
63-
creds[key] = value
64-
65-
rc_file.close()
66-
67-
# backfill no_metrics, handle defaults
68-
# remove in 0.5.0
69-
no_metrics_val = creds.pop("no_metrics", None)
70-
if no_metrics_val is not None and creds.get("enable_metrics") is None:
71-
creds["enable_metrics"] = not no_metrics_val
72-
73-
creds_dict = Credentials.from_dict(creds)
74-
return creds_dict
75-
76-
except FileNotFoundError:
77-
return Credentials.from_dict({}) # type: ignore
36+
def from_rc_file(logger: Optional[logging.Logger] = None) -> "Credentials": # type: ignore
37+
rc = RC.load(logger)
38+
return Credentials( # type: ignore
39+
id=rc.id,
40+
token=rc.token,
41+
enable_metrics=rc.enable_metrics,
42+
use_remote_inferencing=rc.use_remote_inferencing,
43+
no_metrics=(not rc.enable_metrics),
44+
encoder=SerializeableJSONEncoder(),
45+
)

0 commit comments

Comments
 (0)