Commit b40b514

Merge pull request #1071 from guardrails-ai/telemetry-update
Telemetry update
2 parents: 0ba86cb + 48c93f4

48 files changed: +936 −655 lines

guardrails/api_client.py

Lines changed: 2 additions & 2 deletions
@@ -1,6 +1,6 @@
 import json
 import os
-from typing import Any, Iterable, Optional
+from typing import Any, Iterator, Optional
 
 import requests
 from guardrails_api_client.configuration import Configuration
@@ -80,7 +80,7 @@ def stream_validate(
         guard: Guard,
         payload: ValidatePayload,
         openai_api_key: Optional[str] = None,
-    ) -> Iterable[Any]:
+    ) -> Iterator[Any]:
         _openai_api_key = (
            openai_api_key
            if openai_api_key is not None
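The Iterable-to-Iterator narrowing repeated throughout this PR makes the annotations match what these functions actually produce: `stream_validate` hands back a generator, and a generator is an Iterator (single-pass, supports `next()`), not merely an Iterable. A minimal sketch of the distinction, using a hypothetical `stream_chunks` helper:

    from typing import Iterator

    def stream_chunks(raw: str, size: int = 4) -> Iterator[str]:
        # A generator function returns an iterator: next() works on the
        # result directly, and it can be consumed exactly once.
        for start in range(0, len(raw), size):
            yield raw[start : start + size]

    chunks = stream_chunks("hello world")
    print(next(chunks))  # "hell" -- valid on an Iterator, not on every Iterable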

guardrails/async_guard.py

Lines changed: 10 additions & 42 deletions
@@ -4,7 +4,7 @@
 from opentelemetry import context as otel_context
 from typing import (
     Any,
-    AsyncIterable,
+    AsyncIterator,
     Awaitable,
     Callable,
     Dict,
@@ -37,6 +37,7 @@
     set_tracer,
     set_tracer_context,
 )
+from guardrails.hub_telemetry.hub_tracing import async_trace
 from guardrails.types.pydantic import ModelOrListOfModels
 from guardrails.types.validator import UseManyValidatorSpec, UseValidatorSpec
 from guardrails.telemetry import trace_async_guard_execution, wrap_with_otel_context
@@ -187,7 +188,7 @@ async def _execute(
     ) -> Union[
         ValidationOutcome[OT],
         Awaitable[ValidationOutcome[OT]],
-        AsyncIterable[ValidationOutcome[OT]],
+        AsyncIterator[ValidationOutcome[OT]],
     ]:
         self._fill_validator_map()
         self._fill_validators()
@@ -219,49 +220,13 @@ async def __exec(
         ) -> Union[
             ValidationOutcome[OT],
             Awaitable[ValidationOutcome[OT]],
-            AsyncIterable[ValidationOutcome[OT]],
+            AsyncIterator[ValidationOutcome[OT]],
         ]:
             prompt_params = prompt_params or {}
             metadata = metadata or {}
             if full_schema_reask is None:
                 full_schema_reask = self._base_model is not None
 
-            if self._allow_metrics_collection:
-                llm_api_str = ""
-                if llm_api:
-                    llm_api_module_name = (
-                        llm_api.__module__ if hasattr(llm_api, "__module__") else ""
-                    )
-                    llm_api_name = (
-                        llm_api.__name__
-                        if hasattr(llm_api, "__name__")
-                        else type(llm_api).__name__
-                    )
-                    llm_api_str = f"{llm_api_module_name}.{llm_api_name}"
-                # Create a new span for this guard call
-                self._hub_telemetry.create_new_span(
-                    span_name="/guard_call",
-                    attributes=[
-                        ("guard_id", self.id),
-                        ("user_id", self._user_id),
-                        ("llm_api", llm_api_str),
-                        (
-                            "custom_reask_prompt",
-                            self._exec_opts.reask_prompt is not None,
-                        ),
-                        (
-                            "custom_reask_instructions",
-                            self._exec_opts.reask_instructions is not None,
-                        ),
-                        (
-                            "custom_reask_messages",
-                            self._exec_opts.reask_messages is not None,
-                        ),
-                    ],
-                    is_parent=True,  # It will have children
-                    has_parent=False,  # Has no parents
-                )
-
             set_call_kwargs(kwargs)
             set_tracer(self._tracer)
             set_tracer_context(self._tracer_context)
@@ -369,7 +334,7 @@ async def _exec(
     ) -> Union[
         ValidationOutcome[OT],
         Awaitable[ValidationOutcome[OT]],
-        AsyncIterable[ValidationOutcome[OT]],
+        AsyncIterator[ValidationOutcome[OT]],
     ]:
         """Call the LLM asynchronously and validate the output.
 
@@ -435,6 +400,7 @@ async def _exec(
         )
         return ValidationOutcome[OT].from_guard_history(call)
 
+    @async_trace(name="/guard_call", origin="AsyncGuard.__call__")
     async def __call__(
         self,
         llm_api: Optional[Callable[..., Awaitable[Any]]] = None,
@@ -450,7 +416,7 @@ async def __call__(
     ) -> Union[
         ValidationOutcome[OT],
         Awaitable[ValidationOutcome[OT]],
-        AsyncIterable[ValidationOutcome[OT]],
+        AsyncIterator[ValidationOutcome[OT]],
     ]:
         """Call the LLM and validate the output. Pass an async LLM API to
         return a coroutine.
@@ -501,6 +467,7 @@ async def __call__(
             **kwargs,
         )
 
+    @async_trace(name="/guard_call", origin="AsyncGuard.parse")
     async def parse(
         self,
         llm_output: str,
@@ -567,7 +534,7 @@ async def parse(
 
     async def _stream_server_call(
         self, *, payload: Dict[str, Any]
-    ) -> AsyncIterable[ValidationOutcome[OT]]:
+    ) -> AsyncIterator[ValidationOutcome[OT]]:
         # TODO: Once server side supports async streaming, this function will need to
         # yield async generators, not generators
         if self._api_client:
@@ -609,6 +576,7 @@ async def _stream_server_call(
         else:
             raise ValueError("AsyncGuard does not have an api client!")
 
+    @async_trace(name="/guard_call", origin="AsyncGuard.validate")
     async def validate(
         self, llm_output: str, *args, **kwargs
     ) -> Awaitable[ValidationOutcome[OT]]:
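The main telemetry change in this file: the hand-rolled `create_new_span` block inside `__exec` is deleted, and span creation moves into the `@async_trace` decorator applied once to each public entry point. A minimal sketch of that pattern, assuming only the general shape of a tracing decorator; the real `async_trace` lives in `guardrails.hub_telemetry.hub_tracing`, and the `print` calls below are stand-ins for actual span creation:

    import asyncio
    import functools
    from typing import Any, Callable

    def async_trace(name: str, origin: str) -> Callable:
        def decorator(fn: Callable[..., Any]) -> Callable[..., Any]:
            @functools.wraps(fn)
            async def wrapper(*args: Any, **kwargs: Any) -> Any:
                # Span setup lives here once, instead of being repeated
                # inline in every entry point (__call__, parse, validate).
                print(f"[span open] {name} (origin={origin})")
                try:
                    return await fn(*args, **kwargs)
                finally:
                    print(f"[span close] {name}")
            return wrapper
        return decorator

    @async_trace(name="/guard_call", origin="Demo.run")
    async def run() -> str:
        return "ok"

    print(asyncio.run(run()))  # span open, span close, then "ok"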

guardrails/classes/__init__.py

Lines changed: 4 additions & 2 deletions
@@ -1,4 +1,5 @@
-from guardrails.classes.credentials import Credentials
+from guardrails.classes.credentials import Credentials  # type: ignore
+from guardrails.classes.rc import RC
 from guardrails.classes.input_type import InputType
 from guardrails.classes.output_type import OT
 from guardrails.classes.validation.validation_result import (
@@ -10,7 +11,8 @@
 from guardrails.classes.validation_outcome import ValidationOutcome
 
 __all__ = [
-    "Credentials",
+    "Credentials",  # type: ignore
+    "RC",
     "ErrorSpan",
     "InputType",
     "OT",

guardrails/classes/credentials.py

Lines changed: 22 additions & 54 deletions
@@ -1,21 +1,24 @@
 import logging
-import os
 from dataclasses import dataclass
-from os.path import expanduser
 from typing import Optional
+from typing_extensions import deprecated
 
-from guardrails.classes.generic.serializeable import Serializeable
+from guardrails.classes.generic.serializeable import SerializeableJSONEncoder
+from guardrails.classes.rc import RC
 
 BOOL_CONFIGS = set(["no_metrics", "enable_metrics", "use_remote_inferencing"])
 
 
+@deprecated(
+    (
+        "The `Credentials` class is deprecated and will be removed in version 0.6.x."
+        " Use the `RC` class instead."
+    ),
+    category=DeprecationWarning,
+)
 @dataclass
-class Credentials(Serializeable):
-    id: Optional[str] = None
-    token: Optional[str] = None
+class Credentials(RC):
     no_metrics: Optional[bool] = False
-    enable_metrics: Optional[bool] = True
-    use_remote_inferencing: Optional[bool] = True
 
     @staticmethod
     def _to_bool(value: str) -> Optional[bool]:
@@ -27,51 +30,16 @@ def _to_bool(value: str) -> Optional[bool]:
 
     @staticmethod
     def has_rc_file() -> bool:
-        home = expanduser("~")
-        guardrails_rc = os.path.join(home, ".guardrailsrc")
-        return os.path.exists(guardrails_rc)
+        return RC.exists()
 
     @staticmethod
-    def from_rc_file(logger: Optional[logging.Logger] = None) -> "Credentials":
-        try:
-            if not logger:
-                logger = logging.getLogger()
-            home = expanduser("~")
-            guardrails_rc = os.path.join(home, ".guardrailsrc")
-            with open(guardrails_rc, encoding="utf-8") as rc_file:
-                lines = rc_file.readlines()
-                filtered_lines = list(filter(lambda l: l.strip(), lines))
-                creds = {}
-                for line in filtered_lines:
-                    line_content = line.split("=", 1)
-                    if len(line_content) != 2:
-                        logger.warning(
-                            """
-                            Invalid line found in .guardrailsrc file!
-                            All lines in this file should follow the format: key=value
-                            Ignoring line contents...
-                            """
-                        )
-                        logger.debug(f".guardrailsrc file location: {guardrails_rc}")
-                    else:
-                        key, value = line_content
-                        key = key.strip()
-                        value = value.strip()
-                        if key in BOOL_CONFIGS:
-                            value = Credentials._to_bool(value)
-
-                        creds[key] = value
-
-                rc_file.close()
-
-            # backfill no_metrics, handle defaults
-            # remove in 0.5.0
-            no_metrics_val = creds.pop("no_metrics", None)
-            if no_metrics_val is not None and creds.get("enable_metrics") is None:
-                creds["enable_metrics"] = not no_metrics_val
-
-            creds_dict = Credentials.from_dict(creds)
-            return creds_dict
-
-        except FileNotFoundError:
-            return Credentials.from_dict({})  # type: ignore
+    def from_rc_file(logger: Optional[logging.Logger] = None) -> "Credentials":  # type: ignore
+        rc = RC.load(logger)
+        return Credentials(  # type: ignore
+            id=rc.id,
+            token=rc.token,
+            enable_metrics=rc.enable_metrics,
+            use_remote_inferencing=rc.use_remote_inferencing,
+            no_metrics=(not rc.enable_metrics),
+            encoder=SerializeableJSONEncoder(),
+        )
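`Credentials` now subclasses the new `RC` dataclass and is wrapped in `typing_extensions.deprecated`, which both flags usages for type checkers and emits a runtime warning on instantiation (runtime support landed in typing_extensions 4.5.0). A small demonstration of that decorator on a hypothetical `OldConfig` class:

    import warnings

    from typing_extensions import deprecated

    @deprecated("Use `NewConfig` instead.", category=DeprecationWarning)
    class OldConfig:
        pass

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        OldConfig()  # instantiation triggers the warning

    print(caught[0].category.__name__)  # DeprecationWarning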

guardrails/classes/llm/llm_response.py

Lines changed: 6 additions & 6 deletions
@@ -1,6 +1,6 @@
 import asyncio
 from itertools import tee
-from typing import Any, Dict, Iterable, Optional, AsyncIterable
+from typing import Any, Dict, Iterator, Optional, AsyncIterator
 
 from guardrails_api_client import LLMResponse as ILLMResponse
 from pydantic.config import ConfigDict
@@ -19,9 +19,9 @@ class LLMResponse(ILLMResponse):
 
     Attributes:
         output (str): The output from the LLM.
-        stream_output (Optional[Iterable]): A stream of output from the LLM.
+        stream_output (Optional[Iterator]): A stream of output from the LLM.
             Default None.
-        async_stream_output (Optional[AsyncIterable]): An async stream of output
+        async_stream_output (Optional[AsyncIterator]): An async stream of output
             from the LLM. Default None.
         prompt_token_count (Optional[int]): The number of tokens in the prompt.
             Default None.
@@ -35,8 +35,8 @@ class LLMResponse(ILLMResponse):
     prompt_token_count: Optional[int] = None
     response_token_count: Optional[int] = None
     output: str
-    stream_output: Optional[Iterable] = None
-    async_stream_output: Optional[AsyncIterable] = None
+    stream_output: Optional[Iterator] = None
+    async_stream_output: Optional[AsyncIterator] = None
 
     def to_interface(self) -> ILLMResponse:
         stream_output = None
@@ -73,7 +73,7 @@ def to_dict(self) -> Dict[str, Any]:
     def from_interface(cls, i_llm_response: ILLMResponse) -> "LLMResponse":
         stream_output = None
         if i_llm_response.stream_output:
-            stream_output = [so for so in i_llm_response.stream_output]
+            stream_output = iter([so for so in i_llm_response.stream_output])
 
         async_stream_output = None
         if i_llm_response.async_stream_output:
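The `iter(...)` wrapper in the last hunk exists because the materialized list would no longer satisfy the narrowed `Optional[Iterator]` annotation: a list is an Iterable but not an Iterator. Briefly:

    from typing import Iterator

    materialized = ["partial ", "output"]
    stream: Iterator[str] = iter(materialized)  # a bare list would not type-check here
    print(next(stream))  # "partial "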

guardrails/classes/rc.py

Lines changed: 71 additions & 0 deletions
@@ -0,0 +1,71 @@
+import logging
+import os
+from dataclasses import dataclass
+from os.path import expanduser
+from typing import Optional
+
+from guardrails.classes.generic.serializeable import Serializeable
+from guardrails.utils.casting_utils import to_bool
+
+BOOL_CONFIGS = set(["no_metrics", "enable_metrics", "use_remote_inferencing"])
+
+
+@dataclass
+class RC(Serializeable):
+    id: Optional[str] = None
+    token: Optional[str] = None
+    enable_metrics: Optional[bool] = True
+    use_remote_inferencing: Optional[bool] = True
+
+    @staticmethod
+    def exists() -> bool:
+        home = expanduser("~")
+        guardrails_rc = os.path.join(home, ".guardrailsrc")
+        return os.path.exists(guardrails_rc)
+
+    @classmethod
+    def load(cls, logger: Optional[logging.Logger] = None) -> "RC":
+        try:
+            if not logger:
+                logger = logging.getLogger()
+            home = expanduser("~")
+            guardrails_rc = os.path.join(home, ".guardrailsrc")
+            with open(guardrails_rc, encoding="utf-8") as rc_file:
+                lines = rc_file.readlines()
+                filtered_lines = list(filter(lambda l: l.strip(), lines))
+                config = {}
+                for line in filtered_lines:
+                    line_content = line.split("=", 1)
+                    if len(line_content) != 2:
+                        logger.warning(
+                            """
+                            Invalid line found in .guardrailsrc file!
+                            All lines in this file should follow the format: key=value
+                            Ignoring line contents...
+                            """
+                        )
+                        logger.debug(f".guardrailsrc file location: {guardrails_rc}")
+                    else:
+                        key, value = line_content
+                        key = key.strip()
+                        value = value.strip()
+                        if key in BOOL_CONFIGS:
+                            value = to_bool(value)
+
+                        config[key] = value
+
+                rc_file.close()
+
+            # backfill no_metrics, handle defaults
+            # We missed this comment in the 0.5.0 release
+            # Making it a TODO for 0.6.0
+            # TODO: remove in 0.6.0
+            no_metrics_val = config.pop("no_metrics", None)
+            if no_metrics_val is not None and config.get("enable_metrics") is None:
+                config["enable_metrics"] = not no_metrics_val
+
+            rc = cls.from_dict(config)
+            return rc
+
+        except FileNotFoundError:
+            return cls.from_dict({})  # type: ignore
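One subtlety worth isolating from `RC.load`: the legacy `no_metrics` key is popped from the parsed config and only backfills `enable_metrics` when the newer key is absent, so an explicit `enable_metrics` setting always wins. The same logic, runnable on its own:

    # Legacy-key backfill as implemented in RC.load.
    config = {"id": "abc", "no_metrics": True}

    no_metrics_val = config.pop("no_metrics", None)
    if no_metrics_val is not None and config.get("enable_metrics") is None:
        config["enable_metrics"] = not no_metrics_val

    print(config)  # {'id': 'abc', 'enable_metrics': False}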

guardrails/cli/configure.py

Lines changed: 4 additions & 3 deletions
@@ -6,7 +6,7 @@
 
 import typer
 
-from guardrails.classes.credentials import Credentials
+from guardrails.settings import settings
 from guardrails.cli.guardrails import guardrails
 from guardrails.cli.logger import LEVELS, logger
 from guardrails.cli.hub.console import console
@@ -46,7 +46,7 @@ def save_configuration_file(
 
 def _get_default_token() -> str:
     """Get the default token from the configuration file."""
-    file_token = Credentials.from_rc_file(logger).token
+    file_token = settings.rc.token
     if file_token is None:
         return ""
     return file_token
@@ -79,7 +79,8 @@ def configure(
     ),
 ):
     version_warnings_if_applicable(console)
-    trace_if_enabled("configure")
+    if settings.rc.exists():
+        trace_if_enabled("configure")
     existing_token = _get_default_token()
     last4 = existing_token[-4:] if existing_token else ""
