6 changes: 2 additions & 4 deletions guardrails/llm_providers.py
```diff
@@ -278,8 +278,7 @@ def _invoke_llm(
             import torch
         except ImportError:
             raise PromptCallableException(
-                "The `torch` package is not installed. "
-                "Install with `pip install torch`"
+                "The `torch` package is not installed. Install with `pip install torch`"
             )
         prompt = messages_to_prompt_string(messages)
         tokenizer = kwargs.pop("tokenizer")
@@ -384,8 +383,7 @@ def _invoke_llm(
             import torch  # noqa: F401 # type: ignore
         except ImportError:
             raise PromptCallableException(
-                "The `torch` package is not installed. "
-                "Install with `pip install torch`"
+                "The `torch` package is not installed. Install with `pip install torch`"
             )

         content_key = kwargs.pop("content_key", "generated_text")
```
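Every hunk in this PR follows the same pattern: adjacent string literals, which Python concatenates implicitly at compile time, are folded into a single literal. Linters such as ruff flag the implicit form (rule ISC001) because the same language feature silently hides a missing comma. A minimal illustration, not taken from the codebase:

```python
# Adjacent string literals are fused at compile time, so both forms are equal.
joined = "The `torch` package is not installed. " "Install with `pip install torch`"
single = "The `torch` package is not installed. Install with `pip install torch`"
assert joined == single

# The same feature turns a missing comma into a silent bug:
extras = [
    "torch",
    "transformers"  # missing comma: the next literal fuses onto this one
    "tokenizers",
]
assert extras == ["torch", "transformerstokenizers"]  # two items, not three
```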
2 changes: 1 addition & 1 deletion guardrails/utils/openai_utils/v1.py
```diff
@@ -15,7 +15,7 @@
 
 def get_static_openai_create_func():
     warnings.warn(
-        "This function is deprecated. " " and will be removed in 0.6.0",
+        "This function is deprecated and will be removed in 0.6.0",
        DeprecationWarning,
    )
    return openai.completions.create
```
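As an aside, a deprecation shim like this one usually also passes `stacklevel=2` so the warning points at the caller rather than the shim. A hedged sketch of that pattern, standalone and with the `openai` return value stubbed out (`stacklevel` is my addition, not part of this diff):

```python
import warnings

def get_static_openai_create_func():
    # Sketch of the deprecation pattern; the real function returns
    # openai.completions.create, which this self-contained example omits.
    warnings.warn(
        "This function is deprecated and will be removed in 0.6.0",
        DeprecationWarning,
        stacklevel=2,  # attribute the warning to the caller's line
    )

get_static_openai_create_func()
```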
2 changes: 1 addition & 1 deletion guardrails/utils/prompt_utils.py
```diff
@@ -26,7 +26,7 @@ def prompt_content_for_string_schema(
     description = output_schema.get("description")
     if description:
         prompt_content += (
-            "Here's a description of what I want you to generate: " f"{description}"
+            f"Here's a description of what I want you to generate: {description}"
         )
     validators = validator_map.get(json_path, [])
     if len(validators):
```
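The removed line relied on a plain literal implicitly concatenated with an f-string, which is legal but easy to misread; the replacement carries the whole sentence in one f-string. A quick demonstration that the two spellings produce identical strings:

```python
description = "a haiku about rain"

before = "Here's a description of what I want you to generate: " f"{description}"
after = f"Here's a description of what I want you to generate: {description}"

assert before == after
```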
4 changes: 2 additions & 2 deletions guardrails/utils/structured_data_utils.py
```diff
@@ -29,10 +29,10 @@ def set_additional_properties_false_iteratively(schema):
                 current["properties"].keys()
             )  # this has to be set
         if "maximum" in current:
-            logger.warn("Property maximum is not supported." " Dropping")
+            logger.warn("Property maximum is not supported. Dropping")
             current.pop("maximum")  # the api does not like these set
         if "minimum" in current:
-            logger.warn("Property maximum is not supported." " Dropping")
+            logger.warn("Property minimum is not supported. Dropping")
             current.pop("minimum")  # the api does not like these set
         if "default" in current:
             logger.warn("Property default is not supported. Marking field Required")
```
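Unrelated to the string join, `logger.warn` is a long-deprecated alias for `logger.warning`. A standalone sketch of the same drop-unsupported-keys idea using the supported spelling (the helper name and the plain-dict traversal are illustrative, not the library's):

```python
import logging

logger = logging.getLogger(__name__)

def drop_unsupported_bounds(schema: dict) -> dict:
    """Recursively drop `minimum`/`maximum` from a JSON-Schema-like dict."""
    for key in ("minimum", "maximum"):
        if key in schema:
            logger.warning("Property %s is not supported. Dropping", key)
            schema.pop(key)
    for value in schema.values():
        if isinstance(value, dict):
            drop_unsupported_bounds(value)
    return schema

cleaned = drop_unsupported_bounds(
    {"type": "object", "properties": {"age": {"type": "integer", "minimum": 0}}}
)
assert "minimum" not in cleaned["properties"]["age"]
```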
3 changes: 1 addition & 2 deletions guardrails/utils/validator_utils.py
```diff
@@ -37,8 +37,7 @@ def parse_rail_arguments(arg_tokens: List[str]) -> List[Any]:
             t = literal_eval(t)
         except (ValueError, SyntaxError, NameError) as e:
             raise ValueError(
-                f"Python expression `{t}` is not valid, "
-                f"and raised an error: {e}."
+                f"Python expression `{t}` is not valid, and raised an error: {e}."
             )
         validator_args.append(t)
     return validator_args
```
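For context, `literal_eval` accepts only Python literals (strings, numbers, tuples, lists, dicts, sets, booleans, None) and raises on anything else, which is what the surrounding `except` clause converts into the friendlier message. A minimal illustration:

```python
from ast import literal_eval

assert literal_eval("3.5") == 3.5
assert literal_eval("['a', 'b']") == ["a", "b"]

try:
    literal_eval("os.system('echo hi')")  # names and calls are rejected
except ValueError as e:
    print(f"Expression is not valid, and raised an error: {e}.")
```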
3 changes: 1 addition & 2 deletions guardrails/validator_service/__init__.py
```diff
@@ -41,8 +41,7 @@ def should_run_sync():
     bool_values = ["true", "false"]
     if run_sync.lower() not in bool_values:
         warnings.warn(
-            f"GUARDRAILS_RUN_SYNC must be one of {bool_values}!"
-            f" Defaulting to 'false'."
+            f"GUARDRAILS_RUN_SYNC must be one of {bool_values}! Defaulting to 'false'."
         )
     return process_count == 1 or run_sync.lower() == "true"
 
```
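`should_run_sync` is a standard environment-flag check. A self-contained sketch of the same idea (the variable name and default come from the diff; the helper and its signature are mine):

```python
import os
import warnings

def env_flag_is_true(name: str, default: str = "false") -> bool:
    value = os.environ.get(name, default)
    bool_values = ["true", "false"]
    if value.lower() not in bool_values:
        warnings.warn(f"{name} must be one of {bool_values}! Defaulting to 'false'.")
        return False
    return value.lower() == "true"

os.environ["GUARDRAILS_RUN_SYNC"] = "TRUE"
assert env_flag_is_true("GUARDRAILS_RUN_SYNC")
```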
6,803 changes: 3,917 additions & 2,886 deletions poetry.lock

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions pyproject.toml
```diff
@@ -21,7 +21,7 @@ openai = "^1.30.1"
 rich = "^13.6.0"
 pydantic = ">=2.0.0, <3.0"
 typer = {version = ">=0.9.0,<0.16"}
-griffe = "^0.36.9"
+griffe = "^1.3.2"
 tenacity = ">=8.1.0"
 rstr = "^3.2.2"
 typing-extensions = "^4.8.0"
@@ -105,7 +105,7 @@ mkdocstrings = {extras = ["python"], version = "^0.23.0"}
 mkdocs-jupyter = ">=0.23"
 mkdocs-material = "^9.4.8"
 mknotebooks = "^0.8.0"
-griffe = "^0.36.9"
+griffe = "^1.3.2"
 pillow = "^10.1.0"
 cairosvg = "^2.7.1"
 mkdocs-glightbox = "^0.3.4"
```
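The `griffe` bump is more consequential than it looks: Poetry's caret operator never crosses the leftmost non-zero version component, so `^0.36.9` means `>=0.36.9,<0.37.0`, and griffe 1.x was unreachable until the constraint changed to `^1.3.2` (`>=1.3.2,<2.0.0`). The equivalent ranges, checked with the `packaging` library (used here purely for illustration):

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old = SpecifierSet(">=0.36.9,<0.37.0")  # what Poetry expands ^0.36.9 to
new = SpecifierSet(">=1.3.2,<2.0.0")    # what Poetry expands ^1.3.2 to

assert Version("1.3.2") not in old
assert Version("1.3.2") in new
```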
```diff
@@ -51,7 +51,7 @@ def invoke(
         chain.invoke({"topic": topic})
 
     assert str(exc_info.value) == (
-        "The response from the LLM failed validation!" f" {expected_error}"
+        f"The response from the LLM failed validation! {expected_error}"
     )
 
     else:
```
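The assertion follows the usual `pytest.raises` pattern: the raised exception is captured as `exc_info` and its message compared afterwards. A self-contained equivalent (the chain setup is omitted; `failing_invoke` is a stand-in):

```python
import pytest

def failing_invoke():
    raise ValueError("The response from the LLM failed validation! bad output")

def test_error_message():
    with pytest.raises(ValueError) as exc_info:
        failing_invoke()
    assert str(exc_info.value) == (
        "The response from the LLM failed validation! bad output"
    )
```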
4 changes: 1 addition & 3 deletions tests/integration_tests/mock_llm_outputs.py
```diff
@@ -138,9 +138,7 @@ def _invoke_llm(
             if hasattr(mock_llm_responses[key], "read"):
                 out_text = mock_llm_responses[key]
             else:
-                raise ValueError(
-                    "specify either prompt and instructions " "or messages"
-                )
+                raise ValueError("specify either prompt and instructions or messages")
             return LLMResponse(
                 output=out_text,
                 prompt_token_count=123,
```
4 changes: 2 additions & 2 deletions tests/integration_tests/test_assets/validators/detect_pii.py
```diff
@@ -140,7 +140,7 @@ def validate(self, value: Any, metadata: Dict[str, Any]) -> ValidationResult:
             entities_to_filter = pii_entities
         else:
             raise ValueError(
-                f"`pii_entities` must be one of {pii_keys}" " or a list of strings."
+                f"`pii_entities` must be one of {pii_keys} or a list of strings."
             )
 
         # Analyze the text, and anonymize it if there is PII
@@ -174,7 +174,7 @@ def validate(self, value: Any, metadata: Dict[str, Any]) -> ValidationResult:
                 ErrorSpan(
                     start=diff_range[0],
                     end=diff_range[1],
-                    reason=f"PII detected in {value[diff_range[0]:diff_range[1]]}",
+                    reason=f"PII detected in {value[diff_range[0] : diff_range[1]]}",
                 )
             )
 
```
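The second hunk changes only whitespace: PEP 8 treats the slice colon as a binary operator and, when the bounds are non-trivial expressions like `diff_range[0]`, formatters such as Black put a space on each side. Behavior is unchanged:

```python
value = "My name is Jane Doe"
diff_range = (11, 19)

assert (
    value[diff_range[0]:diff_range[1]]
    == value[diff_range[0] : diff_range[1]]
    == "Jane Doe"
)
```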
2 changes: 1 addition & 1 deletion tests/unit_tests/cli/hub/test_uninstall.py
```diff
@@ -29,7 +29,7 @@ def test_remove_from_hub_inits(mocker):
     expected_calls = [
         call(
             "/site-packages/guardrails/hub/__init__.py",
-            "from guardrails_grhub_test_package import " "Validator, Helper",
+            "from guardrails_grhub_test_package import Validator, Helper",
         ),
     ]
 
```
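For reference, `expected_calls` feeds the standard `unittest.mock` call-assertion API. A minimal standalone version (the mocked writer function is hypothetical):

```python
from unittest.mock import MagicMock, call

write_file = MagicMock()
write_file(
    "/site-packages/guardrails/hub/__init__.py",
    "from guardrails_grhub_test_package import Validator, Helper",
)

write_file.assert_has_calls([
    call(
        "/site-packages/guardrails/hub/__init__.py",
        "from guardrails_grhub_test_package import Validator, Helper",
    ),
])
```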
6 changes: 3 additions & 3 deletions tests/unit_tests/test_validator_base.py
```diff
@@ -486,7 +486,7 @@ def mock_llm_api(messages, *args, **kwargs):
         ],
     )
     assert str(excinfo.value) == (
-        "Validation failed for field with errors:" " must be exactly two words"
+        "Validation failed for field with errors: must be exactly two words"
     )
     assert isinstance(guard.history.first.exception, ValidationError)
     assert guard.history.first.exception == excinfo.value
@@ -592,7 +592,7 @@ async def mock_llm_api(messages, *args, **kwargs) -> str:
 
 
 @pytest.mark.parametrize(
-    "on_fail," "structured_messages_error," "unstructured_messages_error,",
+    "on_fail,structured_messages_error,unstructured_messages_error,",
     [
         (
             OnFailAction.REASK,
@@ -674,7 +674,7 @@ def custom_llm(messages, *args, **kwargs):
 
 
 @pytest.mark.parametrize(
-    "on_fail," "structured_messages_error," "unstructured_messages_error,",
+    "on_fail,structured_messages_error,unstructured_messages_error,",
     [
         (
             OnFailAction.REASK,
```
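Worth noting: `pytest.mark.parametrize` takes its argument names as one comma-separated string, so the three adjacent literals were equivalent to the single joined one all along; here the implicit concatenation was harmless, just noisier. A minimal demonstration of the single-string form:

```python
import pytest

@pytest.mark.parametrize(
    "on_fail,structured_messages_error,unstructured_messages_error",
    [
        ("reask", "structured error", "unstructured error"),
    ],
)
def test_messages(on_fail, structured_messages_error, unstructured_messages_error):
    assert on_fail == "reask"
```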