Skip to content

Commit d3f9a64

Browse files
committed
fix telem problem from api and add e2e server test gh action
1 parent f835998 commit d3f9a64

File tree

10 files changed

+285
-7
lines changed

10 files changed

+285
-7
lines changed

.dockerignore

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
__pycache__
2+
*.pyc
3+
*.pyo

.github/workflows/server_ci.yml

Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
name: Server CI
2+
3+
on:
4+
push:
5+
branches:
6+
- main
7+
pull_request:
8+
branches:
9+
- main
10+
workflow_dispatch:
11+
12+
jobs:
13+
build-test:
14+
runs-on: ubuntu-latest
15+
steps:
16+
- name: Check out head
17+
uses: actions/checkout@v3
18+
with:
19+
persist-credentials: false
20+
21+
- name: Set up QEMU
22+
uses: docker/setup-qemu-action@master
23+
with:
24+
platforms: linux/amd64
25+
26+
- name: Set up Docker Buildx
27+
uses: docker/setup-buildx-action@master
28+
with:
29+
platforms: linux/amd64
30+
31+
- name: Build Docker image
32+
uses: docker/build-push-action@v6
33+
with:
34+
context: guard
35+
file: server_ci/Dockerfile
36+
platforms: linux/amd64
37+
push: false
38+
tags: guardrails:${{ github.sha }}
39+
load: true
40+
build-args: |
41+
GUARDRAILS_TOKEN=${{ secrets.GUARDRAILS_API_KEY }}
42+
43+
- name: Start Docker container
44+
run: |
45+
docker run -d --name guardrails-container -p 8000:8000 -e OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }} guardrails:${{ github.sha }}
46+
47+
- name: Wait for Docker container to be ready
48+
run: |
49+
for i in {1..30}; do
50+
if docker exec guardrails-container curl -s http://localhost:8000/; then
51+
echo "Server is up!"
52+
break
53+
fi
54+
echo "Waiting for server..."
55+
sleep 5
56+
done
57+
58+
- name: Run Pytest
59+
run: |
60+
pip install pytest openai guardrails-ai
61+
pytest server_ci/tests
62+
docker stop guardrails-container
63+
docker rm guardrails-container

guardrails/cli/create.py

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ def create_command(
2222
default="",
2323
help="A comma-separated list of validator hub URIs.",
2424
),
25-
name: Optional[str] = typer.Option(
25+
guard_name: Optional[str] = typer.Option(
2626
default=None, help="The name of the guard to define in the file."
2727
),
2828
local_models: Optional[bool] = typer.Option(
@@ -78,13 +78,15 @@ def create_command(
7878
local_models,
7979
dry_run,
8080
)
81-
if name is None and validators:
82-
name = "Guard"
81+
if guard_name is None and validators:
82+
guard_name = "Guard"
8383
if len(installed_validators) > 0:
84-
name = installed_validators[0] + "Guard"
84+
guard_name = installed_validators[0] + "Guard"
8585

86-
console.print(f"No name provided for guard. Defaulting to {name}")
87-
new_config_file = generate_config_file(installed_validators, name)
86+
console.print(
87+
"No guard name provided for guard. Defaulting to {guard_name}"
88+
)
89+
new_config_file = generate_config_file(installed_validators, guard_name)
8890

8991
if dry_run:
9092
console.print(f"Not actually saving output to [bold]{filepath}[/bold]")

guardrails/cli/telemetry.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,4 +19,6 @@ def trace_if_enabled(command_name: str):
1919
("machine", platform.machine()),
2020
("processor", platform.processor()),
2121
],
22+
False,
23+
False,
2224
)

guardrails/utils/hub_telemetry_utils.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,14 @@ def extract_current_context(self):
104104
context = self._prop.extract(carrier=self._carrier)
105105
return context
106106

107-
def create_new_span(self, span_name: str, attributes: list):
107+
def create_new_span(
108+
self,
109+
span_name: str,
110+
attributes: list,
111+
# todo deprecate these in 060
112+
is_parent: bool, #
113+
has_parent: bool, # no-qa
114+
):
108115
"""Creates a new span within the tracer with the given name and
109116
attributes.
110117

server_ci/Dockerfile

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
FROM python:3.11-slim
2+
3+
ARG GUARDRAILS_TOKEN
4+
ARG GUARDRAILS_TEMPLATE="guard-template.json"
5+
6+
# Set environment variables to avoid writing .pyc files and to unbuffer Python output
7+
ENV PYTHONDONTWRITEBYTECODE=1
8+
ENV PYTHONUNBUFFERED=1
9+
ENV LOGLEVEL="DEBUG"
10+
ENV GUARDRAILS_LOG_LEVEL="DEBUG"
11+
ENV APP_ENVIRONMENT="production"
12+
ENV GUARDRAILS_TEMPLATE=$GUARDRAILS_TEMPLATE
13+
14+
WORKDIR /app
15+
16+
# Install Git and necessary dependencies
17+
RUN apt-get update && \
18+
apt-get install -y make git curl gcc jq pipx && \
19+
apt-get clean && \
20+
rm -rf /var/lib/apt/lists/*
21+
22+
RUN pipx install poetry
23+
24+
# Ensure poetry is available in the PATH
25+
ENV PATH="/root/.local/bin:$PATH"
26+
27+
# Copy the entrypoint script
28+
COPY /server_ci/entry.sh /app/entry.sh
29+
COPY ../ /app/guardrails
30+
31+
# Install guardrails, the guardrails API, and gunicorn
32+
# openai optional. only used for integration testing
33+
RUN pip install "gunicorn" "guardrails-api"
34+
35+
WORKDIR /app/guardrails
36+
37+
RUN poetry install --all-extras
38+
39+
RUN pip install ./
40+
41+
RUN guardrails configure --enable-metrics --enable-remote-inferencing --token $GUARDRAILS_TOKEN
42+
43+
# bring in base template
44+
COPY /server_ci/$GUARDRAILS_TEMPLATE /app/$GUARDRAILS_TEMPLATE
45+
46+
# Install Hub Deps and create config.py
47+
RUN guardrails create --template /app/$GUARDRAILS_TEMPLATE
48+
49+
50+
RUN cp -r /usr/local/lib/python3.11/site-packages/guardrails/hub/* /app/guardrails/guardrails/hub
51+
52+
# Expose port 8000 for the application
53+
EXPOSE 8000
54+
55+
# Command to start the Gunicorn server with specified settings
56+
CMD ["/bin/bash", "/app/entry.sh"]

server_ci/config.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
import json
2+
import os
3+
from guardrails import Guard
4+
5+
try:
6+
file_path = os.path.join(os.getcwd(), "guard-template.json")
7+
with open(file_path, "r") as fin:
8+
guards = json.load(fin)["guards"] or []
9+
except json.JSONDecodeError:
10+
print("Error parsing guards from JSON")
11+
SystemExit(1)
12+
13+
# instantiate guards
14+
guard0 = Guard.from_dict(guards[0])

server_ci/entry.sh

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
gunicorn \
2+
--workers 3 \
3+
--threads 2 \
4+
--bind 0.0.0.0:8000 \
5+
--worker-class gthread \
6+
--timeout 30 \
7+
--keep-alive 20 \
8+
--preload \
9+
--graceful-timeout 60 \
10+
"guardrails_api.app:create_app()"

server_ci/guard-template.json

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
{
2+
"name": "server-ci",
3+
"description": "basic guard template for oss server ci",
4+
"template_version": "0.0.1",
5+
"namespace": "guardrails",
6+
"guards": [
7+
{
8+
"id": "test-guard",
9+
"name": "test-guard",
10+
"validators": [
11+
{
12+
"id": "guardrails/detect_pii",
13+
"on": "msg_history",
14+
"onFail": "fix",
15+
"kwargs": {
16+
"pii_entities": ["PERSON","PHONE_NUMBER","LOCATION"]
17+
}
18+
}
19+
]
20+
}
21+
]
22+
}

server_ci/tests/test_server.py

Lines changed: 99 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,99 @@
1+
import openai
2+
import os
3+
import pytest
4+
from guardrails import Guard, settings
5+
6+
# OpenAI compatible Guardrails API Guard
7+
openai.base_url = "http://127.0.0.1:8000/guards/test-guard/openai/v1/"
8+
9+
openai.api_key = os.getenv("OPENAI_API_KEY") or "some key"
10+
11+
12+
@pytest.mark.parametrize(
13+
"mock_llm_output, validation_output, validation_passed, error",
14+
[
15+
(
16+
"France is wonderful in the spring",
17+
"France is wonderful in the spring",
18+
True,
19+
False,
20+
),
21+
],
22+
)
23+
def test_guard_validation(mock_llm_output, validation_output, validation_passed, error):
24+
settings.use_server = True
25+
guard = Guard(name="test-guard")
26+
if error:
27+
with pytest.raises(Exception):
28+
validation_outcome = guard.validate(mock_llm_output)
29+
else:
30+
validation_outcome = guard.validate(mock_llm_output)
31+
assert validation_outcome.validation_passed == validation_passed
32+
assert validation_outcome.validated_output == validation_output
33+
34+
35+
@pytest.mark.parametrize(
36+
"message_content, output, validation_passed, error",
37+
[
38+
(
39+
"Tell me about Oranges in 5 words",
40+
"Citrus fruit, sweet, nutritious, vibrant.",
41+
True,
42+
False,
43+
),
44+
],
45+
)
46+
def test_server_guard_llm_integration(
47+
message_content, output, validation_passed, error
48+
):
49+
settings.use_server = True
50+
guard = Guard(name="test-guard")
51+
messages = [{"role": "user", "content": message_content}]
52+
if error:
53+
with pytest.raises(Exception):
54+
validation_outcome = guard(
55+
model="gpt-3.5-turbo",
56+
messages=messages,
57+
temperature=0.0,
58+
)
59+
else:
60+
validation_outcome = guard(
61+
model="gpt-4o-mini",
62+
messages=messages,
63+
temperature=0.0,
64+
)
65+
assert (output) in validation_outcome.validated_output
66+
assert (validation_outcome.validation_passed) is validation_passed
67+
68+
69+
@pytest.mark.parametrize(
70+
"message_content, output, validation_passed, error",
71+
[
72+
("Tell me about Paris in 5 words", "doesnt matter this errors", True, True),
73+
(
74+
"Write 5 words of prose.",
75+
"Whispers of dawn kissed the horizon.",
76+
True,
77+
False,
78+
),
79+
],
80+
)
81+
def test_server_openai_llm_integration(
82+
message_content, output, validation_passed, error
83+
):
84+
messages = [{"role": "user", "content": message_content}]
85+
if error:
86+
with pytest.raises(Exception):
87+
completion = openai.chat.completions.create(
88+
model="gpt-4o-mini",
89+
messages=messages,
90+
temperature=0.0,
91+
)
92+
else:
93+
completion = openai.chat.completions.create(
94+
model="gpt-4o-mini",
95+
messages=messages,
96+
temperature=0.0,
97+
)
98+
assert (output) in completion.choices[0].message.content
99+
assert (completion.guardrails["validation_passed"]) is validation_passed

0 commit comments

Comments
 (0)