280 changes: 14 additions & 266 deletions aap_chatbot/package-lock.json

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions aap_chatbot/src/AnsibleChatbot/AnsibleChatbot.tsx
@@ -108,6 +108,8 @@ export const AnsibleChatbot: React.FunctionComponent<ChatbotContext> = (
hasStopButton,
handleStopButton,
isStreamingSupported,
bypassTools,
setBypassTools,
} = useChatbot();
const [chatbotVisible, setChatbotVisible] = useState<boolean>(true);
const [displayMode] = useState<ChatbotDisplayMode>(
@@ -226,6 +228,8 @@
<SystemPromptModal
systemPrompt={systemPrompt}
setSystemPrompt={setSystemPrompt}
bypassTools={bypassTools}
setBypassTools={setBypassTools}
/>
)}
</ChatbotHeaderActions>
35 changes: 34 additions & 1 deletion aap_chatbot/src/App.test.tsx
@@ -8,7 +8,6 @@ import React from "react";
// vitest-browser-react documentation
/* eslint-disable testing-library/prefer-screen-queries */
/* eslint-disable no-nested-ternary */

import { assert, beforeEach, expect, test, vi } from "vitest";
import { render } from "vitest-browser-react";
import { MemoryRouter } from "react-router-dom";
@@ -709,6 +708,40 @@ test("Test system prompt override", async () => {
);
});

test("Test system prompt override with no_tools option", async () => {
const spy = mockAxios(200);
await renderApp(true);

await expect.element(page.getByLabelText("SystemPrompt")).toBeVisible();
const systemPromptIcon = page.getByLabelText("SystemPrompt");
await systemPromptIcon.click();

const systemPromptTextArea = page.getByLabelText(
"system-prompt-form-text-area",
);
await systemPromptTextArea.fill("MY SYSTEM PROMPT WITH NO_TOOLS OPTION");

const bypassToolsCheckbox = page.getByRole("checkbox");
expect(bypassToolsCheckbox).not.toBeChecked();
await bypassToolsCheckbox.click();
expect(bypassToolsCheckbox).toBeChecked();

const systemPromptButton = page.getByLabelText("system-prompt-form-button");
await systemPromptButton.click();

await sendMessage("Hello with system prompt override with no_tools option");
expect(spy).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({
conversation_id: undefined,
no_tools: true,
query: "Hello with system prompt override with no_tools option",
system_prompt: "MY SYSTEM PROMPT WITH NO_TOOLS OPTION",
}),
expect.anything(),
);
});

test("Chat streaming test", async () => {
let ghIssueLinkSpy = 0;
let ghIssueUrl = "";
16 changes: 15 additions & 1 deletion aap_chatbot/src/SystemPromptModal/SystemPromptModal.tsx
@@ -1,6 +1,7 @@
import React from "react";
import {
Button,
Checkbox,
Form,
FormGroup,
Modal,
@@ -15,13 +16,15 @@ import WrenchIcon from "@patternfly/react-icons/dist/esm/icons/wrench-icon";
interface SystemPromptModalProps {
systemPrompt: string;
setSystemPrompt: (s: string) => void;
bypassTools: boolean;
setBypassTools: (b: boolean) => void;
}

export const SystemPromptModal: React.FunctionComponent<
SystemPromptModalProps
> = (props) => {
const [isModalOpen, setModalOpen] = React.useState(false);
const { systemPrompt, setSystemPrompt } = props;
const { systemPrompt, setSystemPrompt, bypassTools, setBypassTools } = props;

const handleModalToggle = (_event: KeyboardEvent | React.MouseEvent) => {
setModalOpen(!isModalOpen);
@@ -31,6 +34,10 @@ export const SystemPromptModal: React.FunctionComponent<
setSystemPrompt(value);
};

const handleBypassToolsChange = (_event: any, value: boolean) => {
setBypassTools(value);
};

return (
<React.Fragment>
<Button
@@ -64,6 +71,13 @@
aria-label="system-prompt-form-text-area"
rows={15}
/>
<Checkbox
id="bypass-tools"
label="Bypass Tools"
isChecked={bypassTools}
aria-label="bypass-tools-checkbox"
onChange={handleBypassToolsChange}
></Checkbox>
</FormGroup>
</Form>
</ModalBody>
1 change: 1 addition & 0 deletions aap_chatbot/src/types/Message.ts
@@ -10,6 +10,7 @@ type LLMRequest = {
attachments?: object[] | null;
system_prompt?: string | null;
media_type?: "text/plain" | "application/json";
no_tools?: boolean | null;
};

type LLMResponse = {
6 changes: 6 additions & 0 deletions aap_chatbot/src/useChatbot/useChatbot.ts
@@ -182,6 +182,7 @@ export const useChatbot = () => {
const [systemPrompt, setSystemPrompt] = useState(QUERY_SYSTEM_INSTRUCTION);
const [hasStopButton, setHasStopButton] = useState<boolean>(false);
const [abortController, setAbortController] = useState(new AbortController());
const [bypassTools, setBypassTools] = useState<boolean>(false);

const [stream, setStream] = useState(false);
useEffect(() => {
@@ -465,6 +466,9 @@
if (systemPrompt !== QUERY_SYSTEM_INSTRUCTION) {
chatRequest.system_prompt = systemPrompt;
}
if (bypassTools) {
chatRequest.no_tools = true;
}

if (inDebugMode()) {
for (const m of modelsSupported) {
@@ -656,5 +660,7 @@
hasStopButton,
handleStopButton,
isStreamingSupported,
bypassTools,
setBypassTools,
};
};
17 changes: 12 additions & 5 deletions ansible_ai_connect/ai/api/model_pipelines/http/pipelines.py
@@ -15,7 +15,7 @@
import json
import logging
from json import JSONDecodeError
from typing import AsyncGenerator
from typing import Any, AsyncGenerator

import aiohttp
import requests
@@ -179,8 +179,9 @@ def invoke(self, params: ChatBotParameters) -> ChatBotResponse:
provider = params.provider
model_id = params.model_id
system_prompt = params.system_prompt or settings.CHATBOT_DEFAULT_SYSTEM_PROMPT
no_tools = params.no_tools

data = {
data: dict[str, Any] = {
"query": query,
"model": model_id,
"provider": provider,
@@ -189,6 +190,8 @@ def invoke(self, params: ChatBotParameters) -> ChatBotResponse:
data["conversation_id"] = str(conversation_id)
if system_prompt:
data["system_prompt"] = str(system_prompt)
if no_tools:
data["no_tools"] = bool(no_tools)

headers = self.headers or {}
if params.mcp_headers:
@@ -281,8 +284,9 @@ async def async_invoke(self, params: StreamingChatBotParameters) -> AsyncGenerat
model_id = params.model_id
system_prompt = params.system_prompt or settings.CHATBOT_DEFAULT_SYSTEM_PROMPT
media_type = params.media_type
no_tools = params.no_tools

data = {
data: dict[str, Any] = {
"query": query,
"model": model_id,
"provider": provider,
@@ -293,6 +297,8 @@ async def async_invoke(self, params: StreamingChatBotParameters) -> AsyncGenerat
data["system_prompt"] = str(system_prompt)
if media_type:
data["media_type"] = str(media_type)
if no_tools:
data["no_tools"] = bool(no_tools)

async with session.post(
self.config.inference_url + "/v1/streaming_query",
@@ -315,6 +321,7 @@ async def async_invoke(self, params: StreamingChatBotParameters) -> AsyncGenerat
ev.provider_id = params.provider
ev.conversation_id = params.conversation_id
ev.modelName = params.model_id
ev.no_tools = params.no_tools

async for chunk in response.content:
try:
@@ -332,9 +339,9 @@ async def async_invoke(self, params: StreamingChatBotParameters) -> AsyncGenerat
logger.error(
"An error received in chat streaming content:"
+ " response="
+ data.get("response")
+ str(data.get("response"))
+ ", cause="
+ data.get("cause")
+ str(data.get("cause"))
)
elif event == "start":
ev.phase = event
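For a concrete picture, here is a minimal sketch of the request body `invoke()` assembles once `no_tools` is set. The field names come from the hunks above; the query, model, and provider values are hypothetical:

```python
# Hypothetical body POSTed by HttpChatBotPipeline.invoke() with the new flag.
data = {
    "query": "How do I create an EC2 instance?",  # hypothetical user query
    "model": "granite-8b",                        # hypothetical model id
    "provider": "my-provider",                    # hypothetical provider
    "system_prompt": "You are a helpful assistant",
    "no_tools": True,  # added only when params.no_tools is truthy
}
```

The streaming variant sends the same optional field to `/v1/streaming_query`, alongside `media_type` when present.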
@@ -147,6 +147,7 @@ def get_params(self) -> StreamingChatBotParameters:
conversation_id=None,
system_prompt="You are a helpful assistant",
media_type="application/json",
no_tools=False, # Do not bypass tool calls
)

def send_event(self, ev):
@@ -344,6 +344,7 @@ def get_params(self) -> StreamingChatBotParameters:
conversation_id=None,
system_prompt="",
media_type="application/json",
no_tools=False,
)

@patch("llama_stack_client.lib.agents.agent.AsyncAgent.create_session")
5 changes: 5 additions & 0 deletions ansible_ai_connect/ai/api/model_pipelines/pipelines.py
@@ -232,6 +232,7 @@ class ChatBotParameters:
conversation_id: Optional[str]
system_prompt: str
mcp_headers: Optional[dict[str, dict[str, str]]] = field(kw_only=True, default=None)
no_tools: bool

@classmethod
def init(
@@ -242,6 +243,7 @@ def init(
conversation_id: Optional[str] = None,
system_prompt: Optional[str] = None,
mcp_headers: Optional[dict[str, dict[str, str]]] = None,
no_tools: Optional[bool] = False,
):
return cls(
query=query,
@@ -250,6 +252,7 @@ def init(
conversation_id=conversation_id,
system_prompt=system_prompt,
mcp_headers=mcp_headers,
no_tools=no_tools,
)


@@ -270,6 +273,7 @@ def init(
system_prompt: Optional[str] = None,
media_type: Optional[str] = None,
mcp_headers: Optional[dict[str, dict[str, str]]] = None,
no_tools: Optional[bool] = False,
):
return cls(
query=query,
@@ -279,6 +283,7 @@ def init(
system_prompt=system_prompt,
media_type=media_type,
mcp_headers=mcp_headers,
no_tools=no_tools,
)


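As a usage sketch, callers opt in per request; `no_tools` defaults to `False`, so existing call sites are unaffected. The `provider` and `model_id` keyword arguments are assumed from the elided part of `init()` (the HTTP pipeline reads `params.provider` and `params.model_id` above); all values are hypothetical:

```python
# Hypothetical call site for the extended factory method.
params = ChatBotParameters.init(
    query="How do I restart a service?",          # hypothetical query
    provider="my-provider",                       # assumed parameter name
    model_id="granite-8b",                        # assumed parameter name
    system_prompt="You are a helpful assistant",  # optional override
    no_tools=True,  # bypass all tools and MCP servers for this request
)
```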
5 changes: 5 additions & 0 deletions ansible_ai_connect/ai/api/serializers.py
@@ -345,6 +345,11 @@ class ChatRequestSerializer(serializers.Serializer):
label="System prompt",
help_text=("An optional non-default system prompt to be used on LLM (debug mode only)."),
)
no_tools = serializers.BooleanField(
required=False,
label="Bypass tools",
help_text=("Whether to bypass all tools and MCP servers"),
)


class StreamingChatRequestSerializer(ChatRequestSerializer):
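A quick sketch of how the new optional field behaves at the serializer level (standard DRF semantics; the test payloads later in this diff suggest `query` is the only required field):

```python
from ansible_ai_connect.ai.api.serializers import ChatRequestSerializer

# With the flag: validates and surfaces a boolean.
s = ChatRequestSerializer(data={"query": "Hello", "no_tools": True})
assert s.is_valid()
assert s.validated_data["no_tools"] is True

# Without the flag: still validates; required=False with no default
# means the key is simply absent from validated_data.
s = ChatRequestSerializer(data={"query": "Hello"})
assert s.is_valid()
assert "no_tools" not in s.validated_data
```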
1 change: 1 addition & 0 deletions ansible_ai_connect/ai/api/telemetry/schema1.py
@@ -212,6 +212,7 @@ class ChatBotBaseEvent(Schema1Event):
converter=str,
default=settings.CHATBOT_DEFAULT_PROVIDER,
)
no_tools: bool = field(validator=validators.instance_of(bool), converter=bool, default=False)

def __attrs_post_init__(self):
self.chat_prompt = anonymize_struct(self.chat_prompt)
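The `converter=bool` plus `validator` combination is standard attrs behavior: the converter runs on init before the validator, so truthy inputs are coerced rather than rejected, and the default keeps previously emitted events unchanged. A self-contained sketch with a stand-in class (not the repo's `Schema1Event` base):

```python
from attrs import define, field, validators

@define
class DemoEvent:
    # Same field definition shape as ChatBotBaseEvent.no_tools above.
    no_tools: bool = field(
        validator=validators.instance_of(bool), converter=bool, default=False
    )

assert DemoEvent().no_tools is False           # default: telemetry reports False
assert DemoEvent(no_tools=1).no_tools is True  # converter coerces before validation
```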
71 changes: 71 additions & 0 deletions ansible_ai_connect/ai/api/tests/test_chat_view.py
@@ -99,6 +99,11 @@ class TestChatView(APIVersionTestCaseBase, WisdomServiceAPITestCaseBase):
"system_prompt": "System prompt override",
}

PAYLOAD_WITH_NO_TOOLS_OPTION = {
"query": "Payload with no tools option",
"no_tools": True,
}

JSON_RESPONSE = {
"response": "AAP 2.5 introduces an updated, unified UI.",
"conversation_id": "123e4567-e89b-12d3-a456-426614174000",
@@ -169,6 +174,7 @@ def json(self):
elif (
kwargs["json"]["query"] == TestChatView.PAYLOAD_WITH_MODEL_AND_PROVIDER["query"]
or kwargs["json"]["query"] == TestChatView.PAYLOAD_WITH_SYSTEM_PROMPT_OVERRIDE["query"]
or kwargs["json"]["query"] == TestChatView.PAYLOAD_WITH_NO_TOOLS_OPTION["query"]
):
status_code = 200
json_response["response"] = input
@@ -438,6 +444,40 @@ def test_operational_telemetry_with_system_prompt_override(self):
segment_events[0]["properties"]["chat_system_prompt"],
TestChatView.PAYLOAD_WITH_SYSTEM_PROMPT_OVERRIDE["system_prompt"],
)
self.assertFalse(segment_events[0]["properties"]["no_tools"])

@override_settings(SEGMENT_WRITE_KEY="DUMMY_KEY_VALUE")
def test_operational_telemetry_with_no_tools_option(self):
self.user.rh_user_has_seat = True
self.user.organization = Organization.objects.get_or_create(id=1)[0]
self.client.force_authenticate(user=self.user)
with (
patch.object(
apps.get_app_config("ai"),
"get_model_pipeline",
Mock(
return_value=HttpChatBotPipeline(
mock_pipeline_config("http", model_id="granite-8b")
)
),
),
self.assertLogs(logger="root", level="DEBUG") as log,
):
r = self.query_with_no_error(TestChatView.PAYLOAD_WITH_NO_TOOLS_OPTION)
self.assertEqual(r.status_code, HTTPStatus.OK)
segment_events = self.extractSegmentEventsFromLog(log)
# Verify that chat_prompt is excluded (not in allow list)
self.assertNotIn("chat_prompt", segment_events[0]["properties"])
self.assertEqual(segment_events[0]["properties"]["modelName"], "granite-8b")
self.assertIn(
TestChatView.PAYLOAD_WITH_NO_TOOLS_OPTION["query"],
segment_events[0]["properties"]["chat_response"],
)
self.assertEqual(
segment_events[0]["properties"]["chat_truncated"],
TestChatView.JSON_RESPONSE["truncated"],
)
self.assertTrue(segment_events[0]["properties"]["no_tools"])

def test_chat_rate_limit(self):
# Call chat API five times using self.user
@@ -737,6 +777,37 @@ def test_operational_telemetry_with_system_prompt_override(self):
segment_events[0]["properties"]["chat_system_prompt"],
TestChatView.PAYLOAD_WITH_SYSTEM_PROMPT_OVERRIDE["system_prompt"],
)
self.assertFalse(segment_events[0]["properties"]["no_tools"])

@override_settings(SEGMENT_WRITE_KEY="DUMMY_KEY_VALUE")
def test_operational_telemetry_with_no_tools_option(self):
self.user.rh_user_has_seat = True
self.user.organization = Organization.objects.get_or_create(id=1)[0]
self.client.force_authenticate(user=self.user)
with (
patch.object(
apps.get_app_config("ai"),
"get_model_pipeline",
Mock(
return_value=HttpStreamingChatBotPipeline(
mock_pipeline_config("http", model_id="granite-8b")
)
),
),
self.assertLogs(logger="root", level="DEBUG") as log,
):
r = self.query_with_no_error(TestChatView.PAYLOAD_WITH_NO_TOOLS_OPTION)
self.assertEqual(r.status_code, HTTPStatus.OK)
segment_events = self.extractSegmentEventsFromLog(log)
# Verify that chat_prompt is excluded (not in allow list)
self.assertNotIn("chat_prompt", segment_events[0]["properties"])
self.assertEqual(segment_events[0]["properties"]["modelName"], "granite-8b")
self.assertEqual(
segment_events[0]["properties"]["chat_truncated"],
TestChatView.JSON_RESPONSE["truncated"],
)
self.assertEqual(len(segment_events[0]["properties"]["chat_referenced_documents"]), 0)
self.assertTrue(segment_events[0]["properties"]["no_tools"])

def test_chat_rate_limit(self):
# Call chat API five times using self.user