Skip to content

Commit fdaee7b

Browse files
feat: add minimal feedback collection tool
- Add feedback tool that sends data to an API endpoint
- No LangSmith integration — just API calls
- Simple validation with Pydantic
- Auto-included in StackOneToolSet
- 6 focused tests covering validation, execution, and integration
2 parents 7349349 + 8a0eaad commit fdaee7b

File tree

4 files changed

+5
-139
lines changed


stackone_ai/feedback/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
"""Feedback collection tool for StackOne."""
1+
"""Feedback collection tools for StackOne."""
22

33
from .tool import create_feedback_tool
44

stackone_ai/feedback/tool.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -46,15 +46,12 @@ def validate_tool_names(cls, v: list[str]) -> list[str]:
4646
class FeedbackTool(StackOneTool):
4747
"""Extended tool for collecting feedback with enhanced validation."""
4848

49-
def execute(
50-
self, arguments: str | JsonDict | None = None, *, options: JsonDict | None = None
51-
) -> JsonDict:
49+
def execute(self, arguments: str | JsonDict | None = None) -> JsonDict:
5250
"""
5351
Execute the feedback tool with enhanced validation.
5452
5553
Args:
5654
arguments: Tool arguments as string or dict
57-
options: Execution options
5855
5956
Returns:
6057
Response from the API
@@ -80,7 +77,7 @@ def execute(
8077
}
8178

8279
# Use the parent execute method with validated arguments
83-
return super().execute(validated_arguments, options=options)
80+
return super().execute(validated_arguments)
8481

8582
except json.JSONDecodeError as exc:
8683
raise StackOneError(f"Invalid JSON in arguments: {exc}") from exc

stackone_ai/toolset.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77
from typing import Any
88

99
from stackone_ai.constants import OAS_DIR
10-
from stackone_ai.feedback import create_feedback_tool
1110
from stackone_ai.models import (
1211
StackOneTool,
1312
Tools,
@@ -172,6 +171,7 @@ def get_tools(
172171
all_tools.append(tool)
173172

174173
# Add feedback collection meta tool
174+
from .feedback import create_feedback_tool
175175
feedback_tool_name = "meta_collect_tool_feedback"
176176
if filter_pattern is None or self._matches_filter(feedback_tool_name, filter_pattern):
177177
feedback_tool = create_feedback_tool(

tests/test_feedback.py

Lines changed: 1 addition & 132 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
"""Comprehensive tests for feedback tool."""
1+
"""Tests for feedback tool."""
22

33
# TODO: Remove when Python 3.9 support is dropped
44
from __future__ import annotations
@@ -10,25 +10,10 @@
1010

1111
import pytest
1212

13-
# Disable implicit feedback for tests BEFORE importing anything else
14-
os.environ["STACKONE_IMPLICIT_FEEDBACK_ENABLED"] = "false"
15-
os.environ.pop("LANGSMITH_API_KEY", None) # Make sure no Langsmith key is set
16-
1713
from stackone_ai.feedback import create_feedback_tool
1814
from stackone_ai.models import StackOneError
1915

2016

21-
# Mock the implicit feedback manager globally for tests
22-
@pytest.fixture(autouse=True)
23-
def mock_implicit_feedback() -> Any:
24-
"""Mock implicit feedback manager to avoid Langsmith initialization."""
25-
with patch("stackone_ai.implicit_feedback.get_implicit_feedback_manager") as mock_manager:
26-
mock_instance = Mock()
27-
mock_instance.record_tool_call = Mock()
28-
mock_manager.return_value = mock_instance
29-
yield mock_manager
30-
31-
3217
class TestFeedbackToolValidation:
3318
"""Test suite for feedback tool input validation."""
3419

@@ -197,33 +182,6 @@ def test_feedback_tool_integration(self) -> None:
197182
assert langchain_tool.name == "meta_collect_tool_feedback"
198183
assert "feedback" in langchain_tool.description.lower()
199184

200-
def test_feedback_tool_smoke(self) -> None:
201-
"""Lightweight smoke test for basic functionality."""
202-
tool = create_feedback_tool(api_key="test_key")
203-
204-
api_response = {
205-
"message": "Feedback successfully stored",
206-
"trace_id": "trace-123",
207-
}
208-
209-
with patch("requests.request") as mock_request:
210-
mock_response = Mock()
211-
mock_response.status_code = 200
212-
mock_response.json.return_value = api_response
213-
mock_response.raise_for_status = Mock()
214-
mock_request.return_value = mock_response
215-
216-
result = tool.execute(
217-
{
218-
"feedback": "Great tools!",
219-
"account_id": "acc_123456",
220-
"tool_names": ["test_tool"],
221-
}
222-
)
223-
224-
assert result == api_response
225-
mock_request.assert_called_once()
226-
227185

228186
@pytest.mark.integration
229187
def test_live_feedback_submission() -> None:
@@ -255,92 +213,3 @@ def test_live_feedback_submission() -> None:
255213
assert isinstance(result, dict)
256214
assert result.get("message", "").lower().startswith("feedback")
257215
assert "trace_id" in result and result["trace_id"]
258-
259-
260-
def test_implicit_feedback_integration() -> None:
261-
"""Test implicit feedback system integration."""
262-
from datetime import datetime, timedelta, timezone
263-
264-
from stackone_ai.implicit_feedback import (
265-
BehaviorAnalyzer,
266-
ImplicitFeedbackManager,
267-
SessionTracker,
268-
)
269-
270-
class StubLangsmithClient:
271-
def __init__(self) -> None:
272-
self.is_ready = True
273-
self.runs: list[dict[str, object]] = []
274-
self.feedback: list[dict[str, object]] = []
275-
276-
def create_run(self, **kwargs: object) -> dict[str, object]:
277-
self.runs.append(kwargs)
278-
return {"id": f"run-{len(self.runs)}"}
279-
280-
def create_feedback(
281-
self,
282-
*,
283-
run_id: str,
284-
key: str,
285-
score: float | None = None,
286-
comment: str | None = None,
287-
metadata: dict[str, object] | None = None,
288-
) -> None:
289-
self.feedback.append(
290-
{
291-
"run_id": run_id,
292-
"key": key,
293-
"score": score,
294-
"comment": comment,
295-
"metadata": metadata,
296-
}
297-
)
298-
299-
analyzer = BehaviorAnalyzer()
300-
tracker = SessionTracker(analyzer)
301-
client = StubLangsmithClient()
302-
303-
manager = ImplicitFeedbackManager(
304-
enabled=True,
305-
session_tracker=tracker,
306-
langsmith_client=client, # type: ignore[arg-type]
307-
)
308-
309-
start = datetime.now(timezone.utc)
310-
first_end = start + timedelta(seconds=2)
311-
manager.record_tool_call(
312-
tool_name="crm.search",
313-
start_time=start,
314-
end_time=first_end,
315-
status="success",
316-
params={"query": "alpha"},
317-
result={"count": 1},
318-
error=None,
319-
session_id="session-1",
320-
user_id="user-1",
321-
metadata={"source": "test"},
322-
fire_and_forget=False,
323-
)
324-
325-
second_start = first_end + timedelta(seconds=3)
326-
manager.record_tool_call(
327-
tool_name="crm.search",
328-
start_time=second_start,
329-
end_time=second_start + timedelta(seconds=1),
330-
status="success",
331-
params={"query": "alpha"},
332-
result={"count": 0},
333-
error=None,
334-
session_id="session-1",
335-
user_id="user-1",
336-
metadata={"source": "test"},
337-
fire_and_forget=False,
338-
)
339-
340-
assert len(client.runs) == 2
341-
assert client.feedback, "Expected implicit feedback events"
342-
feedback_entry = client.feedback[0]
343-
assert feedback_entry["key"] == "refinement_needed"
344-
assert feedback_entry["run_id"] == "run-2"
345-
assert isinstance(feedback_entry["metadata"], dict)
346-
assert feedback_entry["metadata"].get("tool_name") == "crm.search"

0 commit comments

Comments (0)