Skip to content

Commit 5f22856

Browse files
committed
Checkpoint: command classifier
1 parent acd2f20 commit 5f22856

File tree

8 files changed

+1200
-314
lines changed

8 files changed

+1200
-314
lines changed

pyproject.toml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,9 @@ dependencies = [
3232
"pyyaml>=6.0.3",
3333
"slack-sdk>=3.38.0",
3434
"slack-bolt>=1.27.0",
35+
"openai>=1.0.0",
36+
"PyJWT>=2.8.0",
37+
"cryptography>=41.0.0",
3538
]
3639

3740
[project.optional-dependencies]
Lines changed: 150 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,150 @@
1+
import json
2+
3+
from openai import OpenAI
4+
from py_trees.common import Status
5+
6+
from ..conversation_models import Command
7+
from .behaviours import ReleaseAction
8+
from .conversation_state import ConversationState
9+
10+
11+
class SimpleCommandClassifier(ReleaseAction):
    """Keyword-based command classifier.

    Maps the first word of the incoming message onto a ``Command`` enum
    value. Used as a fallback when no LLM client is configured.
    """

    def __init__(
        self, name: str, state: ConversationState, log_prefix: str = ""
    ) -> None:
        self.state = state
        super().__init__(name=name, log_prefix=log_prefix)

    def update(self) -> Status:
        """Detect a command from the first word of the message.

        Always returns ``Status.SUCCESS``; ``state.command`` is only set
        when the first word matches a known keyword, otherwise the state
        is left untouched.
        """
        # Extract first word from message if available
        if self.state.message and self.state.message.message:
            words = self.state.message.message.strip().split()
            # Guard against whitespace-only messages: ``"  ".strip().split()``
            # yields an empty list, so indexing [0] would raise IndexError.
            if words:
                first_word = words[0].lower()

                # Map first word to Command enum
                command_map = {
                    "release": Command.RELEASE,
                    "custom_build": Command.CUSTOM_BUILD,
                    "unstable_build": Command.UNSTABLE_BUILD,
                    "status": Command.STATUS,
                    "help": Command.HELP,
                }

                # Set command if detected, otherwise leave as is
                if first_word in command_map:
                    self.state.command = command_map[first_word]

        # Always return SUCCESS so the tree continues regardless of detection
        return Status.SUCCESS
38+
39+
40+
class LLMCommandClassifier(ReleaseAction):
    """LLM-backed command classifier.

    Asks an OpenAI chat model to map a free-form user message onto one of
    the ``Command`` enum values, returning ``SUCCESS`` only when the
    model's self-reported confidence meets ``confidence_threshold``.
    """

    def __init__(
        self,
        name: str,
        llm: OpenAI,
        state: ConversationState,
        log_prefix: str = "",
        confidence_threshold: float = 0.7,
        model: str = "gpt-4o-mini",
    ) -> None:
        self.llm = llm
        self.state = state
        self.confidence_threshold = confidence_threshold
        # Model is configurable so deployments can trade cost for quality
        # without editing this class; the default preserves prior behavior.
        self.model = model
        super().__init__(name=name, log_prefix=log_prefix)

    def update(self) -> Status:
        """Classify the current message via the LLM.

        Returns ``SUCCESS`` when a valid command is detected with
        sufficient confidence; ``FAILURE`` otherwise (missing message,
        low confidence, invalid command value, or any API/parse error).
        """
        self.logger.debug(f"state : {self.state.model_dump()}")
        # Nothing to classify without a message.
        if not self.state.message or not self.state.message.message:
            return Status.FAILURE

        # Enumerate the valid command values for the prompt.
        commands_list = "\n".join([f"- {cmd.value}" for cmd in Command])

        system_prompt = f"""You are a command detector for a Redis release automation system.
Your task is to analyze user messages and detect which command they want to execute.

Available commands:
{commands_list}

Respond with a JSON object containing:
- "command": the detected command value (one of the available commands, or null if uncertain)
- "confidence": a number between 0 and 1 indicating your confidence in the detection
- "reasoning": brief explanation of your decision

Example response:
{{"command": "release", "confidence": 0.95, "reasoning": "User explicitly mentioned releasing version 8.2.0"}}

If the message doesn't clearly match any command, set command to null and explain why."""

        user_message = self.state.message.message

        try:
            # Low temperature + JSON mode for deterministic, parseable output.
            response = self.llm.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_message},
                ],
                response_format={"type": "json_object"},
                temperature=0.3,
            )

            self.logger.debug(f"LLM response: {response}")

            # Parse response
            content = response.choices[0].message.content
            if not content:
                self.feedback_message = "LLM returned empty response"
                return Status.FAILURE

            result = json.loads(content)
            command_value = result.get("command")
            # Coerce defensively: a misbehaving model may return the
            # confidence as a string; a float() failure falls through to
            # the except handler below instead of crashing the format call.
            confidence = float(result.get("confidence", 0.0))
            reasoning = result.get("reasoning", "")

            # Log the detection
            self.feedback_message = (
                f"LLM detected: {command_value} (confidence: {confidence:.2f})"
            )

            # Reject low-confidence detections and surface the model's
            # reasoning to the user instead of acting on a guess.
            if confidence < self.confidence_threshold:
                self.feedback_message += (
                    f" [Below threshold {self.confidence_threshold}]"
                )
                self.state.reply = reasoning
                return Status.FAILURE

            # Validate and set command
            if command_value:
                try:
                    # Command() raises ValueError for values outside the enum.
                    self.state.command = Command(command_value)
                    return Status.SUCCESS
                except ValueError:
                    self.feedback_message = f"Invalid command value: {command_value}"
                    self.state.reply = self.feedback_message
                    return Status.FAILURE
            else:
                return Status.FAILURE

        except Exception as e:
            # Broad by design: any API, network, or parse failure must not
            # crash the behaviour-tree tick — report it and fail the node.
            self.feedback_message = f"LLM command detection failed: {str(e)}"
            self.state.reply = self.feedback_message
            return Status.FAILURE
135+
136+
137+
# Conditions
138+
139+
140+
class IsLLMAvailable(ReleaseAction):
    """Condition node: succeeds when an LLM client has been configured."""

    def __init__(
        self, name: str, state: ConversationState, log_prefix: str = ""
    ) -> None:
        self.state = state
        super().__init__(name=name, log_prefix=log_prefix)

    def update(self) -> Status:
        """Return SUCCESS when ``state.llm_available`` is set, else FAILURE."""
        return Status.SUCCESS if self.state.llm_available else Status.FAILURE
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
from typing import List, Optional
2+
3+
from pydantic import BaseModel, Field
4+
5+
from ..conversation_models import Command
6+
7+
8+
class InboxMessage(BaseModel):
    """An incoming user message together with its conversation context."""

    # Raw text of the user's message.
    message: str
    # Prior conversation lines; exact semantics set by the caller — TODO confirm.
    context: List[str]
11+
12+
13+
class ConversationState(BaseModel):
    """Shared state (blackboard) for the conversation behaviour tree."""

    # True when an OpenAI client was configured for this conversation.
    llm_available: bool = False
    # Message currently being processed; None before any input arrives.
    message: Optional[InboxMessage] = None
    # Command detected by a classifier node, if any.
    command: Optional[Command] = None
    # Text to send back to the user, if any.
    reply: Optional[str] = None
Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
from typing import Optional, Tuple
2+
3+
from openai import OpenAI
4+
from py_trees.behaviour import Behaviour
5+
from py_trees.composites import Selector
6+
from py_trees.trees import BehaviourTree
7+
from py_trees.visitors import SnapshotVisitor
8+
9+
from redis_release.bht.conversation_behaviours import (
10+
LLMCommandClassifier,
11+
SimpleCommandClassifier,
12+
)
13+
14+
from ..conversation_models import ConversationArgs
15+
from .conversation_state import ConversationState, InboxMessage
16+
from .tree import log_tree_state_with_markup
17+
18+
19+
def create_conversation_root_node(
    input: InboxMessage, llm: Optional[OpenAI] = None
) -> Tuple[Behaviour, ConversationState]:
    """Build the conversation tree root and its shared state.

    NOTE(review): the parameter name ``input`` shadows the builtin; kept
    as-is for caller compatibility.
    """
    has_llm = llm is not None
    state = ConversationState(llm_available=has_llm, message=input)

    # Prefer the LLM-backed classifier; fall back to keyword matching.
    if has_llm:
        detector: Behaviour = LLMCommandClassifier("LLM Command Detector", llm, state)
    else:
        detector = SimpleCommandClassifier("Simple Command Classifier", state)

    root = Selector("Conversation Root", memory=False, children=[detector])
    return root, state
36+
37+
38+
def initialize_conversation_tree(args: ConversationArgs) -> BehaviourTree:
    """Create a tick-ready conversation behaviour tree from arguments."""
    # Only construct an OpenAI client when an API key was supplied.
    client: Optional[OpenAI] = (
        OpenAI(api_key=args.openai_api_key) if args.openai_api_key else None
    )

    inbox = InboxMessage(message=args.message, context=[])
    root, _state = create_conversation_root_node(inbox, llm=client)

    tree = BehaviourTree(root)
    tree.visitors.append(SnapshotVisitor())
    tree.add_post_tick_handler(log_tree_state_with_markup)
    return tree

0 commit comments

Comments
 (0)