6 changes: 3 additions & 3 deletions chatsky/conditions/llm.py
@@ -29,9 +29,9 @@ class LLMCondition(BaseCondition):
     """
     Condition prompt.
     """
-    history: int = 1
+    dialog_turns: int = 0
     """
-    Number of dialogue turns aside from the current one to keep in history. `-1` for full history.
+    Number of dialogue turns aside from the current one to keep in history. `-1` to put all messages into the context.
     """
     filter_func: BaseHistoryFilter = Field(default_factory=DefaultFilter)
     """
@@ -67,7 +67,7 @@ async def call(self, ctx: Context) -> bool:
             call_prompt=Prompt(message=self.prompt),
             prompt_misc_filter=self.prompt_misc_filter,
             position_config=self.position_config or model.position_config,
-            length=self.history,
+            length=self.dialog_turns,
             filter_func=self.filter_func,
             llm_model_name=self.llm_model_name,
             max_size=self.max_size,
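
Review note: a minimal usage sketch of the renamed field on the condition side. The import path mirrors chatsky/conditions/llm.py above; the prompt text is illustrative, and the Contains method import is an assumption about the library's condition API, not something shown in this diff.

from chatsky.conditions.llm import LLMCondition
from chatsky.llm.methods import Contains  # assumption: not shown in this diff

# dialog_turns replaces the old `history` field here and now defaults to 0,
# so only the current turn is sent to the model unless raised explicitly.
asked_about_hours = LLMCondition(
    llm_model_name="test_model",
    prompt="Reply TRUE if the user asked about opening hours, else FALSE.",
    method=Contains(pattern="TRUE"),  # assumption: maps raw output to a bool
    dialog_turns=0,
)
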
6 changes: 3 additions & 3 deletions chatsky/llm/prompt.py
@@ -19,9 +19,9 @@ class PositionConfig(BaseModel):
     """

     system_prompt: float = 0
-    history: float = 1
-    misc_prompt: float = 2
-    call_prompt: float = 3
+    misc_prompt: float = 1
+    call_prompt: float = 2
+    history: float = 3
     last_turn: float = 4
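
Review note: a self-contained sketch (not the library's internals) of what the reordered floats mean: each prompt source is tagged with its PositionConfig value and the assembled context is ordered by that value, so history now lands after the misc and call prompts.

from chatsky.llm.prompt import PositionConfig  # path taken from this diff

cfg = PositionConfig()  # new defaults: system=0, misc=1, call=2, history=3, last_turn=4

# Hypothetical (position, text) pairs standing in for real messages:
tagged = [
    (cfg.history, "Request 1"),
    (cfg.call_prompt, "call prompt"),
    (cfg.system_prompt, "system prompt"),
    (cfg.misc_prompt, "misc prompt"),
    (cfg.last_turn, "Last request"),
]
# A stable sort by position reproduces the new default hierarchy:
# ['system prompt', 'misc prompt', 'call prompt', 'Request 1', 'Last request']
print([text for _, text in sorted(tagged, key=lambda pair: pair[0])])
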
6 changes: 3 additions & 3 deletions chatsky/responses/llm.py
@@ -30,9 +30,9 @@ class LLMResponse(BaseResponse):
     """
     Response prompt.
     """
-    history: int = 5
+    dialog_turns: int = 5
    """
-    Number of dialogue turns aside from the current one to keep in history. `-1` for full history.
+    Number of dialogue turns aside from the current one to keep in history. `-1` to put all messages into the context.
     """
     filter_func: BaseHistoryFilter = Field(default_factory=DefaultFilter)
     """
@@ -68,7 +68,7 @@ async def call(self, ctx: Context) -> Message:
             call_prompt=self.prompt,
             prompt_misc_filter=self.prompt_misc_filter,
             position_config=self.position_config or model.position_config,
-            length=self.history,
+            length=self.dialog_turns,
             filter_func=self.filter_func,
             llm_model_name=self.llm_model_name,
             max_size=self.max_size,
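
Review note: the same rename on the response side, sketched after the pattern in tutorials/llm/1_basics.py below; "barista_model" is the model name that tutorial registers in its Pipeline.

from chatsky.responses.llm import LLMResponse  # path taken from this diff

# Keep two turns besides the current one; per the new docstring, -1 would
# put the entire dialogue history into the context instead.
response = LLMResponse(llm_model_name="barista_model", dialog_turns=2)
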
18 changes: 9 additions & 9 deletions tests/llm/test_llm.py
@@ -217,22 +217,22 @@ class TestHistory:
         [
             (
                 2,
-                "Mock response with history: ['Request 2', 'Response 2', "
-                "'Request 3', 'Response 3', 'prompt', 'Last request', 'last prompt']",
+                "Mock response with history: ['prompt', 'Request 2', 'Response 2', "
+                "'Request 3', 'Response 3', 'Last request', 'last prompt']",
             ),
             (
                 0,
                 "Mock response with history: ['prompt', 'Last request', 'last prompt']",
             ),
             (
                 4,
-                "Mock response with history: ['Request 1', 'Response 1', "
-                "'Request 2', 'Response 2', 'Request 3', 'Response 3', 'prompt', 'Last request', 'last prompt']",
+                "Mock response with history: ['prompt', 'Request 1', 'Response 1', "
+                "'Request 2', 'Response 2', 'Request 3', 'Response 3', 'Last request', 'last prompt']",
             ),
         ],
     )
     async def test_history(self, context, pipeline, hist, expected):
-        res = await LLMResponse(llm_model_name="test_model", history=hist)(context)
+        res = await LLMResponse(llm_model_name="test_model", dialog_turns=hist)(context)
         assert res == Message(expected, annotations={"__generated_by_model__": "test_model"})

@@ -280,14 +280,14 @@ class TestGetLangchainContext:
             PositionConfig(),
             [
                 SystemMessage(content=[{"type": "text", "text": "system prompt"}]),
+                HumanMessage(content=[{"type": "text", "text": "prompt"}]),
+                HumanMessage(content=[{"type": "text", "text": "call prompt"}]),
                 HumanMessage(content=[{"type": "text", "text": "Request 1"}]),
                 AIMessage(content=[{"type": "text", "text": "Response 1"}]),
                 HumanMessage(content=[{"type": "text", "text": "Request 2"}]),
                 AIMessage(content=[{"type": "text", "text": "Response 2"}]),
                 HumanMessage(content=[{"type": "text", "text": "Request 3"}]),
                 AIMessage(content=[{"type": "text", "text": "Response 3"}]),
-                HumanMessage(content=[{"type": "text", "text": "prompt"}]),
-                HumanMessage(content=[{"type": "text", "text": "call prompt"}]),
                 HumanMessage(content=[{"type": "text", "text": "Last request"}]),
                 HumanMessage(content=[{"type": "text", "text": "last prompt"}]),
             ],
@@ -369,14 +369,14 @@ async def test_context_with_response(self, context):

         expected = [
             SystemMessage(content=[{"type": "text", "text": "system prompt"}]),
+            HumanMessage(content=[{"type": "text", "text": "prompt"}]),
+            HumanMessage(content=[{"type": "text", "text": "call prompt"}]),
             HumanMessage(content=[{"type": "text", "text": "Request 1"}]),
             AIMessage(content=[{"type": "text", "text": "Response 1"}]),
             HumanMessage(content=[{"type": "text", "text": "Request 2"}]),
             AIMessage(content=[{"type": "text", "text": "Response 2"}]),
             HumanMessage(content=[{"type": "text", "text": "Request 3"}]),
             AIMessage(content=[{"type": "text", "text": "Response 3"}]),
-            HumanMessage(content=[{"type": "text", "text": "prompt"}]),
-            HumanMessage(content=[{"type": "text", "text": "call prompt"}]),
             HumanMessage(content=[{"type": "text", "text": "Last request"}]),
             AIMessage(content=[{"type": "text", "text": "Last response"}]),
             HumanMessage(content=[{"type": "text", "text": "last prompt"}]),
4 changes: 3 additions & 1 deletion tutorials/llm/1_basics.py
@@ -84,7 +84,9 @@
         TRANSITIONS: [Tr(dst="greeting_node", cnd=cnd.ExactMatch("Hi"))],
     },
     "greeting_node": {
-        RESPONSE: LLMResponse(llm_model_name="barista_model", history=0),
+        RESPONSE: LLMResponse(
+            llm_model_name="barista_model", dialog_turns=0
+        ),
         TRANSITIONS: [
             Tr(dst="main_node", cnd=cnd.ExactMatch("Who are you?"))
         ],
8 changes: 4 additions & 4 deletions tutorials/llm/2_prompt_usage.py
@@ -48,9 +48,9 @@
 prompt types are ordered in the conversation history. The default hierarchy is:

 1. `system_prompt` - Core instructions for the model
-2. `history` - Conversation context
-3. `misc_prompt` - Additional prompts from nodes/flows
-4. `call_prompt` - Direct response prompts
+2. `misc_prompt` - Additional prompts from nodes/flows
+3. `call_prompt` - Direct response prompts
+4. `history` - Conversation context
 5. `last_turn` - Request and response from the current turn
    (if response has not yet been generated during current turn,
    only request is included)
@@ -158,7 +158,7 @@ async def fetch_vacancies(self) -> list[str]:
         TRANSITIONS: [Tr(dst="greeting_node", cnd=cnd.ExactMatch("Hi"))],
     },
     "greeting_node": {
-        RESPONSE: LLMResponse(llm_model_name="bank_model", history=0),
+        RESPONSE: LLMResponse(llm_model_name="bank_model", dialog_turns=0),
         TRANSITIONS: [
             Tr(
                 dst=("loan_flow", "start_node"), cnd=cnd.ExactMatch("/loan")
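
Review note: since this tutorial documents the reordered hierarchy, here is a hedged sketch of overriding it for a single response. The PositionConfig field names come from chatsky/llm/prompt.py above, and position_config is a field of LLMResponse per chatsky/responses/llm.py; the float values are illustrative.

from chatsky.llm.prompt import PositionConfig
from chatsky.responses.llm import LLMResponse

# Slot history back in front of misc/call prompts (the pre-PR ordering)
# for this one response, leaving the model-wide defaults untouched:
legacy_order = PositionConfig(history=0.5, misc_prompt=1, call_prompt=2)
response = LLMResponse(
    llm_model_name="bank_model",
    dialog_turns=3,
    position_config=legacy_order,
)
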
4 changes: 2 additions & 2 deletions tutorials/llm/3_filtering_history.py
@@ -114,7 +114,7 @@ def single_message_filter_call(
         TRANSITIONS: [Tr(dst="greeting_node", cnd=cnd.ExactMatch("Hi"))],
     },
     "greeting_node": {
-        RESPONSE: LLMResponse(llm_model_name="note_model", history=0),
+        RESPONSE: LLMResponse(llm_model_name="note_model", dialog_turns=0),
         TRANSITIONS: [
             Tr(dst="main_node", cnd=cnd.ExactMatch("Who are you?"))
         ],
@@ -136,7 +136,7 @@ def single_message_filter_call(
             llm_model_name="note_model",
             prompt="Create a bullet list from all the previous "
             "messages tagged with #important.",
-            history=15,
+            dialog_turns=15,
             filter_func=FilterImportant(),
         ),
         TRANSITIONS: [Tr(dst="main_node")],
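
Review note: a small sketch tying the renamed parameter to the filter above. Per the new docstring, dialog_turns=-1 puts all messages into the context, and FilterImportant (defined earlier in this tutorial) then keeps only the #important ones.

summary_response = LLMResponse(
    llm_model_name="note_model",
    prompt="Create a bullet list from all the previous "
    "messages tagged with #important.",
    dialog_turns=-1,  # full history instead of only the last 15 turns
    filter_func=FilterImportant(),
)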