Skip to content

Commit 39b374f

Browse files
authored
Merge pull request #48 from nextcloud/enh/always-use-chat
enh: Always use the chat method
2 parents 7e377cc + 5d01b90 commit 39b374f

File tree

17 files changed

+297
-422
lines changed

17 files changed

+297
-422
lines changed

.github/workflows/integration_test.yml

Lines changed: 3 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ jobs:
2828
matrix:
2929
php-versions: [ '8.1' ]
3030
databases: [ 'sqlite' ]
31-
server-versions: [ 'master', 'stable30' ]
31+
server-versions: [ 'master', 'stable31', 'stable30' ]
3232

3333
name: Integration test on ☁️${{ matrix.server-versions }} 🐘${{ matrix.php-versions }}
3434

@@ -95,14 +95,6 @@ jobs:
9595
filename: ${{ env.APP_NAME }}/appinfo/info.xml
9696
expression: "/info/version/text()"
9797

98-
- name: Checkout AppAPI
99-
uses: actions/checkout@v4
100-
if: ${{ matrix.server-versions == 'stable30' }}
101-
with:
102-
repository: cloud-py-api/app_api
103-
path: apps/app_api
104-
ref: v3.2.1
105-
10698
- name: Set up Nextcloud
10799
if: ${{ matrix.databases != 'pgsql'}}
108100
run: |
@@ -141,7 +133,7 @@ jobs:
141133
env:
142134
APP_VERSION: ${{ fromJson(steps.appinfo.outputs.result).version }}
143135
run: |
144-
poetry run python3 main.py > backend_logs &
136+
poetry run python3 main.py > ../backend_logs 2>&1 &
145137
146138
- name: Register backend
147139
run: |
@@ -159,7 +151,7 @@ jobs:
159151
TASK_ID=$(echo $TASK | jq '.ocs.data.task.id')
160152
NEXT_WAIT_TIME=0
161153
TASK_STATUS='"STATUS_SCHEDULED"'
162-
until [ $NEXT_WAIT_TIME -eq 25 ] || [ "$TASK_STATUS" == '"STATUS_SUCCESSFUL"' ] || [ "$TASK_STATUS" == '"STATUS_FAILED"' ]; do
154+
until [ $NEXT_WAIT_TIME -eq 35 ] || [ "$TASK_STATUS" == '"STATUS_SUCCESSFUL"' ] || [ "$TASK_STATUS" == '"STATUS_FAILED"' ]; do
163155
TASK=$(curl -u "$CREDS" -H "oCS-APIRequest: true" http://localhost:8080/ocs/v2.php/taskprocessing/task/$TASK_ID?format=json)
164156
echo $TASK
165157
TASK_STATUS=$(echo $TASK | jq '.ocs.data.task.status')

default_config/config.json

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,33 @@
5555
"n_ctx": 8224,
5656
"max_tokens": 8196,
5757
"stop": ["<|eot_id|>"],
58+
"temperature": 0.15
59+
}
60+
},
61+
"olmo-2-1124-7B-instruct-Q4_K_M": {
62+
"prompt": "<|endoftext|><|system|>\n{system_prompt}\n<|user|>\n{system_prompt}\n{user_prompt}\n<|assistant|>\n",
63+
"loader_config": {
64+
"n_ctx": 4096,
65+
"max_tokens": 2048,
66+
"stop": ["<|endoftext|>"],
67+
"temperature": 0.7
68+
}
69+
},
70+
"olmo-2-1124-13B-instruct-Q5_K_M": {
71+
"prompt": "<|endoftext|><|system|>\n{system_prompt}\n<|user|>\n{system_prompt}\n{user_prompt}\n<|assistant|>\n",
72+
"loader_config": {
73+
"n_ctx": 4096,
74+
"max_tokens": 2048,
75+
"stop": ["<|endoftext|>"],
76+
"temperature": 0.7
77+
}
78+
},
79+
"Mistral-Small-24B-Instruct-2501-Q3_K_M": {
80+
"prompt": "<|endoftext|><|system|>\n{system_prompt}\n<|user|>\n{system_prompt}\n{user_prompt}\n<|assistant|>\n",
81+
"loader_config": {
82+
"n_ctx": 32768,
83+
"max_tokens": 2048,
84+
"stop": ["<|endoftext|>"],
5885
"temperature": 0.3
5986
}
6087
},

lib/change_tone.py

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -7,14 +7,15 @@
77

88
from langchain.prompts import PromptTemplate
99
from langchain.schema.prompt_template import BasePromptTemplate
10+
from langchain_core.messages import SystemMessage, HumanMessage
1011
from langchain_core.runnables import Runnable
1112

12-
class ChangeToneProcessor():
13+
class ChangeToneProcessor:
1314

1415
runnable: Runnable
1516

1617
"""
17-
A topics chain
18+
A change tone processor
1819
"""
1920
system_prompt: str = "You're an AI assistant tasked with rewriting the text given to you by the user in another tone."
2021
user_prompt: BasePromptTemplate = PromptTemplate(
@@ -32,8 +33,10 @@ class ChangeToneProcessor():
3233
def __init__(self, runnable: Runnable):
3334
self.runnable = runnable
3435

35-
36-
def __call__(self, inputs: dict[str,Any],
37-
) -> dict[str, Any]:
38-
output = self.runnable.invoke({"user_prompt": self.user_prompt.format_prompt(text=inputs['input'], tone=inputs['tone']), "system_prompt": self.system_prompt})
39-
return {'output': output}
36+
def __call__(self, input_data: dict) -> dict[str, Any]:
37+
"""Process a single input"""
38+
messages = [
39+
SystemMessage(content=self.system_prompt),
40+
HumanMessage(content=self.user_prompt.format_prompt(text=input_data['input'], tone=input_data['tone']).to_string())
41+
]
42+
return {'output':self.runnable.invoke(messages).content }

lib/chatwithtools.py

Lines changed: 26 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,6 @@
1111
from langchain_core.messages import SystemMessage, HumanMessage, ToolMessage
1212
from langchain_core.messages.ai import AIMessage
1313

14-
15-
1614
def try_parse_tool_calls(content: str):
1715
"""Try parse the tool calls."""
1816
tool_calls = []
@@ -43,42 +41,49 @@ def try_parse_tool_calls(content: str):
4341

4442
class ChatWithToolsProcessor:
4543
"""
46-
A chat with tools processor
47-
"""
44+
A chat with tools processor (processes one chat input at a time)
45+
"""
4846

4947
model: ChatLlamaCpp
5048

5149
def __init__(self, runner: ChatLlamaCpp):
5250
self.model = runner
5351

54-
def __call__(
55-
self,
56-
inputs: dict[str, Any],
57-
) -> dict[str, str]:
58-
model_with_tools = self.model.bind_tools(json.loads(inputs['tools']))
52+
def _process_single_input(self, input_data: dict[str, Any]) -> dict[str, Any]:
53+
model_with_tools = self.model.bind_tools(json.loads(input_data['tools']))
5954

6055
messages = []
61-
messages.append(SystemMessage(content=inputs['system_prompt']))
62-
for raw_message in inputs['history']:
56+
messages.append(SystemMessage(content=input_data['system_prompt']))
57+
58+
for raw_message in input_data['history']:
6359
message = json.loads(raw_message)
6460
if message['role'] == 'assistant':
65-
messages.append(AIMessage(content=inputs['system_prompt']))
61+
messages.append(AIMessage(content=input_data['system_prompt']))
6662
elif message['role'] == 'human':
67-
messages.append(HumanMessage(content=inputs['system_prompt']))
63+
messages.append(HumanMessage(content=input_data['system_prompt']))
6864

69-
messages.append(HumanMessage(content=inputs['input']))
65+
messages.append(HumanMessage(content=input_data['input']))
7066

7167
try:
72-
tool_messages = json.loads(inputs['tool_message'])
68+
tool_messages = json.loads(input_data['tool_message'])
7369
for tool_message in tool_messages:
74-
messages.append(ToolMessage(content=tool_message['content'], name=tool_message['name'], tool_call_id='42'))
70+
messages.append(ToolMessage(
71+
content=tool_message['content'],
72+
name=tool_message['name'],
73+
tool_call_id='42'
74+
))
7575
except:
7676
pass
7777

78-
response = model_with_tools.invoke(
79-
messages
80-
)
81-
print(response.content)
78+
response = model_with_tools.invoke(messages)
79+
8280
if not response.tool_calls or len(response.tool_calls) == 0:
8381
response = AIMessage(**try_parse_tool_calls(response.content))
84-
return {'output': response.content, 'tool_calls': json.dumps(response.tool_calls)}
82+
83+
return {
84+
'output': response.content,
85+
'tool_calls': json.dumps(response.tool_calls)
86+
}
87+
88+
def __call__(self, inputs: dict[str, Any]) -> dict[str, Any]:
89+
return self._process_single_input(inputs)

lib/contextwrite.py

Lines changed: 23 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -1,25 +1,23 @@
11
# SPDX-FileCopyrightText: 2024 Nextcloud GmbH and Nextcloud contributors
22
# SPDX-License-Identifier: AGPL-3.0-or-later
3-
"""A langchain chain to formalize text
3+
"""A chain that reformulates text using a given style and source material
44
"""
55

6-
from typing import Any, Optional
6+
from typing import Any
77

8-
from langchain.base_language import BaseLanguageModel
9-
from langchain.callbacks.manager import CallbackManagerForChainRun
10-
from langchain.chains.base import Chain
118
from langchain.prompts import PromptTemplate
129
from langchain.schema.prompt_template import BasePromptTemplate
13-
from langchain.text_splitter import CharacterTextSplitter
14-
from langchain.chains import LLMChain
10+
from langchain_core.messages import SystemMessage, HumanMessage
11+
from langchain_core.runnables import Runnable
12+
13+
class ContextWriteProcessor:
14+
15+
runnable: Runnable
1516

16-
class ContextWriteChain(Chain):
1717
"""
18-
A reformulation chain
18+
A context write processor
1919
"""
20-
21-
system_prompt = "You're an AI assistant tasked with reformulating the text given to you by the user."
22-
20+
system_prompt: str = "You're an AI assistant tasked with reformulating the text given to you by the user."
2321
user_prompt: BasePromptTemplate = PromptTemplate(
2422
input_variables=["style_input", "source_input"],
2523
template="""
@@ -36,49 +34,16 @@ class ContextWriteChain(Chain):
3634
Only output the newly written text without quotes, nothing else, no introductory or explanatory text.
3735
"""
3836
)
39-
# Multilingual output doesn't work with llama3.1
40-
# Task doesn't work with llama 3.1
41-
42-
43-
"""Prompt object to use."""
44-
llm_chain: LLMChain
45-
output_key: str = "text" #: :meta private:
46-
47-
class Config:
48-
"""Configuration for this pydantic object."""
49-
50-
extra = 'forbid'
51-
arbitrary_types_allowed = True
52-
53-
@property
54-
def input_keys(self) -> list[str]:
55-
"""Will be whatever keys the prompt expects.
56-
57-
:meta private:
58-
"""
59-
return ['style_input', 'source_input']
60-
61-
@property
62-
def output_keys(self) -> list[str]:
63-
"""Will always return text key.
64-
65-
:meta private:
66-
"""
67-
return [self.output_key]
68-
69-
def _call(
70-
self,
71-
inputs: dict[str, Any],
72-
run_manager: Optional[CallbackManagerForChainRun] = None,
73-
) -> dict[str, str]:
74-
75-
if not {"user_prompt", "system_prompt"} == set(self.llm_chain.input_keys):
76-
raise ValueError("llm_chain must have input_keys ['user_prompt', 'system_prompt']")
77-
if not self.llm_chain.output_keys == [self.output_key]:
78-
raise ValueError(f"llm_chain must have output_keys [{self.output_key}]")
79-
80-
return self.llm_chain.invoke({"user_prompt": self.user_prompt.format_prompt(style_input=inputs['style_input'], source_input=inputs['source_input']), "system_prompt": self.system_prompt})
81-
82-
@property
83-
def _chain_type(self) -> str:
84-
return "simplify_chain"
37+
def __init__(self, runnable: Runnable):
38+
self.runnable = runnable
39+
40+
def __call__(self, inputs: dict[str, Any]) -> dict[str, Any]:
41+
messages = [
42+
SystemMessage(content=self.system_prompt),
43+
HumanMessage(content=self.user_prompt.format(
44+
style_input=inputs['style_input'],
45+
source_input=inputs['source_input']
46+
))
47+
]
48+
output = self.runnable.invoke(messages)
49+
return {'output': output.content}

lib/formalize.py

Lines changed: 0 additions & 88 deletions
This file was deleted.

0 commit comments

Comments
 (0)