Skip to content
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .github/workflows/integration_test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ jobs:
matrix:
php-versions: [ '8.1' ]
databases: [ 'sqlite' ]
server-versions: [ 'master', 'stable30' ]
server-versions: [ 'master', 'stable31', 'stable30' ]

name: Integration test on ☁️${{ matrix.server-versions }} 🐘${{ matrix.php-versions }}

Expand Down Expand Up @@ -141,7 +141,7 @@ jobs:
env:
APP_VERSION: ${{ fromJson(steps.appinfo.outputs.result).version }}
run: |
poetry run python3 main.py > backend_logs &
poetry run python3 main.py > ../backend_logs 2>&1 &

- name: Register backend
run: |
Expand Down
27 changes: 27 additions & 0 deletions default_config/config.json
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,33 @@
"temperature": 0.3
}
},
"olmo-2-1124-7B-instruct-Q4_K_M": {
"prompt": "<|endoftext|><|system|>\n{system_prompt}\n<|user|>\n{system_prompt}\n{user_prompt}\n<|assistant|>\n",
"loader_config": {
"n_ctx": 4096,
"max_tokens": 2048,
"stop": ["<|endoftext|>"],
"temperature": 0.7
}
},
"olmo-2-1124-13B-instruct-Q5_K_M": {
"prompt": "<|endoftext|><|system|>\n{system_prompt}\n<|user|>\n{system_prompt}\n{user_prompt}\n<|assistant|>\n",
"loader_config": {
"n_ctx": 4096,
"max_tokens": 2048,
"stop": ["<|endoftext|>"],
"temperature": 0.7
}
},
"Mistral-Small-24B-Instruct-2501-Q3_K_M": {
"prompt": "<|endoftext|><|system|>\n{system_prompt}\n<|user|>\n{system_prompt}\n{user_prompt}\n<|assistant|>\n",
"loader_config": {
"n_ctx": 32768,
"max_tokens": 2048,
"stop": ["<|endoftext|>"],
"temperature": 0.15
}
},
"default": {
"prompt": "<|im_start|> system\n{system_prompt}\n<|im_end|>\n<|im_start|> user\n{user_prompt}\n<|im_end|>\n<|im_start|> assistant\n",
"loader_config": {
Expand Down
17 changes: 10 additions & 7 deletions lib/change_tone.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,14 +7,15 @@

from langchain.prompts import PromptTemplate
from langchain.schema.prompt_template import BasePromptTemplate
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.runnables import Runnable

class ChangeToneProcessor():
class ChangeToneProcessor:

runnable: Runnable

"""
A topics chain
A change tone processor
"""
system_prompt: str = "You're an AI assistant tasked with rewriting the text given to you by the user in another tone."
user_prompt: BasePromptTemplate = PromptTemplate(
Expand All @@ -32,8 +33,10 @@ class ChangeToneProcessor():
def __init__(self, runnable: Runnable):
self.runnable = runnable


def __call__(self, inputs: dict[str,Any],
) -> dict[str, Any]:
output = self.runnable.invoke({"user_prompt": self.user_prompt.format_prompt(text=inputs['input'], tone=inputs['tone']), "system_prompt": self.system_prompt})
return {'output': output}
def __call__(self, input_data: dict) -> dict[str, Any]:
"""Process a single input"""
messages = [
SystemMessage(content=self.system_prompt),
HumanMessage(content=self.user_prompt.format_prompt(text=input_data['input'], tone=input_data['tone']).to_string())
]
return {'output':self.runnable.invoke(messages).content }
47 changes: 26 additions & 21 deletions lib/chatwithtools.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,6 @@
from langchain_core.messages import SystemMessage, HumanMessage, ToolMessage
from langchain_core.messages.ai import AIMessage



def try_parse_tool_calls(content: str):
"""Try parse the tool calls."""
tool_calls = []
Expand Down Expand Up @@ -43,42 +41,49 @@ def try_parse_tool_calls(content: str):

class ChatWithToolsProcessor:
"""
A chat with tools processor
"""
A chat with tools processor that supports batch processing
"""

model: ChatLlamaCpp

def __init__(self, runner: ChatLlamaCpp):
self.model = runner

def __call__(
self,
inputs: dict[str, Any],
) -> dict[str, str]:
model_with_tools = self.model.bind_tools(json.loads(inputs['tools']))
def _process_single_input(self, input_data: dict[str, Any]) -> dict[str, Any]:
model_with_tools = self.model.bind_tools(json.loads(input_data['tools']))

messages = []
messages.append(SystemMessage(content=inputs['system_prompt']))
for raw_message in inputs['history']:
messages.append(SystemMessage(content=input_data['system_prompt']))

for raw_message in input_data['history']:
message = json.loads(raw_message)
if message['role'] == 'assistant':
messages.append(AIMessage(content=inputs['system_prompt']))
messages.append(AIMessage(content=input_data['system_prompt']))
elif message['role'] == 'human':
messages.append(HumanMessage(content=inputs['system_prompt']))
messages.append(HumanMessage(content=input_data['system_prompt']))

messages.append(HumanMessage(content=inputs['input']))
messages.append(HumanMessage(content=input_data['input']))

try:
tool_messages = json.loads(inputs['tool_message'])
tool_messages = json.loads(input_data['tool_message'])
for tool_message in tool_messages:
messages.append(ToolMessage(content=tool_message['content'], name=tool_message['name'], tool_call_id='42'))
messages.append(ToolMessage(
content=tool_message['content'],
name=tool_message['name'],
tool_call_id='42'
))
except:
pass

response = model_with_tools.invoke(
messages
)
print(response.content)
response = model_with_tools.invoke(messages)

if not response.tool_calls or len(response.tool_calls) == 0:
response = AIMessage(**try_parse_tool_calls(response.content))
return {'output': response.content, 'tool_calls': json.dumps(response.tool_calls)}

return {
'output': response.content,
'tool_calls': json.dumps(response.tool_calls)
}

def __call__(self, inputs: dict[str, Any]) -> dict[str, Any]:
return self._process_single_input(inputs)
81 changes: 23 additions & 58 deletions lib/contextwrite.py
Original file line number Diff line number Diff line change
@@ -1,25 +1,23 @@
# SPDX-FileCopyrightText: 2024 Nextcloud GmbH and Nextcloud contributors
# SPDX-License-Identifier: AGPL-3.0-or-later
"""A langchain chain to formalize text
"""A chain that rewrites text in a given style based on source context
"""

from typing import Any, Optional
from typing import Any

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.prompts import PromptTemplate
from langchain.schema.prompt_template import BasePromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains import LLMChain
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.runnables import Runnable

class ContextWriteProcessor:

runnable: Runnable

class ContextWriteChain(Chain):
"""
A reformulation chain
A context write processor
"""

system_prompt = "You're an AI assistant tasked with reformulating the text given to you by the user."

system_prompt: str = "You're an AI assistant tasked with reformulating the text given to you by the user."
user_prompt: BasePromptTemplate = PromptTemplate(
input_variables=["style_input", "source_input"],
template="""
Expand All @@ -36,49 +34,16 @@ class ContextWriteChain(Chain):
Only output the newly written text without quotes, nothing else, no introductory or explanatory text.
"""
)
# Multilingual output doesn't work with llama3.1
# Task doesn't work with llama 3.1


"""Prompt object to use."""
llm_chain: LLMChain
output_key: str = "text" #: :meta private:

class Config:
"""Configuration for this pydantic object."""

extra = 'forbid'
arbitrary_types_allowed = True

@property
def input_keys(self) -> list[str]:
"""Will be whatever keys the prompt expects.

:meta private:
"""
return ['style_input', 'source_input']

@property
def output_keys(self) -> list[str]:
"""Will always return text key.

:meta private:
"""
return [self.output_key]

def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, str]:

if not {"user_prompt", "system_prompt"} == set(self.llm_chain.input_keys):
raise ValueError("llm_chain must have input_keys ['user_prompt', 'system_prompt']")
if not self.llm_chain.output_keys == [self.output_key]:
raise ValueError(f"llm_chain must have output_keys [{self.output_key}]")

return self.llm_chain.invoke({"user_prompt": self.user_prompt.format_prompt(style_input=inputs['style_input'], source_input=inputs['source_input']), "system_prompt": self.system_prompt})

@property
def _chain_type(self) -> str:
return "simplify_chain"
def __init__(self, runnable: Runnable):
self.runnable = runnable

def __call__(self, inputs: dict[str, Any]) -> dict[str, Any]:
messages = [
SystemMessage(content=self.system_prompt),
HumanMessage(content=self.user_prompt.format(
style_input=inputs['style_input'],
source_input=inputs['source_input']
))
]
output = self.runnable.invoke(messages)
return {'output': output.content}
88 changes: 0 additions & 88 deletions lib/formalize.py

This file was deleted.

18 changes: 10 additions & 8 deletions lib/free_prompt.py
Original file line number Diff line number Diff line change
@@ -1,19 +1,18 @@
# SPDX-FileCopyrightText: 2024 Nextcloud GmbH and Nextcloud contributors
# SPDX-License-Identifier: AGPL-3.0-or-later
"""A free prompt chain
"""

from typing import Any

from typing import Any, List
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.runnables import Runnable


class FreePromptProcessor:
"""
A free prompt chain
"""
A free prompt chain with batch processing support
"""

runnable: Runnable
system_prompt: str = "You're an AI assistant tasked with helping the user to the best of your ability."

def __init__(self, runnable: Runnable):
self.runnable = runnable
Expand All @@ -22,5 +21,8 @@ def __call__(
self,
inputs: dict[str, Any],
) -> dict[str, Any]:
output = self.runnable.invoke({"user_prompt": inputs['input'], "system_prompt": "You're an AI assistant tasked with helping the user to the best of your ability."})
return {'output': output}
output = self.runnable.invoke([
SystemMessage(self.system_prompt),
HumanMessage(inputs['input'])
]).content
return {'output': output}
Loading
Loading