diff --git a/info.plist b/info.plist
index 33323a61..972f05c2 100644
--- a/info.plist
+++ b/info.plist
@@ -4584,9 +4584,141 @@ Please refer to OpenAI's [safety best practices guide](https://platform.openai.c
variable
stream_reply
+
+ config
+
+ default
+
+ placeholder
+
+ required
+
+ trim
+
+
+ description
+ Alternative API key used for chat completions only, overrides the default one if set. E.g. one from openrouter.ai
+ label
+ Alternative API key
+ type
+ textfield
+ variable
+ alternative_key
+
+
+ config
+
+ default
+ openai/gpt-4
+ pairs
+
+
+ OpenAI: GPT-4 Turbo
+ openai/gpt-4-turbo
+
+
+ OpenAI: GPT-4
+ openai/gpt-4
+
+
+ OpenAI: GPT-4 32k
+ openai/gpt-4-32k
+
+
+ OpenAI: GPT-3.5 Turbo
+ openai/gpt-3.5-turbo
+
+
+ OpenAI: GPT-3.5 Turbo 16k
+ openai/gpt-3.5-turbo-16k
+
+
+ Anthropic: Claude v2
+ anthropic/claude-2
+
+
+ Anthropic: Claude Instant v1
+ anthropic/claude-instant-v1
+
+
+ Google: PaLM 2 Bison (Code Chat)
+ google/palm-2-codechat-bison
+
+
+ Google: PaLM 2 Bison
+ google/palm-2-chat-bison
+
+
+ Meta: Llama v2 13B Chat (beta)
+ meta-llama/llama-2-13b-chat
+
+
+ Meta: Llama v2 70B Chat (beta)
+ meta-llama/llama-2-70b-chat
+
+
+ Meta: CodeLlama 34B Instruct (beta)
+ meta-llama/codellama-34b-instruct
+
+
+ Nous: Hermes Llama2 13B (beta)
+ nousresearch/nous-hermes-llama2-13b
+
+
+ Gryphe: MythoMax L2 13B
+ gryphe/mythomax-l2-13b
+
+
+ Mancer: Weaver 12k (alpha)
+ mancer/weaver
+
+
+ Jon Durbin: Airoboros L2 70B (beta)
+ jondurbin/airoboros-l2-70b-2.1
+
+
+ Anthropic: Claude v3 Opus
+ anthropic/claude-3-opus
+
+
+ Anthropic: Claude v3 Sonnet
+ anthropic/claude-3-sonnet
+
+
+
+ description
+ LLM model choices for chat completion when `Alternative API key` is set. Compatible with openrouter.ai
+ label
+ Alternative model
+ type
+ popupbutton
+ variable
+ alternative_model
+
+
+ config
+
+ default
+
+ placeholder
+ Something like {"HTTP-Referer": "http://alfred.app", "X-Title": "Alfred"}
+ required
+
+ trim
+
+
+ description
+ Custom header fields sent with the request for chat completions. Required by openrouter.ai
+ label
+ Custom headers
+ type
+ textfield
+ variable
+ custom_headers
+
version
- 1.5.3
+ 1.6.1
webaddress
https://github.com/chrislemke/ChatFred
diff --git a/workflow/src/history_manager.py b/workflow/src/history_manager.py
index e94abb1c..0bc7f9ea 100644
--- a/workflow/src/history_manager.py
+++ b/workflow/src/history_manager.py
@@ -14,7 +14,11 @@
__workflow_data_path = os.getenv("alfred_workflow_data") or os.path.expanduser("~")
__log_file_path = f"{__workflow_data_path}/ChatFred_ChatGPT.csv"
__history_type = os.getenv("history_type")
-__model = os.getenv("chat_gpt_model") or "gpt-3.5-turbo"
+
+if os.getenv("alternative_key"):
+ __model = os.getenv("alternative_model") or "gpt-3.5-turbo"
+else:
+ __model = os.getenv("chat_gpt_model") or "gpt-3.5-turbo"
def get_query() -> str:
diff --git a/workflow/src/text_chat.py b/workflow/src/text_chat.py
index 662cf39a..5391a650 100644
--- a/workflow/src/text_chat.py
+++ b/workflow/src/text_chat.py
@@ -1,4 +1,9 @@
-"""This module contains the ChatGPT API. Modified to use Flet to show streaming reply"""
+"""
+This module contains the ChatGPT API.
+Modified to use Flet to show streaming reply.
+And implemented API routing for OpenRouter (https://openrouter.ai)
+set custom API URL to https://openrouter.ai/api/v1
+"""
import csv
import functools
@@ -7,6 +12,7 @@
import time
import uuid
from typing import Dict, List, Optional, Tuple
+import ast
from aliases_manager import prompt_for_alias
from caching_manager import read_from_cache, write_to_cache
@@ -24,11 +30,22 @@
import flet as ft
-openai.api_key = os.getenv("api_key")
if os.getenv("custom_api_url"):
openai.api_base = os.getenv("custom_api_url")
-__model = os.getenv("chat_gpt_model") or "gpt-3.5-turbo"
+if os.getenv("alternative_key"):
+ __model = os.getenv("alternative_model") or "gpt-3.5-turbo"
+ openai.api_key = os.getenv("alternative_key")
+else:
+ __model = os.getenv("chat_gpt_model") or "gpt-3.5-turbo"
+ openai.api_key = os.getenv("api_key")
+
+__headers = (
+ {}
+ if not os.getenv("custom_headers")
+ else ast.literal_eval(str(os.getenv("custom_headers")))
+)
+
__history_length = int(os.getenv("history_length") or 4)
__temperature = float(os.getenv("temperature") or 0.0)
__max_tokens = int(os.getenv("chat_max_tokens")) if os.getenv("chat_max_tokens") else None # type: ignore
@@ -287,6 +304,7 @@ def make_chat_request(
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
stream=bool(stream_reply),
+ headers=__headers,
)
except Exception as exception: # pylint: disable=broad-except
response_mes = exception_response(exception)
@@ -301,6 +319,8 @@ def make_chat_request(
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"presence_penalty": presence_penalty,
+ "stream_reply": bool(stream_reply),
+ "custom_headers": __headers,
},
)
return prompt, response_mes
@@ -313,6 +333,9 @@ def streaming_window(page: ft.Page):
page.scroll = ft.ScrollMode("auto")
page.auto_scroll = True
+ # set page title
+ page.title = f'Reply from "{__model}"'
+
# fonts setting by page.fonts and ft.theme
page.fonts = {
"Helvetica": os.path.join(