This repository was archived by the owner on May 13, 2024. It is now read-only.
134 changes: 133 additions & 1 deletion info.plist
@@ -4584,9 +4584,141 @@ Please refer to OpenAI's [safety best practices guide](https://platform.openai.c
			<key>variable</key>
			<string>stream_reply</string>
		</dict>
+		<dict>
+			<key>config</key>
+			<dict>
+				<key>default</key>
+				<string></string>
+				<key>placeholder</key>
+				<string></string>
+				<key>required</key>
+				<false/>
+				<key>trim</key>
+				<true/>
+			</dict>
+			<key>description</key>
+			<string>Alternative API key, used for chat completions only; overrides the default key if set, e.g. one from openrouter.ai.</string>
+			<key>label</key>
+			<string>Alternative API key</string>
+			<key>type</key>
+			<string>textfield</string>
+			<key>variable</key>
+			<string>alternative_key</string>
+		</dict>
+		<dict>
+			<key>config</key>
+			<dict>
+				<key>default</key>
+				<string>openai/gpt-4</string>
+				<key>pairs</key>
+				<array>
+					<array>
+						<string>OpenAI: GPT-4 Turbo</string>
+						<string>openai/gpt-4-turbo</string>
+					</array>
+					<array>
+						<string>OpenAI: GPT-4</string>
+						<string>openai/gpt-4</string>
+					</array>
+					<array>
+						<string>OpenAI: GPT-4 32k</string>
+						<string>openai/gpt-4-32k</string>
+					</array>
+					<array>
+						<string>OpenAI: GPT-3.5 Turbo</string>
+						<string>openai/gpt-3.5-turbo</string>
+					</array>
+					<array>
+						<string>OpenAI: GPT-3.5 Turbo 16k</string>
+						<string>openai/gpt-3.5-turbo-16k</string>
+					</array>
+					<array>
+						<string>Anthropic: Claude v2</string>
+						<string>anthropic/claude-2</string>
+					</array>
+					<array>
+						<string>Anthropic: Claude Instant v1</string>
+						<string>anthropic/claude-instant-v1</string>
+					</array>
+					<array>
+						<string>Google: PaLM 2 Bison (Code Chat)</string>
+						<string>google/palm-2-codechat-bison</string>
+					</array>
+					<array>
+						<string>Google: PaLM 2 Bison</string>
+						<string>google/palm-2-chat-bison</string>
+					</array>
+					<array>
+						<string>Meta: Llama v2 13B Chat (beta)</string>
+						<string>meta-llama/llama-2-13b-chat</string>
+					</array>
+					<array>
+						<string>Meta: Llama v2 70B Chat (beta)</string>
+						<string>meta-llama/llama-2-70b-chat</string>
+					</array>
+					<array>
+						<string>Meta: CodeLlama 34B Instruct (beta)</string>
+						<string>meta-llama/codellama-34b-instruct</string>
+					</array>
+					<array>
+						<string>Nous: Hermes Llama2 13B (beta)</string>
+						<string>nousresearch/nous-hermes-llama2-13b</string>
+					</array>
+					<array>
+						<string>Gryphe: MythoMax L2 13B</string>
+						<string>gryphe/mythomax-L2-13b</string>
+					</array>
+					<array>
+						<string>Mancer: Weaver 12k (alpha)</string>
+						<string>mancer/weaver</string>
+					</array>
+					<array>
+						<string>Jon Durbin: Airoboros L2 70B (beta)</string>
+						<string>jondurbin/airoboros-l2-70b-2.1</string>
+					</array>
+					<array>
+						<string>Anthropic: Claude v3 Opus</string>
+						<string>anthropic/claude-3-opus</string>
+					</array>
+					<array>
+						<string>Anthropic: Claude v3 Sonnet</string>
+						<string>anthropic/claude-3-sonnet</string>
+					</array>
+				</array>
+			</dict>
+			<key>description</key>
+			<string>LLM model used for chat completions when `Alternative API key` is set. Compatible with openrouter.ai.</string>
+			<key>label</key>
+			<string>Alternative model</string>
+			<key>type</key>
+			<string>popupbutton</string>
+			<key>variable</key>
+			<string>alternative_model</string>
+		</dict>
+		<dict>
+			<key>config</key>
+			<dict>
+				<key>default</key>
+				<string></string>
+				<key>placeholder</key>
+				<string>Something like {"HTTP-Referer": "http://alfred.app", "X-Title": "Alfred"}</string>
+				<key>required</key>
+				<false/>
+				<key>trim</key>
+				<true/>
+			</dict>
+			<key>description</key>
+			<string>Custom header fields sent with the request for chat completions. Required by openrouter.ai.</string>
+			<key>label</key>
+			<string>Custom headers</string>
+			<key>type</key>
+			<string>textfield</string>
+			<key>variable</key>
+			<string>custom_headers</string>
+		</dict>
	</array>
	<key>version</key>
-	<string>1.5.3</string>
+	<string>1.6.1</string>
	<key>webaddress</key>
	<string>https://github.com/chrislemke/ChatFred</string>
</dict>
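For orientation, here is a hedged sketch of how these new settings reach the scripts changed below: Alfred exports each user-configuration entry to the workflow's scripts as an environment variable named after its `variable` key (the values in the comments are illustrative, not part of the diff):

```python
import os

# Alfred exposes each workflow variable as an environment variable.
alternative_key = os.getenv("alternative_key")      # e.g. an openrouter.ai API key
alternative_model = os.getenv("alternative_model")  # e.g. "openai/gpt-4"
custom_headers = os.getenv("custom_headers")        # raw string; parsed in text_chat.py below
```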
6 changes: 5 additions & 1 deletion workflow/src/history_manager.py
@@ -14,7 +14,11 @@
__workflow_data_path = os.getenv("alfred_workflow_data") or os.path.expanduser("~")
__log_file_path = f"{__workflow_data_path}/ChatFred_ChatGPT.csv"
__history_type = os.getenv("history_type")
-__model = os.getenv("chat_gpt_model") or "gpt-3.5-turbo"
+
+if os.getenv("alternative_key"):
+    __model = os.getenv("alternative_model")
+else:
+    __model = os.getenv("chat_gpt_model") or "gpt-3.5-turbo"


def get_query() -> str:
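Worth noting: unlike the matching branch in text_chat.py below, `__model = os.getenv("alternative_model")` here has no `or "gpt-3.5-turbo"` fallback, so `__model` is `None` whenever `alternative_key` is set but `alternative_model` is not.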
29 changes: 26 additions & 3 deletions workflow/src/text_chat.py
@@ -1,4 +1,9 @@
-"""This module contains the ChatGPT API. Modified to use Flet to show streaming reply"""
+"""
+This module contains the ChatGPT API.
+Modified to use Flet to show the streaming reply.
+Also implements API routing for OpenRouter (https://openrouter.ai);
+set the custom API URL to https://openrouter.ai/api/v1.
+"""

import csv
import functools
@@ -7,6 +12,7 @@
import time
import uuid
from typing import Dict, List, Optional, Tuple
+import ast

from aliases_manager import prompt_for_alias
from caching_manager import read_from_cache, write_to_cache
@@ -24,11 +30,22 @@

import flet as ft

-openai.api_key = os.getenv("api_key")
if os.getenv("custom_api_url"):
    openai.api_base = os.getenv("custom_api_url")

-__model = os.getenv("chat_gpt_model") or "gpt-3.5-turbo"
+if os.getenv("alternative_key"):
+    __model = os.getenv("alternative_model") or "gpt-3.5-turbo"
+    openai.api_key = os.getenv("alternative_key")
+else:
+    __model = os.getenv("chat_gpt_model") or "gpt-3.5-turbo"
+    openai.api_key = os.getenv("api_key")
+
+__headers = (
+    {}
+    if not os.getenv("custom_headers")
+    else ast.literal_eval(str(os.getenv("custom_headers")))
+)

__history_length = int(os.getenv("history_length") or 4)
__temperature = float(os.getenv("temperature") or 0.0)
__max_tokens = int(os.getenv("chat_max_tokens")) if os.getenv("chat_max_tokens") else None # type: ignore
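Because the headers are parsed with `ast.literal_eval` rather than `json.loads`, the value must be a valid Python literal: a dict like the placeholder `{"HTTP-Referer": "http://alfred.app", "X-Title": "Alfred"}` parses fine, but JSON-only tokens such as `true` or `null` would raise a `ValueError`.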
@@ -287,6 +304,7 @@ def make_chat_request(
            frequency_penalty=frequency_penalty,
            presence_penalty=presence_penalty,
            stream=bool(stream_reply),
+            headers=__headers,
        )
    except Exception as exception:  # pylint: disable=broad-except
        response_mes = exception_response(exception)
@@ -301,6 +319,8 @@ def make_chat_request
            "top_p": top_p,
            "frequency_penalty": frequency_penalty,
            "presence_penalty": presence_penalty,
+            "stream_reply": bool(stream_reply),
+            "custom_headers": __headers,
        },
    )
    return prompt, response_mes
@@ -313,6 +333,9 @@ def streaming_window(page: ft.Page):
    page.scroll = ft.ScrollMode("auto")
    page.auto_scroll = True

+    # set page title
+    page.title = f'Reply from "{__model}"'
+
    # fonts setting by page.fonts and ft.theme
    page.fonts = {
        "Helvetica": os.path.join(
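Putting it together, a minimal end-to-end sketch of the routing this PR implements, assuming the pre-1.0 `openai` Python package the workflow uses (which, as the diff above relies on, accepts a per-request `headers=` argument); the key and header values are illustrative:

```python
import openai

# Point the OpenAI client at OpenRouter (the custom API URL from the docstring).
openai.api_base = "https://openrouter.ai/api/v1"
openai.api_key = "sk-or-..."  # the "Alternative API key" workflow variable

response = openai.ChatCompletion.create(
    model="openai/gpt-4",  # one of the "Alternative model" choices above
    messages=[{"role": "user", "content": "Hello!"}],
    # Attribution headers required by openrouter.ai, from "Custom headers".
    headers={"HTTP-Referer": "http://alfred.app", "X-Title": "Alfred"},
)
print(response["choices"][0]["message"]["content"])
```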