diff --git a/patchwork/common/client/llm/openai_.py b/patchwork/common/client/llm/openai_.py
index 6573ee24e..ee04796db 100644
--- a/patchwork/common/client/llm/openai_.py
+++ b/patchwork/common/client/llm/openai_.py
@@ -14,6 +14,7 @@
 from typing_extensions import Dict, Iterable, List, Optional, Union
 
 from patchwork.common.client.llm.protocol import NOT_GIVEN, LlmClient, NotGiven
+from patchwork.logger import logger
 
 
 @functools.lru_cache
@@ -36,6 +37,7 @@ class OpenAiLlmClient(LlmClient):
         "o1-mini": 128_000,
         "gpt-4o-mini": 128_000,
         "gpt-4o": 128_000,
+        "o3-mini": 128_000,
     }
 
     def __init__(self, api_key: str, base_url=None, **kwargs):
@@ -87,7 +89,12 @@ def is_prompt_supported(
         model_limit = self.__get_model_limits(model)
 
         token_count = 0
-        encoding = tiktoken.encoding_for_model(model)
+        encoding = None
+        try:
+            encoding = tiktoken.encoding_for_model(model)
+        except Exception as e:
+            logger.error(f"Error getting encoding for model {model}: {e}, using gpt-4o as fallback")
+            encoding = tiktoken.encoding_for_model("gpt-4o")
         for message in messages:
             message_token_count = len(encoding.encode(message.get("content")))
             token_count = token_count + message_token_count
diff --git a/pyproject.toml b/pyproject.toml
index 0ea3dc951..874f0810a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "patchwork-cli"
-version = "0.0.95"
+version = "0.0.96"
 description = ""
 authors = ["patched.codes"]
 license = "AGPL"