Merged

26 commits
f5eef37
sav init
CTY-git Feb 3, 2025
238b79c
sav
CTY-git Feb 5, 2025
5671111
Merge remote-tracking branch 'origin/main' into log-analysis
CTY-git Feb 5, 2025
c23bb15
complete log analysis
CTY-git Feb 10, 2025
f6a7d8a
update lock file
CTY-git Feb 10, 2025
31be1e3
Merge remote-tracking branch 'origin/main' into log-analysis
CTY-git Feb 11, 2025
bf3f4cd
split file view from code edit tools
CTY-git Feb 12, 2025
2ba316f
fix tests
CTY-git Feb 12, 2025
070fce1
pydantic ai model
CTY-git Feb 12, 2025
fabd099
add v2 versioning
CTY-git Feb 19, 2025
ee1c514
add forgotten v2
CTY-git Feb 19, 2025
9746b8b
lint and fix
CTY-git Feb 19, 2025
47fe2b3
Merge remote-tracking branch 'origin/main' into log-analysis
CTY-git Feb 19, 2025
f5c5548
update
CTY-git Feb 19, 2025
7009b1f
update lock file
CTY-git Feb 19, 2025
b1a75a7
bump version
CTY-git Feb 19, 2025
ace2e70
fix typo
CTY-git Feb 19, 2025
1da12ce
apply sync hack
CTY-git Feb 19, 2025
77f2af6
finalise
CTY-git Feb 24, 2025
316c86f
Patched patchwork/steps/CallShell/README.md (#1284)
patched-admin Feb 24, 2025
c4fe2c0
Patched patchwork/steps/AgenticLLM/README.md (#1285)
patched-admin Feb 24, 2025
cacb155
Patched patchwork/steps/CallSQL/README.md (#1286)
patched-admin Feb 24, 2025
7f9996b
Patched patchwork/steps/SendEmail/README.md (#1298)
patched-admin Feb 24, 2025
3d24984
Patched patchwork/steps/AgenticLLMV2/README.md (#1329)
patched-admin Feb 24, 2025
4387ea1
Patched patchwork/steps/ReadEmail/README.md (#1330)
patched-admin Feb 24, 2025
c9470b5
finalise log analysis
CTY-git Feb 24, 2025
3 changes: 1 addition & 2 deletions patchwork/app.py
@@ -8,12 +8,11 @@
from collections import deque
from contextlib import nullcontext
from pathlib import Path
from typing import Any

import click
import yaml
from click import echo
from typing_extensions import Iterable
from typing_extensions import Any, Iterable

from patchwork.common.client.patched import PatchedClient
from patchwork.common.constants import PROMPT_TEMPLATE_FILE_KEY
64 changes: 63 additions & 1 deletion patchwork/common/client/llm/aio.py
@@ -10,7 +10,11 @@
ChatCompletionToolParam,
completion_create_params,
)
from typing_extensions import Dict, Iterable, List, Optional, Union
from pydantic_ai.messages import ModelMessage, ModelResponse
from pydantic_ai.models import ModelRequestParameters, StreamedResponse
from pydantic_ai.settings import ModelSettings
from pydantic_ai.usage import Usage
from typing_extensions import AsyncIterator, Dict, Iterable, List, Optional, Union

from patchwork.common.client.llm.anthropic import AnthropicLlmClient
from patchwork.common.client.llm.google import GoogleLlmClient
@@ -32,6 +36,64 @@ def __init__(self, *clients: LlmClient):
except Exception:
pass

def __get_model(self, model_settings: ModelSettings | None) -> Optional[str]:
if model_settings is None:
raise ValueError("Model settings cannot be None")
model_name = model_settings.get("model")
if model_name is None:
raise ValueError("Model must be set cannot be None")

return model_name

async def request(
self,
messages: list[ModelMessage],
model_settings: ModelSettings | None,
model_request_parameters: ModelRequestParameters,
) -> tuple[ModelResponse, Usage]:
model = self.__get_model(model_settings)
if model is None:
raise ValueError("Model cannot be unset")

for client in self.__clients:
if client.is_model_supported(model):
return await client.request(messages, model_settings, model_request_parameters)

client_names = [client.__class__.__name__ for client in self.__original_clients]
raise ValueError(
f"Model {model} is not supported by {client_names} clients. "
f"Please ensure that the respective API keys are correct."
)

async def request_stream(
self,
messages: list[ModelMessage],
model_settings: ModelSettings | None,
model_request_parameters: ModelRequestParameters,
) -> AsyncIterator[StreamedResponse]:
model = self.__get_model(model_settings)
if model is None:
raise ValueError("Model cannot be unset")

for client in self.__clients:
if client.is_model_supported(model):
yield client.request_stream(messages, model_settings, model_request_parameters)
return

client_names = [client.__class__.__name__ for client in self.__original_clients]
raise ValueError(
f"Model {model} is not supported by {client_names} clients. "
f"Please ensure that the respective API keys are correct."
)

@property
def model_name(self) -> str:
return "Undetermined"

@property
def system(self) -> str:
return next(iter(self.__clients)).system

def get_models(self) -> set[str]:
return self.__supported_models

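Taken together, the new request and request_stream methods let the aggregated client stand in as a pydantic_ai model: each call is routed to the first registered client that reports support for the requested model name, which this PR passes inside ModelSettings. A minimal usage sketch, assuming the early-2025 pydantic_ai signatures (ModelRequestParameters with function_tools, allow_text_result, result_tools) and placeholder model name and API key (model support is checked against the live model list, so a real key would be needed):

import asyncio

from pydantic_ai.messages import ModelRequest, UserPromptPart
from pydantic_ai.models import ModelRequestParameters
from patchwork.common.client.llm.aio import AioLlmClient
from patchwork.common.client.llm.openai_ import OpenAiLlmClient

async def main() -> None:
    # PR convention: the model name travels inside ModelSettings.
    settings = {"model": "gpt-4o-mini"}  # assumed model name
    client = AioLlmClient(OpenAiLlmClient(api_key="sk-placeholder"))
    response, usage = await client.request(
        [ModelRequest(parts=[UserPromptPart(content="Say hello.")])],
        settings,
        ModelRequestParameters(function_tools=[], allow_text_result=True, result_tools=[]),
    )
    print(response, usage)

asyncio.run(main())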
56 changes: 50 additions & 6 deletions patchwork/common/client/llm/anthropic.py
@@ -2,7 +2,7 @@

import json
import time
from functools import lru_cache
from functools import cached_property, lru_cache
from pathlib import Path

from anthropic import Anthropic
@@ -15,13 +15,18 @@
ChatCompletionToolParam,
completion_create_params,
)
from openai.types.chat.chat_completion import Choice, CompletionUsage
from openai.types.chat.chat_completion import Choice
from openai.types.chat.chat_completion_message_tool_call import (
ChatCompletionMessageToolCall,
Function,
)
from openai.types.completion_usage import CompletionUsage
from typing_extensions import Dict, Iterable, List, Optional, Union
from pydantic_ai.messages import ModelMessage, ModelResponse
from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse
from pydantic_ai.models.anthropic import AnthropicModel
from pydantic_ai.settings import ModelSettings
from pydantic_ai.usage import Usage
from typing_extensions import AsyncIterator, Dict, Iterable, List, Optional, Union

from patchwork.common.client.llm.protocol import NOT_GIVEN, LlmClient, NotGiven

@@ -74,7 +79,46 @@ class AnthropicLlmClient(LlmClient):
__100k_models = {"claude-2.0", "claude-instant-1.2"}

def __init__(self, api_key: str):
self.client = Anthropic(api_key=api_key)
self.__api_key = api_key

@cached_property
def __client(self):
return Anthropic(api_key=self.__api_key)

def __get_pydantic_model(self, model_settings: ModelSettings | None) -> Model:
if model_settings is None:
raise ValueError("Model settings cannot be None")
model_name = model_settings.get("model")
if model_name is None:
raise ValueError("Model must be set cannot be None")

return AnthropicModel(model_name, api_key=self.__api_key)

async def request(
self,
messages: list[ModelMessage],
model_settings: ModelSettings | None,
model_request_parameters: ModelRequestParameters,
) -> tuple[ModelResponse, Usage]:
model = self.__get_pydantic_model(model_settings)
return await model.request(messages, model_settings, model_request_parameters)

async def request_stream(
self,
messages: list[ModelMessage],
model_settings: ModelSettings | None,
model_request_parameters: ModelRequestParameters,
) -> AsyncIterator[StreamedResponse]:
model = self.__get_pydantic_model(model_settings)
yield model.request_stream(messages, model_settings, model_request_parameters)

@property
def model_name(self) -> str:
return "Undetermined"

@property
def system(self) -> str:
return "anthropic"

def __get_model_limit(self, model: str) -> int:
# it is observed that the count tokens is not accurate, so we are using a safety margin
@@ -250,7 +294,7 @@ def is_prompt_supported(
for k, v in input_kwargs.items()
if k in {"messages", "model", "system", "tool_choice", "tools", "beta"}
}
message_token_count = self.client.beta.messages.count_tokens(**count_token_input_kwargs)
message_token_count = self.__client.beta.messages.count_tokens(**count_token_input_kwargs)
return model_limit - message_token_count.input_tokens

def truncate_messages(
@@ -295,5 +339,5 @@ def chat_completion(
top_p=top_p,
)

response = self.client.messages.create(**input_kwargs)
response = self.__client.messages.create(**input_kwargs)
return _anthropic_to_openai_response(model, response)
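One behavioural change worth noting: the Anthropic SDK client is no longer constructed eagerly in __init__ but lazily, on first access, via cached_property. A small standalone sketch of that pattern (the dict stands in for the real Anthropic client):

from functools import cached_property

class LazyClient:
    def __init__(self, api_key: str) -> None:
        # Only the key is stored; no SDK object is built yet.
        self._api_key = api_key

    @cached_property
    def client(self) -> dict:
        # Runs once on first access; the result is memoized on the instance.
        print("constructing client")
        return {"api_key": self._api_key}

c = LazyClient("key")
assert c.client is c.client  # built exactly once, reused thereafter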
51 changes: 50 additions & 1 deletion patchwork/common/client/llm/google.py
@@ -25,7 +25,21 @@
)
from openai.types.chat.chat_completion import ChatCompletion, Choice
from pydantic import BaseModel
from typing_extensions import Any, Dict, Iterable, List, Optional, Type, Union
from pydantic_ai.messages import ModelMessage, ModelResponse
from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse
from pydantic_ai.models.gemini import GeminiModel
from pydantic_ai.settings import ModelSettings
from pydantic_ai.usage import Usage
from typing_extensions import (
Any,
AsyncIterator,
Dict,
Iterable,
List,
Optional,
Type,
Union,
)

from patchwork.common.client.llm.protocol import NOT_GIVEN, LlmClient, NotGiven
from patchwork.common.client.llm.utils import json_schema_to_model
@@ -48,6 +62,41 @@ def __init__(self, api_key: str):
def __get_models_info(self) -> list[Model]:
return list(self.client.models.list())

def __get_pydantic_model(self, model_settings: ModelSettings | None) -> Model:
if model_settings is None:
raise ValueError("Model settings cannot be None")
model_name = model_settings.get("model")
if model_name is None:
raise ValueError("Model must be set cannot be None")

return GeminiModel(model_name, api_key=self.__api_key)

async def request(
self,
messages: list[ModelMessage],
model_settings: ModelSettings | None,
model_request_parameters: ModelRequestParameters,
) -> tuple[ModelResponse, Usage]:
model = self.__get_pydantic_model(model_settings)
return await model.request(messages, model_settings, model_request_parameters)

async def request_stream(
self,
messages: list[ModelMessage],
model_settings: ModelSettings | None,
model_request_parameters: ModelRequestParameters,
) -> AsyncIterator[StreamedResponse]:
model = self.__get_pydantic_model(model_settings)
yield model.request_stream(messages, model_settings, model_request_parameters)

@property
def model_name(self) -> str:
return "Undetermined"

@property
def system(self) -> str:
return "google-gla"

def __get_model_limits(self, model: str) -> int:
for model_info in self.__get_models_info():
if model_info.name == f"{self.__MODEL_PREFIX}{model}" and model_info.input_token_limit is not None:
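The __get_pydantic_model helpers in the Anthropic, Gemini, and OpenAI clients share the same guard clauses: settings must be present and must carry the model name. A standalone sketch of that extraction, assuming the PR's convention of passing the name through ModelSettings:

from __future__ import annotations

from pydantic_ai.settings import ModelSettings

def resolve_model_name(model_settings: ModelSettings | None) -> str:
    # Mirrors the guard clauses in each client's __get_pydantic_model.
    if model_settings is None:
        raise ValueError("Model settings cannot be None")
    model_name = model_settings.get("model")
    if model_name is None:
        raise ValueError("Model must be set and cannot be None")
    return model_name

print(resolve_model_name({"model": "gemini-1.5-flash"}))  # assumed model name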
59 changes: 52 additions & 7 deletions patchwork/common/client/llm/openai_.py
@@ -1,6 +1,7 @@
from __future__ import annotations

import functools
from functools import cached_property
from pathlib import Path

import tiktoken
@@ -12,7 +13,12 @@
ChatCompletionToolParam,
completion_create_params,
)
from typing_extensions import Dict, Iterable, List, Optional, Union
from pydantic_ai.messages import ModelMessage, ModelResponse
from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse
from pydantic_ai.models.openai import OpenAIModel
from pydantic_ai.settings import ModelSettings
from pydantic_ai.usage import Usage
from typing_extensions import AsyncIterator, Dict, Iterable, List, Optional, Union

from patchwork.common.client.llm.protocol import NOT_GIVEN, LlmClient, NotGiven
from patchwork.logger import logger
@@ -42,20 +48,59 @@ class OpenAiLlmClient(LlmClient):
}

def __init__(self, api_key: str, base_url=None, **kwargs):
self.api_key = api_key
self.base_url = base_url
self.client = OpenAI(api_key=api_key, base_url=base_url, **kwargs)
self.__api_key = api_key
self.__base_url = base_url
self.__kwargs = kwargs

@cached_property
def __client(self) -> OpenAI:
return OpenAI(api_key=self.__api_key, base_url=self.__base_url, **self.__kwargs)

def __get_pydantic_model(self, model_settings: ModelSettings | None) -> Model:
if model_settings is None:
raise ValueError("Model settings cannot be None")
model_name = model_settings.get("model")
if model_name is None:
raise ValueError("Model must be set cannot be None")

return OpenAIModel(model_name, base_url=self.__base_url, api_key=self.__api_key)

async def request(
self,
messages: list[ModelMessage],
model_settings: ModelSettings | None,
model_request_parameters: ModelRequestParameters,
) -> tuple[ModelResponse, Usage]:
model = self.__get_pydantic_model(model_settings)
return await model.request(messages, model_settings, model_request_parameters)

async def request_stream(
self,
messages: list[ModelMessage],
model_settings: ModelSettings | None,
model_request_parameters: ModelRequestParameters,
) -> AsyncIterator[StreamedResponse]:
model = self.__get_pydantic_model(model_settings)
yield model.request_stream(messages, model_settings, model_request_parameters)

@property
def model_name(self) -> str:
return "Undetermined"

@property
def system(self) -> str | None:
return "openai"

def __is_not_openai_url(self):
# Some providers/apis only implement the chat completion endpoint.
# We mainly use this to skip using the model endpoints.
return self.base_url is not None and self.base_url != "https://api.openai.com/v1"
return self.__base_url is not None and self.__base_url != "https://api.openai.com/v1"

def get_models(self) -> set[str]:
if self.__is_not_openai_url():
return set()

return _cached_list_models_from_openai(self.api_key)
return _cached_list_models_from_openai(self.__api_key)

def is_model_supported(self, model: str) -> bool:
# might not implement model endpoint
@@ -147,4 +192,4 @@ def chat_completion(
top_p=top_p,
)

return self.client.chat.completions.create(**NotGiven.remove_not_given(input_kwargs))
return self.__client.chat.completions.create(**NotGiven.remove_not_given(input_kwargs))
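Because __is_not_openai_url treats any custom base_url as a non-OpenAI provider, model discovery is only attempted against the official endpoint; a local proxy or OpenAI-compatible gateway yields an empty model set instead of a failing request. A quick illustration (URL and key are placeholders):

from patchwork.common.client.llm.openai_ import OpenAiLlmClient

# Hypothetical values: any base_url other than https://api.openai.com/v1
# short-circuits get_models() to an empty set.
proxy = OpenAiLlmClient(api_key="unused", base_url="http://localhost:8000/v1")
print(proxy.get_models())  # -> set()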