
Commit 5b501d5

patched.codes[bot] committed

Patched patchwork/common/client/llm/aio.py

1 parent e389d82 · commit 5b501d5

1 file changed: patchwork/common/client/llm/aio.py (+24 -0 lines)

patchwork/common/client/llm/aio.py

Lines changed: 24 additions & 0 deletions
@@ -99,6 +99,23 @@ def system(self) -> str:
 
     def is_model_supported(self, model: str) -> bool:
         return any(client.is_model_supported(model) for client in self.__clients)
+
+    def get_model_limit(self, model: str) -> int:
+        """
+        Get the model's context length limit from the appropriate client.
+
+        Args:
+            model: The model name
+
+        Returns:
+            The maximum context length in tokens, or a default value if not found
+        """
+        for client in self.__clients:
+            if client.is_model_supported(model) and hasattr(client, 'get_model_limit'):
+                return client.get_model_limit(model)
+
+        # Default value if no client found or client doesn't have the method
+        return 128_000
 
     def is_prompt_supported(
         self,
@@ -119,6 +136,13 @@ def is_prompt_supported(
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         file: Path | NotGiven = NOT_GIVEN,
     ) -> int:
+        """
+        Check if the prompt is supported by the model and return available tokens.
+
+        Returns:
+            int: If > 0, represents available tokens remaining after prompt.
+                 If <= 0, indicates that prompt is too large.
+        """
        for client in self.__clients:
            if client.is_model_supported(model):
                inputs = dict(
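For readers skimming the diff, here is a minimal, self-contained sketch of the delegation-with-fallback pattern that get_model_limit introduces: the aggregate client asks each wrapped client in turn and falls back to a conservative 128,000-token default when no client supports the model. ProviderClient and AggregateLlmClient below are hypothetical stand-ins, not the real classes in aio.py; only the method names and the fallback value come from the diff.

# Hypothetical sketch; class names are illustrative, not from aio.py.

class ProviderClient:
    """Toy per-provider client with a fixed table of model context limits."""

    def __init__(self, limits: dict[str, int]) -> None:
        self._limits = limits

    def is_model_supported(self, model: str) -> bool:
        return model in self._limits

    def get_model_limit(self, model: str) -> int:
        return self._limits[model]


class AggregateLlmClient:
    """Toy aggregate that delegates to the first client supporting the model."""

    def __init__(self, clients: list[ProviderClient]) -> None:
        self._clients = clients

    def get_model_limit(self, model: str) -> int:
        for client in self._clients:
            # The hasattr guard mirrors the diff: clients that predate
            # get_model_limit are skipped instead of raising AttributeError.
            if client.is_model_supported(model) and hasattr(client, "get_model_limit"):
                return client.get_model_limit(model)
        # Conservative default when no client can answer, as in the diff.
        return 128_000


aggregate = AggregateLlmClient([
    ProviderClient({"gpt-4o": 128_000}),             # example limits, assumed
    ProviderClient({"claude-3-5-sonnet": 200_000}),
])
print(aggregate.get_model_limit("claude-3-5-sonnet"))  # 200000
print(aggregate.get_model_limit("unknown-model"))      # 128000 (fallback)

This also illustrates the contract the new is_prompt_supported docstring documents: a positive return value is the token budget left after the prompt (for example, a 200,000-token limit minus a 150,000-token prompt leaves 50,000), while zero or a negative value means the prompt does not fit.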

0 commit comments