
Commit 8a35e38

Allow pydantic 2 in python client requested requirements (#433)
* allow pydantic 2 in requirements
* bump version
* ignore mypy for line
* fix mypy
* can't just cast to HttpUrl like that
1 parent 5e45f4a commit 8a35e38

File tree

5 files changed (+12, −11 lines)


clients/python/llmengine/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-__version__ = "0.0.0b23"
+__version__ = "0.0.0b24"

 import os
 from typing import Sequence

clients/python/llmengine/data_types.py

Lines changed: 6 additions & 6 deletions

@@ -10,7 +10,7 @@
 if int(pydantic.__version__.split(".")[0]) > 1:
     from pydantic.v1 import BaseModel, Field, HttpUrl
 else:
-    from pydantic import BaseModel, Field, HttpUrl
+    from pydantic import BaseModel, Field, HttpUrl  # type: ignore

 CpuSpecificationType = Union[str, int, float]
 StorageSpecificationType = Union[str, int, float]  # TODO(phil): we can make this more specific.
@@ -163,17 +163,17 @@ class CreateLLMEndpointRequest(BaseModel):
     cpus: CpuSpecificationType
     gpus: int
     memory: StorageSpecificationType
-    gpu_type: GpuType
+    gpu_type: Optional[GpuType]
     storage: Optional[StorageSpecificationType]
-    optimize_costs: Optional[bool]
+    optimize_costs: Optional[bool] = None
     min_workers: int
     max_workers: int
     per_worker: int
     labels: Dict[str, str]
-    prewarm: Optional[bool]
+    prewarm: Optional[bool] = None
     high_priority: Optional[bool]
-    default_callback_url: Optional[HttpUrl]
-    default_callback_auth: Optional[CallbackAuth]
+    default_callback_url: Optional[HttpUrl] = None
+    default_callback_auth: Optional[CallbackAuth] = None
     public_inference: Optional[bool] = True
     """
     Whether the endpoint can be used for inference for all users. LLM endpoints are public by default.
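Two details of this diff are easy to miss. First, the import is version-gated: under pydantic 2 the client pulls its models from the bundled pydantic.v1 compatibility namespace, so the schemas keep v1 semantics either way, and the `# type: ignore` silences mypy on the second import of the same names. Second, the new explicit `= None` defaults: pydantic v1 already treats `Optional[X]` as implicitly defaulting to None, but a native pydantic 2 model would treat such a field as required, so spelling the default out keeps the declarations unambiguous. A minimal sketch (the Example model here is hypothetical, not part of the client):

from typing import Optional

import pydantic

if int(pydantic.__version__.split(".")[0]) > 1:
    # Running under pydantic 2: use the bundled v1 compatibility namespace
    # so the model keeps pydantic v1 semantics.
    from pydantic.v1 import BaseModel
else:
    from pydantic import BaseModel  # type: ignore


class Example(BaseModel):
    implicit: Optional[bool]         # v1 semantics: implicitly defaults to None
    explicit: Optional[bool] = None  # same behavior, but the intent is explicit


print(Example())  # implicit=None explicit=None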

clients/python/llmengine/model.py

Lines changed: 2 additions & 1 deletion

@@ -277,7 +277,8 @@ def create(
         per_worker=per_worker,
         high_priority=high_priority,
         post_inference_hooks=post_inference_hooks_strs,
-        default_callback_url=default_callback_url,
+        # Pydantic automatically validates the url
+        default_callback_url=default_callback_url,  # type: ignore
         storage=storage,
         public_inference=public_inference,
     )
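The `# type: ignore` here papers over a static mismatch only: the caller supplies default_callback_url as a plain str while the request model annotates it as Optional[HttpUrl]. At runtime, pydantic v1 parses and validates the string into an HttpUrl during model construction, which is why the commit note says you "can't just cast to HttpUrl like that". A small sketch of that coercion, using a hypothetical Callback model:

from typing import Optional

import pydantic

if int(pydantic.__version__.split(".")[0]) > 1:
    from pydantic.v1 import BaseModel, HttpUrl, ValidationError
else:
    from pydantic import BaseModel, HttpUrl, ValidationError  # type: ignore


class Callback(BaseModel):
    url: Optional[HttpUrl] = None


cb = Callback(url="https://example.com/hook")  # plain str is validated and coerced
print(type(cb.url).__name__)  # HttpUrl

try:
    Callback(url="not-a-url")  # fails validation instead of being silently accepted
except ValidationError as exc:
    print(exc)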

clients/python/pyproject.toml

Lines changed: 2 additions & 2 deletions

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "scale-llm-engine"
-version = "0.0.0.beta23"
+version = "0.0.0.beta24"
 description = "Scale LLM Engine Python client"
 license = "Apache-2.0"
 authors = ["Phil Chen <[email protected]>"]
@@ -13,7 +13,7 @@ packages = [{include = "llmengine"}]

 [tool.poetry.dependencies]
 python = "^3.7"
-pydantic = "^1.10"
+pydantic = ">=1.10"
 aiohttp = "^3.8"
 requests = "^2.31.0"

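This dependency line is the heart of the commit: Poetry's caret constraint `^1.10` expands to `>=1.10,<2.0` and therefore excludes pydantic 2, while the loosened `>=1.10` admits it. A quick check of what each constraint accepts, a sketch assuming the `packaging` library is installed (the version numbers are illustrative):

from packaging.specifiers import SpecifierSet

caret = SpecifierSet(">=1.10,<2.0")  # what pydantic = "^1.10" expands to
loose = SpecifierSet(">=1.10")       # the new, pydantic-2-friendly constraint

for version in ("1.10.13", "2.4.2"):
    print(version, version in caret, version in loose)
# 1.10.13 True True
# 2.4.2 False True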

clients/python/setup.py

Lines changed: 1 addition & 1 deletion

@@ -3,6 +3,6 @@
 setup(
     name="scale-llm-engine",
     python_requires=">=3.7",
-    version="0.0.0.beta23",
+    version="0.0.0.beta24",
     packages=find_packages(),
 )
