Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions mlserver/batching/requests.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ def _get_data(payload: Union[RequestInput, ResponseOutput]):

def _get_parameters(payload: ResponseOutput) -> DefaultDict[Any, Any]:
parameters = defaultdict(list)
payload_parameters = {}
if payload.parameters is not None:
payload_parameters = payload.parameters.model_dump()
for param_name, param_values in payload_parameters.items():
Expand Down
18 changes: 11 additions & 7 deletions mlserver/cli/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,36 +3,40 @@
"""

import click
import asyncio

from functools import wraps

from .init_project import init_cookiecutter_project

from ..server import MLServer
from ..logging import logger, configure_logger
from ..utils import install_uvloop_event_loop
from ..utils import AsyncManager

from .build import generate_dockerfile, build_image, write_dockerfile
from .serve import load_settings
from ..batch_processing import process_batch, CHOICES_TRANSPORT

CTX_ASYNC_MGR_KEY = "async_manager"


def click_async(f):
    """Adapt an ``async`` click command into a synchronous callable.

    The wrapped coroutine is executed on the shared ``AsyncManager`` that
    :func:`main` stores in the click context object under
    ``CTX_ASYNC_MGR_KEY``, so every command reuses the same event-loop
    management instead of spinning up a fresh loop per invocation.
    """

    @wraps(f)
    def wrapper(*args, **kwargs):
        # Resolve the manager lazily, at call time, from the active context.
        current_ctx = click.get_current_context()
        manager = current_ctx.obj[CTX_ASYNC_MGR_KEY]
        coroutine = f(*args, **kwargs)
        return manager.run(coroutine)

    return wrapper


@click.group()
@click.version_option()
# Receive the click Context so the shared state dict (ctx.obj) can be
# initialised before any subcommand runs.
@click.pass_context
def root(ctx):
    """
    Command-line interface to manage MLServer models.
    """
    # Guarantee ctx.obj is a dict even when root() is invoked without an
    # explicit obj=..., so subcommands can safely read/write shared entries
    # (e.g. the AsyncManager stored under CTX_ASYNC_MGR_KEY by main()).
    ctx.ensure_object(dict)


@root.command("start")
Expand Down Expand Up @@ -265,8 +269,8 @@ async def infer(

def main():
    """Entry point for the ``mlserver`` CLI.

    Configures logging first, then dispatches to the click root group with a
    single shared ``AsyncManager`` placed in the click context object.
    """
    # Logging must be set up before any command (or click itself) emits output.
    configure_logger()
    # One AsyncManager for the whole process: commands decorated with
    # click_async pull it back out of ctx.obj to run their coroutines.
    root(obj={CTX_ASYNC_MGR_KEY: AsyncManager()})


if __name__ == "__main__":
Expand Down
25 changes: 24 additions & 1 deletion mlserver/codecs/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from typing import TYPE_CHECKING

from .numpy import NumpyCodec, NumpyRequestCodec
from .pandas import PandasCodec
from .string import StringCodec, StringRequestCodec
from .base64 import Base64Codec
from .datetime import DatetimeCodec
Expand All @@ -24,6 +25,9 @@
decode_inference_request,
)

if TYPE_CHECKING: # pragma: no cover - type checking only
from .pandas import PandasCodec # noqa: F401

__all__ = [
"CodecError",
"NumpyCodec",
Expand All @@ -49,3 +53,22 @@
"decode_inference_request",
"decode_args",
]


def __getattr__(name: str):  # pragma: no cover - lightweight lazy import
    """Lazily resolve optional module attributes (PEP 562 hook).

    Only ``PandasCodec`` is handled here: importing it on first access keeps
    the pandas dependency optional for users who never touch that codec.
    """
    # Guard clause: anything other than the one lazy attribute is a normal miss.
    if name != "PandasCodec":
        raise AttributeError(f"module 'mlserver.codecs' has no attribute {name!r}")
    return _load_pandas_codec()


def _load_pandas_codec():
    """Import ``PandasCodec`` on demand and cache it on the module globals.

    Raises:
        ImportError: with a helpful message when the optional ``pandas``
            dependency (or anything else the codec module needs) is missing.
    """
    try:
        # Deferred import so importing mlserver.codecs never requires pandas.
        from .pandas import PandasCodec as codec_cls
    except Exception as exc:  # pragma: no cover - propagate useful context
        raise ImportError(
            "PandasCodec requires the optional 'pandas' dependency"
        ) from exc

    # Cache the class so subsequent lookups bypass __getattr__ entirely.
    globals()["PandasCodec"] = codec_cls
    return codec_cls
2 changes: 1 addition & 1 deletion mlserver/codecs/pandas.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ def encode_response(
payload: pd.DataFrame,
model_version: Optional[str] = None,
use_bytes: bool = True,
**kwargs
**kwargs,
) -> InferenceResponse:
outputs = cls.encode_outputs(payload, use_bytes=use_bytes)

Expand Down
Loading
Loading