Skip to content

Commit 17a160c

Browse files
authored
refactor: move legacy APIs to a separate module (#5381)
* refactor: move legacy APIs to a separate module

Signed-off-by: Frost Ming <me@frostming.com>
1 parent d0a47a3 commit 17a160c

File tree

57 files changed

+184
-129
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

57 files changed

+184
-129
lines changed

docs/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -213,7 +213,7 @@ extension to include documentation from docstring. For example, a `.rst` documen
213213
create a section made from a Python Class's docstring, using the following syntax:
214214

215215
```rst
216-
.. autoclass:: bentoml.Service
216+
.. autoclass:: bentoml.Bento
217217
:members: api
218218
```
219219

docs/source/build-with-bentoml/snippets/metrics/runner_impl.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,14 @@
1-
class NLTKSentimentAnalysisRunnable(bentoml.Runnable):
1+
import bentoml
2+
3+
4+
class NLTKSentimentAnalysisRunnable(bentoml.legacy.Runnable):
25
SUPPORTED_RESOURCES = ("cpu",)
36
SUPPORTS_CPU_MULTI_THREADING = False
47

58
def __init__(self):
69
self.sia = SentimentIntensityAnalyzer()
710

8-
@bentoml.Runnable.method(batchable=False)
11+
@bentoml.legacy.Runnable.method(batchable=False)
912
def is_positive(self, input_text: str) -> bool:
1013
start = time.perf_counter()
1114
scores = [

src/_bentoml_impl/frameworks/catboost.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -200,12 +200,12 @@ def save_model(
200200

201201

202202
@deprecated(suggestion="Use `get_service` instead.")
203-
def get_runnable(bento_model: bentoml.Model) -> t.Type[bentoml.Runnable]:
203+
def get_runnable(bento_model: bentoml.Model) -> t.Type[bentoml.legacy.Runnable]:
204204
"""
205205
Private API: use :obj:`~bentoml.Model.to_runnable` instead.
206206
"""
207207

208-
class CatBoostRunnable(bentoml.Runnable):
208+
class CatBoostRunnable(bentoml.legacy.Runnable):
209209
SUPPORTED_RESOURCES = ("nvidia.com/gpu", "cpu")
210210
SUPPORTS_CPU_MULTI_THREADING = True
211211

src/_bentoml_impl/frameworks/lightgbm.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -204,12 +204,12 @@ def save_model(
204204

205205

206206
@deprecated(suggestion="Use `get_service` instead.")
207-
def get_runnable(bento_model: bentoml.Model) -> t.Type[bentoml.Runnable]:
207+
def get_runnable(bento_model: bentoml.Model) -> t.Type[bentoml.legacy.Runnable]:
208208
"""
209209
Private API: use :obj:`~bentoml.Model.to_runnable` instead.
210210
"""
211211

212-
class LightGBMRunnable(bentoml.Runnable):
212+
class LightGBMRunnable(bentoml.legacy.Runnable):
213213
# LightGBM only supports GPU during training, not for inference.
214214
SUPPORTED_RESOURCES = ("cpu",)
215215
SUPPORTS_CPU_MULTI_THREADING = True

src/_bentoml_impl/frameworks/mlflow.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -213,14 +213,14 @@ def import_model(
213213

214214

215215
@deprecated(suggestion="Use `get_service` instead.")
216-
def get_runnable(bento_model: bentoml.Model) -> t.Type[bentoml.Runnable]:
216+
def get_runnable(bento_model: bentoml.Model) -> t.Type[bentoml.legacy.Runnable]:
217217
"""
218218
Private API: use :obj:`~bentoml.Model.to_runnable` instead.
219219
"""
220220
assert "predict" in bento_model.info.signatures
221221
predict_signature = bento_model.info.signatures["predict"]
222222

223-
class MLflowPyfuncRunnable(bentoml.Runnable):
223+
class MLflowPyfuncRunnable(bentoml.legacy.Runnable):
224224
# The only case that multi-threading may not be supported is when user define a
225225
# custom python_function MLflow model with pure python code, but there's no way
226226
# of telling that from the MLflow model metadata. It should be a very rare case,
@@ -233,7 +233,7 @@ def __init__(self):
233233
super().__init__()
234234
self.model = load_model(bento_model)
235235

236-
@bentoml.Runnable.method(
236+
@bentoml.legacy.Runnable.method(
237237
batchable=predict_signature.batchable,
238238
batch_dim=predict_signature.batch_dim,
239239
input_spec=None,

src/_bentoml_impl/frameworks/sklearn.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ def save_model(
9292
name: Name for given model instance. This should pass Python identifier check.
9393
model: Instance of model to be saved.
9494
signatures: Methods to expose for running inference on the target model. Signatures are
95-
used for creating Runner instances when serving model with bentoml.Service
95+
used for creating Runner instances when serving model with bentoml.legacy.Service
9696
labels: user-defined labels for managing models, e.g. team=nlp, stage=dev
9797
custom_objects: user-defined additional python objects to be saved alongside the model,
9898
e.g. a tokenizer instance, preprocessor function, model configuration json
@@ -164,7 +164,7 @@ def get_runnable(bento_model: Model):
164164
Private API: use :obj:`~bentoml.Model.to_runnable` instead.
165165
"""
166166

167-
class SklearnRunnable(bentoml.Runnable):
167+
class SklearnRunnable(bentoml.legacy.Runnable):
168168
SUPPORTED_RESOURCES = ("cpu",)
169169
SUPPORTS_CPU_MULTI_THREADING = True
170170

src/_bentoml_impl/frameworks/xgboost.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -217,12 +217,12 @@ def save_model(
217217

218218

219219
@deprecated(suggestion="Use `get_service` instead.")
220-
def get_runnable(bento_model: bentoml.Model) -> t.Type[bentoml.Runnable]:
220+
def get_runnable(bento_model: bentoml.Model) -> t.Type[bentoml.legacy.Runnable]:
221221
"""
222222
Private API: use :obj:`~bentoml.Model.to_runnable` instead.
223223
"""
224224

225-
class XGBoostRunnable(bentoml.Runnable):
225+
class XGBoostRunnable(bentoml.legacy.Runnable):
226226
SUPPORTED_RESOURCES = ("nvidia.com/gpu", "cpu")
227227
SUPPORTS_CPU_MULTI_THREADING = True
228228

src/_bentoml_sdk/service/factory.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,6 @@
1818
from starlette.applications import Starlette
1919
from typing_extensions import Unpack
2020

21-
from bentoml import Runner
2221
from bentoml._internal.bento.bento import Bento
2322
from bentoml._internal.bento.build_config import BentoEnvSchema
2423
from bentoml._internal.configuration.containers import BentoMLContainer
@@ -28,6 +27,7 @@
2827
from bentoml._internal.utils import dict_filter_none
2928
from bentoml.exceptions import BentoMLConfigException
3029
from bentoml.exceptions import BentoMLException
30+
from bentoml.legacy import Runner
3131

3232
from ..images import Image
3333
from ..method import APIMethod

src/bentoml/__init__.py

Lines changed: 15 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -32,11 +32,6 @@
3232
"server_context": "._internal.context:server_context",
3333
"Model": "._internal.models:Model",
3434
"monitor": "._internal.monitoring:monitor",
35-
"Resource": "._internal.resource:Resource",
36-
"Runnable": "._internal.runner:Runnable",
37-
"Runner": "._internal.runner:Runner",
38-
"Strategy": "._internal.runner.strategy:Strategy",
39-
"Service": "._internal.service:Service",
4035
"Tag": "._internal.tag:Tag",
4136
"load": "._internal.service.loader:load",
4237
"Cookie": "._internal.utils.http:Cookie",
@@ -50,9 +45,6 @@
5045
"pull": ".bentos:pull",
5146
"push": ".bentos:push",
5247
"serve": ".bentos:serve",
53-
# Legacy APIs
54-
"HTTPServer": ".server:HTTPServer",
55-
"GrpcServer": ".server:GrpcServer",
5648
# New SDK
5749
"service": "_bentoml_sdk:service",
5850
"runner_service": "_bentoml_sdk:runner_service",
@@ -83,6 +75,7 @@
8375
from _bentoml_impl.frameworks import xgboost
8476

8577
from . import bentos
78+
from . import legacy
8679

8780
# BentoML built-in types
8881
from ._internal.bento import Bento
@@ -94,11 +87,6 @@
9487
from ._internal.context import server_context
9588
from ._internal.models import Model
9689
from ._internal.monitoring import monitor
97-
from ._internal.resource import Resource
98-
from ._internal.runner import Runnable
99-
from ._internal.runner import Runner
100-
from ._internal.runner.strategy import Strategy
101-
from ._internal.service import Service
10290
from ._internal.service.loader import load
10391
from ._internal.tag import Tag
10492
from ._internal.utils.args import use_arguments
@@ -176,6 +164,7 @@
176164
FrameworkImporter.install()
177165

178166
bentos = _LazyLoader("bentoml.bentos", globals(), "bentoml.bentos")
167+
legacy = _LazyLoader("bentoml.legacy", globals(), "bentoml.legacy")
179168

180169
# ML Frameworks
181170
catboost = _LazyLoader(
@@ -301,22 +290,33 @@
301290
del _LazyLoader, FrameworkImporter
302291

303292
def __getattr__(name: str) -> Any:
293+
import bentoml.legacy as legacy
294+
304295
if name in MODULE_ATTRS:
305296
from importlib import import_module
306297

307298
module_name, attr_name = MODULE_ATTRS[name].split(":")
308299
module = import_module(module_name, __package__)
309300
return getattr(module, attr_name)
310-
raise AttributeError(f"module {__name__} has no attribute {name}")
301+
elif name in legacy.__all__:
302+
from ._internal.utils import warn_deprecated
303+
304+
warn_deprecated(
305+
f"`bentoml.{name}` is moved to `bentoml.legacy.{name}` "
306+
"and will be removed in a future version.",
307+
)
308+
return getattr(legacy, name)
309+
else:
310+
raise AttributeError(f"module {__name__} has no attribute {name}")
311311

312312

313313
__all__ = [
314314
"__version__",
315315
"Context",
316316
"Cookie",
317-
"Service",
318317
"bentos",
319318
"models",
319+
"legacy",
320320
"batch",
321321
"metrics",
322322
"container",
@@ -326,8 +326,6 @@ def __getattr__(name: str) -> Any:
326326
"io",
327327
"Tag",
328328
"Model",
329-
"Runner",
330-
"Runnable",
331329
"monitoring",
332330
"BentoCloudClient", # BentoCloud REST API Client
333331
# bento APIs
@@ -374,8 +372,6 @@ def __getattr__(name: str) -> Any:
374372
"load_config",
375373
"save_config",
376374
"set_serialization_strategy",
377-
"Strategy",
378-
"Resource",
379375
# new SDK
380376
"service",
381377
"runner_service",

src/bentoml/_internal/frameworks/common/pytorch.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def __init__(self, *inner_args: t.Any, **inner_kwargs: t.Any) -> None:
5656
return NewClass
5757

5858

59-
class PytorchModelRunnable(bentoml.Runnable):
59+
class PytorchModelRunnable(bentoml.legacy.Runnable):
6060
SUPPORTED_RESOURCES = ("nvidia.com/gpu", "cpu")
6161
SUPPORTS_CPU_MULTI_THREADING = True
6262

0 commit comments

Comments (0)