Skip to content

Commit a781375

Browse files
committed
fix tests
Signed-off-by: Raphael Glon <[email protected]>
1 parent bb2a6c3 commit a781375

File tree

8 files changed

+48
-52
lines changed

8 files changed

+48
-52
lines changed

src/huggingface_inference_toolkit/webservice_starlette.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -27,10 +27,6 @@
2727
from huggingface_inference_toolkit.serialization.base import ContentType
2828
from huggingface_inference_toolkit.serialization.json_utils import Jsoner
2929
from huggingface_inference_toolkit.utils import convert_params_to_int_or_bool
30-
31-
# _load_repository_from_hf,
32-
# convert_params_to_int_or_bool,
33-
# )
3430
from huggingface_inference_toolkit.vertex_ai_utils import _load_repository_from_gcs
3531

3632
INFERENCE_HANDLERS = {}

tests/integ/conftest.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
import tenacity
1010
from transformers.testing_utils import _run_slow_tests
1111

12-
from huggingface_inference_toolkit.utils import _load_repository_from_hf
12+
from huggingface_inference_toolkit.heavy_utils import load_repository_from_hf
1313
from tests.integ.config import task2model
1414

1515
HF_HUB_CACHE = os.environ.get("HF_HUB_CACHE", "/home/ubuntu/.cache/huggingface/hub")
@@ -124,7 +124,7 @@ def local_container(device, task, repository_id, framework):
124124
object_id = model.replace("/", "--")
125125
model_dir = f"{HF_HUB_CACHE}/{object_id}"
126126

127-
_storage_dir = _load_repository_from_hf(
127+
_storage_dir = load_repository_from_hf(
128128
repository_id=model, target_dir=model_dir
129129
)
130130

tests/integ/helpers.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
from docker import DockerClient
1111
from transformers.testing_utils import _run_slow_tests, require_tf, require_torch
1212

13-
from huggingface_inference_toolkit.utils import _load_repository_from_hf
13+
from huggingface_inference_toolkit.heavy_utils import load_repository_from_hf
1414
from tests.integ.config import task2input, task2model, task2output, task2validation
1515

1616
IS_GPU = _run_slow_tests
@@ -207,7 +207,7 @@ def test_pt_container_local_model(task: str) -> None:
207207
make_sure_other_containers_are_stopped(client, container_name)
208208
with tempfile.TemporaryDirectory() as tmpdirname:
209209
# https://github.com/huggingface/infinity/blob/test-ovh/test/integ/utils.py
210-
_load_repository_from_hf(model, tmpdirname, framework="pytorch")
210+
load_repository_from_hf(model, tmpdirname, framework="pytorch")
211211
container = client.containers.run(
212212
container_image,
213213
name=container_name,
@@ -238,7 +238,7 @@ def test_pt_container_custom_handler(repository_id) -> None:
238238
make_sure_other_containers_are_stopped(client, container_name)
239239
with tempfile.TemporaryDirectory() as tmpdirname:
240240
# https://github.com/huggingface/infinity/blob/test-ovh/test/integ/utils.py
241-
_storage_dir = _load_repository_from_hf(repository_id, tmpdirname)
241+
_storage_dir = load_repository_from_hf(repository_id, tmpdirname)
242242
container = client.containers.run(
243243
container_image,
244244
name=container_name,
@@ -275,7 +275,7 @@ def test_pt_container_legacy_custom_pipeline(repository_id: str) -> None:
275275
make_sure_other_containers_are_stopped(client, container_name)
276276
with tempfile.TemporaryDirectory() as tmpdirname:
277277
# https://github.com/huggingface/infinity/blob/test-ovh/test/integ/utils.py
278-
_storage_dir = _load_repository_from_hf(repository_id, tmpdirname)
278+
_storage_dir = load_repository_from_hf(repository_id, tmpdirname)
279279
container = client.containers.run(
280280
container_image,
281281
name=container_name,
@@ -393,7 +393,7 @@ def test_tf_container_local_model(task) -> None:
393393
make_sure_other_containers_are_stopped(client, container_name)
394394
with tempfile.TemporaryDirectory() as tmpdirname:
395395
# https://github.com/huggingface/infinity/blob/test-ovh/test/integ/utils.py
396-
_storage_dir = _load_repository_from_hf(model, tmpdirname, framework=framework)
396+
_storage_dir = load_repository_from_hf(model, tmpdirname, framework=framework)
397397
container = client.containers.run(
398398
container_image,
399399
name=container_name,
@@ -421,7 +421,7 @@ def test_tf_container_local_model(task) -> None:
421421
# make_sure_other_containers_are_stopped(client, container_name)
422422
# with tempfile.TemporaryDirectory() as tmpdirname:
423423
# # https://github.com/huggingface/infinity/blob/test-ovh/test/integ/utils.py
424-
# storage_dir = _load_repository_from_hf("philschmid/custom-pipeline-text-classification", tmpdirname)
424+
# storage_dir = load_repository_from_hf("philschmid/custom-pipeline-text-classification", tmpdirname)
425425
# container = client.containers.run(
426426
# container_image,
427427
# name=container_name,

tests/unit/test_diffusers.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,14 +5,14 @@
55
from transformers.testing_utils import require_torch, slow
66

77
from huggingface_inference_toolkit.diffusers_utils import IEAutoPipelineForText2Image
8-
from huggingface_inference_toolkit.utils import _load_repository_from_hf, get_pipeline
8+
from huggingface_inference_toolkit.heavy_utils import get_pipeline, load_repository_from_hf
99

1010
logging.basicConfig(level="DEBUG")
1111

1212
@require_torch
1313
def test_get_diffusers_pipeline():
1414
with tempfile.TemporaryDirectory() as tmpdirname:
15-
storage_dir = _load_repository_from_hf(
15+
storage_dir = load_repository_from_hf(
1616
"echarlaix/tiny-random-stable-diffusion-xl",
1717
tmpdirname,
1818
framework="pytorch"
@@ -25,7 +25,7 @@ def test_get_diffusers_pipeline():
2525
@require_torch
2626
def test_pipe_on_gpu():
2727
with tempfile.TemporaryDirectory() as tmpdirname:
28-
storage_dir = _load_repository_from_hf(
28+
storage_dir = load_repository_from_hf(
2929
"echarlaix/tiny-random-stable-diffusion-xl",
3030
tmpdirname,
3131
framework="pytorch"
@@ -41,7 +41,7 @@ def test_pipe_on_gpu():
4141
@require_torch
4242
def test_text_to_image_task():
4343
with tempfile.TemporaryDirectory() as tmpdirname:
44-
storage_dir = _load_repository_from_hf(
44+
storage_dir = load_repository_from_hf(
4545
"echarlaix/tiny-random-stable-diffusion-xl",
4646
tmpdirname,
4747
framework="pytorch"

tests/unit/test_handler.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,9 @@
88
HuggingFaceHandler,
99
get_inference_handler_either_custom_or_default_handler,
1010
)
11-
from huggingface_inference_toolkit.utils import (
11+
from huggingface_inference_toolkit.heavy_utils import (
1212
_is_gpu_available,
13-
_load_repository_from_hf,
13+
load_repository_from_hf,
1414
)
1515

1616
TASK = "text-classification"
@@ -29,7 +29,7 @@ def test_pt_get_device() -> None:
2929

3030
with tempfile.TemporaryDirectory() as tmpdirname:
3131
# https://github.com/huggingface/infinity/blob/test-ovh/test/integ/utils.py
32-
storage_dir = _load_repository_from_hf(MODEL, tmpdirname, framework="pytorch")
32+
storage_dir = load_repository_from_hf(MODEL, tmpdirname, framework="pytorch")
3333
h = HuggingFaceHandler(model_dir=str(storage_dir), task=TASK)
3434
if torch.cuda.is_available():
3535
assert h.pipeline.model.device == torch.device(type="cuda", index=0)
@@ -41,7 +41,7 @@ def test_pt_get_device() -> None:
4141
def test_pt_predict_call(input_data: Dict[str, str]) -> None:
4242
with tempfile.TemporaryDirectory() as tmpdirname:
4343
# https://github.com/huggingface/infinity/blob/test-ovh/test/integ/utils.py
44-
storage_dir = _load_repository_from_hf(MODEL, tmpdirname, framework="pytorch")
44+
storage_dir = load_repository_from_hf(MODEL, tmpdirname, framework="pytorch")
4545
h = HuggingFaceHandler(model_dir=str(storage_dir), task=TASK)
4646

4747
prediction = h(input_data)
@@ -52,7 +52,7 @@ def test_pt_predict_call(input_data: Dict[str, str]) -> None:
5252
@require_torch
5353
def test_pt_custom_pipeline(input_data: Dict[str, str]) -> None:
5454
with tempfile.TemporaryDirectory() as tmpdirname:
55-
storage_dir = _load_repository_from_hf(
55+
storage_dir = load_repository_from_hf(
5656
"philschmid/custom-pipeline-text-classification",
5757
tmpdirname,
5858
framework="pytorch",
@@ -64,7 +64,7 @@ def test_pt_custom_pipeline(input_data: Dict[str, str]) -> None:
6464
@require_torch
6565
def test_pt_sentence_transformers_pipeline(input_data: Dict[str, str]) -> None:
6666
with tempfile.TemporaryDirectory() as tmpdirname:
67-
storage_dir = _load_repository_from_hf(
67+
storage_dir = load_repository_from_hf(
6868
"sentence-transformers/all-MiniLM-L6-v2", tmpdirname, framework="pytorch"
6969
)
7070
h = get_inference_handler_either_custom_or_default_handler(str(storage_dir), task="sentence-embeddings")
@@ -76,7 +76,7 @@ def test_pt_sentence_transformers_pipeline(input_data: Dict[str, str]) -> None:
7676
def test_tf_get_device():
7777
with tempfile.TemporaryDirectory() as tmpdirname:
7878
# https://github.com/huggingface/infinity/blob/test-ovh/test/integ/utils.py
79-
storage_dir = _load_repository_from_hf(MODEL, tmpdirname, framework="tensorflow")
79+
storage_dir = load_repository_from_hf(MODEL, tmpdirname, framework="tensorflow")
8080
h = HuggingFaceHandler(model_dir=str(storage_dir), task=TASK)
8181
if _is_gpu_available():
8282
assert h.pipeline.device == 0
@@ -88,7 +88,7 @@ def test_tf_get_device():
8888
def test_tf_predict_call(input_data: Dict[str, str]) -> None:
8989
with tempfile.TemporaryDirectory() as tmpdirname:
9090
# https://github.com/huggingface/infinity/blob/test-ovh/test/integ/utils.py
91-
storage_dir = _load_repository_from_hf(MODEL, tmpdirname, framework="tensorflow")
91+
storage_dir = load_repository_from_hf(MODEL, tmpdirname, framework="tensorflow")
9292
handler = HuggingFaceHandler(model_dir=str(storage_dir), task=TASK, framework="tf")
9393

9494
prediction = handler(input_data)
@@ -99,7 +99,7 @@ def test_tf_predict_call(input_data: Dict[str, str]) -> None:
9999
@require_tf
100100
def test_tf_custom_pipeline(input_data: Dict[str, str]) -> None:
101101
with tempfile.TemporaryDirectory() as tmpdirname:
102-
storage_dir = _load_repository_from_hf(
102+
storage_dir = load_repository_from_hf(
103103
"philschmid/custom-pipeline-text-classification",
104104
tmpdirname,
105105
framework="tensorflow",
@@ -112,7 +112,7 @@ def test_tf_custom_pipeline(input_data: Dict[str, str]) -> None:
112112
def test_tf_sentence_transformers_pipeline():
113113
# TODO should fail! because TF is not supported yet
114114
with tempfile.TemporaryDirectory() as tmpdirname:
115-
storage_dir = _load_repository_from_hf(
115+
storage_dir = load_repository_from_hf(
116116
"sentence-transformers/all-MiniLM-L6-v2", tmpdirname, framework="tensorflow"
117117
)
118118
with pytest.raises(Exception) as _exc_info:

tests/unit/test_optimum_utils.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,12 @@
44
import pytest
55
from transformers.testing_utils import require_torch
66

7+
from huggingface_inference_toolkit.heavy_utils import load_repository_from_hf
78
from huggingface_inference_toolkit.optimum_utils import (
89
get_input_shapes,
910
get_optimum_neuron_pipeline,
1011
is_optimum_neuron_available,
1112
)
12-
from huggingface_inference_toolkit.utils import _load_repository_from_hf
1313

1414
require_inferentia = pytest.mark.skipif(
1515
not is_optimum_neuron_available(),
@@ -34,7 +34,7 @@ def test_not_supported_task():
3434
@require_inferentia
3535
def test_get_input_shapes_from_file():
3636
with tempfile.TemporaryDirectory() as tmpdirname:
37-
storage_folder = _load_repository_from_hf(
37+
storage_folder = load_repository_from_hf(
3838
repository_id=REMOTE_CONVERTED_MODEL,
3939
target_dir=tmpdirname,
4040
)
@@ -49,7 +49,7 @@ def test_get_input_shapes_from_env():
4949
os.environ["HF_OPTIMUM_BATCH_SIZE"] = "4"
5050
os.environ["HF_OPTIMUM_SEQUENCE_LENGTH"] = "32"
5151
with tempfile.TemporaryDirectory() as tmpdirname:
52-
storage_folder = _load_repository_from_hf(
52+
storage_folder = load_repository_from_hf(
5353
repository_id=REMOTE_NOT_CONVERTED_MODEL,
5454
target_dir=tmpdirname,
5555
)
@@ -77,7 +77,7 @@ def test_get_optimum_neuron_pipeline_from_converted_model():
7777
def test_get_optimum_neuron_pipeline_from_non_converted_model():
7878
os.environ["HF_OPTIMUM_SEQUENCE_LENGTH"] = "32"
7979
with tempfile.TemporaryDirectory() as tmpdirname:
80-
storage_folder = _load_repository_from_hf(
80+
storage_folder = load_repository_from_hf(
8181
repository_id=REMOTE_NOT_CONVERTED_MODEL,
8282
target_dir=tmpdirname,
8383
)

tests/unit/test_sentence_transformers.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -3,28 +3,28 @@
33
import pytest
44
from transformers.testing_utils import require_torch
55

6+
from huggingface_inference_toolkit.heavy_utils import (
7+
get_pipeline,
8+
load_repository_from_hf,
9+
)
610
from huggingface_inference_toolkit.sentence_transformers_utils import (
711
SentenceEmbeddingPipeline,
812
get_sentence_transformers_pipeline,
913
)
10-
from huggingface_inference_toolkit.utils import (
11-
_load_repository_from_hf,
12-
get_pipeline,
13-
)
1414

1515

1616
@require_torch
1717
def test_get_sentence_transformers_pipeline():
1818
with tempfile.TemporaryDirectory() as tmpdirname:
19-
storage_dir = _load_repository_from_hf("sentence-transformers/all-MiniLM-L6-v2", tmpdirname)
19+
storage_dir = load_repository_from_hf("sentence-transformers/all-MiniLM-L6-v2", tmpdirname)
2020
pipe = get_pipeline("sentence-embeddings", storage_dir.as_posix())
2121
assert isinstance(pipe, SentenceEmbeddingPipeline)
2222

2323

2424
@require_torch
2525
def test_sentence_embedding_task():
2626
with tempfile.TemporaryDirectory() as tmpdirname:
27-
storage_dir = _load_repository_from_hf("sentence-transformers/all-MiniLM-L6-v2", tmpdirname)
27+
storage_dir = load_repository_from_hf("sentence-transformers/all-MiniLM-L6-v2", tmpdirname)
2828
pipe = get_sentence_transformers_pipeline("sentence-embeddings", storage_dir.as_posix())
2929
res = pipe(sentences="Lets create an embedding")
3030
assert isinstance(res["embeddings"], list)
@@ -36,7 +36,7 @@ def test_sentence_embedding_task():
3636
@require_torch
3737
def test_sentence_similarity():
3838
with tempfile.TemporaryDirectory() as tmpdirname:
39-
storage_dir = _load_repository_from_hf("sentence-transformers/all-MiniLM-L6-v2", tmpdirname)
39+
storage_dir = load_repository_from_hf("sentence-transformers/all-MiniLM-L6-v2", tmpdirname)
4040
pipe = get_sentence_transformers_pipeline("sentence-similarity", storage_dir.as_posix())
4141
res = pipe(source_sentence="Lets create an embedding", sentences=["Lets create an embedding"])
4242
assert isinstance(res["similarities"], list)
@@ -45,7 +45,7 @@ def test_sentence_similarity():
4545
@require_torch
4646
def test_sentence_ranking():
4747
with tempfile.TemporaryDirectory() as tmpdirname:
48-
storage_dir = _load_repository_from_hf("cross-encoder/ms-marco-MiniLM-L-6-v2", tmpdirname)
48+
storage_dir = load_repository_from_hf("cross-encoder/ms-marco-MiniLM-L-6-v2", tmpdirname)
4949
pipe = get_sentence_transformers_pipeline("sentence-ranking", storage_dir.as_posix())
5050
res = pipe(
5151
sentences=[
@@ -61,7 +61,7 @@ def test_sentence_ranking():
6161
@require_torch
6262
def test_sentence_ranking_tei():
6363
with tempfile.TemporaryDirectory() as tmpdirname:
64-
storage_dir = _load_repository_from_hf("cross-encoder/ms-marco-MiniLM-L-6-v2", tmpdirname, framework="pytorch")
64+
storage_dir = load_repository_from_hf("cross-encoder/ms-marco-MiniLM-L-6-v2", tmpdirname, framework="pytorch")
6565
pipe = get_sentence_transformers_pipeline("sentence-ranking", storage_dir.as_posix())
6666
res = pipe(
6767
query="Lets create an embedding",
@@ -82,7 +82,7 @@ def test_sentence_ranking_tei():
8282
@require_torch
8383
def test_sentence_ranking_validation_errors():
8484
with tempfile.TemporaryDirectory() as tmpdirname:
85-
storage_dir = _load_repository_from_hf("cross-encoder/ms-marco-MiniLM-L-6-v2", tmpdirname, framework="pytorch")
85+
storage_dir = load_repository_from_hf("cross-encoder/ms-marco-MiniLM-L-6-v2", tmpdirname, framework="pytorch")
8686
pipe = get_sentence_transformers_pipeline("sentence-ranking", storage_dir.as_posix())
8787

8888
with pytest.raises(

0 commit comments

Comments (0)