Skip to content

Commit aa0b5aa

Browse files
make fastembed cpu or gpu optional (#19878)
1 parent 1536873 commit aa0b5aa

File tree

3 files changed

+17
-6
lines changed

3 files changed

+17
-6
lines changed

llama-index-integrations/embeddings/llama-index-embeddings-fastembed/llama_index/embeddings/fastembed/base.py

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,12 @@
11
import asyncio
22
import numpy as np
3-
from typing import Any, List, Literal, Optional
3+
from typing import Any, List, Literal, Optional, TYPE_CHECKING
44

55
from llama_index.core.base.embeddings.base import BaseEmbedding
66
from llama_index.core.bridge.pydantic import Field, PrivateAttr, ConfigDict
7-
from fastembed import TextEmbedding
7+
8+
if TYPE_CHECKING:
9+
from fastembed import TextEmbedding
810

911

1012
class FastEmbedEmbedding(BaseEmbedding):
@@ -57,7 +59,7 @@ class FastEmbedEmbedding(BaseEmbedding):
5759
default=None, description="The ONNX providers to use for the embedding model."
5860
)
5961

60-
_model: TextEmbedding = PrivateAttr()
62+
_model: "TextEmbedding" = PrivateAttr()
6163

6264
def __init__(
6365
self,
@@ -77,6 +79,15 @@ def __init__(
7779
**kwargs,
7880
)
7981

82+
try:
83+
from fastembed import TextEmbedding
84+
except ImportError as e:
85+
raise ImportError(
86+
"Could not import FastEmbed. "
87+
"Please install it with `pip install fastembed` or "
88+
"`pip install fastembed-gpu` for GPU support"
89+
) from e
90+
8091
self._model = TextEmbedding(
8192
model_name=model_name,
8293
cache_dir=cache_dir,

llama-index-integrations/embeddings/llama-index-embeddings-fastembed/pyproject.toml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ build-backend = "hatchling.build"
44

55
[dependency-groups]
66
dev = [
7+
"fastembed>=0.2.2",
78
"ipython==8.10.0",
89
"jupyter>=1.0.0,<2",
910
"mypy==0.991",
@@ -26,14 +27,13 @@ dev = [
2627

2728
[project]
2829
name = "llama-index-embeddings-fastembed"
29-
version = "0.4.1"
30+
version = "0.5.0"
3031
description = "llama-index embeddings fastembed integration"
3132
authors = [{name = "Your Name", email = "[email protected]"}]
3233
requires-python = ">=3.9,<3.13"
3334
readme = "README.md"
3435
license = "MIT"
3536
dependencies = [
36-
"fastembed>=0.2.2",
3737
"llama-index-core>=0.13.0,<0.15",
3838
]
3939

llama-index-integrations/embeddings/llama-index-embeddings-fastembed/tests/test_embeddings_fastembed.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ def test_class():
99
assert BaseEmbedding.__name__ in names_of_base_classes
1010

1111

12-
@patch("llama_index.embeddings.fastembed.base.TextEmbedding")
12+
@patch("fastembed.TextEmbedding")
1313
def test_create_fastembed_embedding(mock_text_embedding):
1414
cache = Path("./test_cache_2")
1515

0 commit comments

Comments (0)