184 changes: 182 additions & 2 deletions wren-ai-service/src/__main__.py
@@ -1,19 +1,24 @@
from contextlib import asynccontextmanager

import uvicorn
from fastapi import FastAPI
from fastapi import FastAPI, HTTPException
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import ORJSONResponse, RedirectResponse
from langfuse.decorators import langfuse_context

from src.config import settings
from src.globals import (
create_pipe_components,
create_service_container,
create_service_metadata,
)
from src.providers import generate_components
from src.providers.document_store.qdrant import QdrantProvider
from src.providers.embedder.litellm import LitellmEmbedderProvider
from src.providers.llm.litellm import LitellmLLMProvider
from src.utils import (
Configs,
init_langfuse,
setup_custom_logger,
)
@@ -28,8 +33,13 @@
@asynccontextmanager
async def lifespan(app: FastAPI):
# startup events
pipe_components = generate_components(settings.components)
pipe_components, instantiated_providers = generate_components(settings.components)
app.state.pipe_components = pipe_components
app.state.instantiated_providers = instantiated_providers
app.state.service_container = create_service_container(pipe_components, settings)
app.state.pipe_service_components = create_pipe_components(
app.state.service_container
)
app.state.service_metadata = create_service_metadata(pipe_components)
init_langfuse(settings)

@@ -86,6 +96,176 @@ def health():
return {"status": "ok"}


@app.get("/configs")
def get_configs():
_configs = {
"env_vars": {},
"providers": {
"llm": [],
"embedder": [],
},
"pipelines": {},
}

_llm_model_alias_mapping = {}
_embedder_model_alias_mapping = {}

_llm_configs = []
for _, model_config in app.state.instantiated_providers["llm"].items():
_llm_config = {
"model": model_config._model,
"alias": model_config._alias,
"context_window_size": model_config._context_window_size,
"timeout": model_config._timeout,
"kwargs": model_config._model_kwargs,
}
if model_config._api_base:
_llm_config["api_base"] = model_config._api_base
if model_config._api_version:
_llm_config["api_version"] = model_config._api_version
_llm_configs.append(_llm_config)
_llm_model_alias_mapping[model_config._model] = model_config._alias
_configs["providers"]["llm"] = _llm_configs

_embedder_configs = []
# we only support one embedding model now
for _, model_config in app.state.instantiated_providers["embedder"].items():
_embedder_config = {
"model": model_config._model,
"alias": model_config._alias,
"dimension": app.state.instantiated_providers["document_store"][
"qdrant"
]._embedding_model_dim,
"timeout": model_config._timeout,
"kwargs": model_config._model_kwargs,
}
if model_config._api_base:
_embedder_config["api_base"] = model_config._api_base
if model_config._api_version:
_embedder_config["api_version"] = model_config._api_version
_embedder_configs.append(_embedder_config)
Comment on lines +99 to +146

⚠️ Potential issue

🧩 Analysis chain
Sanitize exposed kwargs to avoid accidental secret leakage.

If any provider kwargs include sensitive fields, filter them before returning.

Proposed filter example:

-            "kwargs": model_config._model_kwargs,
+            "kwargs": {k: v for k, v in (model_config._model_kwargs or {}).items() if k.lower() not in {"api_key", "key", "token", "secret"}},

Run to verify presence of sensitive keys in kwargs across providers:


🏁 Script executed:

#!/bin/bash
rg -nP --type=py -C2 '_model_kwargs' src/providers

Length of output: 53


🏁 Script executed:

#!/bin/bash
set -euo pipefail

echo "PWD: $(pwd)"
echo "Listing top-level files/dirs:"
ls -la || true

echo
echo "Searching repository for occurrences of '_model_kwargs' and related patterns..."
rg -n --hidden -S "_model_kwargs" -C3 || true

echo
echo "Searching for '\"kwargs':' occurrences (to see where kwargs are returned in JSON responses)..."
rg -n --hidden -S '"kwargs"\s*:' -C3 || true

echo
echo "Searching for 'def get_configs' and __main__ to locate the reported file."
rg -n --hidden -S "def get_configs" -C3 || true
rg -n --hidden -S "__main__\.py" -C3 || true

echo
echo "Searching for 'instantiated_providers' usages (to confirm provider objects structure)..."
rg -n --hidden -S "instantiated_providers" -C3 || true

echo
echo "Searching for common secret-like keys in code (api_key, token, secret) to spot exposed fields."
rg -n --hidden -S -i "api_key|apikey|token|secret|api_secret" -C1 || true

Length of output: 317607


Sanitize exposed provider kwargs in API responses

The API currently returns raw provider kwargs (risk: leaking secrets such as API keys/tokens). Filter sensitive keys (handle None) before returning.

  • Files & places to fix:
    • wren-ai-service/src/__main__.py — _llm_config and _embedder_config (currently "kwargs": model_config._model_kwargs). Replace with a sanitized dict. Example:
-            "kwargs": model_config._model_kwargs,
+            "kwargs": {k: v for k, v in (model_config._model_kwargs or {}).items()
+                       if k.lower() not in {"api_key","key","token","secret","password","access_key","access_token","secret_key","client_secret","authorization"}},
  • wren-ai-service/src/globals.py — llm_model_kwargs (line ~317) is populated from provider.model_kwargs; sanitize there too using the same filter before exposing.

  • Tests & config:

    • Tests that assert presence/shape of kwargs (e.g. tests/pytest/test_config.py, tests/pytest/providers/test_providers.py) may need updating if they expect raw values.
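
A minimal sketch of a shared sanitizer (the helper name _sanitize_kwargs and the exact key set are assumptions for illustration, not part of the PR); it centralizes the filter for both _llm_config and _embedder_config and handles model_kwargs being None:

_SENSITIVE_KEYS = {
    "api_key", "key", "token", "secret", "password",
    "access_key", "access_token", "secret_key", "client_secret", "authorization",
}

def _sanitize_kwargs(kwargs: dict | None) -> dict:
    # Drop secret-looking keys; tolerate kwargs being None.
    return {k: v for k, v in (kwargs or {}).items() if k.lower() not in _SENSITIVE_KEYS}

Both endpoints would then expose "kwargs": _sanitize_kwargs(model_config._model_kwargs).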

_embedder_model_alias_mapping[model_config._model] = model_config._alias
break
_configs["providers"]["embedder"] = _embedder_configs

for pipe_name, pipe_component in app.state.pipe_service_components.items():
llm_model = pipe_component.get("llm", None)
embedding_model = pipe_component.get("embedder", None)
description = pipe_component.get("description", "")
if llm_model or embedding_model:
_configs["pipelines"][pipe_name] = {
"has_db_data_in_llm_prompt": pipe_component.get(
"has_db_data_in_llm_prompt", False
),
"description": description,
}
if llm_model:
if llm_model_alias := _llm_model_alias_mapping.get(llm_model):
_configs["pipelines"][pipe_name]["llm"] = llm_model_alias
else:
_configs["pipelines"][pipe_name]["llm"] = llm_model
if embedding_model:
if embedding_model_alias := _embedder_model_alias_mapping.get(
embedding_model
):
_configs["pipelines"][pipe_name]["embedder"] = embedding_model_alias
else:
_configs["pipelines"][pipe_name]["embedder"] = embedding_model

return _configs


@app.post("/configs")
def update_configs(configs_request: Configs):
try:
# override current instantiated_providers
app.state.instantiated_providers["embedder"] = {
f"litellm_embedder.{embedder_provider.alias}": LitellmEmbedderProvider(
**embedder_provider.__dict__
)
for embedder_provider in configs_request.providers.embedder
}
app.state.instantiated_providers["llm"] = {
f"litellm_llm.{llm_provider.alias}": LitellmLLMProvider(
**llm_provider.__dict__
)
for llm_provider in configs_request.providers.llm
}
Comment on lines +183 to +193

⚠️ Potential issue
Avoid Pydantic __dict__; use model_dump(exclude_none=True).

__dict__ may include internal attributes and unset fields; model_dump returns only validated fields and is stable across Pydantic versions.

-            f"litellm_embedder.{embedder_provider.alias}": LitellmEmbedderProvider(
-                **embedder_provider.__dict__
-            )
+            f"litellm_embedder.{embedder_provider.alias}": LitellmEmbedderProvider(
+                **embedder_provider.model_dump(exclude_none=True)
+            )
             for embedder_provider in configs_request.providers.embedder
         }
         app.state.instantiated_providers["llm"] = {
-            f"litellm_llm.{llm_provider.alias}": LitellmLLMProvider(
-                **llm_provider.__dict__
-            )
+            f"litellm_llm.{llm_provider.alias}": LitellmLLMProvider(
+                **llm_provider.model_dump(exclude_none=True)
+            )
             for llm_provider in configs_request.providers.llm
         }
🤖 Prompt for AI Agents
In wren-ai-service/src/__main__.py around lines 183 to 193, the code uses
Pydantic objects' __dict__ to expand kwargs for LitellmEmbedderProvider and
LitellmLLMProvider; replace each use of embedder_provider.__dict__ and
llm_provider.__dict__ with embedder_provider.model_dump(exclude_none=True) and
llm_provider.model_dump(exclude_none=True) respectively so only validated fields
are passed and internal attributes are omitted. Ensure both comprehensions call
model_dump(exclude_none=True) on each provider instance and no other behavior
changes are introduced.
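
For illustration, a self-contained sketch of the difference (the Provider model below is a made-up stand-in for the PR's Pydantic config classes):

from pydantic import BaseModel

class Provider(BaseModel):
    alias: str
    model: str
    api_base: str | None = None

p = Provider(alias="default", model="gpt-4o-mini")
print(p.__dict__)                       # api_base=None comes through; depending on Pydantic version, internal attributes can too
print(p.model_dump(exclude_none=True))  # {'alias': 'default', 'model': 'gpt-4o-mini'}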

app.state.instantiated_providers["document_store"]["qdrant"] = QdrantProvider(
location=app.state.instantiated_providers["document_store"][
"qdrant"
]._location,
api_key=app.state.instantiated_providers["document_store"][
"qdrant"
]._api_key,
timeout=app.state.instantiated_providers["document_store"][
"qdrant"
]._timeout,
embedding_model_dim=configs_request.providers.embedder[0].dimension,
recreate_index=True,
)
Comment on lines +178 to +206

⚠️ Potential issue
Guard against an empty embedder list.

configs_request.providers.embedder[0] raises IndexError when the list is empty; validate up front and return a 400 instead.

-def update_configs(configs_request: Configs):
+def update_configs(configs_request: Configs):
     try:
+        if not configs_request.providers.embedder:
+            raise HTTPException(status_code=400, detail="At least one embedder is required")
         # override current instantiated_providers
🤖 Prompt for AI Agents
In wren-ai-service/src/__main__.py around lines 178 to 206, the code assumes
configs_request.providers.embedder has at least one item and will raise
IndexError when empty; before accessing
configs_request.providers.embedder[0].dimension, validate that
providers.embedder is not empty and if it is return a 400 HTTP response (raise
fastapi.HTTPException(status_code=400, detail="...")) or otherwise handle the
missing embedder (e.g., require a default embedder or skip updating
document_store.embedding_model_dim), then proceed to instantiate providers;
ensure the validation occurs before any use of embedder[0] so the endpoint fails
fast with a clear error message.
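
One caveat worth adding: as the handler is written, the trailing except Exception would re-wrap a raised HTTPException(400) into a 500, so the guard only behaves as intended if it runs before the try block or if HTTPException is re-raised explicitly. A hedged sketch of that shape (the detail message is illustrative):

@app.post("/configs")
def update_configs(configs_request: Configs):
    if not configs_request.providers.embedder:
        # Validate before the try so the 400 is not swallowed by the broad except below.
        raise HTTPException(status_code=400, detail="At least one embedder is required")
    try:
        ...  # provider re-instantiation as in the diff above
    except HTTPException:
        raise  # let deliberate HTTP errors propagate unchanged
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error updating configs: {e}")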

Comment on lines +194 to +206

⚠️ Potential issue
Danger: an unconditional Qdrant reindex can drop data. Gate it on a dimension change.

Recreate the index only when the embedding dimension actually changes, or behind an explicit flag.

-        app.state.instantiated_providers["document_store"]["qdrant"] = QdrantProvider(
-            location=app.state.instantiated_providers["document_store"][
-                "qdrant"
-            ]._location,
-            api_key=app.state.instantiated_providers["document_store"][
-                "qdrant"
-            ]._api_key,
-            timeout=app.state.instantiated_providers["document_store"][
-                "qdrant"
-            ]._timeout,
-            embedding_model_dim=configs_request.providers.embedder[0].dimension,
-            recreate_index=True,
-        )
+        _current_qdrant = app.state.instantiated_providers["document_store"]["qdrant"]
+        _new_dim = (
+            configs_request.providers.embedder[0].dimension
+            if configs_request.providers.embedder
+            else _current_qdrant._embedding_model_dim
+        )
+        _should_recreate = _new_dim != _current_qdrant._embedding_model_dim
+        app.state.instantiated_providers["document_store"]["qdrant"] = QdrantProvider(
+            location=_current_qdrant._location,
+            api_key=_current_qdrant._api_key,
+            timeout=_current_qdrant._timeout,
+            embedding_model_dim=_new_dim,
+            recreate_index=_should_recreate,
+        )
🤖 Prompt for AI Agents
In wren-ai-service/src/__main__.py around lines 194 to 206, the code
unconditionally sets recreate_index=True when instantiating the QdrantProvider
which can drop existing data; change this to only recreate when the embedder
dimension actually changes or when an explicit override flag is set. Implement a
check that reads the current provider's embedding_model_dim (if present) and
compares it to configs_request.providers.embedder[0].dimension and set
recreate_index=True only if they differ; otherwise set recreate_index=False.
Also add support for an explicit override (e.g. a config or env flag like
configs_request.providers.document_store.force_recreate or
app.state.force_recreate) that, when true, forces recreate_index=True regardless
of dimension comparison.
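
A compact sketch combining the dimension check with the explicit override mentioned above (force_recreate is a hypothetical flag, not a field of the PR's Configs model):

def _should_recreate_index(current_dim: int, new_dim: int, force_recreate: bool = False) -> bool:
    # Recreate the Qdrant collection only on a real dimension change or an explicit request.
    return force_recreate or new_dim != current_dim

# Usage inside update_configs, with names as in the diff above:
# recreate = _should_recreate_index(
#     _current_qdrant._embedding_model_dim,
#     _new_dim,
#     force_recreate=getattr(configs_request, "force_recreate", False),  # hypothetical
# )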

_embedder_providers = app.state.instantiated_providers["embedder"]
_llm_providers = app.state.instantiated_providers["llm"]
_document_store_provider = app.state.instantiated_providers["document_store"][
"qdrant"
]

# override current pipe_components
for (
pipe_name,
pipe_service_components,
) in app.state.pipe_service_components.items():
if pipe_name in configs_request.pipelines:
pipe_config = configs_request.pipelines[pipe_name]
pipe_service_components.update(pipe_config)

# updating pipelines
for (
pipeline_name,
pipe_service_components,
) in app.state.pipe_service_components.items():
for service in pipe_service_components.get("services", []):
if pipe_config := configs_request.pipelines.get(pipeline_name):
service._pipelines[pipeline_name].update_components(
llm_provider=(
_llm_providers[f"litellm_llm.{pipe_config.llm}"]
if pipe_config.llm
else None
),
embedder_provider=(
_embedder_providers[
f"litellm_embedder.{pipe_config.embedder}"
]
if pipe_config.embedder
else None
),
document_store_provider=(
_document_store_provider
if service._pipelines[
pipeline_name
]._document_store_provider
else None
),
)
else:
if service._pipelines[pipeline_name]._document_store_provider:
service._pipelines[pipeline_name].update_components(
llm_provider=service._pipelines[
pipeline_name
]._llm_provider,
embedder_provider=service._pipelines[
pipeline_name
]._embedder_provider,
document_store_provider=_document_store_provider,
)

# TODO: updating service_metadata
for pipeline_name, _ in app.state.pipe_components.items():
pass
except Exception as e:
raise HTTPException(status_code=500, detail=f"Error updating configs: {e}")


if __name__ == "__main__":
uvicorn.run(
"src.__main__:app",
33 changes: 32 additions & 1 deletion wren-ai-service/src/core/pipeline.py
@@ -1,7 +1,7 @@
from abc import ABCMeta, abstractmethod
from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, Dict
from typing import Any, Dict, Optional

from hamilton.async_driver import AsyncDriver
from hamilton.driver import Driver
@@ -14,14 +14,42 @@
class BasicPipeline(metaclass=ABCMeta):
def __init__(self, pipe: Pipeline | AsyncDriver | Driver):
self._pipe = pipe
self._description = ""
self._llm_provider = None
self._embedder_provider = None
self._document_store_provider = None
self._components = {}

@abstractmethod
def run(self, *args, **kwargs) -> Dict[str, Any]:
...

def _update_components(self) -> dict:
...

def update_components(
self,
llm_provider: Optional[LLMProvider] = None,
embedder_provider: Optional[EmbedderProvider] = None,
document_store_provider: Optional[DocumentStoreProvider] = None,
update_components: bool = True,
):
if llm_provider:
self._llm_provider = llm_provider
if embedder_provider:
self._embedder_provider = embedder_provider
if document_store_provider:
self._document_store_provider = document_store_provider
if update_components:
self._components = self._update_components()

def __str__(self):
return f"BasicPipeline(llm_provider={self._llm_provider}, embedder_provider={self._embedder_provider}, document_store_provider={self._document_store_provider})"


@dataclass
class PipelineComponent(Mapping):
description: str = None
llm_provider: LLMProvider = None
embedder_provider: EmbedderProvider = None
document_store_provider: DocumentStoreProvider = None
@@ -35,3 +63,6 @@ def __iter__(self):

def __len__(self):
return len(self.__dict__)

def __str__(self):
return f"PipelineComponent(description={self.description}, llm_provider={self.llm_provider}, embedder_provider={self.embedder_provider}, document_store_provider={self.document_store_provider}, engine={self.engine})"
26 changes: 21 additions & 5 deletions wren-ai-service/src/core/provider.py
@@ -8,13 +8,20 @@ class LLMProvider(metaclass=ABCMeta):
def get_generator(self, *args, **kwargs):
...

def get_model(self):
@property
def alias(self):
return self._alias

@property
def model(self):
return self._model

def get_model_kwargs(self):
@property
def model_kwargs(self):
return self._model_kwargs

def get_context_window_size(self):
@property
def context_window_size(self):
return self._context_window_size


@@ -27,8 +34,17 @@ def get_text_embedder(self, *args, **kwargs):
def get_document_embedder(self, *args, **kwargs):
...

def get_model(self):
return self._embedding_model
@property
def alias(self):
return self._alias

@property
def model(self):
return self._model

@property
def model_kwargs(self):
return self._model_kwargs


class DocumentStoreProvider(metaclass=ABCMeta):