
Commit edd2253

test: add function tests for the rag core api
1 parent 245f4b3 commit edd2253

22 files changed: +780 additions, -112 deletions

libs/.gitignore

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+.vscode

libs/README.md

Lines changed: 1 addition & 1 deletion
@@ -83,7 +83,7 @@ Uploaded documents are required to contain the following metadata:
 | vector_database | [`rag_core_api.vector_databases.vector_database.VectorDatabase`](./rag-core-api/src/rag_core_api/vector_databases/vector_database.py) | [`rag_core_api.impl.vector_databases.qdrant_database.QdrantDatabase`](./rag-core-api/src/rag_core_api/impl/vector_databases/qdrant_database.py) | |
 | reranker | [`rag_core_api.reranking.reranker.Reranker`](./rag-core-api/src/rag_core_api/reranking/reranker.py) | [`rag_core_api.impl.reranking.flashrank_reranker.FlashrankReranker`](./rag-core-api/src/rag_core_api/impl/reranking/flashrank_reranker.py) | Used in the *composed_retriever* |
 | composed_retriever | [`rag_core_api.retriever.retriever.Retriever`](./rag-core-api/src/rag_core_api/retriever/retriever.py) | [`rag_core_api.impl.retriever.composite_retriever.CompositeRetriever`](./rag-core-api/src/rag_core_api/impl/retriever/composite_retriever.py) | Handles retrieval, re-ranking, etc. |
-| large_language_model | `langchain_core.language_models.llms.BaseLLM` | `langchain_community.llms.vllm.VLLMOpenAI` or `langchain_community.llms.Ollama` | The LLM that is used for all LLM tasks. The default depends on the value of `rag_core_lib.impl.settings.rag_class_types_settings.RAGClassTypeSettings.llm_type` |
+| large_language_model | `langchain_core.language_models.llms.BaseLLM` | `langchain_community.llms.vllm.VLLMOpenAI`, `langchain_community.llms.Ollama` or `langchain_community.llms.FakeListLLM` | The LLM that is used for all LLM tasks. The default depends on the value of `rag_core_lib.impl.settings.rag_class_types_settings.RAGClassTypeSettings.llm_type`. The FakeListLLM is used for testing. |
 | prompt | `str` | [`rag_core_api.prompt_templates.answer_generation_prompt.ANSWER_GENERATION_PROMPT`](./rag-core-api/src/rag_core_api/prompt_templates/answer_generation_prompt.py) | The prompt used for answering the question. |
 | rephrasing_prompt | `str` | [`rag_core_api.prompt_templates.question_rephrasing_prompt.ANSWER_REPHRASING_PROMPT`](./rag-core-api/src/rag_core_api/prompt_templates/question_rephrasing_prompt.py) | The prompt used for rephrasing the question. The rephrased question and the *original* question are both used for retrieval of the documents. |
 | langfuse_manager | [`rag_core_lib.impl.langfuse_manager.langfuse_manager.LangfuseManager`](./rag-core-lib/src/rag_core_lib/impl/langfuse_manager/langfuse_manager.py) | [`rag_core_lib.impl.langfuse_manager.langfuse_manager.LangfuseManager`](./rag-core-lib/src/rag_core_lib/impl/langfuse_manager/langfuse_manager.py) | Retrieves additional settings, as well as the prompt from langfuse if available. |
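
For reference, `langchain_community.llms.fake.FakeListLLM` simply replays a fixed list of canned responses, which makes LLM-dependent chains deterministic in tests. A minimal sketch (not taken from this repository) of how it behaves:

    # FakeListLLM returns the given responses in order, one per call.
    from langchain_community.llms.fake import FakeListLLM

    fake_llm = FakeListLLM(responses=["first canned answer", "second canned answer"])

    print(fake_llm.invoke("What is RAG?"))      # -> "first canned answer"
    print(fake_llm.invoke("And a follow-up?"))  # -> "second canned answer"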

libs/extractor-api-lib/src/openapi_server/impl/__init__.py

Whitespace-only changes.

libs/rag-core-api/.gitignore

Lines changed: 1 addition & 1 deletion
@@ -111,7 +111,7 @@ celerybeat.pid
 
 # Environments
 .env
-.venv
+.venv*
 env/
 venv/
 ENV/

libs/rag-core-api/.python-version

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+3.11.11

libs/rag-core-api/poetry.lock

Lines changed: 19 additions & 1 deletion
Some generated files are not rendered by default.

libs/rag-core-api/pyproject.toml

Lines changed: 5 additions & 1 deletion
@@ -27,6 +27,7 @@ openai = "^1.42.0"
 langgraph = "^0.2.23"
 pillow = "^11.0.0"
 langchain-ollama = "^0.2.0"
+pytest-asyncio = "^0.25.0"
 
 [tool.poetry.group.dev.dependencies]
 debugpy = "^1.8.1"
@@ -78,7 +79,8 @@ per-file-ignores = """
 ./src/rag_core_api/prompt_templates/*: E501,
 ./src/rag_core_api/dependency_container.py: CCE002,CCE001,
 ./src/rag_core_api/apis/rag_api_base.py: WOT001,
-./tests/*: S101,
+./tests/rag_api_test.py: E402,S101,S105,I252,D409,
+./tests/*: S101,S105,I252,D409,
 """
 
 [tool.black]
@@ -114,4 +116,6 @@ max-line-length = 120
 [tool.pytest.ini_options]
 log_cli = 1
 log_cli_level = "DEBUG"
+pythonpath = "src"
+testpaths = "src/tests"
 
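
The new `pytest-asyncio` dependency, together with the `pythonpath` and `testpaths` settings, lets the suite await the async API code directly. A hypothetical test module (file name and contents invented here for illustration) could look like this; since `asyncio_mode` is not set, each async test needs the marker:

    # e.g. src/tests/example_async_test.py (hypothetical, not part of the commit)
    import pytest


    async def fake_rag_call(question: str) -> str:
        # stand-in for an async call into the RAG API
        return f"answer to: {question}"


    @pytest.mark.asyncio
    async def test_fake_rag_call() -> None:
        answer = await fake_rag_call("what is retrieval augmented generation?")
        assert answer.startswith("answer to:")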

libs/rag-core-api/src/rag_core_api/dependency_container.py

Lines changed: 18 additions & 5 deletions
@@ -9,12 +9,14 @@
     Singleton,
 )
 from langchain_community.document_compressors.flashrank_rerank import FlashrankRerank
-from langchain_community.embeddings import OllamaEmbeddings
+from langchain_community.embeddings.ollama import OllamaEmbeddings
+from langchain_community.embeddings.fake import FakeEmbeddings
 from langchain_community.llms.ollama import Ollama
 from langchain_community.llms.vllm import VLLMOpenAI
+from langchain_community.llms.fake import FakeListLLM
 from langchain_ollama import ChatOllama
 from langchain_openai import ChatOpenAI
-from langchain_qdrant import Qdrant
+from langchain_qdrant import QdrantVectorStore
 from langfuse import Langfuse
 
 from rag_core_api.impl.answer_generation_chains.answer_generation_chain import (
@@ -31,6 +33,8 @@
 from rag_core_api.impl.embeddings.langchain_community_embedder import (
     LangchainCommunityEmbedder,
 )
+
+
 from rag_core_api.impl.embeddings.stackit_embedder import StackitEmbedder
 from rag_core_api.impl.evaluator.langfuse_ragas_evaluator import LangfuseRagasEvaluator
 from rag_core_api.impl.graph.chat_graph import DefaultChatGraph
@@ -65,6 +69,8 @@
 from rag_core_lib.impl.settings.stackit_vllm_settings import StackitVllmSettings
 from rag_core_lib.impl.tracers.langfuse_traced_chain import LangfuseTracedGraph
 from rag_core_lib.impl.utils.async_threadsafe_semaphore import AsyncThreadsafeSemaphore
+from rag_core_lib.impl.settings.fake_llm_settings import FakeLlmSettings
+from rag_core_api.impl.settings.fake_embedder_settings import FakeEmbedderSettings
 
 
 class DependencyContainer(DeclarativeContainer):
@@ -78,8 +84,10 @@ class DependencyContainer(DeclarativeContainer):
     retriever_settings = RetrieverSettings()
     ollama_settings = OllamaSettings()
     ollama_embedder_settings = OllamaEmbedderSettings()
+    fake_embedder_settings = FakeEmbedderSettings()
     langfuse_settings = LangfuseSettings()
     stackit_vllm_settings = StackitVllmSettings()
+    fake_llm_settings = FakeLlmSettings()
     error_messages = ErrorMessages()
     rag_class_type_settings = RAGClassTypeSettings()
     ragas_settings = RagasSettings()
@@ -97,17 +105,21 @@
             LangchainCommunityEmbedder, embedder=Singleton(OllamaEmbeddings, **ollama_embedder_settings.model_dump())
         ),
         stackit=Singleton(StackitEmbedder, stackit_embedder_settings),
+        fake=Singleton(
+            LangchainCommunityEmbedder, embedder=Singleton(FakeEmbeddings, **fake_embedder_settings.model_dump())
+        ),
     )
 
     vectordb_client = Singleton(
         qdrant_client.QdrantClient,
-        url=vector_database_settings.url,
+        location=vector_database_settings.location,
     )
     vectorstore = Singleton(
-        Qdrant,
+        QdrantVectorStore,
         client=vectordb_client,
         collection_name=vector_database_settings.collection_name,
-        embeddings=embedder,
+        embedding=embedder,
+        validate_collection_config=False,
    )
 
     vector_database = Singleton(
@@ -165,6 +177,7 @@ class DependencyContainer(DeclarativeContainer):
         class_selector_config.llm_type,
         ollama=Singleton(llm_provider, ollama_settings, Ollama),
         stackit=Singleton(llm_provider, stackit_vllm_settings, VLLMOpenAI),
+        fake=Singleton(llm_provider, fake_llm_settings, FakeListLLM),
     )
 
     prompt = ANSWER_GENERATION_PROMPT
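
Taken together, the `fake` embedder, the switch from `url` to `location`, and `validate_collection_config=False` allow the container to be wired against an in-memory vector store for tests. A rough sketch of the idea outside the container (collection name and vector size are made up for illustration):

    # Sketch only: fake embeddings plus an in-memory Qdrant client, no external services.
    from langchain_community.embeddings.fake import FakeEmbeddings
    from langchain_qdrant import QdrantVectorStore
    from qdrant_client import QdrantClient
    from qdrant_client.models import Distance, VectorParams

    embedder = FakeEmbeddings(size=384)          # random vectors of a fixed size
    client = QdrantClient(location=":memory:")   # lives only for the test run

    client.create_collection(
        collection_name="test-collection",
        vectors_config=VectorParams(size=384, distance=Distance.COSINE),
    )

    vectorstore = QdrantVectorStore(
        client=client,
        collection_name="test-collection",
        embedding=embedder,
        validate_collection_config=False,  # skip the schema check, as in the container
    )
    vectorstore.add_texts(["hello rag"])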

libs/rag-core-api/src/rag_core_api/impl/embeddings/embedder_type.py

Lines changed: 1 addition & 0 deletions
@@ -9,3 +9,4 @@ class EmbedderType(StrEnum):
 
     OLLAMA = "ollama"
     STACKIT = "stackit"
+    FAKE = "fake"

libs/rag-core-api/src/rag_core_api/impl/retriever/composite_retriever.py

Lines changed: 1 addition & 1 deletion
@@ -100,7 +100,7 @@ async def ainvoke(
                 continue
             return_val.append(result)
 
-        if self._reranker:
+        if self._reranker and results:
             return_val = await self._reranker.ainvoke((return_val, retriever_input), config=config)
 
         return return_val
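
The added `and results` guard skips re-ranking entirely when retrieval returned nothing, rather than handing an empty result set to the reranker. A simplified sketch of the pattern (not the repository's actual class):

    from langchain_core.documents import Document


    async def rerank_if_needed(reranker, results: list[Document], query: str) -> list[Document]:
        # Only call the (model-backed) reranker when there is something to rank.
        if reranker and results:
            return await reranker.ainvoke((results, query))
        return results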
