Skip to content

Commit 328bf5e

Browse files
committed
Bump version: 1.1.13 → 1.1.14
1 parent 39acfa6 commit 328bf5e

File tree

11 files changed

+42
-10
lines changed

.bumpversion.cfg

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
[bumpversion]
2-
current_version = 1.1.13
2+
current_version = 1.1.14
33
commit = True
44
tag = True
55

Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,6 @@ ENV HOST=${HOST} \
5454
RUN apt-get update \
5555
&& apt-get install -y curl nano libpq-dev \
5656
&& curl -LsSf https://astral.sh/uv/install.sh | sh \
57-
&& uv pip install --system --upgrade --verbose --no-cache --break-system-packages --prerelease=allow vector-mcp[postgres,chromadb,couchbase,qdrant,mongodb,a2a]>=1.1.13
57+
&& uv pip install --system --upgrade --verbose --no-cache --break-system-packages --prerelease=allow vector-mcp[postgres,chromadb,couchbase,qdrant,mongodb,a2a]>=1.1.14
5858

5959
CMD ["vector-mcp"]

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
![PyPI - Wheel](https://img.shields.io/pypi/wheel/vector-mcp)
2222
![PyPI - Implementation](https://img.shields.io/pypi/implementation/vector-mcp)
2323

24-
*Version: 1.1.13*
24+
*Version: 1.1.14*
2525

2626
## Overview
2727

compose.yml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ services:
3131
- "LLM_BASE_URL=${LLM_BASE_URL:-http://host.docker.internal:1234/v1}"
3232
- "LLM_API_KEY=${LLM_API_KEY:-llama}"
3333
- "MODEL_ID=${EMBEDDING_MODEL_ID:-text-embedding-nomic-embed-text-v2-moe}"
34+
- "CHUNK_SIZE=${CHUNK_SIZE:-512}"
3435
ports:
3536
- "8023:8023"
3637
healthcheck:
@@ -140,6 +141,7 @@ services:
140141
- "LLM_BASE_URL=${LLM_BASE_URL:-http://host.docker.internal:1234/v1}"
141142
- "LLM_API_KEY=${LLM_API_KEY:-llama}"
142143
- "MODEL_ID=${EMBEDDING_MODEL_ID:-text-embedding-nomic-embed-text-v2-moe}"
144+
- "CHUNK_SIZE=${CHUNK_SIZE:-512}"
143145
ports:
144146
- "8024:8024"
145147
healthcheck:
@@ -238,6 +240,7 @@ services:
238240
- "LLM_BASE_URL=${LLM_BASE_URL:-http://host.docker.internal:1234/v1}"
239241
- "LLM_API_KEY=${LLM_API_KEY:-llama}"
240242
- "MODEL_ID=${EMBEDDING_MODEL_ID:-text-embedding-nomic-embed-text-v2-moe}"
243+
- "CHUNK_SIZE=${CHUNK_SIZE:-512}"
241244
ports:
242245
- "8025:8025"
243246
healthcheck:
@@ -330,6 +333,7 @@ services:
330333
- "LLM_BASE_URL=${LLM_BASE_URL:-http://host.docker.internal:1234/v1}"
331334
- "LLM_API_KEY=${LLM_API_KEY:-llama}"
332335
- "MODEL_ID=${EMBEDDING_MODEL_ID:-text-embedding-nomic-embed-text-v2-moe}"
336+
- "CHUNK_SIZE=${CHUNK_SIZE:-512}"
333337
ports:
334338
- "8026:8026"
335339
healthcheck:
@@ -416,6 +420,7 @@ services:
416420
- "LLM_BASE_URL=${LLM_BASE_URL:-http://host.docker.internal:1234/v1}"
417421
- "LLM_API_KEY=${LLM_API_KEY:-llama}"
418422
- "MODEL_ID=${EMBEDDING_MODEL_ID:-text-embedding-nomic-embed-text-v2-moe}"
423+
- "CHUNK_SIZE=${CHUNK_SIZE:-512}"
419424
ports:
420425
- "8027:8027"
421426
healthcheck:

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
44

55
[project]
66
name = "vector-mcp"
7-
version = "1.1.13"
7+
version = "1.1.14"
88
description = "Integrate RAG into AI Agents via MCP Server. Supports multiple Vector database technologies."
99
readme = "README.md"
1010
authors = [{ name = "Audel Rouhi", email = "knucklessg1@gmail.com" }]

scripts/debug_embedding.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
print("\nAttempting to generate embedding...")
2121
resp = client.embeddings.create(
2222
input="test",
23-
model="text-embedding-nomic-embed-text-v1.5",
23+
model="text-embedding-nomic-embed-text-v2-moe",
2424
)
2525
print("SUCCESS: Generated embedding")
2626
print(f"Dimension: {len(resp.data[0].embedding)}")

scripts/test_embedding.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ def test_embedding():
1515
os.environ["EMBEDDING_PROVIDER"] = "openai"
1616
os.environ["LLM_BASE_URL"] = "http://localhost:1234/v1"
1717
os.environ["LLM_API_KEY"] = "llama"
18-
os.environ["EMBEDDING_MODEL"] = "text-embedding-nomic-embed-text-v1.5"
18+
os.environ["EMBEDDING_MODEL"] = "text-embedding-nomic-embed-text-v2-moe"
1919

2020
docs_path = Path("./mcp/documents")
2121
if not docs_path.exists():

tests/reproduce_chunking.py

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
2+
import os
3+
import sys
4+
from llama_index.core import Settings
5+
6+
# Add the project root to the python path
7+
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
8+
9+
try:
10+
from vector_mcp.vector_mcp import config
11+
print(f"Current Config: {config}")
12+
except ImportError:
13+
print("Could not import vector_mcp")
14+
15+
print(f"Default Global Chunk Size: {Settings.chunk_size}")
16+
17+
if Settings.chunk_size > 512:
18+
print("FAILURE: Chunk size is larger than 512 tokens.")
19+
else:
20+
print("SUCCESS: Chunk size is 512 tokens or less.")

vector_mcp/utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -385,7 +385,7 @@ def get_embedding_model() -> BaseEmbedding:
385385
if provider == "openai":
386386
return OpenAIEmbedding(
387387
model_name=os.environ.get(
388-
"EMBEDDING_MODEL", "text-embedding-nomic-embed-text-v1.5"
388+
"EMBEDDING_MODEL", "text-embedding-nomic-embed-text-v2-moe"
389389
),
390390
api_key=os.environ.get("LLM_API_KEY"),
391391
api_base=os.environ.get("LLM_BASE_URL", "http://localhost:1234/v1"),

vector_mcp/vector_agent.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ class AgentState(BaseModel):
4141
from pydantic_ai.ui import SSE_CONTENT_TYPE
4242
from pydantic_ai.ui.ag_ui import AGUIAdapter
4343

44-
__version__ = "1.1.13"
44+
__version__ = "1.1.14"
4545

4646
logging.basicConfig(
4747
level=logging.INFO,

0 commit comments

Comments (0)