4 changes: 2 additions & 2 deletions moatless/index/simple_faiss.py
@@ -14,7 +14,7 @@
 from fsspec.implementations.local import LocalFileSystem
 from llama_index.core.bridge.pydantic import PrivateAttr
 from llama_index.core.schema import BaseNode
-from llama_index.core.vector_stores.simple import _build_metadata_filter_fn
+from llama_index.core.vector_stores.simple import build_metadata_filter_fn
 from llama_index.core.vector_stores.types import (
     DEFAULT_PERSIST_DIR,
     BasePydanticVectorStore,
@@ -172,7 +172,7 @@ def query(
             similarity_top_k (int): top k most similar nodes
 
         """
-        query_filter_fn = _build_metadata_filter_fn(lambda node_id: self._data.metadata_dict[node_id], query.filters)
+        query_filter_fn = build_metadata_filter_fn(lambda node_id: self._data.metadata_dict[node_id], query.filters)
 
         query_embedding = cast(list[float], query.query_embedding)
         query_embedding_np = np.array(query_embedding, dtype="float32")[np.newaxis, :]
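Note: the renamed helper matches the newer llama-index releases pinned in pyproject.toml below. If this module ever had to import against an older llama-index that still only exposes the underscore-prefixed name, a small fallback would keep both working; this is a hypothetical compatibility sketch, not part of the change:

# Hypothetical fallback (not in this PR), assuming older llama-index builds only
# ship the private helper while newer ones expose build_metadata_filter_fn.
try:
    from llama_index.core.vector_stores.simple import build_metadata_filter_fn
except ImportError:
    from llama_index.core.vector_stores.simple import (
        _build_metadata_filter_fn as build_metadata_filter_fn,
    )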
10 changes: 10 additions & 0 deletions moatless/runner/docker_runner.py
@@ -271,6 +271,9 @@ async def start_job(
         elif effective_memory_limit:
             # If memory limit is set but swap limit is not, default to twice the memory limit
             cmd.extend(["--memory-swap", effective_memory_limit])
 
+        # Add ulimit for file descriptors to prevent "Too many open files" errors
+        cmd.extend(["--ulimit", "nofile=65536:65536"])
+
         # Add Docker labels for easier container identification and querying
         job_labels = create_labels(project_id, trajectory_id)
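As a sanity check for the new flag, the descriptor limit can be read back from inside a started container; this is a hypothetical verification snippet, not part of the runner code:

# Hypothetical check (not in this PR): confirm the container received the
# file-descriptor limit requested via --ulimit nofile=65536:65536.
import resource

soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
print(f"open file limit: soft={soft}, hard={hard}")
if soft < 65536:
    print("ulimit not applied; 'Too many open files' errors may still occur")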
@@ -292,6 +295,9 @@ async def start_job(
         if self.moatless_source_dir:
             logger.info(f"Mounting {self.moatless_source_dir}:/opt/moatless/moatless")
             cmd.extend(["-v", f"{self.moatless_source_dir}:/opt/moatless/moatless"])
+            # Also mount the lockfile to ensure dependency compatibility
+            cmd.extend(["-v", f"{self.moatless_source_dir}/uv.lock:/opt/moatless/uv.lock"])
+            cmd.extend(["-v", f"{self.moatless_source_dir}/pyproject.toml:/opt/moatless/pyproject.toml"])
             cmd.extend(["-e", "PYTHONPATH=/opt/moatless/moatless:$PYTHONPATH"])
 
         args = create_job_args(project_id, trajectory_id, job_func, node_id)
@@ -311,6 +317,10 @@ async def start_job(
             self.logger.info(f"Will run update-moatless.sh with branch {branch_to_use}")
             run_command += f"/opt/moatless/docker/update-moatless.sh --branch {branch_to_use} && "
 
+        # If using local source, sync dependencies first (not frozen) to use local lockfile
+        if self.moatless_source_dir:
+            run_command += "cd /opt/moatless && uv sync --compile-bytecode --all-extras && "
+
         # Add the main job command
         run_command += f"date '+%Y-%m-%d %H:%M:%S' && echo 'Starting job at ' $(date '+%Y-%m-%d %H:%M:%S') && uv run - <<EOF\n{args}\nEOF"
 
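Taken together, when moatless_source_dir is set the runner now mounts the source tree plus its uv.lock and pyproject.toml, raises the file-descriptor limit, and re-syncs dependencies before the job command runs. The sketch below shows the approximate shape of the resulting invocation; the image name, host paths, and trailing job command are hypothetical placeholders, and only the flags and mounts mirror the diff above:

# Illustrative only (not the runner's actual code): rough shape of the docker run
# command start_job assembles with the new ulimit flag, lockfile/pyproject mounts,
# and the uv sync prefix.
source_dir = "/home/dev/moatless-tools"   # hypothetical local checkout
image = "moatless-job:latest"             # hypothetical image name
cmd = [
    "docker", "run",
    "--ulimit", "nofile=65536:65536",
    "-v", f"{source_dir}:/opt/moatless/moatless",
    "-v", f"{source_dir}/uv.lock:/opt/moatless/uv.lock",
    "-v", f"{source_dir}/pyproject.toml:/opt/moatless/pyproject.toml",
    "-e", "PYTHONPATH=/opt/moatless/moatless:$PYTHONPATH",
    image,
    "sh", "-c",  # stand-in for however the runner hands off the job command
    "cd /opt/moatless && uv sync --compile-bytecode --all-extras && <job command>",
]
print(" ".join(cmd))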
68 changes: 34 additions & 34 deletions pyproject.toml
@@ -11,50 +11,50 @@ readme = "README.md"
 authors = [
     {name = "Albert Örwall", email = "[email protected]"},
 ]
-requires-python = "<3.13,>=3.10"
+requires-python = "<3.14,>=3.10"
 dependencies = [
-    "pydantic<3.0.0,>=2.8.2",
-    "tiktoken<1.0.0,>=0.8.0",
-    "networkx<4.0,>=3.3",
+    "pydantic<3.0.0,>=2.11.7",
+    "tiktoken<2.0.0,>=0.11.0",
+    "networkx<4.0,>=3.4",
     "tree-sitter==0.24.0",
     "tree-sitter-python==0.23.6",
     "tree-sitter-java==0.23.5",
-    "rapidfuzz<4.0.0,>=3.9.5",
-    "gitpython<4.0.0,>=3.1.43",
+    "rapidfuzz<4.0.0,>=3.13.0",
+    "gitpython<4.0.0,>=3.1.45",
     "unidiff<1.0.0,>=0.7.5",
     "python-dotenv==1.0.1",
-    "docstring-parser<1.0,>=0.16",
-    "litellm<2.0.0,>=1.67.0",
-    "openai<2.0.0,>=1.41.0",
-    "anthropic<1.0.0,>=0.49.0",
-    "llama-index<1.0.0,>=0.12.11",
-    "llama-index-embeddings-openai<1.0.0,>=0.3.1",
-    "llama-index-embeddings-voyageai<1.0.0,>=0.3.4",
-    "llama-index-readers-file<1.0.0,>=0.4.3",
-    "faiss-cpu<2.0.0.0,>=1.8.0.post1",
-    "voyageai<1.0.0,>=0.3.2",
-    "filelock<4.0.0,>=3.16.1",
+    "docstring-parser<1.0,>=0.17.0",
+    "litellm<2.0.0,>=1.75.5",
+    "openai<2.0.0,>=1.99.9",
+    "anthropic<1.0.0,>=0.62.0",
+    "llama-index<2.0.0,>=0.13.1",
+    "llama-index-embeddings-openai<1.0.0,>=0.5.0",
+    "llama-index-embeddings-voyageai<1.0.0,>=0.4.1",
+    "llama-index-readers-file<1.0.0,>=0.5.0",
+    "faiss-cpu<2.0.0.0,>=1.11.0.post1",
+    "voyageai<1.0.0,>=0.3.4",
+    "filelock<4.0.0,>=3.18.0",
     "aiofiles<25.0.0,>=24.1.0",
-    "swebench<4.0.0,>=3.0.15",
-    "opentelemetry-sdk<2.0.0,>=1.30.0",
-    "sqlalchemy>=2.0.41",
+    "swebench<4.0.0,>=3.0.17",
+    "opentelemetry-sdk<2.0.0,>=1.36.0",
+    "sqlalchemy>=2.0.43",
     "psycopg2-binary>=2.9.10",
-    "redis<6.0.0,>=5.2.1",
+    "redis<6.0.0,>=5.3.1",
     "gunicorn>=23.0.0",
-    "opentelemetry-api<2.0.0,>=1.30.0",
-    "opentelemetry-exporter-otlp<2.0.0,>=1.30.0",
-    "opentelemetry-instrumentation<1.0,>=0.51b0",
-    "opentelemetry-instrumentation-fastapi<1.0,>=0.51b0",
-    "fastapi>=0.115.12",
-    "uvicorn>=0.34.2",
+    "opentelemetry-api<2.0.0,>=1.36.0",
+    "opentelemetry-exporter-otlp<2.0.0,>=1.36.0",
+    "opentelemetry-instrumentation<1.0,>=0.57b0",
+    "opentelemetry-instrumentation-fastapi<1.0,>=0.57b0",
+    "fastapi>=0.116.1",
+    "uvicorn>=0.35.0",
     "dotenv>=0.9.9",
-    "botocore<2.0.0,>=1.35.54",
-    "boto3<2.0.0,>=1.35.54",
-    "aioboto3<15.0.0,>=14.1.0",
-    "kubernetes<33.0.0,>=32.0.0",
-    "kubernetes-asyncio<33.0.0,>=32.0.0",
-    "azure-storage-blob<13.0.0,>=12.25.0",
-    "azure-monitor-opentelemetry<2.0.0,>=1.6.5",
+    "botocore<2.0.0,>=1.37.3",
+    "boto3<2.0.0,>=1.37.3",
+    "aioboto3<15.0.0,>=14.3.0",
+    "kubernetes<33.0.0,>=32.0.1",
+    "kubernetes-asyncio<33.0.0,>=32.3.2",
+    "azure-storage-blob<13.0.0,>=12.26.0",
+    "azure-monitor-opentelemetry<2.0.0,>=1.6.13",
 ]
 
 [dependency-groups]
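A quick way to confirm that a freshly synced environment picked up the raised floors is to read the installed versions back; this is a hypothetical post-sync check, not part of the PR, and the packages below are just examples from the list above:

# Hypothetical post-sync check (not in this PR): print a few installed versions
# against the raised lower bounds from the dependency list above.
import sys
from importlib.metadata import version

assert (3, 10) <= sys.version_info[:2] < (3, 14), "requires-python is now <3.14,>=3.10"
for package, floor in [("pydantic", "2.11.7"), ("litellm", "1.75.5"), ("fastapi", "0.116.1")]:
    print(f"{package}: installed {version(package)}, required >= {floor}")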