``) are ignored.
"""
- if not enclosing_func:
- return
-
target = self._resolve_jsx_component_target(
child, language, file_path, import_map or {}, defined_names or set(),
)
if not target:
return
- caller = self._qualify(enclosing_func, file_path, enclosing_class)
+ # At module scope (no enclosing function) attribute it to the file,
+ # e.g. top-level JSX in an index.tsx entry point.
+ caller = (
+ self._qualify(enclosing_func, file_path, enclosing_class)
+ if enclosing_func
+ else file_path
+ )
edges.append(EdgeInfo(
kind="CALLS",
source=caller,
@@ -3995,6 +4086,17 @@ def _get_name(self, node, language: str, kind: str) -> Optional[str]:
for child in node.children:
if child.type == "word":
return child.text.decode("utf-8", errors="replace")
+ # Julia: function Base.show(io, d) — qualified name via field_expression.
+ # Extract only the final identifier ("show") as the function name; the
+ # module qualifier ("Base") is emitted as a REFERENCES edge separately.
+ if language == "julia" and node.type in (
+ "function_definition", "short_function_definition",
+ ):
+ for child in node.children:
+ if child.type == "field_expression":
+ for sub in reversed(list(child.children)):
+ if sub.type == "identifier":
+ return sub.text.decode("utf-8", errors="replace")
# Go methods: tree-sitter-go uses field_identifier for the name
# (e.g. func (s *T) MethodName(...) { }). Must run before the generic
# loop, which would match the result type's type_identifier (e.g. int64).
@@ -4024,6 +4126,20 @@ def _get_name(self, node, language: str, kind: str) -> Optional[str]:
return self._get_name(child, language, kind)
return None
+ def _get_julia_module_qualifier(self, node) -> Optional[str]:
+ """Return the module name for a Julia qualified function definition.
+
+ For ``function Base.show(io::IO, d::Dog)`` the ``function_definition``
+ node contains a ``field_expression`` whose first ``identifier`` child
+ is the module name (``"Base"``). Returns ``None`` for plain functions.
+ """
+ for child in node.children:
+ if child.type == "field_expression":
+ for sub in child.children:
+ if sub.type == "identifier":
+ return sub.text.decode("utf-8", errors="replace")
+ return None
+
def _get_go_receiver_type(self, node) -> Optional[str]:
"""Extract the receiver type from a Go method_declaration.
@@ -4644,21 +4760,26 @@ def _handle_r_call(
import_map, defined_names,
)
- if enclosing_func:
- call_name = self._get_call_name(node, language, source)
- if call_name:
- caller = self._qualify(enclosing_func, file_path, enclosing_class)
- target = self._resolve_call_target(
- call_name, file_path, language,
- import_map or {}, defined_names or set(),
- )
- edges.append(EdgeInfo(
- kind="CALLS",
- source=caller,
- target=target,
- file_path=file_path,
- line=node.start_point[0] + 1,
- ))
+ call_name = self._get_call_name(node, language, source)
+ if call_name:
+ # Emit CALLS even at module scope; R scripts overwhelmingly
+ # use top-level calls (worker(), plot(), etc.).
+ caller = (
+ self._qualify(enclosing_func, file_path, enclosing_class)
+ if enclosing_func
+ else file_path
+ )
+ target = self._resolve_call_target(
+ call_name, file_path, language,
+ import_map or {}, defined_names or set(),
+ )
+ edges.append(EdgeInfo(
+ kind="CALLS",
+ source=caller,
+ target=target,
+ file_path=file_path,
+ line=node.start_point[0] + 1,
+ ))
self._extract_from_tree(
node, source, language, file_path, nodes, edges,
diff --git a/code_review_graph/skills.py b/code_review_graph/skills.py
index 66265534..e75ad93b 100644
--- a/code_review_graph/skills.py
+++ b/code_review_graph/skills.py
@@ -204,10 +204,18 @@ def _detect_serve_command() -> tuple[str, list[str]]:
return (sys.executable, ["-m", "code_review_graph", "serve"])
-def _build_server_entry(plat: dict[str, Any], key: str = "") -> dict[str, Any]:
- """Build the MCP server entry for a platform."""
+def _build_server_entry(plat: dict[str, Any], repo_root: Path, key: str = "") -> dict[str, Any]:
+ """Build the MCP server entry for a platform.
+
+ Args:
+ plat: Platform metadata dict from PLATFORMS.
+ repo_root: Absolute path to the project root. Written as ``cwd`` so
+ that MCP clients always launch ``code-review-graph serve`` from the
+ correct directory and can locate ``.code-review-graph/graph.db``.
+ key: Platform key (e.g. ``"opencode"``), used for platform-specific tweaks.
+ """
command, args = _detect_serve_command()
- entry: dict[str, Any] = {"command": command, "args": args}
+ entry: dict[str, Any] = {"command": command, "args": args, "cwd": str(repo_root)}
if plat["needs_type"]:
entry["type"] = "stdio"
if key == "opencode":
@@ -290,7 +298,7 @@ def install_platform_configs(
for key, plat in platforms_to_install.items():
config_path: Path = plat["config_path"](repo_root)
server_key = plat["key"]
- server_entry = _build_server_entry(plat, key=key)
+ server_entry = _build_server_entry(plat, repo_root, key=key)
if plat["format"] == "toml":
changed = _merge_toml_mcp_server(
diff --git a/code_review_graph/token_benchmark 2.py b/code_review_graph/token_benchmark 2.py
deleted file mode 100644
index 7325f180..00000000
--- a/code_review_graph/token_benchmark 2.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""Token reduction benchmark -- measures graph query efficiency vs naive file reading."""
-
-from __future__ import annotations
-
-import logging
-from pathlib import Path
-from typing import Any
-
-from .graph import GraphStore
-from .search import hybrid_search
-
-logger = logging.getLogger(__name__)
-
-# Sample questions for benchmarking
-_SAMPLE_QUESTIONS = [
- "how does authentication work",
- "what is the main entry point",
- "how are database connections managed",
- "what error handling patterns are used",
- "how do tests verify core functionality",
-]
-
-
-def estimate_tokens(text: str) -> int:
- """Rough token estimate: ~4 chars per token."""
- return max(1, len(text) // 4)
-
-
-def compute_naive_tokens(repo_root: Path) -> int:
- """Count tokens in all parseable source files."""
- total = 0
- exts = (
- ".py", ".js", ".ts", ".go", ".rs", ".java",
- ".c", ".cpp", ".rb", ".php", ".swift", ".kt",
- )
- for ext in exts:
- for f in repo_root.rglob(f"*{ext}"):
- try:
- total += estimate_tokens(
- f.read_text(errors="replace")
- )
- except OSError:
- continue
- return total
-
-
-def run_token_benchmark(
- store: GraphStore,
- repo_root: Path,
- questions: list[str] | None = None,
-) -> dict[str, Any]:
- """Run token reduction benchmark.
-
- Compares naive full-corpus token cost vs graph query token
- cost for a set of sample questions.
- """
- if questions is None:
- questions = _SAMPLE_QUESTIONS
-
- naive_total = compute_naive_tokens(repo_root)
-
- results = []
- for q in questions:
- search_results = hybrid_search(store, q, limit=5)
- # Simulate graph context: search results + neighbors
- graph_tokens = 0
- for r in search_results:
- graph_tokens += estimate_tokens(str(r))
- # Add approximate neighbor context
- qn = r.get("qualified_name", "")
- edges = store.get_edges_by_source(qn)[:5]
- for e in edges:
- graph_tokens += estimate_tokens(str(e))
-
- if graph_tokens > 0:
- ratio = naive_total / graph_tokens
- else:
- ratio = 0
- results.append({
- "question": q,
- "naive_tokens": naive_total,
- "graph_tokens": graph_tokens,
- "reduction_ratio": round(ratio, 1),
- })
-
- if results:
- total = sum(
- r["reduction_ratio"] for r in results # type: ignore[misc]
- )
- avg_ratio = float(total) / len(results) # type: ignore[arg-type]
- else:
- avg_ratio = 0.0
-
- return {
- "naive_corpus_tokens": naive_total,
- "per_question": results,
- "average_reduction_ratio": round(avg_ratio, 1),
- "summary": (
- f"Graph queries use ~{avg_ratio:.0f}x fewer tokens "
- f"than reading all source files"
- ),
- }
diff --git a/code_review_graph/tools/review.py b/code_review_graph/tools/review.py
index 772614f2..143b82b5 100644
--- a/code_review_graph/tools/review.py
+++ b/code_review_graph/tools/review.py
@@ -6,7 +6,7 @@
from pathlib import Path
from typing import Any
-from ..changes import analyze_changes, parse_diff_ranges, parse_git_diff_ranges
+from ..changes import analyze_changes, parse_diff_ranges
from ..flows import get_affected_flows as _get_affected_flows
from ..graph import edge_to_dict, node_to_dict
from ..hints import generate_hints, get_session
diff --git a/tests/test_main.py b/tests/test_main.py
index 61fbbf49..47bc94e9 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -222,16 +222,19 @@ class TestApplyToolFilter:
"""
@pytest.fixture(autouse=True)
- def _restore_tools(self):
+ async def _restore_tools(self):
"""Snapshot registered tools before test, restore after.
_apply_tool_filter calls ``mcp.remove_tool()`` which is
- permanent. We restore by re-adding from the saved snapshot.
+ permanent. We restore by re-adding from the saved snapshot
+ using the public FastMCP API.
"""
- original = dict(crg_main.mcp._tool_manager._tools)
+ original_names = {t.name for t in await crg_main.mcp.list_tools()}
yield
- crg_main.mcp._tool_manager._tools.clear()
- crg_main.mcp._tool_manager._tools.update(original)
+ current_names = {t.name for t in await crg_main.mcp.list_tools()}
+ for name in original_names:
+ if name not in current_names:
+ crg_main.mcp.add_tool(getattr(crg_main, name))
@pytest.fixture(autouse=True)
def _clean_env(self, monkeypatch):
@@ -241,9 +244,9 @@ def _clean_env(self, monkeypatch):
@pytest.mark.asyncio
async def test_no_filter_keeps_all_tools(self):
"""When neither --tools nor CRG_TOOLS is set, all tools remain."""
- before = set((await crg_main.mcp.get_tools()).keys())
+ before = {t.name for t in await crg_main.mcp.list_tools()}
crg_main._apply_tool_filter(None)
- after = set((await crg_main.mcp.get_tools()).keys())
+ after = {t.name for t in await crg_main.mcp.list_tools()}
assert before == after
@pytest.mark.asyncio
@@ -251,7 +254,7 @@ async def test_filter_via_argument(self):
"""The ``tools`` argument keeps only the listed tools."""
keep = "query_graph_tool,semantic_search_nodes_tool"
crg_main._apply_tool_filter(keep)
- remaining = set((await crg_main.mcp.get_tools()).keys())
+ remaining = {t.name for t in await crg_main.mcp.list_tools()}
assert remaining == {"query_graph_tool", "semantic_search_nodes_tool"}
@pytest.mark.asyncio
@@ -259,7 +262,7 @@ async def test_filter_via_env_var(self, monkeypatch):
"""The ``CRG_TOOLS`` env var works as fallback."""
monkeypatch.setenv("CRG_TOOLS", "query_graph_tool")
crg_main._apply_tool_filter(None)
- remaining = set((await crg_main.mcp.get_tools()).keys())
+ remaining = {t.name for t in await crg_main.mcp.list_tools()}
assert remaining == {"query_graph_tool"}
@pytest.mark.asyncio
@@ -267,21 +270,21 @@ async def test_argument_takes_precedence_over_env(self, monkeypatch):
"""CLI --tools wins over CRG_TOOLS env var."""
monkeypatch.setenv("CRG_TOOLS", "list_repos_tool")
crg_main._apply_tool_filter("query_graph_tool")
- remaining = set((await crg_main.mcp.get_tools()).keys())
+ remaining = {t.name for t in await crg_main.mcp.list_tools()}
assert remaining == {"query_graph_tool"}
@pytest.mark.asyncio
async def test_empty_string_is_noop(self):
"""An empty string should not remove all tools."""
- before = set((await crg_main.mcp.get_tools()).keys())
+ before = {t.name for t in await crg_main.mcp.list_tools()}
crg_main._apply_tool_filter("")
- after = set((await crg_main.mcp.get_tools()).keys())
+ after = {t.name for t in await crg_main.mcp.list_tools()}
assert before == after
@pytest.mark.asyncio
async def test_whitespace_handling(self):
"""Spaces around tool names are stripped."""
crg_main._apply_tool_filter(" query_graph_tool , semantic_search_nodes_tool ")
- remaining = set((await crg_main.mcp.get_tools()).keys())
+ remaining = {t.name for t in await crg_main.mcp.list_tools()}
assert remaining == {"query_graph_tool", "semantic_search_nodes_tool"}
diff --git a/tests/test_skills.py b/tests/test_skills.py
index 873ed2f8..78b2319c 100644
--- a/tests/test_skills.py
+++ b/tests/test_skills.py
@@ -391,7 +391,10 @@ def test_install_codex_config(self, tmp_path):
data = tomllib.loads(codex_config.read_text())
entry = data["mcp_servers"]["code-review-graph"]
assert entry["type"] == "stdio"
- assert "serve" in entry["args"]
+ expected_cmd, expected_args = _detect_serve_command()
+ assert entry["command"] == expected_cmd
+ assert entry["args"] == expected_args
+ assert entry["cwd"] == str(tmp_path)
@_needs_tomllib
def test_install_codex_preserves_existing_toml(self, tmp_path):
@@ -417,6 +420,7 @@ def test_install_codex_preserves_existing_toml(self, tmp_path):
assert data["mcp_servers"]["other"]["command"] == "other"
expected_cmd, _ = _detect_serve_command()
assert data["mcp_servers"]["code-review-graph"]["command"] == expected_cmd
+ assert data["mcp_servers"]["code-review-graph"]["cwd"] == str(tmp_path)
def test_install_codex_no_duplicate(self, tmp_path):
codex_config = tmp_path / ".codex" / "config.toml"
@@ -459,7 +463,9 @@ def test_install_cursor_config(self, tmp_path):
assert config_path.exists()
data = json.loads(config_path.read_text())
assert "code-review-graph" in data["mcpServers"]
- assert data["mcpServers"]["code-review-graph"]["type"] == "stdio"
+ entry = data["mcpServers"]["code-review-graph"]
+ assert entry["type"] == "stdio"
+ assert entry["cwd"] == str(tmp_path)
def test_install_windsurf_config(self, tmp_path):
windsurf_dir = tmp_path / ".codeium" / "windsurf"
@@ -482,6 +488,7 @@ def test_install_windsurf_config(self, tmp_path):
assert "type" not in entry
expected_cmd, _ = _detect_serve_command()
assert entry["command"] == expected_cmd
+ assert entry["cwd"] == str(tmp_path)
def test_install_zed_config(self, tmp_path):
zed_settings = tmp_path / "zed" / "settings.json"
@@ -520,8 +527,10 @@ def test_install_continue_config(self, tmp_path):
assert "Continue" in configured
data = json.loads(config_path.read_text())
assert isinstance(data["mcpServers"], list)
- assert data["mcpServers"][0]["name"] == "code-review-graph"
- assert data["mcpServers"][0]["type"] == "stdio"
+ arr_entry = data["mcpServers"][0]
+ assert arr_entry["name"] == "code-review-graph"
+ assert arr_entry["type"] == "stdio"
+ assert arr_entry["cwd"] == str(tmp_path)
def test_install_opencode_config(self, tmp_path):
configured = install_platform_configs(tmp_path, target="opencode")
@@ -531,6 +540,7 @@ def test_install_opencode_config(self, tmp_path):
entry = data["mcpServers"]["code-review-graph"]
assert entry["type"] == "stdio"
assert entry["env"] == []
+ assert entry["cwd"] == str(tmp_path)
def test_install_qwen_config(self, tmp_path):
"""Qwen Code uses ~/.qwen/settings.json with mcpServers (see #83)."""
@@ -551,6 +561,7 @@ def test_install_qwen_config(self, tmp_path):
entry = data["mcpServers"]["code-review-graph"]
assert entry["type"] == "stdio"
assert entry["args"][-1] == "serve"
+ assert entry["cwd"] == str(tmp_path)
def test_install_qwen_preserves_existing_servers(self, tmp_path):
"""Adding qwen should merge with, not clobber, existing mcpServers."""
@@ -662,9 +673,10 @@ def test_install_qoder_config(self, tmp_path):
assert "mcpServers" in data
assert "code-review-graph" in data["mcpServers"]
assert data["mcpServers"]["code-review-graph"]["type"] == "stdio"
- import shutil
- expected_cmd = "uvx" if shutil.which("uvx") else "code-review-graph"
+ expected_cmd, expected_args = _detect_serve_command()
assert data["mcpServers"]["code-review-graph"]["command"] == expected_cmd
+ assert data["mcpServers"]["code-review-graph"]["args"] == expected_args
+ assert data["mcpServers"]["code-review-graph"]["cwd"] == str(tmp_path)
class TestCursorHooksConfig:
@@ -854,6 +866,7 @@ def test_install_kiro_config(self, tmp_path):
assert "code-review-graph" in data["mcpServers"]
entry = data["mcpServers"]["code-review-graph"]
assert entry["type"] == "stdio"
+ assert entry["cwd"] == str(tmp_path)
def test_install_kiro_preserves_existing_servers(self, tmp_path):
"""Existing mcpServers entries are preserved when adding code-review-graph."""