diff --git a/.github/module-owners.json b/.github/module-owners.json new file mode 100644 index 00000000000..5295e3547ec --- /dev/null +++ b/.github/module-owners.json @@ -0,0 +1,39 @@ +{ + "Generic Runtime": ["funatiq", "pcastonguay", "Shixiaowei02", "MartinMarciniszyn", "schetlur-nv", "dcampora"], + "Triton Backend": ["Tabrizian", "pcastonguay", "schetlur-nv"], + "LLM API/Workflow": ["Superjomn", "syuoni", "nv-guomingz", "litaotju", "QiJune"], + "KV-Cache Management":["thorjohnsen", "schetlur-nv"], + "Low Precision":["Tracin", "nv-guomingz", "Naveassaf"], + "Speculative Decoding":["yweng0828", "nekorobov", "lfr-0531"], + "Customized Kernels":["lowsfer", "PerkzZheng", "jdemouth-nvidia"], + "Performance": ["kaiyux", "jiahanc", "hypdeb"], + "Lora/P-tuning":["byshiue", "shaharmor98"], + "Disaggregated Serving":["Shixiaowei02", "joyang-nv", "chuangz0", "schetlur-nv"], + "Documentation":["nv-guomingz"], + "Sampling": ["dcampora", "lfr-0531", "Naveassaf", "syuoni", "yweng0828"], + "Memory": ["litaotju", "peaceh-nv"], + "Installation": ["hchings", "Superjomn", "nv-guomingz", "QiJune"], + "GitHub Configuration": ["tburt-nv", "niukuo"], + "Jenkins Pipelines": ["chzblych", "niukuo"], + "Test Configuration": ["niukuo", "syuoni", "LarryXFly"], + "Test Waive List": ["chzblych", "niukuo"], + "Integration Tests": ["LarryXFly", "niukuo"], + "Torch Framework": ["QiJune", "hlu1"], + "Torch Attention Backend": ["yuxianq", "hlu1"], + "Torch AutoDeploy": ["lucaslie", "suyoggupta"], + "Torch Compilation": ["litaotju", "yizhang-nv", "liji-nv"], + "Torch Custom Ops": ["yizhang-nv"], + "Torch Distributed": ["yilin-void", "yuxianq", "hyukn", "yizhang-nv", "hlu1"], + "Torch PyExecutor": ["dongxuy04", "funatiq", "dcampora", "HuiGao-NV"], + "Torch Speculative": ["lfr-0531", "mikeiovine"], + "Autotuner": ["hyukn", "litaotju"], + "Pipeline Interface": ["amukkara", "chang-l"], + "Torch Models": ["QiJune", "hlu1"], + "Torch Models DeepSeekV3": ["hlu1", "zongfeijing"], + "Torch Models Llama": ["chang-l", "mikeiovine"], + "Torch Modules": ["QiJune", "hlu1"], + "Torch Modules Attention": ["yuxianq", "hlu1"], + "Torch Modules Fused MOE": ["hlu1", "dongxuy04", "zongfeijing", "HuiGao-NV"], + "Torch Tests": ["QiJune", "hlu1"], + "PyTorch Examples": ["QiJune", "hlu1"] +} diff --git a/.github/module-paths.json b/.github/module-paths.json new file mode 100644 index 00000000000..45699eb7afa --- /dev/null +++ b/.github/module-paths.json @@ -0,0 +1,33 @@ +{ + "cpp/": "Generic Runtime", + "triton_backend/": "Triton Backend", + "tensorrt_llm/_torch/peft/": "Lora/P-tuning", + "tensorrt_llm/": "LLM API/Workflow", + "benchmarks/": "Performance", + "examples/disaggregated/": "Disaggregated Serving", + "docs/": "Documentation", + "docker/": "Installation", + ".github/": "GitHub Configuration", + "jenkins/": "Jenkins Pipelines", + "tests/integration/test_lists/": "Test Configuration", + "tests/integration/test_lists/waives.txt": "Test Waive List", + "tests/integration/defs/": "Integration Tests", + "tensorrt_llm/_torch/": "Torch Framework", + "tensorrt_llm/_torch/attention_backend/": "Torch Attention Backend", + "tensorrt_llm/_torch/auto_deploy/": "Torch AutoDeploy", + "tensorrt_llm/_torch/compilation/": "Torch Compilation", + "tensorrt_llm/_torch/custom_ops/": "Torch Custom Ops", + "tensorrt_llm/_torch/distributed/": "Torch Distributed", + "tensorrt_llm/_torch/pyexecutor/": "Torch PyExecutor", + "tensorrt_llm/_torch/speculative/": "Torch Speculative", + "tensorrt_llm/autotuner.py": "Autotuner", + "tensorrt_llm/pipeline_interface.py": 
"Pipeline Interface", + "tensorrt_llm/_torch/models/": "Torch Models", + "tensorrt_llm/_torch/models/modeling_deepseekv3.py": "Torch Models DeepSeekV3", + "tensorrt_llm/_torch/models/modeling_llama.py": "Torch Models Llama", + "tensorrt_llm/_torch/modules/": "Torch Modules", + "tensorrt_llm/_torch/modules/attention.py": "Torch Modules Attention", + "tensorrt_llm/_torch/modules/fused_moe.py": "Torch Modules Fused MOE", + "tests/unittest/_torch/": "Torch Tests", + "examples/pytorch/": "PyTorch Examples" +} diff --git a/.github/pr-checklist.md b/.github/pr-checklist.md new file mode 100644 index 00000000000..28014b80cba --- /dev/null +++ b/.github/pr-checklist.md @@ -0,0 +1,12 @@ +## 🚀 PR Checklist + +- [ ] PR title follows format: `[type] Description` +- [ ] PR description explains both **what** you're doing and **why** +- [ ] Code conforms to coding conventions (see `CODING_GUIDELINES.md`) +- [ ] Test cases added for new code +- [ ] All existing tests pass +- [ ] PR and commit messages cleaned up via `git rebase -i` + +--- +**Please ✅ check the below item to confirm you've reviewed the checklist when ready for review!.** +- [ ] **I have reviewed the above checklist and addressed all applicable items** diff --git a/.github/scripts/assign_reviewers.py b/.github/scripts/assign_reviewers.py new file mode 100644 index 00000000000..3481f3d83c1 --- /dev/null +++ b/.github/scripts/assign_reviewers.py @@ -0,0 +1,295 @@ +import argparse +import json +import os +import random +import subprocess +import sys +from pathlib import Path + + +def get_pr_changed_files(pr_number: str) -> list[str]: + """Get files changed in PR using GitHub CLI (more reliable than git diff)""" + result = subprocess.run( + [ + "gh", "pr", "view", pr_number, "--json", "files", "--jq", + ".files[].path" + ], + capture_output=True, + text=True, + check=True, + ) + return [line.strip() for line in result.stdout.splitlines() if line.strip()] + + +def get_existing_reviewers(pr_number: str) -> tuple[set[str], set[str]]: + """Get currently assigned reviewers (users and teams) for a PR""" + try: + # Get user reviewers + user_result = subprocess.run( + [ + "gh", "pr", "view", pr_number, "--json", "reviewRequests", + "--jq", + "(.reviewRequests // []) | .[] | select(.login) | .login" + ], + capture_output=True, + text=True, + check=True, + ) + user_reviewers = { + line.strip() + for line in user_result.stdout.splitlines() if line.strip() + } + + # Get team reviewers + team_result = subprocess.run( + [ + "gh", "pr", "view", pr_number, "--json", "reviewRequests", + "--jq", "(.reviewRequests // []) | .[] | select(.name) | .name" + ], + capture_output=True, + text=True, + check=True, + ) + team_reviewers = { + line.strip() + for line in team_result.stdout.splitlines() if line.strip() + } + + return user_reviewers, team_reviewers + except subprocess.CalledProcessError as e: + print(f"Warning: Could not fetch existing reviewers: {e}") + return set(), set() + + +def load_json(path: str): + with open(path, "r", encoding="utf-8") as f: + return json.load(f) + + +def map_modules(changed_files: list[str], + module_paths: dict[str, str]) -> tuple[set[str], list[str]]: + """Map changed files to modules using MOST SPECIFIC (longest) prefix match""" + modules: set[str] = set() + unmapped_files: list[str] = [] + + for file in changed_files: + # Find ALL matching prefixes + matches = [] + for prefix, module in module_paths.items(): + if file.startswith(prefix): + matches.append((len(prefix), prefix, module)) + + if matches: + # Sort by prefix length 
(descending) to get most specific first + matches.sort(reverse=True) + most_specific_module = matches[0][2] + modules.add(most_specific_module) + + # Log if there were multiple matches (for debugging) + if len(matches) > 1: + matches[0][1] + print(f" File '{file}' has overlapping mappings:") + for _, prefix, module in matches: + marker = "→" if module == most_specific_module else " " + print(f" {marker} {prefix} -> {module}") + else: + unmapped_files.append(file) + + return modules, unmapped_files + + +def gather_reviewers( + modules: set[str], + module_owners: dict[str, list[str]], + *, + pr_author: str | None = None, + existing_reviewers: set[str] | None = None, + per_module_limit: int = 2 +) -> tuple[list[str], dict[str, list[str]], set[str]]: + """ + Gather reviewers ensuring each module gets representation. + + Args: + modules: Set of module names that were touched + module_owners: Dict mapping module names to lists of owners + pr_author: PR author to exclude from reviewers + existing_reviewers: Set of already assigned reviewers to exclude + per_module_limit: Maximum reviewers to assign per module + + Returns: + - List of all unique reviewers to assign + - Dict mapping modules to their assigned reviewers + - Set of modules without owners + """ + all_reviewers: set[str] = set() + module_assignments: dict[str, list[str]] = {} + modules_without_owners: set[str] = set() + + for module in sorted(modules): # Sort for consistent ordering + owners = module_owners.get(module, []) + if not owners: + modules_without_owners.add(module) + module_assignments[module] = [] + continue + + # Filter out PR author and existing reviewers + eligible_owners = [ + o for o in owners if o != pr_author and ( + not existing_reviewers or o not in existing_reviewers) + ] + + if not eligible_owners: + # All owners are excluded + print( + f" ⚠️ Module '{module}': All owners excluded (PR author or already assigned)" + ) + module_assignments[module] = [] + continue + + # Sample up to per_module_limit reviewers for this module + num_to_select = min(len(eligible_owners), per_module_limit) + selected = random.sample(eligible_owners, num_to_select) + + module_assignments[module] = selected + all_reviewers.update(selected) + + return sorted(all_reviewers), module_assignments, modules_without_owners + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Assign reviewers based on changed modules") + parser.add_argument("--dry-run", + action="store_true", + help="Print the gh command instead of executing") + parser.add_argument( + "--force-assign", + action="store_true", + help= + "Assign reviewers even if some already exist (default: only assign if no reviewers)" + ) + args = parser.parse_args() + + pr_number = os.environ["PR_NUMBER"] + per_module_limit = int(os.environ.get("PER_MODULE_REVIEWER_LIMIT", "2")) + pr_author = os.environ.get("PR_AUTHOR") + + print(f"Testing PR #{pr_number} with author: {pr_author}") + print(f"Per-module reviewer limit: {per_module_limit}") + + # Check existing reviewers + existing_user_reviewers, existing_team_reviewers = get_existing_reviewers( + pr_number) + total_existing = len(existing_user_reviewers) + len(existing_team_reviewers) + + print(f"Existing user reviewers: {sorted(existing_user_reviewers)}") + print(f"Existing team reviewers: {sorted(existing_team_reviewers)}") + + # Skip assignment if reviewers already exist (unless forced) + if total_existing > 0 and not args.force_assign: + print( + f"✅ PR already has {total_existing} reviewer(s) assigned. 
Skipping auto-assignment." + ) + print(" Use --force-assign to assign additional reviewers.") + return + + try: + changed_files = get_pr_changed_files(pr_number) + print(f"Changed files: {changed_files}") + + module_paths = load_json(Path(".github") / "module-paths.json") + module_owners = load_json(Path(".github") / "module-owners.json") + + modules, unmapped_files = map_modules(changed_files, module_paths) + reviewers, module_assignments, modules_without_owners = gather_reviewers( + modules, + module_owners, + pr_author=pr_author, + existing_reviewers= + existing_user_reviewers, # Avoid re-assigning existing users + per_module_limit=per_module_limit) + + print(f"\nChanged modules: {sorted(modules)}") + + # Show module-specific assignments + if module_assignments: + print("\nModule assignments:") + for module, assigned in sorted(module_assignments.items()): + if assigned: + print(f" {module}: {assigned}") + else: + print(f" {module}: No eligible reviewers") + + print(f"\nFinal reviewers to assign: {reviewers}") + + # Provide detailed feedback about coverage gaps + if unmapped_files: + print(f"⚠️ Files with no module mapping: {unmapped_files}") + print( + f" These files are not covered in .github/module-paths.json") + print( + f" Consider adding appropriate module mappings for these paths." + ) + + if modules_without_owners: + print( + f"⚠️ Modules with no owners: {sorted(modules_without_owners)}") + print( + f" These modules exist in module-paths.json but have no owners in module-owners.json" + ) + print(f" Consider adding owner assignments for these modules.") + + if reviewers: + cmd = ["gh", "pr", "edit", pr_number] + for reviewer in reviewers: + cmd.extend(["--add-reviewer", reviewer]) + + if args.dry_run: + print(f"🔍 DRY RUN: {' '.join(cmd)}") + else: + try: + subprocess.run(cmd, check=True) + print( + f"✅ Successfully assigned {len(reviewers)} new reviewer(s)" + ) + except subprocess.CalledProcessError as e: + print(f"❌ Failed to add reviewers: {e}", file=sys.stderr) + print( + " This might be due to permissions or invalid usernames" + ) + sys.exit(1) + else: + print("✅ No new reviewers to assign") + + # Explain why no reviewers were assigned + if not modules and not unmapped_files: + print(" Reason: No files were changed in this PR") + elif not modules and unmapped_files: + print( + " Reason: All changed files are unmapped (no module coverage)" + ) + print( + " ➜ Action needed: Add module mappings to .github/module-paths.json" + ) + elif modules and not reviewers: + if modules_without_owners: + print(" Reason: Matched modules have no assigned owners") + print( + " ➜ Action needed: Add owner assignments to .github/module-owners.json" + ) + else: + print( + " Reason: All potential reviewers are already assigned or excluded" + ) + else: + print( + " Reason: Complex combination of mapping/ownership issues (see warnings above)" + ) + + except subprocess.CalledProcessError as e: + print(f"❌ Error processing PR: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/.github/scripts/tests/test_assign_reviewers.py b/.github/scripts/tests/test_assign_reviewers.py new file mode 100644 index 00000000000..6d8b43c1c32 --- /dev/null +++ b/.github/scripts/tests/test_assign_reviewers.py @@ -0,0 +1,481 @@ +#!/usr/bin/env python3 +""" +End-to-end tests for assign_reviewers.py script. +Tests various scenarios without requiring GitHub API access or tokens. 
+""" + +import os +import subprocess +import sys +from pathlib import Path +from unittest import TestCase +from unittest.mock import MagicMock, patch + +# Add the parent directory to the path so we can import assign_reviewers +sys.path.insert(0, str(Path(__file__).parent.parent)) +import assign_reviewers + + +class TestAssignReviewers(TestCase): + """Test suite for the assign_reviewers.py script""" + + def setUp(self): + """Set up test environment""" + # Set required environment variables + os.environ["PR_NUMBER"] = "123" + os.environ["PR_AUTHOR"] = "test-author" + os.environ["PER_MODULE_REVIEWER_LIMIT"] = "2" + + # Set up test data + self.module_paths = { + "cpp/": "Generic Runtime", + "docs/": "Documentation", + } + + self.module_owners = { + "Generic Runtime": ["user1", "user2", "user3"], + "Documentation": ["user9"], + "Module1": ["owner1", "owner2"], + "Module2": ["owner3", "owner4"], + "Module3": [], # No owners + } + + def tearDown(self): + """Clean up environment variables""" + for var in ["PR_NUMBER", "PR_AUTHOR", "PER_MODULE_REVIEWER_LIMIT"]: + if var in os.environ: + del os.environ[var] + + def _mock_subprocess_run(self, *args, **kwargs): + """Mock subprocess.run based on the command being executed""" + cmd = args[0] + cmd_str = ' '.join(cmd) # Join command for easier matching + + # Mock response for getting changed files + if "pr" in cmd and "view" in cmd and "files" in cmd: + return MagicMock(stdout=self.mock_changed_files, + stderr="", + returncode=0) + + # Mock response for getting existing reviewers (users) + elif "pr" in cmd and "view" in cmd and "reviewRequests" in cmd: + # Check if it's asking for login (users) or name (teams) + if "select(.login)" in cmd_str: + return MagicMock(stdout=self.mock_existing_users, + stderr="", + returncode=0) + elif "select(.name)" in cmd_str: + return MagicMock(stdout=self.mock_existing_teams, + stderr="", + returncode=0) + else: + return MagicMock(stdout="", stderr="", returncode=0) + + # Mock response for assigning reviewers + elif "pr" in cmd and "edit" in cmd: + self.assign_reviewers_called = True + self.assigned_reviewers = [ + cmd[i + 1] for i, arg in enumerate(cmd) + if arg == "--add-reviewer" + ] + return MagicMock(stdout="", stderr="", returncode=0) + + return MagicMock(stdout="", stderr="", returncode=0) + + # ========== Unit Tests for Core Functions ========== + + def test_module_mapping_scenarios(self): + """Test various module mapping scenarios with parametrized data""" + test_cases = [ + # Basic mapping with unmapped files + { + "name": + "basic_mapping", + "files": [ + "cpp/main.cpp", "cpp/utils.h", "docs/README.md", + "unknown/file.txt" + ], + "paths": { + "cpp/": "Generic Runtime", + "docs/": "Documentation" + }, + "expected_modules": {"Generic Runtime", "Documentation"}, + "expected_unmapped": ["unknown/file.txt"] + }, + # Most specific module matching + { + "name": "most_specific_single", + "files": ["tensorrt_llm/_torch/models/bert.py"], + "paths": { + "tensorrt_llm/": "LLM API/Workflow", + "tensorrt_llm/_torch/": "Torch Framework", + "tensorrt_llm/_torch/models/": "Torch Models" + }, + "expected_modules": {"Torch Models"}, + "expected_unmapped": [] + }, + # Multiple files with overlapping paths + { + "name": + "multiple_overlapping", + "files": [ + "tensorrt_llm/config.py", "tensorrt_llm/_torch/base.py", + "tensorrt_llm/_torch/models/gpt.py" + ], + "paths": { + "tensorrt_llm/": "LLM API/Workflow", + "tensorrt_llm/_torch/": "Torch Framework", + "tensorrt_llm/_torch/models/": "Torch Models" + }, + "expected_modules": + 
{"LLM API/Workflow", "Torch Framework", "Torch Models"}, + "expected_unmapped": [] + }, + # All files unmapped + { + "name": "all_unmapped", + "files": ["unmapped/file1.txt", "another/file2.py"], + "paths": { + "cpp/": "Generic Runtime" + }, + "expected_modules": set(), + "expected_unmapped": ["unmapped/file1.txt", "another/file2.py"] + }, + # Exact file match priority + { + "name": "exact_file_match", + "files": ["tests/integration/test_lists/waives.txt"], + "paths": { + "tests/": "General Tests", + "tests/integration/": "Integration Tests", + "tests/integration/test_lists/": "Test Configuration", + "tests/integration/test_lists/waives.txt": "Test Waive List" + }, + "expected_modules": {"Test Waive List"}, + "expected_unmapped": [] + } + ] + + for case in test_cases: + with self.subTest(case=case["name"]): + modules, unmapped = assign_reviewers.map_modules( + case["files"], case["paths"]) + self.assertEqual(modules, case["expected_modules"], + f"Failed for case: {case['name']}") + self.assertEqual(set(unmapped), set(case["expected_unmapped"]), + f"Failed for case: {case['name']}") + + def test_gather_reviewers_basic(self): + """Test basic gather_reviewers functionality""" + modules = {"Module1", "Module2", "Module3"} + + reviewers, module_assignments, modules_without_owners = assign_reviewers.gather_reviewers( + modules, self.module_owners, per_module_limit=10) + + # Should get all unique reviewers from modules with owners + expected = ["owner1", "owner2", "owner3", "owner4"] + self.assertEqual(set(reviewers), set(expected)) + + # Check module assignments + self.assertEqual(set(module_assignments["Module1"]), + {"owner1", "owner2"}) + self.assertEqual(set(module_assignments["Module2"]), + {"owner3", "owner4"}) + self.assertEqual(module_assignments["Module3"], []) + self.assertEqual(modules_without_owners, {"Module3"}) + + def test_gather_reviewers_exclusions(self): + """Test reviewer exclusion functionality""" + modules = {"Module1", "Module2"} + + # Test PR author exclusion + reviewers, module_assignments, _ = assign_reviewers.gather_reviewers( + modules, + self.module_owners, + pr_author="owner1", + per_module_limit=10) + + self.assertNotIn("owner1", reviewers) + self.assertNotIn("owner1", module_assignments["Module1"]) + + # Test existing reviewers exclusion + existing = {"owner2", "owner3"} + reviewers, module_assignments, _ = assign_reviewers.gather_reviewers( + modules, + self.module_owners, + existing_reviewers=existing, + per_module_limit=10) + + self.assertFalse(any(r in existing for r in reviewers)) + + def test_per_module_reviewer_limit(self): + """Test per-module reviewer limit functionality""" + modules = {"Module1", "Module2"} + module_owners = { + "Module1": ["a", "b", "c", "d", "e"], # 5 owners + "Module2": ["f", "g", "h"], # 3 owners + } + + reviewers, module_assignments, _ = assign_reviewers.gather_reviewers( + modules, module_owners, per_module_limit=2) + + # Each module should have at most 2 reviewers + self.assertEqual(len(module_assignments["Module1"]), 2) + self.assertEqual(len(module_assignments["Module2"]), 2) + self.assertEqual(len(reviewers), 4) + + # Verify reviewers are from correct modules + self.assertTrue( + set(module_assignments["Module1"]).issubset( + {"a", "b", "c", "d", "e"})) + self.assertTrue( + set(module_assignments["Module2"]).issubset({"f", "g", "h"})) + + def test_module_reviewer_overlap(self): + """Test handling when reviewers own multiple modules""" + modules = {"Module1", "Module2", "Module3"} + module_owners = { + "Module1": ["shared", 
"owner1"], + "Module2": ["shared", "owner2"], + "Module3": ["owner3"], + } + + # Run multiple times to test randomness + total_reviewers_counts = [] + for _ in range(10): + reviewers, _, _ = assign_reviewers.gather_reviewers( + modules, module_owners, per_module_limit=1) + total_reviewers_counts.append(len(reviewers)) + + # Should see both 2 and 3 reviewers due to random selection of 'shared' + self.assertTrue(any(count == 2 for count in total_reviewers_counts)) + self.assertTrue(any(count == 3 for count in total_reviewers_counts)) + + def test_module_coverage_edge_cases(self): + """Test edge cases in module coverage""" + module_owners = { + "Module1": ["alice", "bob"], + "Module2": ["bob"], # Only bob owns this + "Module3": ["charlie"], + } + + # Case 1: PR author owns a module entirely + modules = {"Module1", "Module2", "Module3"} + reviewers, module_assignments, _ = assign_reviewers.gather_reviewers( + modules, module_owners, pr_author="bob", per_module_limit=2) + + self.assertEqual(module_assignments["Module2"], + []) # No eligible reviewers + self.assertEqual(module_assignments["Module1"], ["alice"]) + self.assertEqual(module_assignments["Module3"], ["charlie"]) + + # Case 2: All owners already assigned + existing = {"alice", "charlie"} + reviewers, module_assignments, _ = assign_reviewers.gather_reviewers( + {"Module1", "Module3"}, + module_owners, + pr_author="bob", + existing_reviewers=existing, + per_module_limit=2) + + self.assertEqual(len(reviewers), 0) + self.assertEqual(module_assignments["Module1"], []) + self.assertEqual(module_assignments["Module3"], []) + + # ========== Integration Tests ========== + + def _run_integration_test(self, + changed_files, + expected_reviewer_count=None, + expected_assigned=True, + pr_author=None, + existing_users="", + extra_assertions=None): + """Helper method to run integration tests with common setup""" + self.mock_changed_files = changed_files + self.mock_existing_users = existing_users + self.mock_existing_teams = "" + self.assign_reviewers_called = False + + if pr_author: + os.environ["PR_AUTHOR"] = pr_author + + with patch('subprocess.run') as mock_run, \ + patch('assign_reviewers.load_json') as mock_load_json: + + mock_run.side_effect = self._mock_subprocess_run + mock_load_json.side_effect = lambda path: ( + self.module_paths + if "module-paths" in str(path) else self.module_owners) + + with patch('sys.argv', ['assign_reviewers.py']): + assign_reviewers.main() + + self.assertEqual(self.assign_reviewers_called, expected_assigned) + + if expected_reviewer_count is not None and expected_assigned: + self.assertEqual(len(self.assigned_reviewers), + expected_reviewer_count) + + if extra_assertions and expected_assigned: + extra_assertions(self) + + def test_single_module_changed(self): + """Test PR with files from a single module""" + self._run_integration_test( + changed_files="cpp/file1.cpp\ncpp/file2.h\n", + expected_reviewer_count=2, + extra_assertions=lambda self: self.assertTrue( + all(r in ["user1", "user2", "user3"] + for r in self.assigned_reviewers))) + + def test_multiple_modules_changed(self): + """Test PR with files from multiple modules""" + self._run_integration_test( + changed_files="cpp/file1.cpp\ndocs/README.md\n", + expected_reviewer_count= + 3, # 2 from Generic Runtime, 1 from Documentation + extra_assertions=lambda self: self.assertTrue( + all(r in ["user1", "user2", "user3", "user9"] + for r in self.assigned_reviewers))) + + def test_no_files_or_unmapped(self): + """Test PR with no files or unmapped files""" + # No 
files + self._run_integration_test(changed_files="", expected_assigned=False) + + # Unmapped files + self._run_integration_test( + changed_files="unknown/file.txt\nrandom/path.py\n", + expected_assigned=False) + + def test_pr_author_excluded(self): + """Test that PR author is excluded from reviewers""" + self._run_integration_test( + changed_files="cpp/file1.cpp\n", + pr_author="user2", + expected_reviewer_count=2, + extra_assertions=lambda self: self.assertNotIn( + "user2", self.assigned_reviewers)) + + def test_existing_reviewers_behavior(self): + """Test behavior with existing reviewers""" + # Should skip assignment when reviewers exist + self._run_integration_test( + changed_files="cpp/file1.cpp\n", + existing_users="existing_user1\nexisting_user2\n", + expected_assigned=False) + + # Force assign with existing reviewers + self.mock_changed_files = "cpp/file1.cpp\n" + self.mock_existing_users = "user1\n" + self.mock_existing_teams = "" + self.assign_reviewers_called = False + + with patch('subprocess.run') as mock_run, \ + patch('assign_reviewers.load_json') as mock_load_json: + + mock_run.side_effect = self._mock_subprocess_run + mock_load_json.side_effect = lambda path: ( + self.module_paths + if "module-paths" in str(path) else self.module_owners) + + with patch('sys.argv', ['assign_reviewers.py', '--force-assign']): + assign_reviewers.main() + + self.assertTrue(self.assign_reviewers_called) + self.assertNotIn("user1", self.assigned_reviewers) + + def test_special_modes(self): + """Test dry-run and error modes""" + # Dry run mode + import io + from contextlib import redirect_stdout + + self.mock_changed_files = "cpp/file1.cpp\n" + self.mock_existing_users = "" + self.mock_existing_teams = "" + self.assign_reviewers_called = False + + f = io.StringIO() + with redirect_stdout(f): + with patch('subprocess.run') as mock_run, \ + patch('assign_reviewers.load_json') as mock_load_json: + + mock_run.side_effect = self._mock_subprocess_run + mock_load_json.side_effect = lambda path: ( + self.module_paths + if "module-paths" in str(path) else self.module_owners) + + with patch('sys.argv', ['assign_reviewers.py', '--dry-run']): + assign_reviewers.main() + + output = f.getvalue() + self.assertIn("DRY RUN:", output) + self.assertFalse(self.assign_reviewers_called) + + def test_error_handling(self): + """Test various error handling scenarios""" + # Subprocess error + with patch('subprocess.run') as mock_run, \ + patch('assign_reviewers.load_json') as mock_load_json: + + mock_run.side_effect = subprocess.CalledProcessError( + 1, ["gh", "pr", "view"]) + mock_load_json.side_effect = lambda path: self.module_paths + + with self.assertRaises(SystemExit) as cm: + with patch('sys.argv', ['assign_reviewers.py']): + assign_reviewers.main() + self.assertEqual(cm.exception.code, 1) + + # Missing JSON file + with patch('assign_reviewers.load_json') as mock_load_json: + mock_load_json.side_effect = FileNotFoundError( + "module-paths.json not found") + + with self.assertRaises(FileNotFoundError): + with patch('sys.argv', ['assign_reviewers.py']): + assign_reviewers.main() + + # Missing environment variable + del os.environ["PR_NUMBER"] + with self.assertRaises(KeyError): + with patch('sys.argv', ['assign_reviewers.py']): + assign_reviewers.main() + + def test_edge_cases_integration(self): + """Test edge cases in full integration""" + # Files with special characters + self._run_integration_test( + changed_files= + "cpp/file with spaces.cpp\ncpp/file[brackets].h\ncpp/file@special.cpp\n", + 
expected_reviewer_count=2) + + # Large reviewer pool + large_module_owners = self.module_owners.copy() + large_module_owners["Large Module"] = [f"user{i}" for i in range(20)] + + self.mock_changed_files = "large/file.cpp\n" + self.mock_existing_users = "" + self.mock_existing_teams = "" + self.assign_reviewers_called = False + + with patch('subprocess.run') as mock_run, \ + patch('assign_reviewers.load_json') as mock_load_json: + + mock_run.side_effect = self._mock_subprocess_run + mock_load_json.side_effect = lambda path: ({ + "large/": "Large Module" + } if "module-paths" in str(path) else large_module_owners) + + with patch('sys.argv', ['assign_reviewers.py']): + assign_reviewers.main() + + self.assertTrue(self.assign_reviewers_called) + self.assertEqual(len(self.assigned_reviewers), 2) # Per-module limit + + +if __name__ == "__main__": + import unittest + unittest.main(verbosity=2) diff --git a/.github/workflows/auto-assign-reviewers.yml b/.github/workflows/auto-assign-reviewers.yml new file mode 100644 index 00000000000..72259849935 --- /dev/null +++ b/.github/workflows/auto-assign-reviewers.yml @@ -0,0 +1,41 @@ +name: Auto assign reviewers +on: + pull_request_target: + types: [opened, synchronize, reopened] + workflow_dispatch: + inputs: + pr_number: + description: 'PR number to test assignment on' + required: true + type: string + dry_run: + description: 'Run in dry-run mode (just print commands)' + required: false + type: boolean + default: false + force_assign: + description: 'Force assign even if reviewers already exist' + required: false + type: boolean + default: false +jobs: + assign: + runs-on: ubuntu-latest + permissions: + pull-requests: write + contents: read + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: '3.12' + - name: Assign reviewers + env: + PR_NUMBER: ${{ github.event.inputs.pr_number || github.event.pull_request.number }} + PR_AUTHOR: ${{ github.event.pull_request.user.login || github.event.inputs.pr_author || '' }} + GH_TOKEN: ${{ secrets.REVIEW_ASSIGNING_TOKEN }} + PER_MODULE_REVIEWER_LIMIT: '2' + run: | + python3 .github/scripts/assign_reviewers.py \ + ${{ github.event.inputs.dry_run == 'true' && '--dry-run' || '' }} \ + ${{ github.event.inputs.force_assign == 'true' && '--force-assign' || '' }} diff --git a/.github/workflows/auto-assign.yml b/.github/workflows/auto-assign.yml index 5ecb067cc01..4c484481aaf 100644 --- a/.github/workflows/auto-assign.yml +++ b/.github/workflows/auto-assign.yml @@ -22,7 +22,7 @@ jobs: const fs = require('fs'); // Read configuration file - const config = JSON.parse(fs.readFileSync('.github/workflows/module-owners.json', 'utf8')); + const config = JSON.parse(fs.readFileSync('.github/module-owners.json', 'utf8')); // Find matching label in config for (const [configLabel, users] of Object.entries(config)) { diff --git a/.github/workflows/module-owners.json b/.github/workflows/module-owners.json index bab5760f012..c799be4a6bb 100644 --- a/.github/workflows/module-owners.json +++ b/.github/workflows/module-owners.json @@ -7,10 +7,29 @@ "Speculative Decoding":["yweng0828", "nekorobov", "lfr-0531"], "Customized Kernels":["lowsfer", "PerkzZheng", "jdemouth-nvidia"], "Performance": ["kaiyux", "jiahanc", "hypdeb"], - "Lora/P-tuning":["byshiue", "Naveassaf"], + "Lora/P-tuning":["byshiue", "shaharmor98"], "Disaggregated Serving":["Shixiaowei02", "joyang-nv", "chuangz0", "schetlur-nv"], "Documentation":["nv-guomingz"], "Sampling": ["dcampora", "lfr-0531", "Naveassaf", "syuoni", "yweng0828"], 
"Memory": ["litaotju", "peaceh-nv"], - "Installation": ["hchings", "Superjomn", "nv-guomingz", "QiJune"] + "Installation": ["hchings", "Superjomn", "nv-guomingz", "QiJune"], + "CI/CD": ["chzblych", "syuoni"], + "Torch Framework": ["QiJune", "hlu1"], + "Torch Attention Backend": ["yuxianq", "hlu1"], + "Torch AutoDeploy": ["lucaslie", "suyoggupta"], + "Torch Compilation": ["litaotju", "yizhang-nv", "liji-nv"], + "Torch Custom Ops": ["yizhang-nv"], + "Torch Distributed": ["yilin-void", "yuxianq", "hyukn", "yizhang-nv", "hlu1"], + "Torch PyExecutor": ["dongxuy04", "funatiq", "dcampora", "HuiGao-NV"], + "Torch Speculative": ["lfr-0531", "mikeiovine"], + "Autotuner": ["hyukn", "litaotju"], + "Pipeline Interface": ["amukkara", "chang-l"], + "Torch Models": ["QiJune", "hlu1"], + "Torch Models DeepSeekV3": ["hlu1", "zongfeijing"], + "Torch Models Llama": ["chang-l", "mikeiovine"], + "Torch Modules": ["QiJune", "hlu1"], + "Torch Modules Attention": ["yuxianq", "hlu1"], + "Torch Modules Fused MOE": ["hlu1", "dongxuy04", "zongfeijing", "HuiGao-NV"], + "Torch Tests": ["QiJune", "hlu1"], + "PyTorch Examples": ["QiJune", "hlu1"] } diff --git a/.github/workflows/pr-checklist.yml b/.github/workflows/pr-checklist.yml new file mode 100644 index 00000000000..ec8a1d11c4f --- /dev/null +++ b/.github/workflows/pr-checklist.yml @@ -0,0 +1,174 @@ +name: PR Checklist +on: + pull_request_target: + types: [opened, reopened, ready_for_review, synchronize] + issue_comment: + types: [created, edited] + workflow_dispatch: + inputs: + pr_number: + description: 'PR number to check' + required: true + type: string + +permissions: + pull-requests: write + statuses: write + +jobs: + checklist: + runs-on: ubuntu-latest + if: github.event_name == 'workflow_dispatch' || github.event.pull_request.draft == false || github.event.issue.pull_request + steps: + - name: Manage PR Checklist + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + // Get PR information based on event type + let pr, prNumber; + if (context.eventName === 'pull_request_target') { + pr = context.payload.pull_request; + prNumber = pr.number; + } else if (context.eventName === 'issue_comment') { + // For issue_comment events, get PR info from the issue + if (!context.payload.issue.pull_request) { + console.log('Comment is not on a PR, skipping'); + return; + } + prNumber = context.payload.issue.number; + const { data: prData } = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: prNumber + }); + pr = prData; + } else if (context.eventName === 'workflow_dispatch') { + // For manual dispatch, get PR info from input + prNumber = parseInt(context.payload.inputs.pr_number); + const { data: prData } = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: prNumber + }); + pr = prData; + } else { + console.log('Unexpected event type:', context.eventName); + return; + } + + console.log(`Processing PR #${prNumber}`); + + const checklistBody = `## 🚀 PR Checklist + + - [ ] PR title follows format: \`[type] Description\` + - [ ] PR description explains both **what** you're doing and **why** + - [ ] Code conforms to coding conventions (see \`CODING_GUIDELINES.md\`) + - [ ] Test cases added for new code + - [ ] All existing tests pass + - [ ] PR and commit messages cleaned up via \`git rebase -i\` + + --- + **Please ✅ check the below item to confirm you've reviewed the checklist when ready for review!.** + - [ ] **I have reviewed the 
above checklist and addressed all applicable items**`; + + // Get all comments + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber + }); + + // Find checklist comment + let checklistComment = comments.find(comment => + comment.user.login === 'github-actions[bot]' && + comment.body.includes('PR Checklist') + ); + + // Post checklist if not exists + if (!checklistComment && + (context.eventName === 'workflow_dispatch' || + ['opened', 'reopened', 'ready_for_review'].includes(context.payload.action))) { + const { data: newComment } = await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body: checklistBody + }); + checklistComment = newComment; + console.log('Posted PR checklist'); + } + + if (!checklistComment) { + console.log('No checklist comment found'); + return; + } + + // Parse the checklist comment to count checked items + const checkboxRegex = /- \[([ x])\]/gi; + const matches = [...checklistComment.body.matchAll(checkboxRegex)]; + const totalItems = matches.length; + const checkedItems = matches.filter(match => match[1].toLowerCase() === 'x').length; + + console.log(`Checklist status: ${checkedItems}/${totalItems} items checked`); + + // Check if the acknowledgment checkbox (last one) is checked + const lastCheckbox = matches[matches.length - 1]; + const isAcknowledged = lastCheckbox && lastCheckbox[1].toLowerCase() === 'x'; + + + // Determine status based on checked items + let state, description; + if (!isAcknowledged) { + state = 'pending'; + description = `Please acknowledge the checklist`; + } else { + state = 'success'; + description = `Checklist acknowledged`; + } + + // Update commit status + await github.rest.repos.createCommitStatus({ + owner: context.repo.owner, + repo: context.repo.repo, + sha: pr.head.sha, + state: state, + context: 'PR Checklist', + description: description, + target_url: checklistComment.html_url + }); + + console.log(`Status: ${state} - ${description}`); + + // Add a helpful label based on checklist status + const labels = { + complete: 'checklist-complete', + incomplete: 'checklist-incomplete' + }; + + try { + // Remove both labels first + for (const label of Object.values(labels)) { + try { + await github.rest.issues.removeLabel({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + name: label + }); + } catch (e) { + // Label might not exist, that's ok + } + } + + // Add appropriate label + const labelToAdd = isAcknowledged ? labels.complete : labels.incomplete; + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + labels: [labelToAdd] + }); + } catch (e) { + console.log('Could not update labels:', e.message); + } diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9c8995b2ef0..b4cb77e5272 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -92,10 +92,32 @@ Developer workflow for code contributions is as follows: 3. Once the code changes are staged on the fork and ready for review, a [Pull Request](https://help.github.com/en/articles/about-pull-requests) (PR) can be [requested](https://help.github.com/en/articles/creating-a-pull-request) to merge the changes from a branch of the fork into a selected branch of upstream. PRs should typically target the `main` branch. * Creation of a PR creation kicks off the code review process. 
- * At least one TensorRT-LLM engineer will be assigned for the review. When the PR is under review, the label `Pending Review` will be added to the PR. + * At least one TensorRT-LLM engineer will be assigned for the review. * If changes are requested, then the reviewer will add the label `Changes Requested` to the PR. * Once changes are approved, CI will be launched to validate the change. When CI passes, the reviewer will merge the PR. * If CI reports any failures, it's up to the requester to fix any CI failures before requesting another review. + * An automatic comment containing a PR checklist will be posted on the PR; the checklist must be acknowledged before the PR can be merged. + +### Automatic Reviewer Assignment + +Reviewers are automatically assigned to PRs through a GitHub Action that: + +* **Triggers**: Runs automatically when PRs are opened, synchronized, or reopened +* **Module-based assignment**: Maps changed files to modules using `.github/module-paths.json` and assigns reviewers based on module ownership defined in `.github/module-owners.json` +* **Respects existing assignments**: Won't assign additional reviewers if any reviewers are already assigned (unless forced) +* **Excludes PR author**: Automatically excludes the PR author from reviewer assignments +* **Limits reviewer count**: Randomly samples up to 2 reviewers per module when more are eligible (configurable via the `PER_MODULE_REVIEWER_LIMIT` environment variable) +* **Coexists with CODEOWNERS**: Works alongside GitHub's CODEOWNERS file (`.github/CODEOWNERS`), which enforces mandatory approvals for specific paths (e.g., API stability tests, release branches) + +The auto-assignment system analyzes all files changed in your PR, maps them to the appropriate code modules, and assigns reviewers from the module owner lists. This ensures domain experts review relevant changes while avoiding over-assignment. + +**Testing the assignment locally**: You can test reviewer assignment with the `--dry-run` flag: + ```bash + GH_TOKEN= PR_NUMBER= PR_AUTHOR= \ + python3 .github/scripts/assign_reviewers.py --dry-run + ``` + +**Manual assignment**: You can also trigger reviewer assignment manually via the workflow's `workflow_dispatch` trigger, with options for dry-run mode and force assignment. ### PR Submission Policies