-
Notifications
You must be signed in to change notification settings - Fork 1.9k
[Github] Action to Auto-assign PR Reviewers (that respects CODEOWNERS.md and overrides) #5630
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
Changes from all commits
0b7ced9
4c465e2
dd95efe
e435dc2
c8410c6
8c6f689
bb53b5b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,39 @@ | ||
| { | ||
| "Generic Runtime": ["funatiq", "pcastonguay", "Shixiaowei02", "MartinMarciniszyn", "schetlur-nv", "dcampora"], | ||
| "Triton Backend": ["Tabrizian", "pcastonguay", "schetlur-nv"], | ||
| "LLM API/Workflow": ["Superjomn", "syuoni", "nv-guomingz", "litaotju", "QiJune"], | ||
| "KV-Cache Management":["thorjohnsen", "schetlur-nv"], | ||
| "Low Precision":["Tracin", "nv-guomingz", "Naveassaf"], | ||
| "Speculative Decoding":["yweng0828", "nekorobov", "lfr-0531"], | ||
| "Customized Kernels":["lowsfer", "PerkzZheng", "jdemouth-nvidia"], | ||
| "Performance": ["kaiyux", "jiahanc", "hypdeb"], | ||
| "Lora/P-tuning":["byshiue", "shaharmor98"], | ||
| "Disaggregated Serving":["Shixiaowei02", "joyang-nv", "chuangz0", "schetlur-nv"], | ||
| "Documentation":["nv-guomingz"], | ||
| "Sampling": ["dcampora", "lfr-0531", "Naveassaf", "syuoni", "yweng0828"], | ||
| "Memory": ["litaotju", "peaceh-nv"], | ||
| "Installation": ["hchings", "Superjomn", "nv-guomingz", "QiJune"], | ||
| "GitHub Configuration": ["tburt-nv", "niukuo"], | ||
| "Jenkins Pipelines": ["chzblych", "niukuo"], | ||
| "Test Configuration": ["niukuo", "syuoni", "LarryXFly"], | ||
| "Test Waive List": ["chzblych", "niukuo"], | ||
| "Integration Tests": ["LarryXFly", "niukuo"], | ||
| "Torch Framework": ["QiJune", "hlu1"], | ||
| "Torch Attention Backend": ["yuxianq", "hlu1"], | ||
| "Torch AutoDeploy": ["lucaslie", "suyoggupta"], | ||
| "Torch Compilation": ["litaotju", "yizhang-nv", "liji-nv"], | ||
| "Torch Custom Ops": ["yizhang-nv"], | ||
| "Torch Distributed": ["yilin-void", "yuxianq", "hyukn", "yizhang-nv", "hlu1"], | ||
| "Torch PyExecutor": ["dongxuy04", "funatiq", "dcampora", "HuiGao-NV"], | ||
| "Torch Speculative": ["lfr-0531", "mikeiovine"], | ||
| "Autotuner": ["hyukn", "litaotju"], | ||
| "Pipeline Interface": ["amukkara", "chang-l"], | ||
| "Torch Models": ["QiJune", "hlu1"], | ||
| "Torch Models DeepSeekV3": ["hlu1", "zongfeijing"], | ||
| "Torch Models Llama": ["chang-l", "mikeiovine"], | ||
| "Torch Modules": ["QiJune", "hlu1"], | ||
| "Torch Modules Attention": ["yuxianq", "hlu1"], | ||
| "Torch Modules Fused MOE": ["hlu1", "dongxuy04", "zongfeijing", "HuiGao-NV"], | ||
| "Torch Tests": ["QiJune", "hlu1"], | ||
| "PyTorch Examples": ["QiJune", "hlu1"] | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,33 @@ | ||
| { | ||
| "cpp/": "Generic Runtime", | ||
| "triton_backend/": "Triton Backend", | ||
| "tensorrt_llm/_torch/peft/": "Lora/P-tuning", | ||
| "tensorrt_llm/": "LLM API/Workflow", | ||
| "benchmarks/": "Performance", | ||
| "examples/disaggregated/": "Disaggregated Serving", | ||
| "docs/": "Documentation", | ||
| "docker/": "Installation", | ||
| ".github/": "GitHub Configuration", | ||
| "jenkins/": "Jenkins Pipelines", | ||
| "tests/integration/test_lists/": "Test Configuration", | ||
| "tests/integration/test_lists/waives.txt": "Test Waive List", | ||
| "tests/integration/defs/": "Integration Tests", | ||
| "tensorrt_llm/_torch/": "Torch Framework", | ||
| "tensorrt_llm/_torch/attention_backend/": "Torch Attention Backend", | ||
| "tensorrt_llm/_torch/auto_deploy/": "Torch AutoDeploy", | ||
| "tensorrt_llm/_torch/compilation/": "Torch Compilation", | ||
| "tensorrt_llm/_torch/custom_ops/": "Torch Custom Ops", | ||
| "tensorrt_llm/_torch/distributed/": "Torch Distributed", | ||
| "tensorrt_llm/_torch/pyexecutor/": "Torch PyExecutor", | ||
| "tensorrt_llm/_torch/speculative/": "Torch Speculative", | ||
| "tensorrt_llm/autotuner.py": "Autotuner", | ||
| "tensorrt_llm/pipeline_interface.py": "Pipeline Interface", | ||
| "tensorrt_llm/_torch/models/": "Torch Models", | ||
| "tensorrt_llm/_torch/models/modeling_deepseekv3.py": "Torch Models DeepSeekV3", | ||
| "tensorrt_llm/_torch/models/modeling_llama.py": "Torch Models Llama", | ||
| "tensorrt_llm/_torch/modules/": "Torch Modules", | ||
| "tensorrt_llm/_torch/modules/attention.py": "Torch Modules Attention", | ||
| "tensorrt_llm/_torch/modules/fused_moe.py": "Torch Modules Fused MOE", | ||
| "tests/unittest/_torch/": "Torch Tests", | ||
| "examples/pytorch/": "PyTorch Examples" | ||
| } | ||
| Original file line number | Diff line number | Diff line change | ||
|---|---|---|---|---|
| @@ -0,0 +1,295 @@ | ||||
| import argparse | ||||
| import json | ||||
| import os | ||||
| import random | ||||
| import subprocess | ||||
| import sys | ||||
| from pathlib import Path | ||||
|
|
||||
|
|
||||
| def get_pr_changed_files(pr_number: str) -> list[str]: | ||||
| """Get files changed in PR using GitHub CLI (more reliable than git diff)""" | ||||
| result = subprocess.run( | ||||
| [ | ||||
| "gh", "pr", "view", pr_number, "--json", "files", "--jq", | ||||
| ".files[].path" | ||||
| ], | ||||
| capture_output=True, | ||||
| text=True, | ||||
| check=True, | ||||
| ) | ||||
| return [line.strip() for line in result.stdout.splitlines() if line.strip()] | ||||
|
|
||||
|
|
||||
| def get_existing_reviewers(pr_number: str) -> tuple[set[str], set[str]]: | ||||
| """Get currently assigned reviewers (users and teams) for a PR""" | ||||
| try: | ||||
| # Get user reviewers | ||||
| user_result = subprocess.run( | ||||
| [ | ||||
| "gh", "pr", "view", pr_number, "--json", "reviewRequests", | ||||
| "--jq", | ||||
| "(.reviewRequests // []) | .[] | select(.login) | .login" | ||||
| ], | ||||
| capture_output=True, | ||||
| text=True, | ||||
| check=True, | ||||
| ) | ||||
| user_reviewers = { | ||||
| line.strip() | ||||
| for line in user_result.stdout.splitlines() if line.strip() | ||||
| } | ||||
|
|
||||
| # Get team reviewers | ||||
| team_result = subprocess.run( | ||||
|
Comment on lines
+43
to
+44
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. For team reviewers, it would probably be helpful to randomly assign one reviewer in the team. That may also help automate the round-robin-style review balancing.
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I believe the CODEOWNERS already does this, Tyler; my action does not deal with the GitHub teams (although the same information is present in module-owners.json)
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I don't think so: GitHub assigns the whole team as a reviewer when the team is in the CODEOWNERS list, and then GitHub optionally sends an email notification to the whole team when the team is assigned as a reviewer. I'm not aware of any built-in mechanism to send a notification to a single person in that team.
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I think it might even be appropriate to replace each team reviewer with a single person in the team. The CODEOWNERS requirement would still allow any member of the team to approve.
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. We can leave that as a future improvement if you'd prefer, I'll approve this MR for now
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Ah I didn't realize this setting existed; I suppose each team can decide how they want to balance their own reviews this way. The built-in teams probably would be sufficient instead of this action if we were able to mix required and not-required CODEOWNERS.
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. But imagine a scenario where the CODEOWNERS has very specific module mappings, each with a team/individual. If a PR touches multiple of these, then all these teams will get tagged as required codeowner approvals. It is possible that some module-mappings are so specific that it's probably just 1-2 people. And they will effectively block the approval and merge of the PR. But the GitHub action can alleviate that. The above assumes that the CODEOWNERS-enabled assignments are rigid (and hence blocking). What do you think @tburt-nv ? also tagging @juney-nvidia @zeroepoch @kevinch-nv @atrifex @hlu1 @MartinMarciniszyn
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
Other members of the team in that case can still be manually assigned to the PR after the fact and their approval would unblock the PR. It comes down to what the goals are for enforcing reviews and approvals.
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Sorry, I thought I had already resolved this thread. I'm personally ok with the flexibility of maintaining both the required CODEOWNERs with native GitHub features and the optional
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Thanks @kevinch-nv / @tburt-nv . So that means the module-owners must explicitly approve every PR (at least 1 of them) that touches their module. This can potentially blow up the required approvals (like in this PR you mentioned), but that's okay/necessary. BUT if, for any reason, none of the module owners of a specific CODEOWNERS rule/mapping are available for approval, we'll need an unblocking mechanism. I should also correct my original comment - the auto-routing done per-team, when the team is pulled by the CODEOWNERS for a particular change, does respect OOTO/Busy statuses (here). So, if the following things are done properly, CODEOWNERS + team settings are alone enough:
I have another draft PR #5977 that expands CODEOWNERS, but basically takes care of the ordering and hierarchy, but maintains the same coverage as @kevinch-nv 's reverted #4105 . I think that should be a good starting point. |
||||
| [ | ||||
| "gh", "pr", "view", pr_number, "--json", "reviewRequests", | ||||
| "--jq", "(.reviewRequests // []) | .[] | select(.name) | .name" | ||||
| ], | ||||
| capture_output=True, | ||||
| text=True, | ||||
| check=True, | ||||
| ) | ||||
| team_reviewers = { | ||||
| line.strip() | ||||
| for line in team_result.stdout.splitlines() if line.strip() | ||||
| } | ||||
|
|
||||
| return user_reviewers, team_reviewers | ||||
| except subprocess.CalledProcessError as e: | ||||
| print(f"Warning: Could not fetch existing reviewers: {e}") | ||||
| return set(), set() | ||||
|
|
||||
|
|
||||
| def load_json(path: str): | ||||
| with open(path, "r", encoding="utf-8") as f: | ||||
| return json.load(f) | ||||
|
|
||||
|
|
||||
| def map_modules(changed_files: list[str], | ||||
| module_paths: dict[str, str]) -> tuple[set[str], list[str]]: | ||||
| """Map changed files to modules using MOST SPECIFIC (longest) prefix match""" | ||||
| modules: set[str] = set() | ||||
| unmapped_files: list[str] = [] | ||||
|
|
||||
| for file in changed_files: | ||||
| # Find ALL matching prefixes | ||||
| matches = [] | ||||
| for prefix, module in module_paths.items(): | ||||
| if file.startswith(prefix): | ||||
| matches.append((len(prefix), prefix, module)) | ||||
|
|
||||
| if matches: | ||||
| # Sort by prefix length (descending) to get most specific first | ||||
| matches.sort(reverse=True) | ||||
| most_specific_module = matches[0][2] | ||||
| modules.add(most_specific_module) | ||||
|
|
||||
| # Log if there were multiple matches (for debugging) | ||||
| if len(matches) > 1: | ||||
| matches[0][1] | ||||
|
||||
| matches[0][1] |
Uh oh!
There was an error while loading. Please reload this page.