diff --git a/.github/workflows/check_pr_quality.yml b/.github/workflows/check_pr_quality.yml
new file mode 100644
index 000000000000..417ae5d20198
--- /dev/null
+++ b/.github/workflows/check_pr_quality.yml
@@ -0,0 +1,48 @@
+name: PR Quality Checks
+
+on:
+ pull_request_target:
+ types: [ edited, opened, reopened, ready_for_review, synchronize ]
+ branches:
+ - main
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
+ cancel-in-progress: true
+
+permissions:
+ contents: read
+ pull-requests: write
+
+jobs:
+ pr_quality:
+ name: Run Quality Checks on a PR
+ runs-on: ubuntu-latest
+ timeout-minutes: 5
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v6
+ with:
+ persist-credentials: false
+ # Checking out the default branch (not the PR head) is what makes
+ # pull_request_target safe: the workflow code always comes from the
+ # base repo, so a malicious PR cannot alter it.
+ ref: ${{ github.event.repository.default_branch }}
+ - name: Set up Python
+ uses: actions/setup-python@v6
+ with:
+ python-version: '3.14'
+ - name: Run PR quality checks
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ PR_AUTHOR: ${{ github.event.pull_request.user.login }}
+ PR_BODY: ${{ github.event.pull_request.body }}
+ PR_TITLE: ${{ github.event.pull_request.title }}
+ PR_CREATED_AT: ${{ github.event.pull_request.created_at }}
+ PR_NUMBER: ${{ github.event.pull_request.number }}
+ PR_REPO: ${{ github.repository }}
+ # Only close PRs on the main Django repository; on forks the workflow
+ # runs in warning-only mode so contributors can test their PRs.
+ AUTOCLOSE: ${{ github.repository == 'django/django' }}
+ PYTHONPATH: scripts
+ run: python scripts/pr_quality/check_pr.py
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 411fc7736a97..b0742eba6965 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -60,3 +60,18 @@ jobs:
cache-dependency-path: '**/package.json'
- run: npm install
- run: npm test
+
+ scripts-tests:
+ runs-on: ubuntu-latest
+ name: Scripts tests
+ timeout-minutes: 60
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v6
+ with:
+ persist-credentials: false
+ - name: Set up Python
+ uses: actions/setup-python@v6
+ with:
+ python-version: '3.14'
+ - run: python -m unittest discover -v -s scripts/
diff --git a/scripts/pr_quality/__init__.py b/scripts/pr_quality/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/scripts/pr_quality/check_pr.py b/scripts/pr_quality/check_pr.py
new file mode 100644
index 000000000000..054d3e6adf50
--- /dev/null
+++ b/scripts/pr_quality/check_pr.py
@@ -0,0 +1,553 @@
+#!/usr/bin/env python3
+"""
+PR quality checks for Django pull requests.
+
+Each check is an independent function that returns None on success, or an
+error message on failure. Independent checks are all run so that all issues
+are reported in a single pass. Trac status and has_patch checks are skipped
+when no ticket is found, since they require a ticket ID to be meaningful.
+
+Required environment variables:
+ GITHUB_TOKEN GitHub API token
+ PR_NUMBER Pull request number
+ PR_REPO Repository in "owner/repo" format
+
+Optional environment variables:
+ AUTOCLOSE Set to "true" to close failing PRs (default: false)
+ PR_AUTHOR PR author's GitHub login
+ PR_BODY Pull request body text
+ PR_CREATED_AT PR creation timestamp (ISO 8601)
+ PR_TITLE Pull request title
+"""
+
+import json
+import logging
+import os
+import re
+import sys
+import time
+import urllib.error
+import urllib.parse
+import urllib.request
+from datetime import date, datetime, timedelta, timezone
+
+from pr_quality.errors import (
+ CHECKS_FOOTER,
+ CHECKS_HEADER,
+ INCOMPLETE_CHECKLIST,
+ INVALID_TRAC_STATUS,
+ LEVEL_ERROR,
+ LEVEL_WARNING,
+ MISSING_AI_DESCRIPTION,
+ MISSING_AI_DISCLOSURE,
+ MISSING_DESCRIPTION,
+ MISSING_HAS_PATCH_FLAG,
+ MISSING_TICKET_IN_PR_TITLE,
+ MISSING_TRAC_TICKET,
+ Message,
+)
+
+GITHUB_PER_PAGE = 100
+LARGE_PR_THRESHOLD = 80 # additions + deletions
+MIN_WORDS = 5
+SKIPPED = object() # Sentinel: check was not applicable and was skipped.
+TICKET_NOT_FOUND = object() # Sentinel: Trac returned HTTP 404 for the ticket.
+URLOPEN_TIMEOUT_SECONDS = 15
+# PRs opened before these dates predate PR template additions.
+PR_TEMPLATE_DATE = date(2024, 3, 4) # 3fcef50 -- PR template introduced
+AI_DISCLOSURE_DATE = date(2026, 1, 8) # 4f580c4 -- AI disclosure added
+
+logger = logging.getLogger(__name__)
+
+
+def setup_logging(logger, gha_formatter=True):
+ logger.setLevel(logging.DEBUG)
+
+ if not logger.handlers and gha_formatter:
+
+ class GHAFormatter(logging.Formatter):
+ _PREFIXES = {
+ logging.DEBUG: "::debug::",
+ logging.INFO: "::notice::",
+ logging.WARNING: "::warning::",
+ logging.ERROR: "::error::",
+ }
+
+ def format(self, record):
+ msg = super().format(record)
+ prefix = self._PREFIXES.get(record.levelno, "")
+ return f"{prefix}{msg}"
+
+ handler = logging.StreamHandler()
+ handler.setFormatter(GHAFormatter())
+ logger.addHandler(handler)
+
+
+def github_request(method, path, token, repo, data=None, params=None):
+ """Make an authenticated GitHub API request."""
+ url = f"https://api.github.com/repos/{repo}{path}"
+ if params:
+ encoded_params = urllib.parse.urlencode(params)
+ url = f"{url}?{encoded_params}"
+ headers = {
+ "Authorization": f"Bearer {token}",
+ "Accept": "application/vnd.github+json",
+ "X-GitHub-Api-Version": "2022-11-28",
+ }
+ body = None
+ if data is not None:
+ body = json.dumps(data).encode()
+ headers["Content-Type"] = "application/json"
+ req = urllib.request.Request(url, data=body, headers=headers, method=method)
+ with urllib.request.urlopen(req, timeout=URLOPEN_TIMEOUT_SECONDS) as response:
+ return json.loads(response.read())
+
+
+def get_recent_commit_count(pr_author, repo, token, since_days, max_count):
+ """Return the number of recent commits by the author, up to max_count."""
+ if not pr_author:
+ return 0
+
+ since = (datetime.now(timezone.utc) - timedelta(days=since_days)).strftime(
+ "%Y-%m-%dT%H:%M:%SZ"
+ )
+ results = github_request(
+ "GET",
+ "/commits",
+ token,
+ repo,
+ params={"author": pr_author, "since": since, "per_page": max_count},
+ )
+ return len(results)
+
+
+def get_pr_total_changes(pr_number, repo, token):
+ """Return total lines changed in the PR (additions + deletions)."""
+ total_changes = 0
+ page = 1
+ while True:
+ results = github_request(
+ "GET",
+ f"/pulls/{pr_number}/files",
+ token,
+ repo,
+ params={"per_page": GITHUB_PER_PAGE, "page": page},
+ )
+ if not results:
+ break
+ total_changes += sum(f["changes"] for f in results)
+ if len(results) < GITHUB_PER_PAGE:
+ break
+ page += 1
+ return total_changes
+
+
+def strip_html_comments(text):
+ """Return text with all HTML comments removed."""
+ return re.sub(r"<!--.*?-->", "", text, flags=re.DOTALL)
+
+
+def extract_ticket_id(pr_body):
+ """Return the Trac ticket ID string from the PR body, or None."""
+ match = re.search(r"\bticket-(\d+)\b", pr_body, re.IGNORECASE)
+ return match.group(1) if match else None
+
+
+def fetch_trac_ticket(ticket_id):
+ """Fetch ticket data from the Trac JSON API.
+
+ Returns a dict with ticket data on success, TICKET_NOT_FOUND if the
+ ticket does not exist (HTTP 404), or None on a non-fatal network error
+ (the caller should skip the check).
+ """
+ url = f"https://www.djangoproject.com/trac/api/tickets/{ticket_id}"
+ try:
+ with urllib.request.urlopen(url, timeout=URLOPEN_TIMEOUT_SECONDS) as response:
+ return json.loads(response.read())
+ except urllib.error.HTTPError as exc:
+ code = exc.code
+ exc.close()
+ if code == 404:
+ return TICKET_NOT_FOUND
+ logger.warning(
+ "HTTP %s fetching ticket %s -- skipping Trac check.",
+ code,
+ ticket_id,
+ )
+ return None
+ except Exception as exc:
+ logger.warning(
+ "Could not fetch ticket %s: %s -- skipping Trac check.",
+ ticket_id,
+ exc,
+ )
+ return None
+
+
+def check_trac_ticket(pr_body, total_changes, threshold=LARGE_PR_THRESHOLD):
+ """A Trac ticket must be referenced in the ticket section.
+
+ For PRs with fewer than threshold lines changed, "N/A" is also accepted
+ (e.g. for typo fixes). Larger PRs must reference a ticket.
+ """
+ # Look for the ticket reference inside the Trac ticket number section.
+ section_match = re.search(
+ r"#### Trac ticket number[^\n]*\n(.*?)(?=\r?\n####|\Z)",
+ pr_body,
+ re.DOTALL,
+ )
+ section = section_match.group(1) if section_match else pr_body
+
+ # Strip HTML comments before checking -- the template itself contains "N/A"
+ # inside a comment, which would otherwise trigger the N/A exemption below.
+ section = strip_html_comments(section)
+
+ if re.search(r"\bticket-\d+\b", section, re.IGNORECASE): # valid ticket found
+ return None
+
+ # N/A is accepted for trivial PRs that don't warrant a ticket.
+ if total_changes < threshold and re.search(
+ r"(?:^|\s)N/A\b", section, re.IGNORECASE | re.MULTILINE
+ ):
+ return None
+
+ return Message(*MISSING_TRAC_TICKET, threshold=threshold)
+
+
+def check_trac_status(ticket_id, ticket_data):
+ """The referenced Trac ticket must be Accepted, unresolved, and assigned.
+
+ ticket_data is the dict returned by fetch_trac_ticket(). Passing None
+ skips the check (non-fatal fetch error). Passing TICKET_NOT_FOUND fails
+ with a generic not-ready message.
+ """
+ if ticket_data is None:
+ return None # Non-fatal fetch error; skip.
+ if ticket_data is TICKET_NOT_FOUND:
+ return Message(
+ *INVALID_TRAC_STATUS,
+ ticket_id=ticket_id,
+ current_state="ticket not found in Trac",
+ )
+ stage = ticket_data.get("custom", {}).get("stage", "").strip()
+ resolution = (ticket_data.get("resolution") or "").strip()
+ status = ticket_data.get("status", "").strip()
+ if stage == "Accepted" and not resolution and status == "assigned":
+ return None
+ current_state = [
+ f"{stage=}" if stage != "Accepted" else "",
+ f"{resolution=}" if resolution else "",
+ f"{status=}" if status != "assigned" else "",
+ ]
+ return Message(
+ *INVALID_TRAC_STATUS,
+ ticket_id=ticket_id,
+ current_state=", ".join(s for s in current_state if s),
+ )
+
+
+def check_trac_has_patch(ticket_id, initial_data, poll_interval=1, poll_timeout=10):
+ """The referenced Trac ticket must have has_patch=1.
+
+ Uses initial_data (the dict returned by fetch_trac_ticket()) on the first
+ check, then polls via fetch_trac_ticket() every poll_interval seconds for
+ up to poll_timeout seconds. Set poll_timeout=0 for a single check with no
+ retries. Passing None for initial_data skips the check entirely.
+ """
+ deadline = time.monotonic() + poll_timeout
+ elapsed = 0
+ ticket_data = initial_data
+
+ while True:
+ logger.info(
+ "Checking has_patch flag for ticket-%s (elapsed: %ss) ...",
+ ticket_id,
+ elapsed,
+ )
+ if ticket_data is None:
+ return None # Non-fatal fetch error; skip.
+ if ticket_data is TICKET_NOT_FOUND:
+ # Ticket not found -- already reported by check_trac_status.
+ return None
+ has_patch = ticket_data.get("custom", {}).get("has_patch", "0").strip()
+ if has_patch == "1":
+ logger.info("ticket-%s has_patch flag is set.", ticket_id)
+ return None
+ remaining = deadline - time.monotonic()
+ if remaining <= 0:
+ break
+ logger.info(
+ " has_patch not yet set -- will retry in %ss.",
+ poll_interval,
+ )
+ sleep_time = min(poll_interval, remaining)
+ time.sleep(sleep_time)
+ elapsed += int(sleep_time)
+ ticket_data = fetch_trac_ticket(ticket_id)
+
+ logger.warning(
+ "ticket-%s has_patch flag was not set after %ss.",
+ ticket_id,
+ poll_timeout,
+ )
+ return Message(*MISSING_HAS_PATCH_FLAG, ticket_id=ticket_id)
+
+
+def check_pr_title_has_ticket(pr_title, ticket_id):
+ """The PR title must include the ticket number (e.g. #36991).
+
+ This enables Trac's auto-link feature, which associates the PR with the
+ ticket when the title follows the commit message format.
+ """
+ if re.search(rf"#{ticket_id}\b", pr_title):
+ return None
+ return Message(*MISSING_TICKET_IN_PR_TITLE, ticket_id=ticket_id)
+
+
+def check_branch_description(pr_body):
+ """The branch description must be present.
+
+ The description should not contain the placeholder, and should be at least
+ 5 words long.
+ """
+ placeholder = (
+ "Provide a concise overview of the issue or rationale behind the"
+ " proposed changes."
+ )
+
+ description_match = re.search(
+ r"#### Branch description[ \t]*\r?\n(.*?)(?=\r?\n####|\Z)",
+ pr_body,
+ re.DOTALL,
+ )
+ if not description_match:
+ return Message(*MISSING_DESCRIPTION)
+
+ # Strip HTML comments before evaluating content.
+ cleaned = strip_html_comments(description_match.group(1)).strip()
+
+ if not cleaned or placeholder in cleaned or len(cleaned.split()) < MIN_WORDS:
+ return Message(*MISSING_DESCRIPTION)
+
+ return None
+
+
+def check_ai_disclosure(pr_body):
+ """Exactly one AI disclosure checkbox must be selected.
+
+ If the "AI tools were used" option is checked, at least 5 words of
+ additional description must be present in that section.
+ """
+ ai_match = re.search(
+ r"#### AI Assistance Disclosure[^\n]*\n(.*?)(?=\r?\n####|\Z)",
+ pr_body,
+ re.DOTALL,
+ )
+ if not ai_match:
+ return Message(*MISSING_AI_DISCLOSURE)
+
+ section = strip_html_comments(ai_match.group(1))
+ no_ai_checked = bool(
+ re.search(r"-\s*\[x\].*?No AI tools were used", section, re.IGNORECASE)
+ )
+ ai_used_checked = bool(
+ re.search(r"-\s*\[x\].*?If AI tools were used", section, re.IGNORECASE)
+ )
+
+ # Must check exactly one option.
+ if no_ai_checked == ai_used_checked:
+ return Message(*MISSING_AI_DISCLOSURE)
+
+ if ai_used_checked:
+ # Collect non-checkbox lines for word count.
+ extra_lines = [
+ line.strip()
+ for line in section.splitlines()
+ if line.strip() and not line.strip().startswith("- [")
+ ]
+ # Ensure PR author includes at least 5 words about their AI use.
+ if len(" ".join(extra_lines).split()) < MIN_WORDS:
+ return Message(*MISSING_AI_DESCRIPTION)
+
+ return None
+
+
+def check_checklist(pr_body):
+ """The first five items in the Checklist section must be checked."""
+ checklist_match = re.search(
+ r"#### Checklist[ \t]*\r?\n(.*?)(?=\r?\n####|\Z)", pr_body, re.DOTALL
+ )
+ if not checklist_match:
+ return Message(*INCOMPLETE_CHECKLIST)
+
+ checkboxes = re.findall(r"-\s*\[(.)\]", checklist_match.group(1))
+
+ if len(checkboxes) < 5 or not all(c.lower() == "x" for c in checkboxes[:5]):
+ return Message(*INCOMPLETE_CHECKLIST)
+
+ return None
+
+
+def write_job_summary(pr_number, results, summary_file=None):
+ """Write a Markdown job summary to the given file path (if provided)."""
+ if not summary_file:
+ return
+
+ lines = [
+ f"## PR #{pr_number} Quality Check Results\n",
+ "| | Check | Result |",
+ "| --- | --- | --- |",
+ ]
+ for name, result, level in results:
+ if result is SKIPPED:
+ icon, status = "⏭️", "Skipped"
+ elif result is None:
+ icon, status = "✅", "Passed"
+ else:
+ icon, status = level
+ lines.append(f"| {icon} | {name} | {status} |")
+
+ with open(summary_file, "a") as f:
+ f.write("\n".join(lines) + "\n")
+
+
+def main(
+ repo,
+ token,
+ pr_author,
+ pr_body,
+ pr_number,
+ pr_title="",
+ pr_created_at=None,
+ autoclose=True,
+ summary_file=None,
+ gha_formatter=False,
+):
+ setup_logging(logger, gha_formatter)
+
+ created_date = (
+ datetime.fromisoformat(pr_created_at).date() if pr_created_at else None
+ )
+ if created_date is not None and created_date <= PR_TEMPLATE_DATE:
+ logger.info(
+ "PR #%s is older than PR template (%s) -- skipping all checks.",
+ pr_number,
+ PR_TEMPLATE_DATE,
+ )
+ return
+
+ commit_count = get_recent_commit_count(
+ pr_author, repo, token, since_days=365 * 3, max_count=5
+ )
+ if commit_count >= 5:
+ logger.info(
+ "PR #%s author is an established contributor -- skipping all checks.",
+ pr_number,
+ )
+ return
+
+ pr_title_result = SKIPPED
+ total_changes = get_pr_total_changes(pr_number, repo, token)
+ ticket_result = check_trac_ticket(pr_body, total_changes)
+ ticket_status_result = SKIPPED
+ ticket_has_patch_result = SKIPPED
+ ticket_id = extract_ticket_id(pr_body) if ticket_result is None else None
+ if ticket_id:
+ pr_title_result = check_pr_title_has_ticket(pr_title, ticket_id)
+ ticket_data = fetch_trac_ticket(ticket_id)
+ ticket_status_result = check_trac_status(ticket_id, ticket_data)
+ if ticket_status_result is None:
+ # Polling is disabled (poll_timeout=0): has_patch is a non-fatal
+ # warning, and contributors often update Trac after reviewing their
+ # PR, making any fixed polling window unreliable.
+ ticket_has_patch_result = check_trac_has_patch(
+ ticket_id, ticket_data, poll_timeout=0
+ )
+ else:
+ logger.info("Trac ticket is not Accepted -- skipping has_patch check.")
+ else:
+ logger.info("No Trac ticket -- skipping status and has_patch checks.")
+
+ if created_date is not None and created_date <= AI_DISCLOSURE_DATE:
+ ai_disclosure_result = SKIPPED
+ logger.info(
+ "PR #%s is older than AI Disclosure section (%s) -- skipping AI checks.",
+ pr_number,
+ AI_DISCLOSURE_DATE,
+ )
+ else:
+ ai_disclosure_result = check_ai_disclosure(pr_body)
+
+ results = [
+ ("Trac ticket referenced", ticket_result, LEVEL_ERROR),
+ ("Trac ticket status is Accepted", ticket_status_result, LEVEL_ERROR),
+ ("Trac ticket has_patch flag set", ticket_has_patch_result, LEVEL_WARNING),
+ ("PR title includes ticket number", pr_title_result, LEVEL_WARNING),
+ ("Branch description provided", check_branch_description(pr_body), LEVEL_ERROR),
+ ("AI disclosure completed", ai_disclosure_result, LEVEL_ERROR),
+ ("Checklist completed", check_checklist(pr_body), LEVEL_ERROR),
+ ]
+ write_job_summary(pr_number, results, summary_file)
+
+ failures = [
+ msg.as_details(level=level)
+ for _, msg, level in results
+ if msg is not None and msg is not SKIPPED and level == LEVEL_ERROR
+ ]
+ warning_msgs = [
+ msg.as_details(level=level)
+ for _, msg, level in results
+ if msg is not None and msg is not SKIPPED and level == LEVEL_WARNING
+ ]
+ if not failures and not warning_msgs:
+ logger.info("PR #%s passed all quality checks.", pr_number)
+ return
+
+ github_request(
+ "POST",
+ f"/issues/{pr_number}/comments",
+ token,
+ repo,
+ {"body": "\n\n".join([CHECKS_HEADER, *failures, *warning_msgs, CHECKS_FOOTER])},
+ )
+ if not failures:
+ logger.warning(
+ "PR #%s has %s warning(s), adding informational comment.",
+ pr_number,
+ len(warning_msgs),
+ )
+ return
+
+ msg = "PR #%s failed %s check(s), adding comment with details."
+ if not autoclose or commit_count > 0:
+ logger.warning(
+ msg + " Not closing the PR given %s.",
+ pr_number,
+ len(failures),
+ "warning-only mode" if not autoclose else "recent contributions",
+ )
+ else:
+ logger.error(
+ msg + " Closing the PR given lack of recent contributions.",
+ pr_number,
+ len(failures),
+ )
+ github_request("PATCH", f"/pulls/{pr_number}", token, repo, {"state": "closed"})
+ return 1
+
+
+if __name__ == "__main__":
+ sys.exit(
+ main(
+ repo=os.environ["PR_REPO"],
+ token=os.environ["GITHUB_TOKEN"],
+ pr_author=os.environ.get("PR_AUTHOR", ""),
+ pr_body=os.environ.get("PR_BODY", ""),
+ pr_number=os.environ["PR_NUMBER"],
+ pr_title=os.environ.get("PR_TITLE", ""),
+ pr_created_at=os.environ.get("PR_CREATED_AT"),
+ autoclose=os.environ.get("AUTOCLOSE", "").lower() == "true",
+ summary_file=os.environ.get("GITHUB_STEP_SUMMARY"),
+ gha_formatter=os.environ.get("GITHUB_ACTIONS"),
+ )
+ )
diff --git a/scripts/pr_quality/errors.py b/scripts/pr_quality/errors.py
new file mode 100644
index 000000000000..8b98873f6dd6
--- /dev/null
+++ b/scripts/pr_quality/errors.py
@@ -0,0 +1,160 @@
+"""Error and notification messages for PR quality checks.
+
+Message constants are plain 2-tuples of (title, body). Body strings use
+str.format() placeholders; kwargs are supplied at Message() construction time.
+
+"""
+
+LEVEL_ERROR = ("🛑", "Error")
+LEVEL_WARNING = ("⚠️", "Warning")
+
+
+class Message:
+ """A PR quality check message bound to its runtime formatting kwargs."""
+
+ def __init__(self, title, body, **kwargs):
+ self.title = title
+ self.body = body
+ self.kwargs = kwargs
+
+ def as_details(self, level=LEVEL_ERROR):
+ body = self.body.format(**self.kwargs) if self.kwargs else self.body
+ symbol, label = level
+ return (
+ f"<details>\n<summary>{symbol} {label}: {self.title}</summary>"
+ f"\n\n{body}\n\n</details>"
+ )
+
+
+CONTRIBUTING_URL = "https://docs.djangoproject.com/en/dev/internals/contributing/"
+SUBMITTING_URL = f"{CONTRIBUTING_URL}writing-code/submitting-patches/"
+TRIAGING_URL = f"{CONTRIBUTING_URL}triaging-tickets/"
+FORUM_URL = "https://forum.djangoproject.com"
+TRAC_URL = "https://code.djangoproject.com"
+
+
+CHECKS_HEADER = (
+ "Thank you for your contribution to Django! This pull request has one or more "
+ "items that need attention before it can be accepted for review."
+)
+
+CHECKS_FOOTER = (
+ "If you have questions about these requirements, please review the "
+ f"[contributing guidelines]({SUBMITTING_URL}) or ask for help on the "
+ f"[Django Forum]({FORUM_URL}/c/internals/mentorship/10)."
+)
+
+INCOMPLETE_CHECKLIST = (
+ "Incomplete Checklist",
+ "The items in the **Checklist** section must be all understood and checked "
+ "accordingly before this PR can be accepted. These items confirm that you have "
+ f"read and followed the [Django contributing guidelines]({SUBMITTING_URL}).\n\n"
+ "**What to do:**\n\n"
+ "- Review each item and change `[ ]` to `[x]` once you have completed it.",
+)
+
+INVALID_TRAC_STATUS = (
+ "Trac Ticket Not Ready for a Pull Request",
+ "The referenced ticket **ticket-{ticket_id}** is not ready for a pull request. "
+ "A ticket must be in the *Accepted* stage, *assigned* status, and have no "
+ "resolution.\n\n"
+ "**Current state:** {current_state}\n\n"
+ "**What to do:**\n\n"
+ f"1. Check the ticket at {TRAC_URL}/ticket/{{ticket_id}}.\n"
+ "2. If *Unreviewed*, wait for a community member to accept it. "
+ "A ticket cannot be accepted by its reporter.\n"
+ "3. If *Ready for Checkin*, there is already a solution for it.\n"
+ "4. If *Someday/Maybe*, discuss on the "
+ f"[Django Forum]({FORUM_URL}/c/internals/5) before proceeding.\n"
+ "5. If resolved or closed, it is not eligible for a PR.\n"
+ "6. If not *assigned*, claim it by setting yourself as the owner.\n\n"
+ f"For more information on the Django triage process see {TRIAGING_URL}.",
+)
+
+MISSING_AI_DESCRIPTION = (
+ "AI Tool Usage Not Described",
+ "You indicated that AI tools were used in preparing this PR, but you have not "
+ "provided a description of which tools you used or how you used them. At least "
+ "5 words of description are required.\n\n"
+ "**What to do:**\n\n"
+ "Add a brief description below the checked AI disclosure checkbox in your PR "
+ "description. For example:\n\n"
+ "```\n"
+ "- [x] **If AI tools were used**, I have disclosed which ones, and fully reviewed "
+ "and verified their output.\n\n"
+ "Used GitHub Copilot for initial code generation. All output was manually "
+ "reviewed, tested, and verified for correctness.\n"
+ "```\n\n"
+ "Your description should cover:\n\n"
+ "- Which AI tool(s) and versions you used (e.g. GitHub Copilot, ChatGPT, Claude)\n"
+ "- How you used them (e.g. code generation, writing tests, drafting commit "
+ "messages)\n"
+ "- How you reviewed the generated result\n\n"
+ "Please note that you are fully responsible for the correctness and quality of all "
+ "content in this PR, regardless of how it was generated.",
+)
+
+MISSING_AI_DISCLOSURE = (
+ "AI Tool Usage Not Disclosed",
+ "You must select exactly one checkbox in the AI Assistance Disclosure section of "
+ "your PR description.\n\n"
+ "**What to do:**\n\n"
+ "Change `[ ]` to `[x]` for either:\n\n"
+ '1. "**No AI tools were used** in preparing this PR."\n\n'
+ "OR\n\n"
+ '2. "**If AI tools were used**, I have disclosed which ones, and fully reviewed '
+ 'and verified their output."\n\n'
+ "**Key Requirements:**\n\n"
+ "- Select only one option\n"
+ "- Do not check both boxes\n"
+ "- Do not leave both unchecked\n"
+ "- If selecting the second option, provide details of which AI tools were used.",
+)
+
+MISSING_DESCRIPTION = (
+ "Missing PR Description",
+ "Your PR description must be substantive and meaningful. The placeholder text "
+ '"*Provide a concise overview of the issue or rationale behind the proposed '
+ 'changes.*" is not acceptable.\n\n'
+ "**What to do:**\n\n"
+ "Write a description that contains at least 5 words and addresses:\n\n"
+ "- What problem does this PR solve?\n"
+ "- Why is this change necessary?\n"
+ "- What approach did you take?\n\n"
+ "A meaningful description helps reviewers understand the intent of your change "
+ "quickly and increases the likelihood that your PR will be reviewed promptly.",
+)
+
+MISSING_HAS_PATCH_FLAG = (
+ "Incorrect Trac Ticket Flag",
+ "The referenced ticket **ticket-{ticket_id}** does not have the *Has patch* "
+ "flag set in Trac. This flag must be checked before a pull request can be "
+ "reviewed.\n\n"
+ "**What to do:**\n\n"
+ f"1. Open the ticket at {TRAC_URL}/ticket/{{ticket_id}}.\n"
+ "2. Scroll down and check the *Has patch* checkbox on the ticket.\n"
+ "3. Save the ticket.\n\n"
+ f"For more information see {TRIAGING_URL}#has-patch.",
+)
+
+MISSING_TICKET_IN_PR_TITLE = (
+ "Ticket Number Missing from PR Title",
+ "The PR title does not include the ticket number **#{ticket_id}**. Including "
+ "it allows Trac to automatically link this pull request to the ticket.\n\n"
+ "**What to do:**\n\n"
+ "Edit the PR title to follow the commit message format, for example:\n\n"
+ "`Fixed #{ticket_id} -- .`",
+)
+
+MISSING_TRAC_TICKET = (
+ "Missing Trac Ticket",
+ "This PR does not include a valid Trac ticket reference.\n\n"
+ "**What to do:**\n\n"
+ f"1. Visit {TRAC_URL} and find or file the appropriate ticket for your change.\n"
+ "2. Wait for the ticket to be triaged and accepted before submitting a PR. "
+ "Patches submitted against unreviewed tickets are unlikely to be merged.\n"
+ "3. Edit the **Trac ticket number** section of your PR description to include "
+ "the ticket in the format `ticket-NNNNN` (e.g. `ticket-36991`).\n\n"
+ "For PRs with fewer than {threshold} lines changed (additions + deletions), you "
+ "may write `N/A` in the ticket field instead (e.g. `N/A - typo fix`).",
+)
diff --git a/scripts/pr_quality/tests/__init__.py b/scripts/pr_quality/tests/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/scripts/pr_quality/tests/test_check_pr.py b/scripts/pr_quality/tests/test_check_pr.py
new file mode 100644
index 000000000000..f238ebb0c6f5
--- /dev/null
+++ b/scripts/pr_quality/tests/test_check_pr.py
@@ -0,0 +1,968 @@
+"""Tests for the PR quality checks in check_pr.py."""
+
+import json
+import logging
+import tempfile
+import unittest
+import urllib.error
+from pathlib import Path
+from unittest import mock
+
+from pr_quality import check_pr
+from pr_quality.errors import LEVEL_ERROR, LEVEL_WARNING, MISSING_TRAC_TICKET, Message
+
+# Logger used inside check_pr; tests silence it in setUp and assert on it
+# with assertLogs where needed.
+logger = logging.getLogger("pr_quality.check_pr")
+
+# Sentinel distinguishing "argument not supplied" from an explicit None.
+UNSET = object()
+# Each LEVEL_* constant is an (icon, label) pair used in job summaries.
+ERROR_ICON, ERROR_LABEL = LEVEL_ERROR
+WARNING_ICON, WARNING_LABEL = LEVEL_WARNING
+
+
+def make_pr_body(
+    ticket="ticket-36991",
+    description=(
+        "Fix regression in template rendering where nested blocks were"
+        " incorrectly evaluated."
+    ),
+    no_ai_checked=True,
+    ai_used_checked=False,
+    ai_description="",
+    checked_items=5,
+):
+    """Build a PR body matching .github/pull_request_template.md.
+
+    The defaults produce a fully valid body; individual sections can be
+    invalidated by overriding the matching argument (e.g. ticket="" or
+    checked_items=0). test_make_pr_body_matches_template pins this output
+    to the real template file.
+    """
+    # NOTE(review): the template's HTML <!-- --> comment text appears to have
+    # been stripped from the string literals in this rendering of the file --
+    # confirm the literals below against the committed source.
+    no_ai_box = "[x]" if no_ai_checked else "[ ]"
+    ai_used_box = "[x]" if ai_used_checked else "[ ]"
+    ai_extra = f"\n{ai_description}" if ai_description else ""
+
+    checklist_items = [
+        "This PR follows the [contribution guidelines]"
+        "(https://docs.djangoproject.com/en/stable/internals/contributing/"
+        "writing-code/submitting-patches/).",
+        "This PR **does not** disclose a security vulnerability"
+        " (see [vulnerability reporting]"
+        "(https://docs.djangoproject.com/en/stable/internals/security/)).",
+        "This PR targets the `main` branch."
+        " ",
+        "The commit message is written in past tense, mentions the ticket"
+        " number, and ends with a period (see [guidelines]"
+        "(https://docs.djangoproject.com/en/dev/internals/contributing/"
+        "committing-code/#committing-guidelines)).",
+        "I have not requested, and will not request, an automated AI review"
+        " for this PR."
+        " ",
+        'I have checked the "Has patch" ticket flag in the Trac system.',
+        "I have added or updated relevant tests.",
+        "I have added or updated relevant docs, including release notes if"
+        " applicable.",
+        "I have attached screenshots in both light and dark modes for any UI"
+        " changes.",
+    ]
+    # The first `checked_items` entries are ticked; the rest stay unchecked.
+    checklist_lines = "\n".join(
+        f"- [x] {item}" if i < checked_items else f"- [ ] {item}"
+        for i, item in enumerate(checklist_items)
+    )
+
+    # GitHub PR bodies are plain markdown with no leading spaces.
+    return (
+        f"#### Trac ticket number\n"
+        f"\n"
+        f'\n'
+        f"\n"
+        f"{ticket}\n"
+        f"\n"
+        f"#### Branch description\n"
+        f"{description}\n"
+        f"\n"
+        f"#### AI Assistance Disclosure (REQUIRED)\n"
+        f"\n"
+        f"- {no_ai_box} **No AI tools were used** in preparing this PR.\n"
+        f"- {ai_used_box} **If AI tools were used**, I have disclosed which"
+        f" ones, and fully reviewed and verified their output.{ai_extra}\n"
+        f"\n"
+        f"#### Checklist\n"
+        f"{checklist_lines}\n"
+    )
+
+
+# A PR body/title pair expected to pass every check.
+VALID_PR_BODY = make_pr_body()
+VALID_PR_TITLE = "Fixed #36991 -- Fix template rendering regression."
+
+
+def make_trac_json(
+    ticket_id="36991",
+    stage="Accepted",
+    status="assigned",
+    resolution="",
+    has_patch="0",
+    needs_docs="0",
+    needs_tests="0",
+    needs_better_patch="0",
+):
+    """Return a dict shaped like the Trac ticket API JSON payload.
+
+    Flag fields use the strings "0"/"1", mirroring Trac's custom fields.
+    """
+    return {
+        "id": int(ticket_id),
+        "summary": "Some summary",
+        "status": status,
+        "resolution": resolution,
+        "custom": {
+            "stage": stage,
+            "has_patch": has_patch,
+            "needs_docs": needs_docs,
+            "needs_tests": needs_tests,
+            "needs_better_patch": needs_better_patch,
+        },
+    }
+
+
+def patch_urlopen(json_responses=(), status_code=200):
+    """Return a mock.patch for urllib.request.urlopen.
+
+    For status_code == 200, successive calls yield one context manager per
+    entry in json_responses, whose read() returns the JSON-encoded bytes.
+    For any other status_code, a single HTTPError with that code is raised.
+    """
+    side_effects = []
+    if status_code == 200:
+        for data in json_responses:
+            mock_response = mock.MagicMock()
+            mock_response.read.return_value = json.dumps(data).encode()
+            # urlopen is used as a context manager, so wrap the response.
+            mock_cm = mock.MagicMock()
+            mock_cm.__enter__.return_value = mock_response
+            side_effects.append(mock_cm)
+    else:
+        # An exception instance in side_effect is raised by the mock call.
+        error = urllib.error.HTTPError(
+            url="https://example.com",
+            code=status_code,
+            msg="Error",
+            hdrs=None,
+            fp=None,
+        )
+        side_effects.append(error)
+    return mock.patch("urllib.request.urlopen", side_effect=side_effects)
+
+
+class BaseTestCase(unittest.TestCase):
+    """Shared setup plus a helper that runs check_pr.main() fully mocked."""
+
+    def setUp(self):
+        # Silence the module logger unless a test asserts on it explicitly.
+        null_handler = logging.NullHandler()
+        logger.addHandler(null_handler)
+        self.addCleanup(logger.removeHandler, null_handler)
+
+    def call_main(
+        self,
+        repo="test/repo",
+        token="test-token",
+        pr_author="trusted",
+        pr_body="",
+        pr_title="",
+        pr_created_at=None,
+        pr_number="10",
+        total_changes=check_pr.LARGE_PR_THRESHOLD,
+        commit_count=0,
+        autoclose=True,
+        trac_data=UNSET,
+    ):
+        """Run check_pr.main() with all network-touching helpers mocked.
+
+        Returns (result, mock_summary, mock_gh) so tests can assert on the
+        exit status, the job-summary call, and GitHub API requests.
+        trac_data=UNSET means "a healthy Accepted ticket with a patch".
+        """
+        if trac_data is UNSET:
+            trac_data = make_trac_json(stage="Accepted", has_patch="1")
+        with (
+            mock.patch.object(
+                check_pr, "get_pr_total_changes", return_value=total_changes
+            ),
+            mock.patch.object(
+                check_pr, "get_recent_commit_count", return_value=commit_count
+            ),
+            mock.patch.object(check_pr, "write_job_summary") as mock_summary,
+            mock.patch.object(check_pr, "github_request", mock.MagicMock()) as mock_gh,
+            mock.patch.object(check_pr, "fetch_trac_ticket", return_value=trac_data),
+        ):
+            result = check_pr.main(
+                repo=repo,
+                token=token,
+                pr_author=pr_author,
+                pr_body=pr_body,
+                pr_title=pr_title,
+                pr_number=pr_number,
+                pr_created_at=pr_created_at,
+                autoclose=autoclose,
+                gha_formatter=False,
+            )
+        return result, mock_summary, mock_gh
+
+
+class TestExtractTicketId(BaseTestCase):
+    """extract_ticket_id(): pulls the ticket number from a PR body."""
+
+    def test_valid_ticket_returns_id(self):
+        self.assertEqual(check_pr.extract_ticket_id("ticket-36991"), "36991")
+
+    def test_ticket_in_sentence_returns_id(self):
+        self.assertEqual(
+            check_pr.extract_ticket_id("See ticket-12345 for details."), "12345"
+        )
+
+    def test_case_insensitive(self):
+        self.assertEqual(check_pr.extract_ticket_id("TICKET-99"), "99")
+
+    def test_no_ticket_returns_none(self):
+        self.assertIsNone(check_pr.extract_ticket_id("No ticket here."))
+
+    def test_na_returns_none(self):
+        self.assertIsNone(check_pr.extract_ticket_id("N/A - typo fix"))
+
+    def test_ticket_placeholder_returns_none(self):
+        # "ticket-XXXXX" is the untouched template placeholder.
+        self.assertIsNone(check_pr.extract_ticket_id("ticket-XXXXX"))
+
+    def test_returns_first_ticket_id(self):
+        self.assertEqual(check_pr.extract_ticket_id("ticket-111 and ticket-222"), "111")
+
+
+class TestFetchTracTicket(BaseTestCase):
+    """fetch_trac_ticket(): dict on success, TICKET_NOT_FOUND on 404,
+    None on other HTTP/network errors."""
+
+    def test_success_returns_dict(self):
+        data = make_trac_json(stage="Accepted", has_patch="1")
+        with patch_urlopen([data]):
+            result = check_pr.fetch_trac_ticket("36991")
+        self.assertEqual(result["custom"]["stage"], "Accepted")
+        self.assertEqual(result["custom"]["has_patch"], "1")
+
+    def test_http_404_returns_ticket_not_found(self):
+        with patch_urlopen(status_code=404):
+            result = check_pr.fetch_trac_ticket("99999")
+        self.assertIs(result, check_pr.TICKET_NOT_FOUND)
+
+    def test_http_500_returns_none(self):
+        with patch_urlopen(status_code=500):
+            result = check_pr.fetch_trac_ticket("36991")
+        self.assertIsNone(result)
+
+    def test_network_error_returns_none(self):
+        with mock.patch(
+            "urllib.request.urlopen", side_effect=OSError("Connection refused")
+        ):
+            result = check_pr.fetch_trac_ticket("36991")
+        self.assertIsNone(result)
+
+    def test_uses_trac_api_url(self):
+        data = make_trac_json()
+        with patch_urlopen([data]) as mock_urlopen:
+            check_pr.fetch_trac_ticket("36991")
+        # NOTE(review): assumes urlopen is called with a plain URL string as
+        # the first positional argument -- confirm against fetch_trac_ticket.
+        url_called = mock_urlopen.call_args.args[0]
+        self.assertIn("djangoproject.com/trac/api/tickets/36991", url_called)
+
+
+class TestCheckTracTicket(BaseTestCase):
+    """check_trac_ticket(): None when the body has a valid ticket reference
+    (or N/A on a small PR), a failure message otherwise."""
+
+    def test_valid_ticket_small_pr_passes(self):
+        self.assertIsNone(
+            check_pr.check_trac_ticket(VALID_PR_BODY, check_pr.LARGE_PR_THRESHOLD - 1)
+        )
+
+    def test_valid_ticket_large_pr_passes(self):
+        self.assertIsNone(
+            check_pr.check_trac_ticket(VALID_PR_BODY, check_pr.LARGE_PR_THRESHOLD)
+        )
+
+    def test_placeholder_fails(self):
+        body = make_pr_body(ticket="ticket-XXXXX")
+        self.assertIsNotNone(
+            check_pr.check_trac_ticket(body, check_pr.LARGE_PR_THRESHOLD - 1)
+        )
+
+    def test_missing_small_pr_fails(self):
+        body = make_pr_body(ticket="")
+        self.assertIsNotNone(
+            check_pr.check_trac_ticket(body, check_pr.LARGE_PR_THRESHOLD - 1)
+        )
+
+    def test_missing_large_pr_fails(self):
+        body = make_pr_body(ticket="")
+        self.assertIsNotNone(
+            check_pr.check_trac_ticket(body, check_pr.LARGE_PR_THRESHOLD)
+        )
+
+    def test_na_small_pr_passes(self):
+        # "N/A" is allowed only below the large-PR threshold.
+        body = make_pr_body(ticket="N/A - typo")
+        self.assertIsNone(
+            check_pr.check_trac_ticket(body, check_pr.LARGE_PR_THRESHOLD - 1)
+        )
+
+    def test_na_bare_small_pr_passes(self):
+        body = make_pr_body(ticket="N/A")
+        self.assertIsNone(
+            check_pr.check_trac_ticket(body, check_pr.LARGE_PR_THRESHOLD - 1)
+        )
+
+    def test_na_large_pr_fails(self):
+        body = make_pr_body(ticket="N/A - typo")
+        self.assertIsNotNone(
+            check_pr.check_trac_ticket(body, check_pr.LARGE_PR_THRESHOLD)
+        )
+
+    def test_na_at_threshold_fails(self):
+        # The threshold itself counts as "large": N/A is rejected.
+        body = make_pr_body(ticket="N/A")
+        self.assertIsNotNone(
+            check_pr.check_trac_ticket(body, check_pr.LARGE_PR_THRESHOLD)
+        )
+
+    def test_na_just_below_threshold_passes(self):
+        body = make_pr_body(ticket="N/A")
+        self.assertIsNone(
+            check_pr.check_trac_ticket(body, check_pr.LARGE_PR_THRESHOLD - 1)
+        )
+
+    def test_ticket_na_format_fails(self):
+        body = make_pr_body(ticket="ticket-N/A")
+        self.assertIsNotNone(
+            check_pr.check_trac_ticket(body, check_pr.LARGE_PR_THRESHOLD - 1)
+        )
+
+    def test_various_ticket_lengths_pass(self):
+        for ticket in ["ticket-1", "ticket-123", "ticket-999999"]:
+            with self.subTest(ticket=ticket):
+                body = make_pr_body(ticket=ticket)
+                self.assertIsNone(
+                    check_pr.check_trac_ticket(body, check_pr.LARGE_PR_THRESHOLD)
+                )
+
+
+class TestCheckTracStatus(BaseTestCase):
+    """check_trac_status(): the ticket must be Accepted, assigned, and
+    unresolved; None trac data skips the check."""
+
+    def test_accepted_assigned_unresolved_passes(self):
+        # The Trac API returns null (None) for open tickets.
+        data = make_trac_json(stage="Accepted", status="assigned", resolution=None)
+        self.assertIsNone(check_pr.check_trac_status("36991", data))
+
+    def test_non_accepted_stage_fails(self):
+        for stage in ["Unreviewed", "Ready for Checkin", "Someday/Maybe"]:
+            with self.subTest(stage=stage):
+                data = make_trac_json(stage=stage)
+                self.assertIsNotNone(check_pr.check_trac_status("36991", data))
+
+    def test_resolved_ticket_fails(self):
+        for resolution in [
+            "fixed",
+            "wontfix",
+            "duplicate",
+            "invalid",
+            "worksforme",
+            "needsinfo",
+            "needsnewfeatureprocess",
+        ]:
+            with self.subTest(resolution=resolution):
+                data = make_trac_json(
+                    stage="Accepted", status="assigned", resolution=resolution
+                )
+                self.assertIsNotNone(check_pr.check_trac_status("36991", data))
+
+    def test_unassigned_ticket_fails(self):
+        for status in ["new", "closed", ""]:
+            with self.subTest(status=status):
+                data = make_trac_json(stage="Accepted", status=status, resolution="")
+                self.assertIsNotNone(check_pr.check_trac_status("36991", data))
+
+    def test_failure_message_contains_ticket_id(self):
+        data = make_trac_json(stage="Unreviewed")
+        result = check_pr.check_trac_status("12345", data)
+        self.assertIsInstance(result, Message)
+        self.assertEqual(result.kwargs["ticket_id"], "12345")
+
+    def test_failure_message_contains_current_state(self):
+        data = make_trac_json(stage="Unreviewed", status="new", resolution="duplicate")
+        result = check_pr.check_trac_status("36991", data)
+        self.assertIsInstance(result, Message)
+        self.assertIn("stage='Unreviewed'", result.kwargs["current_state"])
+        self.assertIn("resolution='duplicate'", result.kwargs["current_state"])
+        self.assertIn("status='new'", result.kwargs["current_state"])
+
+    def test_ticket_not_found_current_state(self):
+        result = check_pr.check_trac_status("99999", check_pr.TICKET_NOT_FOUND)
+        self.assertIsInstance(result, Message)
+        self.assertIn("not found", result.kwargs["current_state"])
+
+    def test_none_data_skips_check(self):
+        # None means the fetch failed; the check is skipped rather than failed.
+        self.assertIsNone(check_pr.check_trac_status("36991", None))
+
+
+class TestCheckTracHasPatch(BaseTestCase):
+    """check_trac_has_patch(): passes when has_patch is set, polls Trac
+    until poll_timeout otherwise; skips on missing/errored trac data."""
+
+    def test_already_set_passes(self):
+        data = make_trac_json(has_patch="1")
+        self.assertIsNone(check_pr.check_trac_has_patch("36991", data))
+
+    def test_not_set_times_out_fails(self):
+        data = make_trac_json(has_patch="0")
+        # poll_timeout=0 makes the poll loop give up immediately.
+        result = check_pr.check_trac_has_patch(
+            "36991", data, poll_timeout=0, poll_interval=0
+        )
+        self.assertIsNotNone(result)
+
+    def test_failure_message_contains_ticket_id(self):
+        data = make_trac_json(ticket_id="36991", has_patch="0")
+        result = check_pr.check_trac_has_patch(
+            "36991", data, poll_timeout=0, poll_interval=0
+        )
+        self.assertIsInstance(result, Message)
+        self.assertEqual(result.kwargs["ticket_id"], "36991")
+
+    def test_set_on_second_poll_passes(self):
+        initial = make_trac_json(has_patch="0")
+        second = make_trac_json(has_patch="1")
+        with (
+            mock.patch.object(check_pr, "fetch_trac_ticket", return_value=second),
+            mock.patch("time.sleep"),
+        ):
+            self.assertIsNone(
+                check_pr.check_trac_has_patch(
+                    "36991", initial, poll_timeout=60, poll_interval=0
+                )
+            )
+
+    def test_none_data_skips_check(self):
+        self.assertIsNone(check_pr.check_trac_has_patch("36991", None))
+
+    def test_ticket_not_found_skips_check(self):
+        # Ticket not found -- already reported by check_trac_status.
+        self.assertIsNone(
+            check_pr.check_trac_has_patch("36991", check_pr.TICKET_NOT_FOUND)
+        )
+
+    def test_fetch_error_during_poll_skips_check(self):
+        initial = make_trac_json(has_patch="0")
+        with (
+            mock.patch.object(check_pr, "fetch_trac_ticket", return_value=None),
+            mock.patch("time.sleep"),
+        ):
+            self.assertIsNone(
+                check_pr.check_trac_has_patch(
+                    "36991", initial, poll_timeout=60, poll_interval=0
+                )
+            )
+
+
+class TestCheckPrTitleHasTicket(BaseTestCase):
+    """check_pr_title_has_ticket(): the title must mention #<ticket_id>."""
+
+    def test_ticket_in_title_passes(self):
+        self.assertIsNone(
+            check_pr.check_pr_title_has_ticket("Fixed #36991 -- Fix bug.", "36991")
+        )
+
+    def test_refs_format_passes(self):
+        self.assertIsNone(
+            check_pr.check_pr_title_has_ticket("Refs #36991 -- Add tests.", "36991")
+        )
+
+    def test_ticket_missing_from_title_fails(self):
+        self.assertIsNotNone(
+            check_pr.check_pr_title_has_ticket("Fix template rendering bug.", "36991")
+        )
+
+    def test_empty_title_fails(self):
+        self.assertIsNotNone(check_pr.check_pr_title_has_ticket("", "36991"))
+
+    def test_wrong_ticket_number_fails(self):
+        self.assertIsNotNone(
+            check_pr.check_pr_title_has_ticket("Fixed #11111 -- Fix bug.", "36991")
+        )
+
+    def test_failure_message_contains_ticket_id(self):
+        result = check_pr.check_pr_title_has_ticket("Fix something.", "36991")
+        self.assertIsInstance(result, Message)
+        self.assertEqual(result.kwargs["ticket_id"], "36991")
+
+
+class TestCheckBranchDescription(BaseTestCase):
+    """check_branch_description(): needs a real (>= 5 word) description that
+    is not the template placeholder; HTML comments don't count as words."""
+
+    def test_valid_passes(self):
+        self.assertIsNone(check_pr.check_branch_description(VALID_PR_BODY))
+
+    def test_placeholder_fails(self):
+        body = make_pr_body(
+            description=(
+                "Provide a concise overview of the issue or rationale behind"
+                " the proposed changes."
+            )
+        )
+        self.assertIsNotNone(check_pr.check_branch_description(body))
+
+    def test_placeholder_with_appended_text_fails(self):
+        body = make_pr_body(
+            description=(
+                "Provide a concise overview of the issue or rationale behind"
+                " the proposed changes. Yes."
+            )
+        )
+        self.assertIsNotNone(check_pr.check_branch_description(body))
+
+    def test_empty_fails(self):
+        body = make_pr_body(description="")
+        self.assertIsNotNone(check_pr.check_branch_description(body))
+
+    def test_too_short_fails(self):
+        body = make_pr_body(description="Fix bug.")
+        self.assertIsNotNone(check_pr.check_branch_description(body))
+
+    def test_exactly_five_words_passes(self):
+        body = make_pr_body(description="Fix the template rendering bug.")
+        self.assertIsNone(check_pr.check_branch_description(body))
+
+    def test_html_comment_only_fails(self):
+        # NOTE(review): the description literal here looks like it originally
+        # contained an HTML <!-- --> comment that was stripped from this
+        # rendering -- confirm against the committed source.
+        body = make_pr_body(
+            description=""
+        )
+        self.assertIsNotNone(check_pr.check_branch_description(body))
+
+    def test_html_comment_words_not_counted(self):
+        # NOTE(review): same stripped-comment caveat as above.
+        body = make_pr_body(description=" fix")
+        self.assertIsNotNone(check_pr.check_branch_description(body))
+
+    def test_missing_section_header_fails(self):
+        body = VALID_PR_BODY.replace("#### Branch description\n", "")
+        self.assertIsNotNone(check_pr.check_branch_description(body))
+
+    def test_multiline_passes(self):
+        body = make_pr_body(
+            description=(
+                "This PR fixes a bug in the ORM.\n"
+                "The issue affects queries with multiple joins."
+            )
+        )
+        self.assertIsNone(check_pr.check_branch_description(body))
+
+    def test_crlf_line_endings_pass(self):
+        body = make_pr_body().replace("\n", "\r\n")
+        self.assertIsNone(check_pr.check_branch_description(body))
+
+
+class TestCheckAIDisclosure(BaseTestCase):
+    """check_ai_disclosure(): exactly one disclosure box must be ticked;
+    ticking "AI was used" additionally requires a >= 5 word description."""
+
+    def test_no_ai_checked_passes(self):
+        self.assertIsNone(check_pr.check_ai_disclosure(VALID_PR_BODY))
+
+    def test_ai_used_with_description_passes(self):
+        body = make_pr_body(
+            no_ai_checked=False,
+            ai_used_checked=True,
+            ai_description=(
+                "Used GitHub Copilot for autocomplete, all output manually reviewed."
+            ),
+        )
+        self.assertIsNone(check_pr.check_ai_disclosure(body))
+
+    def test_neither_option_checked_fails(self):
+        body = make_pr_body(no_ai_checked=False, ai_used_checked=False)
+        self.assertIsNotNone(check_pr.check_ai_disclosure(body))
+
+    def test_both_options_checked_fails(self):
+        body = make_pr_body(no_ai_checked=True, ai_used_checked=True)
+        self.assertIsNotNone(check_pr.check_ai_disclosure(body))
+
+    def test_ai_used_no_description_fails(self):
+        body = make_pr_body(
+            no_ai_checked=False, ai_used_checked=True, ai_description=""
+        )
+        self.assertIsNotNone(check_pr.check_ai_disclosure(body))
+
+    def test_ai_used_short_description_fails(self):
+        body = make_pr_body(
+            no_ai_checked=False, ai_used_checked=True, ai_description="Used Copilot."
+        )
+        self.assertIsNotNone(check_pr.check_ai_disclosure(body))
+
+    def test_ai_used_exactly_five_word_description_passes(self):
+        body = make_pr_body(
+            no_ai_checked=False,
+            ai_used_checked=True,
+            ai_description="Used Claude for code review.",
+        )
+        self.assertIsNone(check_pr.check_ai_disclosure(body))
+
+    def test_missing_section_fails(self):
+        body = VALID_PR_BODY.replace("#### AI Assistance Disclosure (REQUIRED)\n", "")
+        self.assertIsNotNone(check_pr.check_ai_disclosure(body))
+
+    def test_uppercase_x_in_checkbox_passes(self):
+        body = VALID_PR_BODY.replace(
+            "- [x] **No AI tools were used**", "- [X] **No AI tools were used**"
+        )
+        self.assertIsNone(check_pr.check_ai_disclosure(body))
+
+    def test_commented_out_checkbox_not_counted(self):
+        # NOTE(review): the replacement string appears to have originally
+        # wrapped the checkbox in an HTML <!-- --> comment that was stripped
+        # from this rendering -- confirm against the committed source.
+        body = make_pr_body(no_ai_checked=False, ai_used_checked=False)
+        body = body.replace(
+            "- [ ] **No AI tools were used**",
+            "\n- [ ] **No AI tools were used**",
+        )
+        self.assertIsNotNone(check_pr.check_ai_disclosure(body))
+
+
+class TestStripHtmlComments(BaseTestCase):
+    """strip_html_comments(): removes <!-- --> spans from markdown text.
+
+    NOTE(review): the HTML comment markers in the input literals below
+    appear to have been stripped from this rendering of the file (inputs
+    and expected outputs read identically) -- confirm the literals against
+    the committed source before relying on them.
+    """
+
+    def test_removes_single_line_comment(self):
+        self.assertEqual(
+            check_pr.strip_html_comments("hello world"), "hello world"
+        )
+
+    def test_removes_multiline_comment(self):
+        self.assertEqual(
+            check_pr.strip_html_comments("before\n\nafter"),
+            "before\n\nafter",
+        )
+
+    def test_no_comment_unchanged(self):
+        self.assertEqual(
+            check_pr.strip_html_comments("no comments here"), "no comments here"
+        )
+
+    def test_empty_string(self):
+        self.assertEqual(check_pr.strip_html_comments(""), "")
+
+    def test_removes_multiple_comments(self):
+        self.assertEqual(
+            check_pr.strip_html_comments("a b c"),
+            "a b c",
+        )
+
+
+class TestCheckChecklist(BaseTestCase):
+    """check_checklist(): the first five (required) checklist items must
+    all be ticked; the remaining items are optional."""
+
+    def test_first_five_checked_passes(self):
+        self.assertIsNone(check_pr.check_checklist(VALID_PR_BODY))
+
+    def test_all_nine_checked_passes(self):
+        body = make_pr_body(checked_items=9)
+        self.assertIsNone(check_pr.check_checklist(body))
+
+    def test_none_checked_fails(self):
+        body = make_pr_body(checked_items=0)
+        self.assertIsNotNone(check_pr.check_checklist(body))
+
+    def test_four_of_five_checked_fails(self):
+        body = make_pr_body(checked_items=4)
+        self.assertIsNotNone(check_pr.check_checklist(body))
+
+    def test_three_of_five_checked_fails(self):
+        body = make_pr_body(checked_items=3)
+        self.assertIsNotNone(check_pr.check_checklist(body))
+
+    def test_missing_section_fails(self):
+        body = VALID_PR_BODY.replace("#### Checklist\n", "")
+        self.assertIsNotNone(check_pr.check_checklist(body))
+
+    def test_uppercase_x_passes(self):
+        body = VALID_PR_BODY.replace("- [x]", "- [X]")
+        self.assertIsNone(check_pr.check_checklist(body))
+
+    def test_crlf_line_endings_pass(self):
+        body = make_pr_body().replace("\n", "\r\n")
+        self.assertIsNone(check_pr.check_checklist(body))
+
+
+class TestIntegration(BaseTestCase):
+    """End-to-end scenarios: individual checks chained together, and
+    main() via call_main() including comment/close side effects."""
+
+    def test_fully_valid_pr_passes_all_checks(self):
+        data = make_trac_json(stage="Accepted", has_patch="1")
+        for label, body in [
+            ("LF line endings", VALID_PR_BODY),
+            ("CRLF line endings", VALID_PR_BODY.replace("\n", "\r\n")),
+        ]:
+            with self.subTest(label=label):
+                ticket_id = check_pr.extract_ticket_id(body)
+                results = [
+                    check_pr.check_trac_ticket(body, check_pr.LARGE_PR_THRESHOLD),
+                    check_pr.check_trac_status(ticket_id, data),
+                    check_pr.check_trac_has_patch(ticket_id, data),
+                    check_pr.check_pr_title_has_ticket(VALID_PR_TITLE, ticket_id),
+                    check_pr.check_branch_description(body),
+                    check_pr.check_ai_disclosure(body),
+                    check_pr.check_checklist(body),
+                ]
+                failures = [r for r in results if r is not None]
+                self.assertEqual(
+                    failures,
+                    [],
+                    f"Expected no failures ({label}), got: {failures!r}",
+                )
+
+    def test_blank_body_fails_non_status_checks(self):
+        results = [
+            check_pr.check_trac_ticket("", check_pr.LARGE_PR_THRESHOLD),
+            check_pr.check_branch_description(""),
+            check_pr.check_ai_disclosure(""),
+            check_pr.check_checklist(""),
+        ]
+        for i, result in enumerate(results, 1):
+            self.assertIsNotNone(result, f"Check {i} should have failed on empty body")
+
+    def test_make_pr_body_matches_template(self):
+        # Keeps the make_pr_body() fixture in lockstep with the real
+        # .github/pull_request_template.md file.
+        template_path = (
+            Path(__file__).parents[3] / ".github" / "pull_request_template.md"
+        )
+        with open(template_path) as f:
+            raw_template = f.read()
+        blank_body = make_pr_body(
+            ticket="ticket-XXXXX",
+            description=(
+                "Provide a concise overview of the issue or rationale behind"
+                " the proposed changes."
+            ),
+            no_ai_checked=False,
+            ai_used_checked=False,
+            checked_items=0,
+        )
+        self.assertEqual(blank_body, raw_template)
+
+    def test_unedited_template_fails_all_checks(self):
+        template_path = (
+            Path(__file__).parents[3] / ".github" / "pull_request_template.md"
+        )
+        with open(template_path) as f:
+            raw_template = f.read()
+        results = [
+            check_pr.check_trac_ticket(raw_template, check_pr.LARGE_PR_THRESHOLD),
+            check_pr.check_branch_description(raw_template),
+            check_pr.check_ai_disclosure(raw_template),
+            check_pr.check_checklist(raw_template),
+        ]
+        for i, result in enumerate(results, 1):
+            self.assertIsNotNone(
+                result, f"Check {i} should have failed on raw template"
+            )
+
+    def test_pr_before_template_date_skips_all_checks(self):
+        # PRs created before the template existed are exempt entirely.
+        with self.assertLogs(logger, level="INFO") as logs:
+            result, _, mock_gh = self.call_main(
+                pr_body=make_pr_body(ticket="", checked_items=0),
+                pr_created_at="2024-01-01T00:00:00Z",
+            )
+        self.assertIsNone(result)
+        mock_gh.assert_not_called()
+        self.assertIn("older than PR template", "\n".join(logs.output))
+
+    def test_pr_before_ai_disclosure_date_skips_ai_check(self):
+        body = make_pr_body(no_ai_checked=False, ai_used_checked=False)
+        with self.assertLogs(logger, level="INFO") as logs:
+            _, mock_summary, _ = self.call_main(
+                pr_body=body,
+                pr_title=VALID_PR_TITLE,
+                pr_created_at="2025-01-01T00:00:00Z",
+            )
+        _, results, _ = mock_summary.call_args.args
+        result_map = {name: result for name, result, _ in results}
+        self.assertIs(result_map["AI disclosure completed"], check_pr.SKIPPED)
+        self.assertIn("older than AI Disclosure", "\n".join(logs.output))
+
+    def test_no_ticket_skips_trac_status_and_has_patch(self):
+        with self.assertLogs(logger, level="INFO") as logs:
+            self.call_main(pr_body="")
+        self.assertIn(
+            "No Trac ticket -- skipping status and has_patch checks.",
+            "\n".join(logs.output),
+        )
+
+    def test_no_ticket_results_include_skipped_sentinels(self):
+        body = make_pr_body(ticket="")
+        _, mock_summary, _ = self.call_main(pr_body=body)
+        _, results, _ = mock_summary.call_args.args
+        result_map = {name: result for name, result, _ in results}
+        self.assertIs(result_map["Trac ticket status is Accepted"], check_pr.SKIPPED)
+        self.assertIs(result_map["Trac ticket has_patch flag set"], check_pr.SKIPPED)
+        self.assertIs(result_map["PR title includes ticket number"], check_pr.SKIPPED)
+
+    def test_non_accepted_ticket_skips_has_patch(self):
+        data = make_trac_json(stage="Unreviewed")
+        _, mock_summary, _ = self.call_main(
+            pr_body=VALID_PR_BODY, pr_title=VALID_PR_TITLE, trac_data=data
+        )
+        _, results, _ = mock_summary.call_args.args
+        result_map = {name: result for name, result, _ in results}
+        self.assertIsNotNone(result_map["Trac ticket status is Accepted"])
+        self.assertIs(result_map["Trac ticket has_patch flag set"], check_pr.SKIPPED)
+
+    def test_established_author_skips_all_checks(self):
+        # commit_count >= some trust threshold exempts the author.
+        body = make_pr_body(ticket="", checked_items=0)
+        result, _, mock_gh = self.call_main(pr_body=body, commit_count=5)
+        self.assertIsNone(result)
+        mock_gh.assert_not_called()
+
+    def test_fully_valid_pr_no_comment_posted(self):
+        result, _, mock_gh = self.call_main(
+            pr_body=VALID_PR_BODY, pr_title=VALID_PR_TITLE
+        )
+        self.assertIsNone(result)
+        mock_gh.assert_not_called()
+
+    def test_trusted_author_failures_no_close(self):
+        # A single recent commit: comment is posted but the PR stays open.
+        body = make_pr_body(ticket="", checked_items=0)
+        result, _, mock_gh = self.call_main(pr_body=body, commit_count=1)
+        self.assertEqual(result, 1)
+        mock_gh.assert_called_once_with(
+            "POST", "/issues/10/comments", "test-token", "test/repo", mock.ANY
+        )
+
+    def test_untrusted_author_failures_posts_comment_and_closes(self):
+        body = make_pr_body(ticket="", checked_items=0)
+        result, _, mock_gh = self.call_main(pr_body=body)
+        self.assertEqual(result, 1)
+        self.assertEqual(
+            mock_gh.mock_calls,
+            [
+                mock.call(
+                    "POST", "/issues/10/comments", "test-token", "test/repo", mock.ANY
+                ),
+                mock.call(
+                    "PATCH", "/pulls/10", "test-token", "test/repo", {"state": "closed"}
+                ),
+            ],
+        )
+
+    def test_missing_pr_author_treated_as_untrusted(self):
+        body = make_pr_body(ticket="", checked_items=0)
+        result, _, mock_gh = self.call_main(pr_body=body, pr_author="")
+        self.assertEqual(result, 1)
+        self.assertEqual(
+            mock_gh.mock_calls,
+            [
+                mock.call(
+                    "POST", "/issues/10/comments", "test-token", "test/repo", mock.ANY
+                ),
+                mock.call(
+                    "PATCH", "/pulls/10", "test-token", "test/repo", {"state": "closed"}
+                ),
+            ],
+        )
+
+    def test_autoclose_false_posts_comment_does_not_close(self):
+        body = make_pr_body(ticket="", checked_items=0)
+        result, _, mock_gh = self.call_main(pr_body=body, autoclose=False)
+        self.assertEqual(result, 1)
+        mock_gh.assert_called_once_with(
+            "POST", "/issues/10/comments", "test-token", "test/repo", mock.ANY
+        )
+
+    def test_warnings_only_posts_comment_does_not_close(self):
+        trac_data = make_trac_json(stage="Accepted", has_patch="0")
+        # pr_title="" triggers title warning.
+        # has_patch="0" triggers has_patch warning.
+        result, _, mock_gh = self.call_main(
+            pr_body=VALID_PR_BODY, pr_title="", trac_data=trac_data
+        )
+        self.assertIsNone(result)
+        mock_gh.assert_called_once_with(
+            "POST", "/issues/10/comments", "test-token", "test/repo", mock.ANY
+        )
+
+    def test_warnings_alongside_failures_still_closes(self):
+        trac_data = make_trac_json(stage="Accepted", has_patch="0")
+        # Empty body: fatal ticket failure + incorrect title warning.
+        body = make_pr_body(ticket="", checked_items=0)
+        result, _, mock_gh = self.call_main(pr_body=body, trac_data=trac_data)
+        self.assertEqual(result, 1)
+        self.assertEqual(
+            mock_gh.mock_calls,
+            [
+                mock.call(
+                    "POST", "/issues/10/comments", "test-token", "test/repo", mock.ANY
+                ),
+                mock.call(
+                    "PATCH", "/pulls/10", "test-token", "test/repo", {"state": "closed"}
+                ),
+            ],
+        )
+
+
+class TestWriteJobSummary(BaseTestCase):
+    """write_job_summary(): renders a markdown table of check results into
+    the GitHub Actions step-summary file (appending, not truncating)."""
+
+    def test_no_summary_file_does_nothing(self):
+        # No path given (and no GITHUB_STEP_SUMMARY): must not raise.
+        check_pr.write_job_summary(
+            "99",
+            [
+                ("Some check", None, LEVEL_ERROR),
+                ("Other check", "failure", LEVEL_ERROR),
+            ],
+        )
+
+    def test_all_passed(self):
+        with tempfile.TemporaryDirectory() as tmpdir:
+            summary = Path(tmpdir) / "summary.md"
+            # None result means the check passed.
+            results = [
+                ("Trac ticket referenced", None, LEVEL_ERROR),
+                ("Branch description provided", None, LEVEL_ERROR),
+            ]
+            check_pr.write_job_summary("7", results, str(summary))
+            content = summary.read_text()
+            self.assertIn("## PR #7 Quality Check Results", content)
+            self.assertIn("✅", content)
+            self.assertNotIn(ERROR_ICON, content)
+            self.assertIn("Trac ticket referenced", content)
+            self.assertIn("Branch description provided", content)
+
+    def test_with_failures(self):
+        with tempfile.TemporaryDirectory() as tmpdir:
+            summary = Path(tmpdir) / "summary.md"
+            results = [
+                ("Trac ticket referenced", None, LEVEL_ERROR),
+                ("Branch description provided", "Missing description", LEVEL_ERROR),
+                ("Checklist completed", "Incomplete checklist", LEVEL_ERROR),
+            ]
+            check_pr.write_job_summary("12", results, str(summary))
+            content = summary.read_text()
+            self.assertIn("## PR #12 Quality Check Results", content)
+            self.assertEqual(content.count("✅"), 1)
+            self.assertEqual(content.count(ERROR_ICON), 2)
+            self.assertIn("Passed", content)
+            self.assertIn(ERROR_LABEL, content)
+
+    def test_with_warning(self):
+        with tempfile.TemporaryDirectory() as tmpdir:
+            summary = Path(tmpdir) / "summary.md"
+            results = [
+                ("Trac ticket referenced", None, LEVEL_ERROR),
+                ("Trac ticket has_patch flag set", "No patch", LEVEL_WARNING),
+            ]
+            check_pr.write_job_summary("8", results, str(summary))
+            content = summary.read_text()
+            self.assertIn("✅", content)
+            self.assertIn(WARNING_ICON, content)
+            self.assertIn(WARNING_LABEL, content)
+            self.assertNotIn(ERROR_ICON, content)
+
+    def test_appends_to_existing_file(self):
+        with tempfile.TemporaryDirectory() as tmpdir:
+            summary = Path(tmpdir) / "summary.md"
+            summary.write_text("## Previous step\n\nSome content.\n")
+            check_pr.write_job_summary(
+                "5", [("Checklist completed", None, LEVEL_ERROR)], str(summary)
+            )
+            content = summary.read_text()
+            self.assertIn("## Previous step", content)
+            self.assertIn("## PR #5 Quality Check Results", content)
+
+    def test_skipped_checks(self):
+        with tempfile.TemporaryDirectory() as tmpdir:
+            summary = Path(tmpdir) / "summary.md"
+            results = [
+                (
+                    "Trac ticket referenced",
+                    Message(*MISSING_TRAC_TICKET, threshold=80),
+                    LEVEL_ERROR,
+                ),
+                ("Trac ticket status is Accepted", check_pr.SKIPPED, LEVEL_ERROR),
+                ("Trac ticket has_patch flag set", check_pr.SKIPPED, LEVEL_WARNING),
+            ]
+            check_pr.write_job_summary("3", results, str(summary))
+            content = summary.read_text()
+            self.assertEqual(content.count("⏭️"), 2)
+            self.assertEqual(content.count("Skipped"), 2)
+            self.assertIn(ERROR_ICON, content)
+
+
+class TestGetRecentCommitCount(BaseTestCase):
+    """get_recent_commit_count(): counts an author's recent commits via the
+    GitHub commits API (used to decide whether an author is trusted)."""
+
+    def _make_github_request_mock(self, commits):
+        # github_request returns the decoded JSON list of commit objects.
+        return mock.patch.object(check_pr, "github_request", return_value=commits)
+
+    def test_returns_number_of_commits(self):
+        commits = [{"sha": "abc"}, {"sha": "def"}, {"sha": "ghi"}]
+        with self._make_github_request_mock(commits):
+            self.assertEqual(
+                check_pr.get_recent_commit_count(
+                    "someuser", "django/django", "token", since_days=365, max_count=5
+                ),
+                3,
+            )
+
+    def test_no_commits_returns_zero(self):
+        with self._make_github_request_mock([]):
+            self.assertEqual(
+                check_pr.get_recent_commit_count(
+                    "newuser", "django/django", "token", since_days=365, max_count=5
+                ),
+                0,
+            )
+
+    def test_api_path_includes_author_since_and_max_count(self):
+        with mock.patch.object(check_pr, "github_request", return_value=[]) as mock_gh:
+            check_pr.get_recent_commit_count(
+                "someuser", "django/django", "token", since_days=365, max_count=5
+            )
+        params = mock_gh.call_args.kwargs["params"]
+        self.assertEqual(params["author"], "someuser")
+        self.assertIn("since", params)
+        self.assertEqual(params["per_page"], 5)
diff --git a/zizmor.yml b/zizmor.yml
index 19493ba151db..40cc34254e9b 100644
--- a/zizmor.yml
+++ b/zizmor.yml
@@ -6,6 +6,7 @@ rules:
+ - check_pr_quality.yml
- coverage_comment.yml
- labels.yml
- new_contributor_pr.yml
unpinned-uses:
config:
policies: