diff --git a/.github/actions/run_tests/pr_comment.py b/.github/actions/run_tests/pr_comment.py new file mode 100644 index 000000000000..871e7f5522b6 --- /dev/null +++ b/.github/actions/run_tests/pr_comment.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python3 +""" +Script to create and update PR comments for test runs. +""" +import os +import sys +from github import Github, Auth as GithubAuth + +def get_pr_number(): + """Extract PR number from environment variable.""" + pr_number = os.environ.get("PR_NUMBER") + if not pr_number: + raise ValueError("PR_NUMBER environment variable is not set") + + # Remove pull/ prefix if present + if pr_number.startswith("pull/"): + pr_number = pr_number.replace("pull/", "") + + return int(pr_number) + +def get_workflow_run_url(): + """Get workflow run URL for identification.""" + github_server = os.environ.get("GITHUB_SERVER_URL") + if not github_server: + raise ValueError("GITHUB_SERVER_URL environment variable is not set") + + github_repo = os.environ.get("GITHUB_REPOSITORY") + if not github_repo: + raise ValueError("GITHUB_REPOSITORY environment variable is not set") + + run_id = os.environ.get("GITHUB_RUN_ID") + if not run_id: + raise ValueError("GITHUB_RUN_ID environment variable is not set") + + return f"{github_server}/{github_repo}/actions/runs/{run_id}" + +def create_or_update_comment(pr_number, message, workflow_run_url): + """Create or update PR comment with test run information.""" + github_token = os.environ.get("GITHUB_TOKEN") + if not github_token: + raise ValueError("GITHUB_TOKEN environment variable is not set") + + github_repo = os.environ.get("GITHUB_REPOSITORY") + if not github_repo: + raise ValueError("GITHUB_REPOSITORY environment variable is not set") + + gh = Github(auth=GithubAuth.Token(github_token)) + repo = gh.get_repo(github_repo) + pr = repo.get_pull(pr_number) + + # Find existing comment by workflow run URL + comment = None + for c in pr.get_issue_comments(): + if workflow_run_url in c.body: + comment = c + 
break + + # Add workflow run link to message + full_body = f"{message}\n\n[View workflow run]({workflow_run_url})" + + if comment: + print(f"::notice::Updating existing comment id={comment.id}") + comment.edit(full_body) + else: + print(f"::notice::Creating new comment") + pr.create_issue_comment(full_body) + +def format_start_message(build_preset, test_size, test_targets): + """Format message for test run start.""" + parts = [] + parts.append("đŸ§Ē **Test Run Started**") + parts.append("") + + info = [] + info.append(f"**Build Preset:** `{build_preset}`") + info.append(f"**Test Size:** `{test_size}`") + + if test_targets and test_targets != "ydb/": + info.append(f"**Test Targets:** `{test_targets}`") + + parts.append("\n".join(info)) + parts.append("") + parts.append("âŗ Tests are running...") + + return "\n".join(parts) + +def format_completion_message(build_preset, test_size, test_targets, summary_content, status): + """Format message for test run completion.""" + parts = [] + + # Status emoji + if status == "success": + parts.append("✅ **Test Run Completed Successfully**") + elif status == "failure": + parts.append("❌ **Test Run Failed**") + elif status == "cancelled": + parts.append("âš ī¸ **Test Run Cancelled**") + else: + parts.append("âš ī¸ **Test Run Completed**") + + parts.append("") + + info = [] + info.append(f"**Build Preset:** `{build_preset}`") + info.append(f"**Test Size:** `{test_size}`") + + if test_targets and test_targets != "ydb/": + info.append(f"**Test Targets:** `{test_targets}`") + + parts.append("\n".join(info)) + parts.append("") + + # Add summary content if available + if summary_content and summary_content.strip(): + parts.append("**Test Results:**") + parts.append("") + parts.append(summary_content.strip()) + + return "\n".join(parts) + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("::error::Usage: pr_comment.py ") + sys.exit(1) + + command = sys.argv[1] + + if command not in ["start", "complete"]: + 
print(f"::error::Unknown command: {command}. Must be 'start' or 'complete'") + sys.exit(1) + + pr_number = get_pr_number() + + build_preset = os.environ.get("BUILD_PRESET") + if not build_preset: + raise ValueError("BUILD_PRESET environment variable is not set") + + test_size = os.environ.get("TEST_SIZE") + if not test_size: + raise ValueError("TEST_SIZE environment variable is not set") + + test_targets = os.environ.get("TEST_TARGETS", "ydb/") + + workflow_run_url = get_workflow_run_url() + + if command == "start": + message = format_start_message(build_preset, test_size, test_targets) + create_or_update_comment(pr_number, message, workflow_run_url) + else: # complete + status = os.environ.get("TEST_STATUS") + if not status: + raise ValueError("TEST_STATUS environment variable is not set") + + # Read summary from summary_text.txt in workspace + workspace = os.environ.get("GITHUB_WORKSPACE", os.getcwd()) + summary_text_path = os.path.join(workspace, "summary_text.txt") + + summary_content = "" + if os.path.exists(summary_text_path): + with open(summary_text_path, 'r', encoding='utf-8') as f: + summary_content = f.read() + if summary_content.strip(): + print(f"::notice::Read {len(summary_content)} characters from {summary_text_path}") + else: + print(f"::warning::Summary file {summary_text_path} is empty") + else: + print(f"::warning::Summary file not found: {summary_text_path}") + + message = format_completion_message( + build_preset, test_size, test_targets, + summary_content, status + ) + create_or_update_comment(pr_number, message, workflow_run_url) + diff --git a/.github/actions/validate_pr_description/action.yaml b/.github/actions/validate_pr_description/action.yaml index 8db5d2eb6771..cfe08b9d8325 100644 --- a/.github/actions/validate_pr_description/action.yaml +++ b/.github/actions/validate_pr_description/action.yaml @@ -9,6 +9,8 @@ runs: env: GITHUB_TOKEN: ${{ github.token }} PR_BODY: ${{ inputs.pr_body}} + SHOW_ADDITIONAL_INFO_IN_PR: ${{ 
vars.SHOW_ADDITIONAL_INFO_IN_PR }} + APP_DOMAIN: ${{ vars.APP_DOMAIN }} run: | python3 -m pip install PyGithub echo "$PR_BODY" | python3 ${{ github.action_path }}/validate_pr_description.py diff --git a/.github/actions/validate_pr_description/test_validation.py b/.github/actions/validate_pr_description/test_validation.py new file mode 100644 index 000000000000..df113d34c926 --- /dev/null +++ b/.github/actions/validate_pr_description/test_validation.py @@ -0,0 +1,197 @@ +#!/usr/bin/env python3 +""" +Test script to validate PR description locally. + +Usage: + python3 test_validation.py + python3 test_validation.py --body-file + python3 test_validation.py --body-file + +Environment variables: + +Required for fetching PR from GitHub: + export GITHUB_TOKEN="your_github_token" + +Optional for table generation testing: + export SHOW_ADDITIONAL_INFO_IN_PR="TRUE" # Enable table generation test + export APP_DOMAIN="your-app-domain.com" # Required if SHOW_ADDITIONAL_INFO_IN_PR=TRUE + +Note: GITHUB_WORKSPACE is automatically set to repository root if not provided. 
+""" +import os +import sys +import json +from pathlib import Path +from validate_pr_description import ( + validate_pr_description_from_file, + ensure_tables_in_pr_body, + update_pr_body +) + +def find_repo_root(): + """Find repository root by looking for .github or .git directory.""" + current = Path(__file__).resolve().parent + while current != current.parent: + if (current / ".github").exists() or (current / ".git").exists(): + return str(current) + current = current.parent + # Fallback to current working directory + return os.getcwd() + +def test_validation(pr_body: str, pr_number: int = None, base_ref: str = "main"): + """Test validation and table generation.""" + print("=" * 60) + print("PR Body from GitHub") + print("=" * 60) + print(pr_body) + print("=" * 60) + print() + + print("=" * 60) + print("Testing PR description validation") + print("=" * 60) + + # Validate + is_valid, txt = validate_pr_description_from_file(description=pr_body) + print(f"\nValidation result: {'✅ PASSED' if is_valid else '❌ FAILED'}") + print(f"Message: {txt}\n") + + if not is_valid: + return False, pr_body + + # Test table generation if enabled + show_additional_info = os.environ.get("SHOW_ADDITIONAL_INFO_IN_PR", "").upper() == "TRUE" + result_body = pr_body + + if show_additional_info: + print("=" * 60) + print("Testing table generation") + print("=" * 60) + + app_domain = os.environ.get("APP_DOMAIN") + if not app_domain: + print("âš ī¸ APP_DOMAIN not set, skipping table generation test") + print(" Set APP_DOMAIN environment variable to test table generation") + return is_valid, pr_body + + if not pr_number: + print("âš ī¸ PR number not provided, skipping table generation test") + print(" Provide PR number to test table generation") + return is_valid, pr_body + + # Check current state + test_marker = "" + backport_marker = "" + has_test = test_marker in pr_body + has_backport = backport_marker in pr_body + + print(f"Current state:") + print(f" Test table exists: {has_test}") + 
print(f" Backport table exists: {has_backport}") + print() + + updated_body = ensure_tables_in_pr_body(pr_body, pr_number, base_ref, app_domain) + if updated_body: + result_body = updated_body + print("✅ Tables would be added to PR body") + print("\nGenerated tables preview:") + print("-" * 60) + # Extract just the tables part for preview + if test_marker in updated_body: + test_start = updated_body.find(test_marker) + test_end = updated_body.find("###", test_start + 1) + if test_end == -1: + test_end = updated_body.find("**Legend:**", test_start + 1) + if test_end != -1: + print(updated_body[test_start:test_end].strip()) + if backport_marker in updated_body: + backport_start = updated_body.find(backport_marker) + backport_end = updated_body.find("**Legend:**", backport_start + 1) + if backport_end != -1: + print(updated_body[backport_start:backport_end].strip()) + print("-" * 60) + else: + if has_test and has_backport: + print("â„šī¸ Both tables already exist in PR body") + else: + print("âš ī¸ Function returned None but tables don't exist - this is unexpected") + else: + print("â„šī¸ SHOW_ADDITIONAL_INFO_IN_PR is not TRUE, skipping table generation test") + print(" Set SHOW_ADDITIONAL_INFO_IN_PR=TRUE to test table generation") + + return is_valid, result_body + +def main(): + if len(sys.argv) < 2 and "--body-file" not in sys.argv: + print(__doc__) + sys.exit(1) + + # Set GITHUB_WORKSPACE for local testing if not already set + if not os.environ.get("GITHUB_WORKSPACE"): + repo_root = find_repo_root() + os.environ["GITHUB_WORKSPACE"] = repo_root + print(f"â„šī¸ Set GITHUB_WORKSPACE={repo_root} for local testing") + + pr_number = None + pr_body = None + base_ref = "main" + + # Parse arguments + if "--body-file" in sys.argv: + idx = sys.argv.index("--body-file") + if idx + 1 >= len(sys.argv): + print("Error: --body-file requires a file path") + sys.exit(1) + with open(sys.argv[idx + 1], 'r') as f: + pr_body = f.read() + # Try to get PR number from remaining args + 
if len(sys.argv) > idx + 2: + try: + pr_number = int(sys.argv[idx + 2]) + except ValueError: + pass + else: + try: + pr_number = int(sys.argv[1]) + except (ValueError, IndexError): + print("Error: PR number must be an integer") + sys.exit(1) + + # Try to get PR body from GitHub API if PR number provided + github_token = os.environ.get("GITHUB_TOKEN") + if github_token: + try: + from github import Github, Auth as GithubAuth + gh = Github(auth=GithubAuth.Token(github_token)) + repo = gh.get_repo("ydb-platform/ydb") + pr = repo.get_pull(pr_number) + pr_body = pr.body or "" + base_ref = pr.base.ref + print(f"đŸ“Ĩ Fetched PR #{pr_number} from GitHub") + except Exception as e: + print(f"âš ī¸ Failed to fetch PR from GitHub: {e}") + print(" Provide PR body via --body-file option") + sys.exit(1) + else: + print("Error: GITHUB_TOKEN not set. Cannot fetch PR from GitHub.") + print(" Set GITHUB_TOKEN or use --body-file option") + sys.exit(1) + + if not pr_body: + print("Error: PR body is required") + sys.exit(1) + + success, result_body = test_validation(pr_body, pr_number, base_ref) + + print() + print("=" * 60) + print("Resulting PR Body") + print("=" * 60) + print(result_body) + print("=" * 60) + + sys.exit(0 if success else 1) + +if __name__ == "__main__": + main() + diff --git a/.github/actions/validate_pr_description/validate_pr_description.py b/.github/actions/validate_pr_description/validate_pr_description.py index 8996a0d552dd..a13fa998a8a0 100644 --- a/.github/actions/validate_pr_description/validate_pr_description.py +++ b/.github/actions/validate_pr_description/validate_pr_description.py @@ -1,5 +1,6 @@ import sys import re +<<<<<<< HEAD from typing import Tuple issue_patterns = [ @@ -26,6 +27,19 @@ * Documentation (changelog entry is not required) * Not for changelog (changelog entry is not required) """ +======= +import os +import json +import urllib.parse +from typing import Tuple, Optional +from github import Github, Auth as GithubAuth +from pr_template 
import ( + ISSUE_PATTERNS, + PULL_REQUEST_TEMPLATE, + NOT_FOR_CHANGELOG_CATEGORIES, + ALL_CATEGORIES +) +>>>>>>> c8cecab2b81 (Add PR comment functionality for test runs) def validate_pr_description(description, is_not_for_cl_valid=True) -> bool: try: @@ -108,22 +122,305 @@ def check_issue_pattern(issue_pattern): print("PR description is valid.") return True, "PR description is valid." -def validate_pr_description_from_file(file_path) -> Tuple[bool, str]: +def normalize_app_domain(app_domain: str) -> str: + """Normalize app domain - remove https:// prefix if present.""" + domain = app_domain.strip() + if domain.startswith("https://"): + domain = domain[8:] + if domain.startswith("http://"): + domain = domain[7:] + return domain.rstrip('/') + +def generate_test_table(pr_number: int, base_ref: str, app_domain: str) -> str: + """Generate test execution table with buttons for different build presets and test sizes.""" + domain = normalize_app_domain(app_domain) + base_url = f"https://{domain}/workflow/trigger" + owner = "ydb-platform" + repo = "ydb" + workflow_id = "run_tests.yml" + return_url = f"https://github.com/{owner}/{repo}/pull/{pr_number}" + + build_presets = ["relwithdebinfo", "release-asan", "release-msan", "release-tsan"] + test_size_combinations = [ + ("small,medium", "Small & Medium"), + ("large", "Large") + ] + + rows = [] + for build_preset in build_presets: + cells = [] + + for test_size, test_size_display in test_size_combinations: + params = { + "owner": owner, + "repo": repo, + "workflow_id": workflow_id, + "ref": base_ref, + "pull_number": f"pull/{pr_number}", + "build_preset": build_preset, + "test_size": test_size, + "test_targets": "ydb/", + "return_url": return_url + } + query_string = "&".join([f"{k}={urllib.parse.quote(str(v), safe='')}" for k, v in params.items()]) + url = f"{base_url}?{query_string}" + url_ui = f"{base_url}?{query_string}&ui=true" + + button_label_encoded = build_preset.replace('-', '_') + buttons = f"[![â–ļ 
{build_preset}](https://img.shields.io/badge/%E2%96%B6_{button_label_encoded}-4caf50?style=flat-square)]({url}) [![âš™ī¸](https://img.shields.io/badge/%E2%9A%99%EF%B8%8F-ff9800?style=flat-square)]({url_ui})" + cells.append(buttons) + + rows.append("| " + " | ".join(cells) + " |") + + table = "\n" + table += "
<details>\n<summary>Run tests</summary>
\n\n" + table += "| Small & Medium | Large |\n" + table += "|----------------|-------|\n" + table += "\n".join(rows) + return table + +def generate_backport_table(pr_number: int, app_domain: str) -> str: + """Generate backport execution table with buttons for different branches.""" + domain = normalize_app_domain(app_domain) + base_url = f"https://{domain}/workflow/trigger" + owner = "ydb-platform" + repo = "ydb" + workflow_id = "cherry_pick_v2.yml" # Workflow file name + return_url = f"https://github.com/{owner}/{repo}/pull/{pr_number}" + + # Load backport branches from config - no fallback, fail if not found + workspace = os.environ.get("GITHUB_WORKSPACE") + if not workspace: + raise ValueError("GITHUB_WORKSPACE environment variable is not set") + + backport_branches_path = os.path.join(workspace, ".github", "config", "backport_branches.json") + + if not os.path.exists(backport_branches_path): + raise FileNotFoundError(f"Backport branches config file not found: {backport_branches_path}") + + with open(backport_branches_path, 'r') as f: + branches = json.load(f) + + if not isinstance(branches, list) or len(branches) == 0: + raise ValueError(f"Invalid backport branches config: expected non-empty list, got {type(branches)}") + + print(f"::notice::Loaded {len(branches)} backport branches from {backport_branches_path}") + + rows = [] + for branch in branches: + params = { + "owner": owner, + "repo": repo, + "workflow_id": workflow_id, + "ref": "main", + "commits": str(pr_number), + "target_branches": branch, + "allow_unmerged": "true", + "return_url": return_url + } + query_string = "&".join([f"{k}={urllib.parse.quote(str(v), safe='')}" for k, v in params.items()]) + url = f"{base_url}?{query_string}" + url_ui = f"{base_url}?{query_string}&ui=true" + + rows.append(f"| [![â–ļ {branch}](https://img.shields.io/badge/%E2%96%B6_{branch.replace('-', '_')}-4caf50?style=flat-square)]({url}) 
[![âš™ī¸](https://img.shields.io/badge/%E2%9A%99%EF%B8%8F-ff9800?style=flat-square)]({url_ui}) |") + + # Generate URL for backporting multiple branches + all_branches = ",".join(branches) + params_multiple = { + "owner": owner, + "repo": repo, + "workflow_id": workflow_id, + "ref": "main", + "commits": str(pr_number), + "target_branches": all_branches, + "allow_unmerged": "true", + "return_url": return_url + } + query_string_multiple = "&".join([f"{k}={urllib.parse.quote(str(v), safe='')}" for k, v in params_multiple.items()]) + url_multiple_ui = f"{base_url}?{query_string_multiple}&ui=true" + + table = "\n" + table += "
<details>\n<summary>🔄 Backport</summary>
\n\n" + table += "| Actions |\n" + table += "|----------|\n" + table += "\n".join(rows) + table += "\n\n" + table += f"[![âš™ī¸ Backport multiple branches](https://img.shields.io/badge/%E2%9A%99%EF%B8%8F_Backport_multiple_branches-2196F3?style=flat-square)]({url_multiple_ui})" + return table + +def get_legend() -> str: + """Get legend text for workflow buttons.""" + return "\n**Legend:**\n\n" \ + "* â–ļ - immediately runs the workflow with default parameters\n" \ + "* âš™ī¸ - opens UI to review and modify parameters before running\n" + +def ensure_tables_in_pr_body(pr_body: str, pr_number: int, base_ref: str, app_domain: str) -> Optional[str]: + """Check if test and backport tables exist in PR body, add them if missing.""" + test_table_marker = "" + backport_table_marker = "" + + has_test_table = test_table_marker in pr_body + has_backport_table = backport_table_marker in pr_body + + if has_test_table and has_backport_table: + return None # Tables already exist + + # Generate tables to insert + test_table = None + backport_table = None + if not has_test_table: + test_table = generate_test_table(pr_number, base_ref, app_domain) + if not has_backport_table: + backport_table = generate_backport_table(pr_number, app_domain) + + legend = get_legend() + + # Combine tables side by side using HTML table + tables_html = "" + if test_table and backport_table: + # Both tables - place them side by side using HTML table + # GitHub markdown supports markdown tables inside HTML table cells + # Using HTML attributes instead of CSS styles for better compatibility + tables_html = '\n' + tables_html += '\n' + tables_html += '\n' + tables_html += '
' + tables_html += test_table + tables_html += '' + tables_html += backport_table + tables_html += '
' + elif test_table: + tables_html = test_table + elif backport_table: + tables_html = backport_table + + # Find insertion point after "Description for reviewers" section + reviewers_section_marker = "### Description for reviewers" + + if reviewers_section_marker not in pr_body: + # If section not found, add at the end + if pr_body.strip(): + return pr_body.rstrip() + "\n\n" + tables_html + legend + else: + return tables_html + legend + + # Find the end of "Description for reviewers" section (before next ### heading) + lines = pr_body.split('\n') + insertion_index = len(lines) # Default to end + + for i, line in enumerate(lines): + if reviewers_section_marker in line: + # Look for the next ### heading after this section + for j in range(i + 1, len(lines)): + if lines[j].strip().startswith('###') and reviewers_section_marker not in lines[j]: + insertion_index = j + break + break + + # Insert tables and legend after "Description for reviewers" section + new_lines = lines[:insertion_index] + [""] + [tables_html] + [legend] + lines[insertion_index:] + return '\n'.join(new_lines) + +def update_pr_body(pr_number: int, new_body: str) -> None: + """Update PR body via GitHub API. 
Raises exception on error.""" + github_token = os.environ.get("GITHUB_TOKEN") + github_repo = os.environ.get("GITHUB_REPOSITORY") + + if not github_token: + raise ValueError("GITHUB_TOKEN environment variable is not set") + + if not github_repo: + raise ValueError("GITHUB_REPOSITORY environment variable is not set") + + gh = Github(auth=GithubAuth.Token(github_token)) + repo = gh.get_repo(github_repo) + pr = repo.get_pull(pr_number) + pr.edit(body=new_body) + print(f"::notice::Updated PR #{pr_number} body with test and backport tables") + +def validate_pr_description_from_file(file_path=None, description=None) -> Tuple[bool, str]: try: - if file_path: + if description is not None: + # Use provided description directly + desc = description + elif file_path: with open(file_path, 'r') as file: - description = file.read() + desc = file.read() else: - description = sys.stdin.read() - return check_pr_description(description) + # Read from stdin if available + if not sys.stdin.isatty(): + desc = sys.stdin.read() + else: + desc = "" + return check_pr_description(desc) except Exception as e: txt = f"Failed to validate PR description: {e}" print(f"::error::{txt}") return False, txt +def validate_pr(): + """Validate PR description.""" + # Read PR body from stdin (passed from action.yaml) + if sys.stdin.isatty(): + raise ValueError("PR body must be provided via stdin") + + pr_body = sys.stdin.read() + + # Get PR info from event - required, no fallback + event_path = os.environ.get("GITHUB_EVENT_PATH") + if not event_path: + raise ValueError("GITHUB_EVENT_PATH environment variable is not set") + + if not os.path.exists(event_path): + raise FileNotFoundError(f"Event file not found: {event_path}") + + with open(event_path, 'r') as f: + event = json.load(f) + + if "pull_request" not in event: + raise ValueError("Event does not contain pull_request data") + + pr_number = event["pull_request"]["number"] + base_ref = event["pull_request"]["base"]["ref"] + + # Use PR body from event 
if stdin is empty + if not pr_body: + pr_body = event["pull_request"].get("body") or "" + + # Validate PR description + is_valid, txt = validate_pr_description_from_file( + sys.argv[1] if len(sys.argv) > 1 else None, + description=pr_body + ) + + return is_valid, txt, pr_body, pr_number, base_ref + +def add_tables_if_needed(pr_body: str, pr_number: int, base_ref: str): + """Add test and backport tables to PR body if enabled.""" + show_additional_info = os.environ.get("SHOW_ADDITIONAL_INFO_IN_PR", "").upper() == "TRUE" + + if not show_additional_info: + return # Tables should not be added + + app_domain = os.environ.get("APP_DOMAIN") + if not app_domain: + raise ValueError("APP_DOMAIN environment variable is not set (required when SHOW_ADDITIONAL_INFO_IN_PR=TRUE)") + + updated_body = ensure_tables_in_pr_body(pr_body, pr_number, base_ref, app_domain) + if updated_body: + update_pr_body(pr_number, updated_body) + if __name__ == "__main__": - is_valid, txt = validate_pr_description_from_file(sys.argv[1] if len(sys.argv) > 1 else None) + # Step 1: Validate PR description + is_valid, txt, pr_body, pr_number, base_ref = validate_pr() + + # Step 2: Add tables if validation passed and feature is enabled + if is_valid: + add_tables_if_needed(pr_body, pr_number, base_ref) + + # Step 3: Post validation status from post_status_to_github import post post(is_valid, txt) + if not is_valid: sys.exit(1) diff --git a/.github/config/backport_branches.json b/.github/config/backport_branches.json new file mode 100644 index 000000000000..0328ef804624 --- /dev/null +++ b/.github/config/backport_branches.json @@ -0,0 +1,6 @@ +[ + "stable-25-2", + "stable-25-2-1", + "stable-25-3", + "stable-25-3-1" +] diff --git a/.github/scripts/cherry_pick_v2.py b/.github/scripts/cherry_pick_v2.py new file mode 100755 index 000000000000..87c0f27cac6a --- /dev/null +++ b/.github/scripts/cherry_pick_v2.py @@ -0,0 +1,813 @@ +#!/usr/bin/env python3 +""" +Cherry-pick v2 Script - Automated Backport Tool + 
+Maintains order of input sources and creates PRs with proper metadata. +""" + +import os +import sys +import datetime +import logging +import subprocess +import argparse +import re +import tempfile +import shutil +from typing import List, Optional, Tuple, Any +from dataclasses import dataclass, field +from github import Github, GithubException, Auth +import requests + +try: + pr_template_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'actions', 'validate_pr_description')) + sys.path.insert(0, pr_template_path) + from pr_template import ISSUE_PATTERNS, get_category_section_template +except ImportError as e: + logging.error(f"Failed to import pr_template: {e}") + raise + + +@dataclass +class ConflictInfo: + file_path: str + + +@dataclass +class Source: + type: str # 'commit' or 'pr' + commit_shas: List[str] + title: str + body_item: str + author: Optional[str] + pull_requests: List[Any] + + +@dataclass +class BackportResult: + target_branch: str + pr: Optional[Any] = None + conflict_files: List[ConflictInfo] = field(default_factory=list) + cherry_pick_logs: List[str] = field(default_factory=list) + + @property + def has_conflicts(self) -> bool: + return len(self.conflict_files) > 0 + + +def run_git(repo_path: str, cmd: List[str], logger, check=True) -> subprocess.CompletedProcess: + """Run git command""" + result = subprocess.run( + ['git'] + cmd, + cwd=repo_path, + capture_output=True, + text=True, + check=check + ) + return result + + +def expand_sha(repo, ref: str, logger) -> str: + """Expands short SHA to full SHA using GitHub API""" + try: + commits = repo.get_commits(sha=ref) + if commits.totalCount > 0: + commit = commits[0] + if len(ref) == 40: + return commit.sha if commit.sha == ref else ref + elif commit.sha.startswith(ref): + return commit.sha + except Exception as e: + logger.debug(f"Failed to find commit via GitHub API: {e}") + raise ValueError(f"Failed to find commit for '{ref}'") + + +def create_commit_source(commit, repo, 
logger) -> Source: + """Creates source from commit SHA""" + linked_pr = None + try: + pulls = commit.get_pulls() + if pulls.totalCount > 0: + linked_pr = pulls.get_page(0)[0] + except Exception: + pass + + author = linked_pr.user.login if linked_pr else (commit.author.login if commit.author else None) + body_item = f"* commit {commit.html_url}: {linked_pr.title}" if linked_pr else f"* commit {commit.html_url}" + + # Get commit message title (first line) + commit_title = commit.commit.message.split('\n')[0].strip() if commit.commit.message else f"commit {commit.sha[:7]}" + + return Source( + type='commit', + commit_shas=[commit.sha], + title=f'commit {commit.sha[:7]}: {commit_title}', + body_item=body_item, + author=author, + pull_requests=[linked_pr] if linked_pr else [] + ) + + +def create_pr_source(pull: Any, allow_unmerged: bool, logger) -> Source: + """Creates source from PR""" + if not pull.merged: + commit_shas = [c.sha for c in pull.get_commits()] + if not commit_shas: + raise ValueError(f"PR #{pull.number} contains no commits to cherry-pick") + elif pull.merge_commit_sha: + commit_shas = [pull.merge_commit_sha] + else: + commit_shas = [c.sha for c in pull.get_commits()] + if not commit_shas: + raise ValueError(f"PR #{pull.number} contains no commits to cherry-pick") + + return Source( + type='pr', + commit_shas=commit_shas, + title=f'PR {pull.number}: {pull.title}', + body_item=f"* PR {pull.html_url}", + author=pull.user.login, + pull_requests=[pull] + ) + + +def detect_conflicts(repo_path: str, logger) -> List[ConflictInfo]: + """Detects conflicts from git status""" + conflict_files = [] + CONFLICT_STATUS_CODES = ['UU', 'AA', 'DD', 'DU', 'UD', 'AU', 'UA'] + + try: + result = run_git(repo_path, ['status', '--porcelain'], logger) + if not result.stdout.strip(): + return [] + + for line in result.stdout.strip().split('\n'): + status_code = line[:2] + if status_code in CONFLICT_STATUS_CODES: + parts = line.split(None, 1) + if len(parts) > 1: + file_path = 
parts[1].strip() + if file_path: + conflict_files.append(ConflictInfo(file_path=file_path)) + except Exception as e: + logger.error(f"Error detecting conflicts: {e}") + + return conflict_files + + +def get_linked_issues(repo, token: str, pull_requests: List[Any], logger) -> str: + """Gets linked issues for all PRs""" + all_issues = [] + owner, repo_name = repo.full_name.split('/') + + for pull in pull_requests: + issues = [] + # Try GraphQL + try: + query = """ + query($owner: String!, $repo: String!, $prNumber: Int!) { + repository(owner: $owner, name: $repo) { + pullRequest(number: $prNumber) { + closingIssuesReferences(first: 100) { + nodes { + number + repository { + owner { login } + name + } + } + } + } + } + } + """ + response = requests.post( + "https://api.github.com/graphql", + headers={"Authorization": f"token {token}"}, + json={"query": query, "variables": {"owner": owner, "repo": repo_name, "prNumber": pull.number}}, + timeout=10 + ) + if response.ok: + data = response.json() + nodes = data.get("data", {}).get("repository", {}).get("pullRequest", {}).get("closingIssuesReferences", {}).get("nodes", []) + for issue in nodes: + owner_name = issue["repository"]["owner"]["login"] + repo_name_issue = issue["repository"]["name"] + number = issue["number"] + if owner_name == owner and repo_name_issue == repo_name: + issues.append(f"#{number}") + else: + issues.append(f"{owner_name}/{repo_name_issue}#{number}") + except Exception: + pass + + # Fallback to parsing PR body + if not issues and pull.body: + issues = [f"#{num}" for num in re.findall(r'#(\d+)', pull.body)] + + all_issues.extend(issues) + + unique_issues = list(dict.fromkeys(all_issues)) + return ' '.join(unique_issues) if unique_issues else 'None' + + +def extract_changelog(pr_body: str) -> Tuple[Optional[str], Optional[str], Optional[str]]: + """Extracts changelog category, entry, and entry content""" + if not pr_body: + return None, None, None + + # Category + category_match = re.search(r"### 
Changelog category.*?\n(.*?)(\n###|$)", pr_body, re.DOTALL) + category = None + if category_match: + categories = [line.lstrip('* ').strip() for line in category_match.group(1).splitlines() if line.strip() and line.strip().startswith('*')] + category = categories[0] if categories else None + + # Entry + entry_match = re.search(r"### Changelog entry.*?\n(.*?)(\n###|$)", pr_body, re.DOTALL) + entry = entry_match.group(1).strip() if entry_match else None + if entry in ['...', '']: + entry = None + + # Entry content (stops at category) + entry_content_match = re.search(r"### Changelog entry.*?\n(.*?)(\n### Changelog category|$)", pr_body, re.DOTALL) + entry_content = entry_content_match.group(1).strip() if entry_content_match else None + if entry_content in ['...', '']: + entry_content = None + + return category, entry, entry_content + + +def build_pr_content( + repo_name: str, repo, token: str, target_branch: str, dev_branch_name: str, + sources: List[Source], conflict_files: List[ConflictInfo], + cherry_pick_logs: List[str], workflow_triggerer: str, workflow_url: Optional[str], + pr_number: Optional[int], logger +) -> Tuple[str, str]: + """Generates PR title and body""" + has_conflicts = len(conflict_files) > 0 + # Collect data + all_commit_shas = [] + all_pull_requests = [] + all_titles = [] + all_body_items = [] + all_authors = [] + + for source in sources: + all_commit_shas.extend(source.commit_shas) + all_pull_requests.extend(source.pull_requests) + all_titles.append(source.title) + all_body_items.append(source.body_item) + if source.author and source.author not in all_authors: + all_authors.append(source.author) + + # Title + if len(all_titles) == 1: + title = f"[Backport {target_branch}] {all_titles[0]}" + else: + title = f"[Backport {target_branch}] {', '.join(all_titles)}" + if has_conflicts: + title = f"[CONFLICT] {title}" + if len(title) > 256: # GitHub limit for PR title + title = title[:253] + "..." 
+ + # Issues + issue_refs = get_linked_issues(repo, token, all_pull_requests, logger) + authors_str = ', '.join([f"@{a}" for a in set(all_authors)]) if all_authors else "Unknown" + + # Changelog: build entry for each source, then merge + categories = [] + changelog_entries = [] + + for source in sources: + source_entry = None + source_category = None + + # For PR or merge commit with linked PR + if source.pull_requests: + pull = source.pull_requests[0] + if pull.body: + cat, ent, ent_content = extract_changelog(pull.body) + source_category = cat + # Use entry_content if available, otherwise entry + source_entry = ent_content if ent_content else ent + + # Format: "PR Title: changelog_entry" or just "PR Title" + if source_entry: + changelog_entries.append(f"{pull.title}: {source_entry}") + else: + changelog_entries.append(pull.title) + + # For commit SHA (no linked PR) + elif source.type == 'commit' and source.commit_shas: + try: + commit = repo.get_commit(source.commit_shas[0]) + commit_message = commit.commit.message + commit_title = commit_message.split('\n')[0].strip() + changelog_entries.append(commit_title) + except Exception as e: + logger.debug(f"Failed to get commit message for {source.commit_shas[0]}: {e}") + changelog_entries.append(f"commit {source.commit_shas[0][:7]}") + + if source_category: + categories.append(source_category) + + changelog_category = categories[0] if len(set(categories)) == 1 else None + + # Merge all entries + if len(changelog_entries) > 1: + changelog_entry = "\n\n---\n\n".join(changelog_entries) + elif len(changelog_entries) == 1: + changelog_entry = changelog_entries[0] + else: + # Fallback + changelog_entry = f"Backport to `{target_branch}`" + + if changelog_category == "Bugfix" and issue_refs != "None": + if not any(re.search(p, changelog_entry) for p in ISSUE_PATTERNS): + changelog_entry = f"{changelog_entry} ({issue_refs})" + + category_section = f"* {changelog_category}" if changelog_category else 
get_category_section_template() + commits = '\n'.join(all_body_items) + + # Build body sections + description = f"#### Original PR(s)\n{commits}\n\n#### Metadata\n" + description += f"- **Original PR author(s):** {authors_str}\n" + description += f"- **Cherry-picked by:** @{workflow_triggerer}\n" + description += f"- **Related issues:** {issue_refs}" + + cherry_pick_log_section = "" + if cherry_pick_logs: + cherry_pick_log_section = "\n\n### Git Cherry-Pick Log\n\n```\n" + '\n'.join(log if log.endswith('\n') else log + '\n' for log in cherry_pick_logs) + "```\n" + + conflicts_section = "" + if has_conflicts: + branch_for_instructions = dev_branch_name or target_branch + conflicts_section = "\n\n#### Conflicts Require Manual Resolution\n\n" + conflicts_section += "This PR contains merge conflicts that require manual resolution.\n\n" + if conflict_files: + conflicts_section += "**Files with conflicts:**\n\n" + for conflict in conflict_files: + file_link = f"https://github.com/{repo_name}/pull/{pr_number}/files" if pr_number else f"https://github.com/{repo_name}/blob/{branch_for_instructions}/{conflict.file_path}" + conflicts_section += f"- [{conflict.file_path}]({file_link})\n" + conflicts_section += f""" +**How to resolve conflicts:** + +```bash +git fetch origin +git checkout --track origin/{branch_for_instructions} +# Resolve conflicts in files +git add . +git commit -m "Resolved merge conflicts" +git push +``` + +After resolving conflicts: +1. Fix the PR title (remove `[CONFLICT]` if conflicts are resolved) +2. 
Mark PR as ready for review +""" + + workflow_section = f"\n\n---\n\nPR was created by cherry-pick workflow [run]({workflow_url})" if workflow_url else "\n\n---\n\nPR was created by cherry-pick script" + + body = f"""### Changelog entry + +{changelog_entry} + +### Changelog category + +{category_section} + +### Description for reviewers + +{description}{conflicts_section}{cherry_pick_log_section}{workflow_section} +""" + + return title, body + + +def find_existing_backport_comment(pull: Any, logger): + """Finds existing backport comment""" + try: + for comment in pull.get_issue_comments(): + if comment.user.login == "YDBot" and "Backport" in comment.body and "in progress" in comment.body: + return comment + except Exception: + pass + return None + + +def update_comments(backport_comments: List[Tuple[Any, object]], results: List, skipped_branches: List[Tuple[str, str]], target_branches: List[str], workflow_url: Optional[str], logger): + """Updates comments with backport results""" + if not backport_comments: + return + + for pull, comment in backport_comments: + try: + existing_body = comment.body + total_branches = len(results) + len(skipped_branches) + + if total_branches == 0: + new_results = f"Backport to {', '.join([f'`{b}`' for b in target_branches])} completed with no results" + if workflow_url: + new_results += f" - [workflow run]({workflow_url})" + elif total_branches == 1 and len(results) == 1: + result = results[0] + if result.pr: + status = "draft PR" if result.has_conflicts else "PR" + new_results = f"Backported to `{result.target_branch}`: {status} {result.pr.html_url}" + if result.has_conflicts: + new_results += " (contains conflicts requiring manual resolution)" + if workflow_url: + new_results += f" - [workflow run]({workflow_url})" + else: + new_results = f"Backported to `{result.target_branch}`: failed" + if workflow_url: + new_results += f" - [workflow run]({workflow_url})" + else: + new_results = "Backport results:\n" + for result in results: + 
if result.pr: + status = "draft PR" if result.has_conflicts else "PR" + conflict_note = " (contains conflicts requiring manual resolution)" if result.has_conflicts else "" + new_results += f"- `{result.target_branch}`: {status} {result.pr.html_url}{conflict_note}\n" + else: + new_results += f"- `{result.target_branch}`: failed\n" + for target_branch, reason in skipped_branches: + new_results += f"- `{target_branch}`: skipped ({reason})\n" + if workflow_url: + new_results += f"\n[workflow run]({workflow_url})" + + # Replace "in progress" line with results + lines = existing_body.split('\n') + updated_lines = [] + found = False + + for line in lines: + # Check if this is the "in progress" line for our target branches + is_progress_line = ( + "in progress" in line and + any(f"`{b}`" in line for b in target_branches) and + (not workflow_url or workflow_url in line) + ) + + if is_progress_line and not found: + updated_lines.append(new_results) + found = True + else: + updated_lines.append(line) + + if not found: + updated_lines.append("") + updated_lines.append(new_results) + + updated_comment = '\n'.join(updated_lines) + + comment.edit(updated_comment) + logger.info(f"Updated backport comment in original PR #{pull.number}") + except GithubException as e: + logger.warning(f"Failed to update comment in original PR #{pull.number}: {e}") + + +def process_branch( + repo_path: str, target_branch: str, dev_branch_name: str, commit_shas: List[str], + repo_name: str, repo, token: str, sources: List[Source], workflow_triggerer: str, + workflow_url: Optional[str], summary_path: Optional[str], logger +): + """Processes single branch""" + all_conflict_files = [] + cherry_pick_logs = [] + + # Prepare branch + run_git(repo_path, ['fetch', 'origin', target_branch], logger) + run_git(repo_path, ['reset', '--hard', 'HEAD'], logger) + run_git(repo_path, ['checkout', '-B', target_branch, f'origin/{target_branch}'], logger) + run_git(repo_path, ['checkout', '-b', dev_branch_name, 
target_branch], logger) + + # Cherry-pick each commit + for commit_sha in commit_shas: + logger.info("Cherry-picking commit: %s", commit_sha[:7]) + # Fetch commit to ensure it's available locally (needed for unmerged PRs) + run_git(repo_path, ['fetch', 'origin', commit_sha], logger, check=False) + try: + result = run_git(repo_path, ['cherry-pick', '--allow-empty', commit_sha], logger, check=False) + output = (result.stdout or '') + (('\n' + result.stderr) if result.stderr else '') + + if result.returncode != 0: + if "conflict" in output.lower(): + conflicts = detect_conflicts(repo_path, logger) + if conflicts: + run_git(repo_path, ['add', '-A'], logger) + run_git(repo_path, ['commit', '-m', f"BACKPORT-CONFLICT: manual resolution required for commit {commit_sha[:7]}"], logger) + all_conflict_files.extend(conflicts) + else: + run_git(repo_path, ['cherry-pick', '--abort'], logger, check=False) + raise RuntimeError(f"Cherry-pick failed for commit {commit_sha[:7]}") + else: + raise RuntimeError(f"Cherry-pick failed for commit {commit_sha[:7]}: {output}") + + if output: + cherry_pick_logs.append(f"=== Cherry-picking {commit_sha[:7]} ===\n{output}") + except subprocess.CalledProcessError as e: + raise RuntimeError(f"Cherry-pick failed for commit {commit_sha[:7]}: {e}") + + # Push branch + run_git(repo_path, ['push', '--set-upstream', 'origin', dev_branch_name], logger) + + # Create PR + has_conflicts = len(all_conflict_files) > 0 + title, body = build_pr_content( + repo_name, repo, token, target_branch, dev_branch_name, + sources, all_conflict_files, cherry_pick_logs, + workflow_triggerer, workflow_url, None, logger + ) + + pr = repo.create_pull( + base=target_branch, + head=dev_branch_name, + title=title, + body=body, + maintainer_can_modify=True, + draft=has_conflicts + ) + + # Update body with PR number for correct links + if has_conflicts: + _, updated_body = build_pr_content( + repo_name, repo, token, target_branch, dev_branch_name, + sources, all_conflict_files, 
cherry_pick_logs, + workflow_triggerer, workflow_url, pr.number, logger + ) + pr.edit(body=updated_body) + + # Assign assignee + if workflow_triggerer != 'unknown': + try: + pr.add_to_assignees(workflow_triggerer) + except GithubException: + pass + + # Enable automerge if no conflicts + if not has_conflicts: + try: + pr.enable_automerge(merge_method='MERGE') + except Exception: + try: + pr.enable_automerge(merge_method='SQUASH') + except Exception: + pass + + # Write to summary + if summary_path: + summary = f"### Branch `{target_branch}`: " + summary += f"**CONFLICT** Draft PR {pr.html_url}\n\n" if has_conflicts else f"PR {pr.html_url}\n\n" + if cherry_pick_logs: + summary += "**Git Cherry-Pick Log:**\n\n```\n" + '\n'.join(cherry_pick_logs) + "```\n\n" + if has_conflicts and all_conflict_files: + summary += "**Files with conflicts:**\n\n" + for conflict in all_conflict_files: + summary += f"- `{conflict.file_path}`\n" + with open(summary_path, 'a') as f: + f.write(f'{summary}\n\n') + + return BackportResult( + target_branch=target_branch, + pr=pr, + conflict_files=all_conflict_files, + cherry_pick_logs=cherry_pick_logs + ) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--commits", help="List of commits to cherry-pick. Can be SHA, PR number or URL. Separated by space, comma or line end.") + parser.add_argument("--target-branches", help="List of branches to cherry-pick. 
Separated by space, comma or line end.") + parser.add_argument("--allow-unmerged", action='store_true', help="Allow backporting unmerged PRs") + args = parser.parse_args() + + logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.DEBUG) + logger = logging.getLogger("cherry-pick") + + repo_name = os.environ["REPO"] + token = os.environ["TOKEN"] + workflow_triggerer = os.environ.get('GITHUB_ACTOR', 'unknown') + summary_path = os.getenv('GITHUB_STEP_SUMMARY') + + # Initialize GitHub + gh = Github(auth=Auth.Token(token)) + repo = gh.get_repo(repo_name) + + # Get workflow URL + workflow_url = None + run_id = os.getenv('GITHUB_RUN_ID') + if run_id: + try: + workflow_url = repo.get_workflow_run(int(run_id)).html_url + except (GithubException, ValueError): + pass + + # Parse input + def split_input(s: str) -> List[str]: + if not s: + return [] + pattern = r"[, \n]+" + return [part.strip() for part in re.split(pattern, s) if part.strip()] + + commits = split_input(args.commits) + target_branches = split_input(args.target_branches) + allow_unmerged = getattr(args, 'allow_unmerged', False) + + # Collect sources + sources = [] + for c in commits: + ref = c.split('/')[-1].strip() + try: + pr_num = int(ref) + try: + pull = repo.get_pull(pr_num) + except GithubException as e: + logger.error(f"VALIDATION_ERROR: PR #{pr_num} does not exist: {e}") + sys.exit(1) + + if not pull.merged and not allow_unmerged: + raise ValueError(f"PR #{pr_num} is not merged. 
Use --allow-unmerged to backport unmerged PRs") + if not pull.merged: + logger.info(f"PR #{pr_num} is not merged, but --allow-unmerged is set, proceeding with commits from PR") + source = create_pr_source(pull, allow_unmerged, logger) + sources.append(source) + if not pull.merged: + logger.info(f"PR #{pr_num} is unmerged, using {len(source.commit_shas)} commits from PR") + elif pull.merge_commit_sha: + merge_commit = repo.get_commit(pull.merge_commit_sha) + if merge_commit.parents and len(merge_commit.parents) > 1: + logger.info(f"PR #{pr_num} was merged as merge commit, using {len(source.commit_shas)} individual commits") + else: + logger.info(f"PR #{pr_num} was merged as squash/rebase, using merge_commit_sha") + except ValueError: + # Not a PR number, treat as commit SHA + try: + expanded_sha = expand_sha(repo, ref, logger) + except ValueError as e: + logger.error(f"VALIDATION_ERROR: Failed to expand SHA {ref}: {e}") + sys.exit(1) + + try: + commit = repo.get_commit(expanded_sha) + except GithubException as e: + logger.error(f"VALIDATION_ERROR: Commit {ref} (expanded to {expanded_sha}) does not exist: {e}") + sys.exit(1) + + # Check if commit is linked to PR + pulls = commit.get_pulls() + if pulls.totalCount > 0: + pr = pulls.get_page(0)[0] + if not pr.merged and not allow_unmerged: + raise ValueError(f"PR #{pr.number} (associated with commit {expanded_sha[:7]}) is not merged. Cannot backport unmerged PR. 
Use --allow-unmerged to allow") + if not pr.merged: + logger.info(f"PR #{pr.number} (associated with commit {expanded_sha[:7]}) is not merged, but --allow-unmerged is set, proceeding") + + source = create_commit_source(commit, repo, logger) + sources.append(source) + + # Validate + all_commit_shas = [] + all_pull_requests = [] + for source in sources: + all_commit_shas.extend(source.commit_shas) + all_pull_requests.extend(source.pull_requests) + + if not all_commit_shas: + logger.error("VALIDATION_ERROR: No commits to cherry-pick") + sys.exit(1) + if not target_branches: + logger.error("VALIDATION_ERROR: No target branches specified") + sys.exit(1) + + for pull in all_pull_requests: + if not pull.merged and not allow_unmerged: + logger.error(f"VALIDATION_ERROR: PR #{pull.number} is not merged. Use --allow-unmerged to allow backporting unmerged PRs") + sys.exit(1) + + for commit_sha in all_commit_shas: + try: + repo.get_commit(commit_sha) + except GithubException as e: + logger.error(f"VALIDATION_ERROR: Commit {commit_sha} does not exist: {e}") + sys.exit(1) + + # Validate branches and collect invalid ones + invalid_branches = [] + for branch in target_branches: + try: + repo.get_branch(branch) + except GithubException as e: + logger.error(f"VALIDATION_ERROR: Branch {branch} does not exist: {e}") + invalid_branches.append(branch) + + # Remove invalid branches from target_branches + valid_target_branches = [b for b in target_branches if b not in invalid_branches] + + if not valid_target_branches: + logger.error("VALIDATION_ERROR: No valid target branches after validation") + sys.exit(1) + + if invalid_branches: + logger.warning(f"VALIDATION_WARNING: Skipping invalid branches: {', '.join(invalid_branches)}") + + logger.info("Input validation successful") + + # Create initial comment + backport_comments = [] + if all_pull_requests: + target_branches_str = ', '.join([f"`{b}`" for b in target_branches]) + if workflow_url: + new_line = f"Backport to {target_branches_str} 
in progress: [workflow run]({workflow_url})" + else: + new_line = f"Backport to {target_branches_str} in progress" + + for pull in all_pull_requests: + try: + existing_comment = find_existing_backport_comment(pull, logger) + if existing_comment: + existing_body = existing_comment.body + branches_already_mentioned = all(f"`{b}`" in existing_body for b in target_branches) + should_skip = ( + branches_already_mentioned and + ("in progress" in existing_body) and + (not workflow_url or workflow_url in existing_body) + ) + + if should_skip: + backport_comments.append((pull, existing_comment)) + else: + existing_comment.edit(f"{existing_body}\n\n{new_line}") + backport_comments.append((pull, existing_comment)) + logger.info(f"Updated existing backport comment in original PR #{pull.number}") + else: + comment = pull.create_issue_comment(new_line) + backport_comments.append((pull, comment)) + logger.info(f"Created initial backport comment in original PR #{pull.number}") + except GithubException as e: + logger.warning(f"Failed to create/update initial comment in original PR #{pull.number}: {e}") + + # Clone repository + repo_dir = tempfile.mkdtemp(prefix="ydb-cherry-pick-") + try: + repo_url = f"https://{token}@github.com/{repo_name}.git" + logger.info("Cloning repository: %s to %s", repo_url, repo_dir) + subprocess.run( + ['git', 'clone', repo_url, repo_dir], + env={**os.environ, 'GIT_PROTOCOL': '2'}, + check=True, + capture_output=True + ) + + # Process each target branch + results = [] + skipped_branches = [] + # Add invalid branches from validation + for invalid_branch in invalid_branches: + skipped_branches.append((invalid_branch, "branch does not exist")) + + has_errors = False + dtm = datetime.datetime.now().strftime("%y%m%d-%H%M%S") + + for target_branch in valid_target_branches: + try: + dev_branch_name = f"cherry-pick-{target_branch}-{dtm}" + result = process_branch( + repo_dir, target_branch, dev_branch_name, all_commit_shas, + repo_name, repo, token, sources, 
workflow_triggerer, workflow_url, summary_path, logger + ) + results.append(result) + except Exception as e: + has_errors = True + error_msg = f"UNEXPECTED_ERROR: Branch {target_branch} - {type(e).__name__}: {e}" + logger.error(error_msg) + if summary_path: + with open(summary_path, 'a') as f: + f.write(f"Branch {target_branch} error: {type(e).__name__}\n```\n{e}\n```\n\n") + skipped_branches.append((target_branch, f"unexpected error: {type(e).__name__}")) + + # Update comments + update_comments(backport_comments, results, skipped_branches, target_branches, workflow_url, logger) + + # Check errors + if has_errors: + error_msg = "WORKFLOW_FAILED: Cherry-pick workflow completed with errors. Check logs above for details." + logger.error(error_msg) + if summary_path: + with open(summary_path, 'a') as f: + f.write(f'{error_msg}\n\n') + sys.exit(1) + + logger.info("WORKFLOW_SUCCESS: All cherry-pick operations completed successfully") + if summary_path: + with open(summary_path, 'a') as f: + f.write("All cherry-pick operations completed successfully\n\n") + finally: + if os.path.exists(repo_dir): + shutil.rmtree(repo_dir) + + +if __name__ == "__main__": + main() diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index ddb19c18228e..1dfb9b560a13 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -55,7 +55,7 @@ on: default: small,medium,large options: - small - - medium, + - medium - large - small,medium - small,medium,large @@ -85,6 +85,7 @@ jobs: with: sparse-checkout: | .github/config/stable_branches.json + .github/actions/run_tests/ - name: Set branches id: set-branches @@ -113,6 +114,21 @@ jobs: echo "Final branches to use: $(cat $GITHUB_OUTPUT | grep branch_array | cut -d= -f2)" + - name: Post start comment to PR + if: inputs.pull_number != '' + env: + GITHUB_TOKEN: ${{ github.token }} + PR_NUMBER: ${{ inputs.pull_number }} + BUILD_PRESET: ${{ inputs.build_preset }} + TEST_SIZE: ${{ inputs.test_size }} + 
TEST_TARGETS: ${{ inputs.test_targets }}
+          GITHUB_SERVER_URL: ${{ github.server_url }}
+          GITHUB_REPOSITORY: ${{ github.repository }}
+          GITHUB_RUN_ID: ${{ github.run_id }}
+        run: |
+          python3 -m pip install PyGithub -q
+          python3 ./.github/actions/run_tests/pr_comment.py start
+
   run_tests:
     needs: prepare
     name: ${{ matrix.branch }}:${{ inputs.build_preset }}
@@ -148,17 +164,18 @@
       with:
         ref: ${{ matrix.branch }}
 
       - name: Setup ssh key for slice
         uses: webfactory/ssh-agent@v0.9.0
         with:
          ssh-private-key: ${{ secrets.SLICE_QA_SSH_PRIVATE_KEY }}
 
       - name: Setup ydb access
         uses: ./.github/actions/setup_ci_ydb_service_account_key_file_credentials
         with:
          ci_ydb_service_account_key_file_credentials: ${{ secrets.CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS }}
 
       - name: Run YDB Tests
+        id: run_tests
         timeout-minutes: ${{ fromJson(env.timeout) }}
         uses: ./.github/actions/build_and_test_ya
         with:
@@ -174,7 +191,20 @@
           custom_branch_name: ${{ matrix.branch }}
           put_build_results_to_cache: true
           additional_ya_make_args: -DDEBUGINFO_LINES_ONLY ${{ inputs.additional_ya_make_args }}
           secs: ${{ format('{{"TESTMO_TOKEN2":"{0}","AWS_KEY_ID":"{1}","AWS_KEY_VALUE":"{2}","REMOTE_CACHE_USERNAME":"{3}","REMOTE_CACHE_PASSWORD":"{4}"}}', secrets.TESTMO_TOKEN2, secrets.AWS_KEY_ID, secrets.AWS_KEY_VALUE, secrets.REMOTE_CACHE_USERNAME, secrets.REMOTE_CACHE_PASSWORD ) }}
           vars: ${{ format('{{"AWS_BUCKET":"{0}","AWS_ENDPOINT":"{1}","REMOTE_CACHE_URL":"{2}","TESTMO_URL":"{3}","TESTMO_PROJECT_ID":"{4}"}}', vars.AWS_BUCKET, vars.AWS_ENDPOINT, vars.REMOTE_CACHE_URL_YA, vars.TESTMO_URL, vars.TESTMO_PROJECT_ID ) }}
+
+      - name: Update PR comment with results
+        if: always() && inputs.pull_number != ''
+        env:
+          GITHUB_TOKEN: ${{ github.token }}
+          PR_NUMBER: ${{ inputs.pull_number }}
+          BUILD_PRESET: ${{ inputs.build_preset }}
+          TEST_SIZE: ${{ inputs.test_size }}
+          TEST_TARGETS: ${{ inputs.test_targets }}
+          TEST_STATUS: ${{ steps.run_tests.outcome }}
+        run: |
+          python3 -m pip install PyGithub -q
+          python3 ./.github/actions/run_tests/pr_comment.py complete