From 88aa0693ce2fa1240767eca14a2783491fd361ba Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Fri, 2 Jun 2023 11:26:17 -0700 Subject: [PATCH 01/22] Update build script. --- scripts/gha/report_build_status.py | 474 +++++++++++++++++++++++++++++ 1 file changed, 474 insertions(+) create mode 100644 scripts/gha/report_build_status.py diff --git a/scripts/gha/report_build_status.py b/scripts/gha/report_build_status.py new file mode 100644 index 0000000000..c590252241 --- /dev/null +++ b/scripts/gha/report_build_status.py @@ -0,0 +1,474 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Installing prerequisites: +# +# sudo python3 -m pip install python-dateutil progress attrs + +"""A utility to report on daily build status. + +USAGE: + python scripts/gha/report_build_status.py \ + --token ${{github.token}} \ +""" + +import datetime +import dateutil +import dateutil.parser +import dateutil.relativedelta +import dateutil.utils +import io +import os +import progress +import progress.bar +import re +import requests +import shutil +import sys +import tempfile +import zipfile + +from absl import app +from absl import flags +from absl import logging + +import github +import summarize_test_results + +FLAGS = flags.FLAGS + +flags.DEFINE_string( + "token", None, + "github.token: A token to authenticate on your repository.") + +flags.DEFINE_string( + "start", None, + "start date of the range to report, default = [--days] days before start") + +flags.DEFINE_string( + "end", None, + "end date of the range to report, default = today") + +flags.DEFINE_string( + "days", '7', + "If start date is unspecified, go this many days back") + +flags.DEFINE_bool( + "output_markdown", False, + "Output a Markdown-formatted table.") + +flags.DEFINE_bool( + "output_header", True, + "Output a table header row. 
Forced true if outputting markdown.") + +flags.DEFINE_bool( + "output_username", True, + "Include a username column in the outputted table.") + +_WORKFLOW_TESTS = 'integration_tests.yml' +_WORKFLOW_PACKAGING = 'cpp-packaging.yml' +_TRIGGER_USER = 'firebase-workflow-trigger[bot]' +_BRANCH = 'main' +_LIMIT = 300 + +_PASS_TEXT = "Pass" +_FAILURE_TEXT = "Failure" +_FLAKY_TEXT = "Flaky Failure" + +general_test_time = ' 09:0' +firestore_test_time = ' 10:0' + +def rename_key(old_dict,old_name,new_name): + new_dict = {} + for key,value in zip(old_dict.keys(),old_dict.values()): + new_key = key if key != old_name else new_name + new_dict[new_key] = old_dict[key] + return new_dict + + +def english_list(items, sep=','): + if len(items) == 2: + return items[0] + " and " + items[1] + else: + if len(items) > 2: + items[len(items)-1] = 'and ' + items[len(items)-1] + return (sep+' ').join(items) + + +def decorate_url(text, url): + if not FLAGS.output_markdown: + return text + return ("[%s](%s)" % (text.replace(" ", " "), url)) + + +def analyze_log(text, url): + build_status = decorate_url(_PASS_TEXT, url) + test_status = decorate_url(_PASS_TEXT, url) + if '[BUILD] [ERROR]' in text: + build_status = decorate_url(_FAILURE_TEXT, url) + elif '[BUILD] [FLAKINESS]' in text: + build_status =decorate_url(_FLAKY_TEXT, url) + if '[TEST] [ERROR]' in text: + test_status = decorate_url(_FAILURE_TEXT, url) + elif '[TEST] [FLAKINESS]' in text: + test_status = decorate_url(_FLAKY_TEXT, url) + return (build_status, test_status) + + +def format_errors(all_errors, severity, event): + product_errors = [] + if severity not in all_errors: return None + if event not in all_errors[severity]: return None + + errors = all_errors[severity][event] + total_errors = 0 + individual_errors = 0 + for product, platform_dict in errors.items(): + platforms = list(platform_dict.keys()) + + if product == 'missing_log': + product_name = 'missing logs' + elif product == 'gma': + product_name = product.upper() + else: + product_name = product.replace('_', ' ').title() + + if 'iOS' in platforms: + all_simulator = True + for descriptors in platform_dict['iOS']: + if 'simulator_' not in descriptors: + all_simulator = False + if all_simulator: + platform_dict = rename_key(platform_dict, 'iOS', 'iOS simulator') + platforms = list(platform_dict.keys()) + + if 'Android' in platforms: + all_emulator = True + for descriptors in platform_dict['Android']: + if 'emulator_' not in descriptors: + all_emulator = False + if all_emulator: + platform_dict = rename_key(platform_dict, 'Android', 'Android emulator') + platforms = list(platform_dict.keys()) + + total_errors += 1 + individual_errors += len(platforms) + platforms_text = english_list(platforms) + if product == 'missing_log': + product_errors.insert(0, '%s on %s' % (product_name, platforms_text)) + else: + product_errors.append('%s on %s' % (product_name, platforms_text)) + + event_text = event.lower() + severity_text = 'flake' if severity == 'FLAKINESS' else severity.lower() + + if total_errors == 0: + return None + + final_text = english_list(product_errors, ';' if ',' in ''.join(product_errors) else ',') + if total_errors == 1: + if 'missing logs' in final_text: + final_text = final_text.replace('missing logs', 'missing %s logs' % event_text) + return final_text + else: + final_text = 'in ' + final_text + else: + final_text = ('including ' if 'missing logs' in final_text else 'in ') + final_text + + final_text = '%s%s %s%s %s' % ('a ' if individual_errors == 1 else '', + event_text, + severity_text, + 
's' if individual_errors > 1 else '', + final_text) + return final_text + + +def create_notes(text): + if not text: return '' + errors = {} + text += '\n' + lines = text.split('\n') + current_product = None + for line in lines: + if not current_product: + m = re.search(r'^([a-z_]+):', line) + if m: + current_product = m.group(1) + else: + # Got a current product + if len(line) == 0: + current_product = None + else: + m = re.search( + r'\[(BUILD|TEST)\] \[(ERROR|FLAKINESS)\] \[([a-zA-Z]+)\] (\[.*\])', + line) + if m: + event = m.group(1) + severity = m.group(2) + platform = m.group(3) + other = m.group(4) + product = current_product + + if severity not in errors: + errors[severity] = {} + if event not in errors[severity]: + errors[severity][event] = {} + if product not in errors[severity][event]: + errors[severity][event][product] = {} + if platform not in errors[severity][event][product]: + errors[severity][event][product][platform] = set() + errors[severity][event][product][platform].add(other) + + log_items = [] + text = format_errors(errors, 'ERROR', 'BUILD') + if text: log_items.append(text) + text = format_errors(errors, 'ERROR', 'TEST') + if text: log_items.append(text) + text = format_errors(errors, 'FLAKINESS', 'TEST') + if text: log_items.append(text) + if len(log_items) == 0: + text = format_errors(errors, 'FLAKINESS', 'BUILD') + if text: log_items.append(text) + + if len(log_items) == 0: + return '' + if len(log_items) == 2 and ' and ' in ''.join(log_items): + log_items[0] += ',' + final_text = english_list(log_items) + final_text = final_text[0].capitalize() + final_text[1:] + '.' + return final_text + + +def get_message_from_github_log(logs_zip, + regex_filename, + regex_line, debug=False): + for log in logs_zip.namelist(): + if re.search(regex_filename, log): + log_text = logs_zip.read(log).decode() + if debug: print(log_text) + m = re.search(regex_line, log_text, re.MULTILINE | re.DOTALL) + if m: + return m + return None + + +def main(argv): + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + if not FLAGS.verbosity: + logging.set_verbosity(logging.WARN) + end_date = (dateutil.parser.parse(FLAGS.end) if FLAGS.end else dateutil.utils.today()).date() + start_date = (dateutil.parser.parse(FLAGS.start) if FLAGS.start else dateutil.utils.today() - dateutil.relativedelta.relativedelta(days=int(FLAGS.days)-1)).date() + all_days = set() + if FLAGS.output_markdown: + # Forced options if outputting Markdown. 
+ FLAGS.output_header = True + FLAGS.output_username = False + global _FAILURE_TEXT, _PASS_TEXT, _FLAKY_TEXT + _FAILURE_TEXT = "❌ **" + _FAILURE_TEXT + "**" + _PASS_TEXT = "✅ " + _PASS_TEXT + _FLAKY_TEXT = _PASS_TEXT + " (flaky)" + + with progress.bar.Bar('Reading jobs...', max=3) as bar: + workflow_id = _WORKFLOW_TESTS + all_runs = github.list_workflow_runs(FLAGS.token, workflow_id, _BRANCH, 'schedule', _LIMIT) + bar.next() + source_tests = {} + for run in reversed(all_runs): + run['date'] = dateutil.parser.parse(run['created_at'], ignoretz=True) + run['day'] = run['date'].date() + day = str(run['date'].date()) + if day in source_tests: continue + if run['status'] != 'completed': continue + if run['day'] < start_date or run['day'] > end_date: continue + run['duration'] = dateutil.parser.parse(run['updated_at'], ignoretz=True) - run['date'] + if general_test_time in str(run['date']): + source_tests[day] = run + all_days.add(day) + # elif firestore_test_time in str(run['date']): + # firestore_tests[day] = run + + workflow_id = _WORKFLOW_PACKAGING + all_runs = github.list_workflow_runs(FLAGS.token, workflow_id, _BRANCH, 'schedule', _LIMIT) + bar.next() + packaging_runs = {} + packaging_run_ids = set() + for run in reversed(all_runs): + run['date'] = dateutil.parser.parse(run['created_at'], ignoretz=True) + day = str(run['date'].date()) + run['day'] = run['date'].date() + if day in packaging_runs: continue + if run['status'] != 'completed': continue + if run['day'] < start_date or run['day'] > end_date: continue + day = str(run['date'].date()) + all_days.add(day) + packaging_runs[day] = run + packaging_run_ids.add(str(run['id'])) + + workflow_id = _WORKFLOW_TESTS + all_runs = github.list_workflow_runs(FLAGS.token, workflow_id, _BRANCH, 'workflow_dispatch', _LIMIT) + bar.next() + package_tests_all = [] + for run in reversed(all_runs): + run['date'] = dateutil.parser.parse(run['created_at'], ignoretz=True) + day = str(run['date'].date()) + run['day'] = run['date'].date() + if day not in packaging_runs: continue + if run['status'] != 'completed': continue + if run['day'] < start_date or run['day'] > end_date: continue + if run['triggering_actor']['login'] != _TRIGGER_USER: continue + package_tests_all.append(run) + + logs_summary = {} + + # For each run in pack + package_tests = {} + + logging.info("Source tests: %s %s", list(source_tests.keys()), [source_tests[r]['id'] for r in source_tests.keys()]) + logging.info("Packaging runs: %s %s", list(packaging_runs.keys()), [packaging_runs[r]['id'] for r in packaging_runs.keys()]) + + with progress.bar.Bar('Downloading triggered workflow logs...', max=len(package_tests_all)) as bar: + for run in package_tests_all: + day = str(run['date'].date()) + if day in package_tests and int(package_tests[day]['id']) < int(run['id']): + bar.next() + continue + + packaging_run = 0 + + logs_url = run['logs_url'] + headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': 'Bearer %s' % FLAGS.token} + with requests.get(logs_url, headers=headers, stream=True) as response: + if response.status_code == 200: + logs_compressed_data = io.BytesIO(response.content) + logs_zip = zipfile.ZipFile(logs_compressed_data) + m = get_message_from_github_log( + logs_zip, + r'check_and_prepare/.*Run if.*expanded.*then.*\.txt', + r'\[warning\]Downloading SDK package from previous run:[^\n]*/([0-9]*)$') + if m: + packaging_run = m.group(1) + if str(packaging_run) in packaging_run_ids: + package_tests[day] = run + bar.next() + + logging.info("Package tests: %s %s", 
list(package_tests.keys()), [package_tests[r]['id'] for r in package_tests.keys()]) + + with progress.bar.Bar('Downloading test summaries...', max=len(source_tests)+len(package_tests)) as bar: + for tests in source_tests, package_tests: + for day in tests: + run = tests[day] + run['log_success'] = True + run['log_results'] = '' + artifacts = github.list_artifacts(FLAGS.token, run['id']) + if 'log-artifact' in [a['name'] for a in artifacts]: + artifact_id = [a['id'] for a in artifacts if a['name'] == 'log-artifact'][0] + artifact_contents = github.download_artifact(FLAGS.token, artifact_id) + if artifact_contents: + artifact_data = io.BytesIO(artifact_contents) + artifact_zip = zipfile.ZipFile(artifact_data) + with tempfile.TemporaryDirectory() as tmpdir: + artifact_zip.extractall(path=tmpdir) + (success, results) = summarize_test_results.summarize_logs(tmpdir, False, False, True) + run['log_success'] = success + run['log_results'] = results + else: + logging.info("Reading github logs for run %s instead", run['id']) + # artifact_contents is empty, get the github logs which is much slower + logs_url = run['logs_url'] + headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': 'Bearer %s' % FLAGS.token} + with requests.get(logs_url, headers=headers, stream=True) as response: + if response.status_code == 200: + logs_compressed_data = io.BytesIO(response.content) + logs_zip = zipfile.ZipFile(logs_compressed_data) + m = get_message_from_github_log( + logs_zip, + r'summarize-results/.*Summarize results into GitHub', + r'\[error\]INTEGRATION TEST FAILURES\n—+\n(.*)$') + if m: + run['log_success'] = False + m2 = re.match(r'(.*?)^' + day, m.group(1), re.MULTILINE | re.DOTALL) + if m2: + run['log_results'] = m2.group(1) + else: + run['log_results'] = m.group(1) + logging.debug("Integration test results: %s", run['log_results']) + tests[day] = run + bar.next() + + prev_notes = '' + last_good_day = None + + if FLAGS.output_markdown: + print("### Testing History (last %d days)\n" % len(all_days)) + + table_fields = ( + ["Date"] + + (["Build Bulbasaur"] if FLAGS.output_username else []) + + ["Build vs Source Repo", "Test vs Source Repo", + "SDK Packaging", "Build vs SDK Package", "Test vs SDK Package", + "Notes"] + ) + if FLAGS.output_markdown: + row_prefix = row_separator = row_suffix = "|" + else: + row_prefix = row_suffix = "" + row_separator = "\t" + + table_header_string = row_prefix + row_separator.join(table_fields) + row_suffix + table_row_fmt = row_prefix + row_separator.join([" %s " for f in table_fields]) + row_suffix + print(table_header_string) + + if FLAGS.output_header: + print(table_row_fmt.replace(" %s ", "---")) + + for day in sorted(all_days): + day_str = day + if FLAGS.output_markdown: + day_str = day_str.replace("-", "‑") # non-breaking hyphen. 
+ if day not in package_tests or day not in packaging_runs or day not in source_tests: + day = last_good_day + if not day: continue + last_good_day = day + source_tests_log = analyze_log(source_tests[day]['log_results'], source_tests[day]['html_url']) + if packaging_runs[day]['conclusion'] == "success": + package_build_log = _PASS_TEXT + else: + package_build_log = _FAILURE_TEXT + package_build_log = decorate_url(package_build_log, packaging_runs[day]['html_url']) + package_tests_log = analyze_log(package_tests[day]['log_results'], package_tests[day]['html_url']) + + notes = create_notes(source_tests[day]['log_results'] if source_tests[day]['log_results'] else package_tests[day]['log_results']) + if FLAGS.output_markdown and notes: + notes = "
 " + notes + "
" + if notes == prev_notes and not FLAGS.output_markdown: + if len(notes) > 0: notes = '\'\"\"' + else: + prev_notes = notes + + table_row_contents = ( + [day_str] + + ([os.getlogin()] if FLAGS.output_username else []) + + [source_tests_log[0], + source_tests_log[1], + package_build_log, + package_tests_log[0], + package_tests_log[1], + notes] + ) + print(table_row_fmt % tuple(table_row_contents)) + +if __name__ == "__main__": + flags.mark_flag_as_required("token") + app.run(main) From cde6177053de5718f30f0d8bc54e09ec2bf41db1 Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Fri, 2 Jun 2023 12:34:04 -0700 Subject: [PATCH 02/22] Add caching. --- scripts/gha/report_build_status.py | 297 ++++++++++++++++------------- 1 file changed, 169 insertions(+), 128 deletions(-) diff --git a/scripts/gha/report_build_status.py b/scripts/gha/report_build_status.py index c590252241..53a6f247c8 100644 --- a/scripts/gha/report_build_status.py +++ b/scripts/gha/report_build_status.py @@ -28,8 +28,10 @@ import dateutil.parser import dateutil.relativedelta import dateutil.utils +import fcntl import io import os +import pickle import progress import progress.bar import re @@ -38,6 +40,7 @@ import sys import tempfile import zipfile +import pickle from absl import app from absl import flags @@ -73,18 +76,30 @@ "Output a table header row. Forced true if outputting markdown.") flags.DEFINE_bool( - "output_username", True, - "Include a username column in the outputted table.") + "output_username", False, + "Include a username column in the outputted table, otherwise include a blank column.") + +flags.DEFINE_bool( + "include_blank_column", True, + "In text output, include a blank column to match the build log spreadsheet format.") + +flags.DEFINE_string( + "write_cache", None, + "Write a cache file that can be used with --read_cache on a subsequent run.") + +flags.DEFINE_string( + "read_cache", None, + "Read a cache file that was written by a previous run via --write_cache.") _WORKFLOW_TESTS = 'integration_tests.yml' _WORKFLOW_PACKAGING = 'cpp-packaging.yml' _TRIGGER_USER = 'firebase-workflow-trigger[bot]' _BRANCH = 'main' -_LIMIT = 300 +_LIMIT = 300 # Hard limit on how many jobs to fetch. _PASS_TEXT = "Pass" _FAILURE_TEXT = "Failure" -_FLAKY_TEXT = "Flaky Failure" +_FLAKY_TEXT = "Pass (flaky)" general_test_time = ' 09:0' firestore_test_time = ' 10:0' @@ -280,132 +295,156 @@ def main(argv): _PASS_TEXT = "✅ " + _PASS_TEXT _FLAKY_TEXT = _PASS_TEXT + " (flaky)" - with progress.bar.Bar('Reading jobs...', max=3) as bar: - workflow_id = _WORKFLOW_TESTS - all_runs = github.list_workflow_runs(FLAGS.token, workflow_id, _BRANCH, 'schedule', _LIMIT) - bar.next() - source_tests = {} - for run in reversed(all_runs): - run['date'] = dateutil.parser.parse(run['created_at'], ignoretz=True) - run['day'] = run['date'].date() - day = str(run['date'].date()) - if day in source_tests: continue - if run['status'] != 'completed': continue - if run['day'] < start_date or run['day'] > end_date: continue - run['duration'] = dateutil.parser.parse(run['updated_at'], ignoretz=True) - run['date'] - if general_test_time in str(run['date']): - source_tests[day] = run + if FLAGS.read_cache: + logging.info("Reading cache file: %s", FLAGS.read_cache) + with open(FLAGS.read_cache, "rb") as handle: + fcntl.lockf(handle, fcntl.LOCK_SH) # For reading, shared lock is OK. 
+ _cache = pickle.load(handle) + fcntl.lockf(handle, fcntl.LOCK_UN) + + all_days = _cache['all_days'] + source_tests = _cache['source_tests'] + packaging_runs = _cache['packaging_runs'] + package_tests = _cache['package_tests'] + else: + _cache = {} + + with progress.bar.Bar('Reading jobs...', max=3) as bar: + workflow_id = _WORKFLOW_TESTS + all_runs = github.list_workflow_runs(FLAGS.token, workflow_id, _BRANCH, 'schedule', _LIMIT) + bar.next() + source_tests = {} + for run in reversed(all_runs): + run['date'] = dateutil.parser.parse(run['created_at'], ignoretz=True) + run['day'] = run['date'].date() + day = str(run['date'].date()) + if day in source_tests: continue + if run['status'] != 'completed': continue + if run['day'] < start_date or run['day'] > end_date: continue + run['duration'] = dateutil.parser.parse(run['updated_at'], ignoretz=True) - run['date'] + if general_test_time in str(run['date']): + source_tests[day] = run + all_days.add(day) + # elif firestore_test_time in str(run['date']): + # firestore_tests[day] = run + + workflow_id = _WORKFLOW_PACKAGING + all_runs = github.list_workflow_runs(FLAGS.token, workflow_id, _BRANCH, 'schedule', _LIMIT) + bar.next() + packaging_runs = {} + packaging_run_ids = set() + for run in reversed(all_runs): + run['date'] = dateutil.parser.parse(run['created_at'], ignoretz=True) + day = str(run['date'].date()) + run['day'] = run['date'].date() + if day in packaging_runs: continue + if run['status'] != 'completed': continue + if run['day'] < start_date or run['day'] > end_date: continue + day = str(run['date'].date()) all_days.add(day) - # elif firestore_test_time in str(run['date']): - # firestore_tests[day] = run + packaging_runs[day] = run + packaging_run_ids.add(str(run['id'])) + + workflow_id = _WORKFLOW_TESTS + all_runs = github.list_workflow_runs(FLAGS.token, workflow_id, _BRANCH, 'workflow_dispatch', _LIMIT) + bar.next() + package_tests_all = [] + for run in reversed(all_runs): + run['date'] = dateutil.parser.parse(run['created_at'], ignoretz=True) + day = str(run['date'].date()) + run['day'] = run['date'].date() + if day not in packaging_runs: continue + if run['status'] != 'completed': continue + if run['day'] < start_date or run['day'] > end_date: continue + if run['triggering_actor']['login'] != _TRIGGER_USER: continue + package_tests_all.append(run) - workflow_id = _WORKFLOW_PACKAGING - all_runs = github.list_workflow_runs(FLAGS.token, workflow_id, _BRANCH, 'schedule', _LIMIT) - bar.next() - packaging_runs = {} - packaging_run_ids = set() - for run in reversed(all_runs): - run['date'] = dateutil.parser.parse(run['created_at'], ignoretz=True) - day = str(run['date'].date()) - run['day'] = run['date'].date() - if day in packaging_runs: continue - if run['status'] != 'completed': continue - if run['day'] < start_date or run['day'] > end_date: continue - day = str(run['date'].date()) - all_days.add(day) - packaging_runs[day] = run - packaging_run_ids.add(str(run['id'])) + # For each run in pack + package_tests = {} - workflow_id = _WORKFLOW_TESTS - all_runs = github.list_workflow_runs(FLAGS.token, workflow_id, _BRANCH, 'workflow_dispatch', _LIMIT) - bar.next() - package_tests_all = [] - for run in reversed(all_runs): - run['date'] = dateutil.parser.parse(run['created_at'], ignoretz=True) - day = str(run['date'].date()) - run['day'] = run['date'].date() - if day not in packaging_runs: continue - if run['status'] != 'completed': continue - if run['day'] < start_date or run['day'] > end_date: continue - if run['triggering_actor']['login'] != 
_TRIGGER_USER: continue - package_tests_all.append(run) - - logs_summary = {} - - # For each run in pack - package_tests = {} - - logging.info("Source tests: %s %s", list(source_tests.keys()), [source_tests[r]['id'] for r in source_tests.keys()]) - logging.info("Packaging runs: %s %s", list(packaging_runs.keys()), [packaging_runs[r]['id'] for r in packaging_runs.keys()]) - - with progress.bar.Bar('Downloading triggered workflow logs...', max=len(package_tests_all)) as bar: - for run in package_tests_all: - day = str(run['date'].date()) - if day in package_tests and int(package_tests[day]['id']) < int(run['id']): - bar.next() - continue + logging.info("Source tests: %s %s", list(source_tests.keys()), [source_tests[r]['id'] for r in source_tests.keys()]) + logging.info("Packaging runs: %s %s", list(packaging_runs.keys()), [packaging_runs[r]['id'] for r in packaging_runs.keys()]) - packaging_run = 0 - - logs_url = run['logs_url'] - headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': 'Bearer %s' % FLAGS.token} - with requests.get(logs_url, headers=headers, stream=True) as response: - if response.status_code == 200: - logs_compressed_data = io.BytesIO(response.content) - logs_zip = zipfile.ZipFile(logs_compressed_data) - m = get_message_from_github_log( - logs_zip, - r'check_and_prepare/.*Run if.*expanded.*then.*\.txt', - r'\[warning\]Downloading SDK package from previous run:[^\n]*/([0-9]*)$') - if m: - packaging_run = m.group(1) - if str(packaging_run) in packaging_run_ids: - package_tests[day] = run - bar.next() - - logging.info("Package tests: %s %s", list(package_tests.keys()), [package_tests[r]['id'] for r in package_tests.keys()]) - - with progress.bar.Bar('Downloading test summaries...', max=len(source_tests)+len(package_tests)) as bar: - for tests in source_tests, package_tests: - for day in tests: - run = tests[day] - run['log_success'] = True - run['log_results'] = '' - artifacts = github.list_artifacts(FLAGS.token, run['id']) - if 'log-artifact' in [a['name'] for a in artifacts]: - artifact_id = [a['id'] for a in artifacts if a['name'] == 'log-artifact'][0] - artifact_contents = github.download_artifact(FLAGS.token, artifact_id) - if artifact_contents: - artifact_data = io.BytesIO(artifact_contents) - artifact_zip = zipfile.ZipFile(artifact_data) - with tempfile.TemporaryDirectory() as tmpdir: - artifact_zip.extractall(path=tmpdir) - (success, results) = summarize_test_results.summarize_logs(tmpdir, False, False, True) - run['log_success'] = success - run['log_results'] = results - else: - logging.info("Reading github logs for run %s instead", run['id']) - # artifact_contents is empty, get the github logs which is much slower - logs_url = run['logs_url'] - headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': 'Bearer %s' % FLAGS.token} - with requests.get(logs_url, headers=headers, stream=True) as response: - if response.status_code == 200: - logs_compressed_data = io.BytesIO(response.content) - logs_zip = zipfile.ZipFile(logs_compressed_data) - m = get_message_from_github_log( - logs_zip, - r'summarize-results/.*Summarize results into GitHub', - r'\[error\]INTEGRATION TEST FAILURES\n—+\n(.*)$') - if m: - run['log_success'] = False - m2 = re.match(r'(.*?)^' + day, m.group(1), re.MULTILINE | re.DOTALL) - if m2: - run['log_results'] = m2.group(1) - else: - run['log_results'] = m.group(1) - logging.debug("Integration test results: %s", run['log_results']) - tests[day] = run + with progress.bar.Bar('Downloading triggered workflow logs...', 
max=len(package_tests_all)) as bar: + for run in package_tests_all: + day = str(run['date'].date()) + if day in package_tests and int(package_tests[day]['id']) < int(run['id']): + bar.next() + continue + + packaging_run = 0 + + logs_url = run['logs_url'] + headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': 'Bearer %s' % FLAGS.token} + with requests.get(logs_url, headers=headers, stream=True) as response: + if response.status_code == 200: + logs_compressed_data = io.BytesIO(response.content) + logs_zip = zipfile.ZipFile(logs_compressed_data) + m = get_message_from_github_log( + logs_zip, + r'check_and_prepare/.*Run if.*expanded.*then.*\.txt', + r'\[warning\]Downloading SDK package from previous run:[^\n]*/([0-9]*)$') + if m: + packaging_run = m.group(1) + if str(packaging_run) in packaging_run_ids: + package_tests[day] = run bar.next() + + logging.info("Package tests: %s %s", list(package_tests.keys()), [package_tests[r]['id'] for r in package_tests.keys()]) + + with progress.bar.Bar('Downloading test summaries...', max=len(source_tests)+len(package_tests)) as bar: + for tests in source_tests, package_tests: + for day in tests: + run = tests[day] + run['log_success'] = True + run['log_results'] = '' + artifacts = github.list_artifacts(FLAGS.token, run['id']) + if 'log-artifact' in [a['name'] for a in artifacts]: + artifact_id = [a['id'] for a in artifacts if a['name'] == 'log-artifact'][0] + artifact_contents = github.download_artifact(FLAGS.token, artifact_id) + if artifact_contents: + artifact_data = io.BytesIO(artifact_contents) + artifact_zip = zipfile.ZipFile(artifact_data) + with tempfile.TemporaryDirectory() as tmpdir: + artifact_zip.extractall(path=tmpdir) + (success, results) = summarize_test_results.summarize_logs(tmpdir, False, False, True) + run['log_success'] = success + run['log_results'] = results + else: + logging.info("Reading github logs for run %s instead", run['id']) + # artifact_contents is empty, get the github logs which is much slower + logs_url = run['logs_url'] + headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': 'Bearer %s' % FLAGS.token} + with requests.get(logs_url, headers=headers, stream=True) as response: + if response.status_code == 200: + logs_compressed_data = io.BytesIO(response.content) + logs_zip = zipfile.ZipFile(logs_compressed_data) + m = get_message_from_github_log( + logs_zip, + r'summarize-results/.*Summarize results into GitHub', + r'\[error\]INTEGRATION TEST FAILURES\n—+\n(.*)$') + if m: + run['log_success'] = False + m2 = re.match(r'(.*?)^' + day, m.group(1), re.MULTILINE | re.DOTALL) + if m2: + run['log_results'] = m2.group(1) + else: + run['log_results'] = m.group(1) + logging.debug("Integration test results: %s", run['log_results']) + tests[day] = run + bar.next() + + _cache['all_days'] = all_days + _cache['source_tests'] = source_tests + _cache['packaging_runs'] = packaging_runs + _cache['package_tests'] = package_tests + + if FLAGS.write_cache: + logging.info("Writing cache file: %s", FLAGS.write_cache) + with open(FLAGS.write_cache, "wb") as handle: + fcntl.lockf(handle, fcntl.LOCK_EX) # For writing, need exclusive lock. 
+ pickle.dump(_cache, handle, protocol=pickle.HIGHEST_PROTOCOL) + fcntl.lockf(handle, fcntl.LOCK_UN) prev_notes = '' last_good_day = None @@ -415,7 +454,8 @@ def main(argv): table_fields = ( ["Date"] + - (["Build Bulbasaur"] if FLAGS.output_username else []) + + (["Build Bulbasaur"] if FLAGS.output_username else [""]) + + ([""] if FLAGS.include_blank_column else []) + ["Build vs Source Repo", "Test vs Source Repo", "SDK Packaging", "Build vs SDK Package", "Test vs SDK Package", "Notes"] @@ -430,7 +470,7 @@ def main(argv): table_row_fmt = row_prefix + row_separator.join([" %s " for f in table_fields]) + row_suffix print(table_header_string) - if FLAGS.output_header: + if FLAGS.output_header and FLAGS.output_markdown: print(table_row_fmt.replace(" %s ", "---")) for day in sorted(all_days): @@ -459,7 +499,8 @@ def main(argv): table_row_contents = ( [day_str] + - ([os.getlogin()] if FLAGS.output_username else []) + + ([os.getlogin()] if FLAGS.output_username else [""]) + + ([""] if FLAGS.include_blank_column else []) + [source_tests_log[0], source_tests_log[1], package_build_log, From 7c6fd0f693bf3b6717ee4b90f45ddf9ba1323a81 Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Fri, 2 Jun 2023 14:03:21 -0700 Subject: [PATCH 03/22] Clean up output in text mode. --- scripts/gha/report_build_status.py | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/scripts/gha/report_build_status.py b/scripts/gha/report_build_status.py index 53a6f247c8..efdbec7c2a 100644 --- a/scripts/gha/report_build_status.py +++ b/scripts/gha/report_build_status.py @@ -71,13 +71,17 @@ "output_markdown", False, "Output a Markdown-formatted table.") +flags.DEFINE_bool( + "reverse", False, + "Reverse output, so most recent is first.") + flags.DEFINE_bool( "output_header", True, "Output a table header row. Forced true if outputting markdown.") flags.DEFINE_bool( "output_username", False, - "Include a username column in the outputted table, otherwise include a blank column.") + "Include a username column in the outputted table, otherwise include a blank column in text or no column in Markdown.") flags.DEFINE_bool( "include_blank_column", True, @@ -95,7 +99,7 @@ _WORKFLOW_PACKAGING = 'cpp-packaging.yml' _TRIGGER_USER = 'firebase-workflow-trigger[bot]' _BRANCH = 'main' -_LIMIT = 300 # Hard limit on how many jobs to fetch. +_LIMIT = 400 # Hard limit on how many jobs to fetch. 
_PASS_TEXT = "Pass" _FAILURE_TEXT = "Failure" @@ -454,26 +458,30 @@ def main(argv): table_fields = ( ["Date"] + - (["Build Bulbasaur"] if FLAGS.output_username else [""]) + - ([""] if FLAGS.include_blank_column else []) + + (["Build Bulbasaur"] if FLAGS.output_username else ([] if FLAGS.output_markdown else [""])) + + ([""] if FLAGS.include_blank_column and not FLAGS.output_markdown else []) + ["Build vs Source Repo", "Test vs Source Repo", "SDK Packaging", "Build vs SDK Package", "Test vs SDK Package", "Notes"] ) if FLAGS.output_markdown: - row_prefix = row_separator = row_suffix = "|" + row_prefix = "| " + row_separator = "|" + row_suffix = " |" else: row_prefix = row_suffix = "" row_separator = "\t" table_header_string = row_prefix + row_separator.join(table_fields) + row_suffix - table_row_fmt = row_prefix + row_separator.join([" %s " for f in table_fields]) + row_suffix + table_row_fmt = row_prefix + row_separator.join(["%s" for f in table_fields]) + row_suffix print(table_header_string) if FLAGS.output_header and FLAGS.output_markdown: - print(table_row_fmt.replace(" %s ", "---")) - - for day in sorted(all_days): + print(table_row_fmt.replace("%s", "---").replace(" ", "")) + + days_sorted = sorted(all_days) + if FLAGS.reverse: days_sorted = reversed(days_sorted) + for day in days_sorted: day_str = day if FLAGS.output_markdown: day_str = day_str.replace("-", "‑") # non-breaking hyphen. @@ -499,8 +507,8 @@ def main(argv): table_row_contents = ( [day_str] + - ([os.getlogin()] if FLAGS.output_username else [""]) + - ([""] if FLAGS.include_blank_column else []) + + ([os.getlogin()] if FLAGS.output_username else ([] if FLAGS.output_markdown else [""])) + + ([""] if FLAGS.include_blank_column and not FLAGS.output_markdown else []) + [source_tests_log[0], source_tests_log[1], package_build_log, From 720313c1b373e8103a2d3cb43e7b7e4e0475d806 Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Fri, 2 Jun 2023 14:16:42 -0700 Subject: [PATCH 04/22] Run most basic version of script. 
--- .github/workflows/build-report.yml | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml index 92ef2f407a..71dabb3b54 100644 --- a/.github/workflows/build-report.yml +++ b/.github/workflows/build-report.yml @@ -14,6 +14,21 @@ jobs: generate-report: runs-on: ubuntu-20.04 steps: + - name: Setup python + uses: actions/setup-python@v4 + with: + python-version: 3.9 + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Install Desktop SDK prerequisites + uses: nick-invision/retry@v2 + with: + timeout_minutes: 15 + max_attempts: 3 + command: | + python3 scripts/gha/install_prereqs_desktop.py --gha_build + python3 -m pip install python-dateutil progress attrs - name: Fetch GitHub jobs run: | - true + python3 scripts/gha/report_build_status --token {{ github.token }} --days ${{ env.numDays }} --write_cache build_status.cache From 7ae62a11c0cdc4f09c2d5e666f04c03d881c67df Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Fri, 2 Jun 2023 14:17:23 -0700 Subject: [PATCH 05/22] Untabify --- .github/workflows/build-report.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml index 71dabb3b54..ad4dd3f915 100644 --- a/.github/workflows/build-report.yml +++ b/.github/workflows/build-report.yml @@ -28,7 +28,7 @@ jobs: max_attempts: 3 command: | python3 scripts/gha/install_prereqs_desktop.py --gha_build - python3 -m pip install python-dateutil progress attrs + python3 -m pip install python-dateutil progress attrs - name: Fetch GitHub jobs run: | python3 scripts/gha/report_build_status --token {{ github.token }} --days ${{ env.numDays }} --write_cache build_status.cache From 39e0e894c577edc8bd2e9014205b434c028af212 Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Fri, 2 Jun 2023 14:19:01 -0700 Subject: [PATCH 06/22] Fix python script --- .github/workflows/build-report.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml index ad4dd3f915..a7c0d837f0 100644 --- a/.github/workflows/build-report.yml +++ b/.github/workflows/build-report.yml @@ -1,4 +1,4 @@ -name: Generate Weekly Test Report +name: Generate Test Report Table on: workflow_dispatch: @@ -31,4 +31,4 @@ jobs: python3 -m pip install python-dateutil progress attrs - name: Fetch GitHub jobs run: | - python3 scripts/gha/report_build_status --token {{ github.token }} --days ${{ env.numDays }} --write_cache build_status.cache + python3 scripts/gha/report_build_status.py --token {{ github.token }} --days ${{ env.numDays }} --write_cache build_status.cache From f436699e9859c8bcf932548ee3afd8ce8405cf7c Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Fri, 2 Jun 2023 14:20:38 -0700 Subject: [PATCH 07/22] Install requests module --- .github/workflows/build-report.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml index a7c0d837f0..6d6ad3cdc4 100644 --- a/.github/workflows/build-report.yml +++ b/.github/workflows/build-report.yml @@ -28,7 +28,7 @@ jobs: max_attempts: 3 command: | python3 scripts/gha/install_prereqs_desktop.py --gha_build - python3 -m pip install python-dateutil progress attrs + python3 -m pip install requests python-dateutil progress attrs - name: Fetch GitHub jobs run: | python3 scripts/gha/report_build_status.py --token {{ github.token }} --days ${{ env.numDays }} 
--write_cache build_status.cache From 0b6d0f41d7beb15789eeaae3bf85853641ada809 Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Fri, 2 Jun 2023 14:22:06 -0700 Subject: [PATCH 08/22] Fix command --- .github/workflows/build-report.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml index 6d6ad3cdc4..c62d55575d 100644 --- a/.github/workflows/build-report.yml +++ b/.github/workflows/build-report.yml @@ -31,4 +31,4 @@ jobs: python3 -m pip install requests python-dateutil progress attrs - name: Fetch GitHub jobs run: | - python3 scripts/gha/report_build_status.py --token {{ github.token }} --days ${{ env.numDays }} --write_cache build_status.cache + python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --write_cache build_status.cache From 7032c05e572249dd3231d214c1dcb06e312c625b Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Fri, 2 Jun 2023 14:35:44 -0700 Subject: [PATCH 09/22] Full script. --- .github/workflows/build-report.yml | 41 +++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml index c62d55575d..d7f9f3d91a 100644 --- a/.github/workflows/build-report.yml +++ b/.github/workflows/build-report.yml @@ -31,4 +31,43 @@ jobs: python3 -m pip install requests python-dateutil progress attrs - name: Fetch GitHub jobs run: | - python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --write_cache build_status.cache + python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDaysExtended }} --write_cache build_status.cache + - name: Generate report files + run: | + python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --output_markdown --read_cache build_status.cache > report_short.md + python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDaysExtended }} --output_markdown --read_cache build_status.cache > report_extended.md + python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --read_cache build_status.cache > report.txt + - name: Generate comment string + run: | + echo -n > comment.md + cat >> comment.md <> comment.md + cat >> comment.md <View extended history + + ### Testing History (last ${{ env.numDaysExtended }} days) + + EOF + cat report_extended.md >> comment.md + cat >> comment.md < +
+	<details><summary>📄</summary>
+	EOF
+	cat report.txt >> comment.md
+	cat >> comment.md <<EOF
+	</details>
+ EOF + - name: Show comment string + run: | + cat comment.md + - name: Upload comment file artifact + uses: actions/upload-artifact@v3 + with: + name: comment.md + path: comment.md From 506a9e2dcb1fea6d8eb2ea2e5de189cadf667d42 Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Fri, 2 Jun 2023 14:36:25 -0700 Subject: [PATCH 10/22] Tabs --- .github/workflows/build-report.yml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml index d7f9f3d91a..628c272d5b 100644 --- a/.github/workflows/build-report.yml +++ b/.github/workflows/build-report.yml @@ -41,12 +41,12 @@ jobs: run: | echo -n > comment.md cat >> comment.md <> comment.md + cat report_short.md >> comment.md cat >> comment.md <View extended history @@ -54,15 +54,15 @@ jobs: ### Testing History (last ${{ env.numDaysExtended }} days) EOF - cat report_extended.md >> comment.md - cat >> comment.md < -
-	<details><summary>📄</summary>
-	EOF
-	cat report.txt >> comment.md
-	cat >> comment.md <<EOF
-	</details>
- EOF + cat report_extended.md >> comment.md + cat >> comment.md < +
+        <details><summary>📄</summary>
+        EOF
+        cat report.txt >> comment.md
+        cat >> comment.md <<EOF
+        </details>
+ EOF - name: Show comment string run: | cat comment.md From 2eaa2d4cd38621e161dc623d186853b70ffeef25 Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Fri, 2 Jun 2023 16:37:00 -0700 Subject: [PATCH 11/22] Fix build report. --- .github/workflows/build-report.yml | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml index 628c272d5b..30e52dd86b 100644 --- a/.github/workflows/build-report.yml +++ b/.github/workflows/build-report.yml @@ -31,29 +31,25 @@ jobs: python3 -m pip install requests python-dateutil progress attrs - name: Fetch GitHub jobs run: | + python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --write_cache build_status_short.cache python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDaysExtended }} --write_cache build_status.cache - name: Generate report files run: | - python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --output_markdown --read_cache build_status.cache > report_short.md + python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --output_markdown --read_cache build_status_short.cache > report_short.md python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDaysExtended }} --output_markdown --read_cache build_status.cache > report_extended.md - python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --read_cache build_status.cache > report.txt + python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --read_cache build_status_short.cache > report.txt - name: Generate comment string run: | echo -n > comment.md cat >> comment.md <> comment.md cat >> comment.md <View extended history - - ### Testing History (last ${{ env.numDaysExtended }} days) - - EOF + EOF cat report_extended.md >> comment.md cat >> comment.md < @@ -71,3 +67,8 @@ jobs: with: name: comment.md path: comment.md + - name: Upload build status cache (debugging) + uses: actions/upload-artifact@v3 + with: + name: build_status.cache + path: build_status.cache From 4b67fd3673ac2bc595eab31023149debe8fd88f1 Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Fri, 2 Jun 2023 16:37:29 -0700 Subject: [PATCH 12/22] Tabs. --- .github/workflows/build-report.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml index 30e52dd86b..0a72bde212 100644 --- a/.github/workflows/build-report.yml +++ b/.github/workflows/build-report.yml @@ -49,7 +49,7 @@ jobs: cat >> comment.md <View extended history - EOF + EOF cat report_extended.md >> comment.md cat >> comment.md < From d8c88085f62a4dc39bcec57d4f39a892a240d39b Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Mon, 5 Jun 2023 15:20:22 -0700 Subject: [PATCH 13/22] Preserve dashboard comment in test workflow. 
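
For context: test_report() in it_workflow.py assembles the nightly report
issue body from divider-separated sections, roughly as follows (a sketch;
only the _COMMENT_HIDDEN_DIVIDER name is real, the section names are
illustrative):

    body = (prefix  # free-form note, and (in a later patch) the dashboard
            + _COMMENT_HIDDEN_DIVIDER + report_repo  # tests vs. source repo
            + _COMMENT_HIDDEN_DIVIDER + report_sdk   # tests vs. packaged SDK
            + _COMMENT_HIDDEN_DIVIDER + report_tip)  # tests vs. tip-of-tree
    # so body.split(_COMMENT_HIDDEN_DIVIDER) always yields four sections
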
--- scripts/gha/it_workflow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/gha/it_workflow.py b/scripts/gha/it_workflow.py index ee9db0fafe..344828f3de 100644 --- a/scripts/gha/it_workflow.py +++ b/scripts/gha/it_workflow.py @@ -222,7 +222,7 @@ def test_report(token, actor, commit, run_id, build_against, build_apis): report_title = _REPORT_TITLE firestore_issue_number = _get_issue_number(token, _REPORT_TITLE_FIRESTORE, _REPORT_LABEL) firestore_issue_url = "https://github.com/firebase/firebase-cpp-sdk/issues/%s" % firestore_issue_number - prefix = "Note: This report excludes firestore. Please also check **[the report for firestore](%s)**\n***\n" % firestore_issue_url + prefix = "Note: This report excludes Firestore. Please also check **[the report for Firestore](%s).**\n***\n" % firestore_issue_url issue_number = _get_issue_number(token, report_title, _REPORT_LABEL) previous_comment = github.get_issue_body(token, issue_number) From f248bd641dac1a41e5d3330e88353fd622b5dc1d Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Mon, 5 Jun 2023 15:23:46 -0700 Subject: [PATCH 14/22] Add script for updating issue comment. --- scripts/gha/report_build_status.py | 2 +- scripts/gha/update_issue_comment.py | 97 +++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+), 1 deletion(-) create mode 100644 scripts/gha/update_issue_comment.py diff --git a/scripts/gha/report_build_status.py b/scripts/gha/report_build_status.py index efdbec7c2a..b86268f35d 100644 --- a/scripts/gha/report_build_status.py +++ b/scripts/gha/report_build_status.py @@ -20,7 +20,7 @@ USAGE: python scripts/gha/report_build_status.py \ - --token ${{github.token}} \ + --token ${{github.token}} """ import datetime diff --git a/scripts/gha/update_issue_comment.py b/scripts/gha/update_issue_comment.py new file mode 100644 index 0000000000..45b2927d07 --- /dev/null +++ b/scripts/gha/update_issue_comment.py @@ -0,0 +1,97 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Installing prerequisites: +# +# sudo python3 -m pip install python-dateutil progress attrs + +"""A utility to add modify part of an issue comment, which must already exist. +The comment text should be entered in stdin. The script preserves the previous +text before and affer the hidden tags. 
+ +USAGE: + python scripts/gha/update_issue_comment.py \ + --token GITHUB_TOKEN \ + --issue_title "Issue title goes here" \ + --issue_label "Issue label goes here" \ + --start_tag "hidden-tag-start" \ + --end_tag "hidden-tag-end" < updated-comment-section.md +""" + + +from absl import app +from absl import flags +from absl import logging + +import github + +FLAGS = flags.FLAGS + +flags.DEFINE_string( + "token", None, + "github.token: A token to authenticate on your repository.") + +flags.DEFINE_string( + "start_tag", "comment-start", + "Starting tag (inside a element).") + +flags.DEFINE_string( + "end_tag", "comment-end", + "Ending tag (inside a element).") + +flags.DEFINE_string( + "issue_title", None, + "Title of the issue to modify. Will fail if it doesn't exist.") + +flags.DEFINE_string( + "issue_label", 'nightly-testing', + "Label to search for.") + + +def get_issue_number(token, title, label): + issues = github.search_issues_by_label(label) + for issue in issues: + if issue["title"] == title: + return issue["number"] + return None + + +def main(argv): + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + if not FLAGS.verbosity: + logging.set_verbosity(logging.WARN) + + comment_start = "\r\n\r\n" % FLAGS.comment_start + comment_end = "\r\n\r\n" % FLAGS.comment_end + + issue_number = get_issue_number(FLAGS.token, FLAGS.issue_title, FLAGS.issue_label) + if not issue_number: + logging.fatal("Couldn't find a '%s' issue matching '%s'", + FLAGS.issue_label, + FLAGS.issue_title) + + previous_comment = github.get_issue_body(FLAGS.token, issue_number) + if comment_start not in previous_comment: + logging.fatal("Couldn't find start tag '%s' in previous comment", comment_start) + if comment_end not in previous_comment: + logging.fatal("Couldn't find end tag '%s' in previous comment", comment_end) + + + + +if __name__ == "__main__": + flags.mark_flag_as_required("token") + flags.mark_flag_as_required("issue_title") + app.run(main) From d537109d4869d1d0df7adc4cf4f84879b8c3cd49 Mon Sep 17 00:00:00 2001 From: Jon Simantov Date: Mon, 5 Jun 2023 15:29:38 -0700 Subject: [PATCH 15/22] Update workflow script to handle dashboard. --- .github/workflows/build-report.yml | 3 ++ scripts/gha/it_workflow.py | 55 +++++++++++++++++++++--------- 2 files changed, 41 insertions(+), 17 deletions(-) diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml index 0a72bde212..e563929522 100644 --- a/.github/workflows/build-report.yml +++ b/.github/workflows/build-report.yml @@ -42,6 +42,7 @@ jobs: run: | echo -n > comment.md cat >> comment.md <> comment.md <View extended history + EOF cat report_extended.md >> comment.md cat >> comment.md <
         <details><summary>📄</summary>
+
         EOF
         cat report.txt >> comment.md
         cat >> comment.md <\r\n'
 
+_COMMENT_IDENTIFIER_DASHBOARD = "build-dashboard-comment"
+_COMMENT_DASHBOARD_START = f'\r\n<hidden value="{_COMMENT_IDENTIFIER_DASHBOARD}-start"></hidden>\r\n'
+_COMMENT_DASHBOARD_END = f'\r\n<hidden value="{_COMMENT_IDENTIFIER_DASHBOARD}-end"></hidden>\r\n'
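+# The <hidden> markers above do not render in GitHub's Markdown, so they act
+# as invisible delimiters around the dashboard section of the issue body;
+# update_issue_comment.py (added later in this series) splits on them.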
+
 _LOG_ARTIFACT_NAME = "log-artifact"
 _LOG_OUTPUT_DIR = "test_results"
 
@@ -105,7 +109,7 @@
     "Different stage while running the workflow. Valid values in _BUILD_STAGES.")
 
 flags.DEFINE_string(
-    "token", None, 
+    "token", None,
     "github.token: A token to authenticate on your repository.")
 
 flags.DEFINE_string(
@@ -127,7 +131,7 @@
     "new_token", None,
     "Only used with --stage end"
     "Use a different token to remove the \"in-progress\" label,"
-    "to allow the removal to trigger the \"Check Labels\" workflow.")   
+    "to allow the removal to trigger the \"Check Labels\" workflow.")
 
 flags.DEFINE_string(
     "build_against", None,
@@ -151,7 +155,7 @@ def test_start(token, issue_number, actor, commit, run_id):
 
 
 def test_progress(token, issue_number, actor, commit, run_id):
-  """In PR, when some test failed, update failure info and 
+  """In PR, when a test fails, update the failure info and
   add label \"tests: failed\""""
   success_or_only_flakiness, log_summary = _get_summary_table(token, run_id)
   if success_or_only_flakiness and not log_summary:
@@ -174,7 +178,7 @@ def test_progress(token, issue_number, actor, commit, run_id):
 
 
 def test_end(token, issue_number, actor, commit, run_id, new_token):
-  """In PR, when some test end, update Test Result Report and 
+  """In PR, when tests end, update the Test Result Report and
   update label: add \"tests: failed\" if test failed, add label
   \"tests: succeeded\" if test succeed"""
   success_or_only_flakiness, log_summary = _get_summary_table(token, run_id)
@@ -205,11 +209,14 @@ def test_end(token, issue_number, actor, commit, run_id, new_token):
 
 
 def test_report(token, actor, commit, run_id, build_against, build_apis):
-  """Update (create if not exist) a Daily/Nightly Report in Issue. 
+  """Update (create if it does not exist) a Daily/Nightly Report issue.
   The Issue with title _REPORT_TITLE and label _REPORT_LABEL:
   https://github.com/firebase/firebase-cpp-sdk/issues?q=is%3Aissue+label%3Anightly-testing
   The report is with the format below:
     PREFIX
+    HIDDEN DASHBOARD START - optional
+    BUILD DASHBOARD - optional
+    HIDDEN DASHBOARD END - optional
     HIDDEN DIVIDER
     REPORT (TEST AGAINST REPO)
     HIDDEN DIVIDER
@@ -226,12 +233,20 @@ def test_report(token, actor, commit, run_id, build_against, build_apis):
 
   issue_number = _get_issue_number(token, report_title, _REPORT_LABEL)
   previous_comment = github.get_issue_body(token, issue_number)
-  [_, previous_comment_repo, previous_comment_sdk, previous_comment_tip] = previous_comment.split(_COMMENT_HIDDEN_DIVIDER)
+  [previous_prefix, previous_comment_repo, previous_comment_sdk,
+   previous_comment_tip] = previous_comment.split(_COMMENT_HIDDEN_DIVIDER)
+  # If there is a build dashboard, preserve it.
+  if (_COMMENT_DASHBOARD_START in previous_prefix and
+      _COMMENT_DASHBOARD_END in previous_prefix):
+    [_, previous_dashboard_plus_the_rest] = previous_prefix.split(_COMMENT_DASHBOARD_START)
+    [previous_dashboard, _] = previous_dashboard_plus_the_rest.split(_COMMENT_DASHBOARD_END)
+    prefix = prefix + _COMMENT_DASHBOARD_START + previous_dashboard + _COMMENT_DASHBOARD_END
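+    # Re-attaching the old dashboard keeps the section that build-report.yml
+    # writes from being wiped out when the nightly report is updated.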
+
   success_or_only_flakiness, log_summary = _get_summary_table(token, run_id)
   if success_or_only_flakiness and not log_summary:
     # succeeded (without flakiness)
     if build_against==_BUILD_AGAINST_REPO:
-      title = _COMMENT_TITLE_SUCCEED_REPO  
+      title = _COMMENT_TITLE_SUCCEED_REPO
     elif build_against==_BUILD_AGAINST_SDK:
       title = _COMMENT_TITLE_SUCCEED_SDK
     else:
@@ -241,7 +256,7 @@ def test_report(token, actor, commit, run_id, build_against, build_apis):
     if success_or_only_flakiness:
       # all failures/errors are due to flakiness (succeeded after retry)
       if build_against==_BUILD_AGAINST_REPO:
-        title = _COMMENT_TITLE_FLAKY_REPO  
+        title = _COMMENT_TITLE_FLAKY_REPO
       elif build_against==_BUILD_AGAINST_SDK:
         title = _COMMENT_TITLE_FLAKY_SDK
       else:
@@ -249,13 +264,13 @@ def test_report(token, actor, commit, run_id, build_against, build_apis):
     else:
       # failures/errors still exist after retry
       if build_against==_BUILD_AGAINST_REPO:
-        title = _COMMENT_TITLE_FAIL_REPO  
+        title = _COMMENT_TITLE_FAIL_REPO
       elif build_against==_BUILD_AGAINST_SDK:
         title = _COMMENT_TITLE_FAIL_SDK
       else:
         title = _COMMENT_TITLE_FAIL_TIP
     comment = title + _get_description(actor, commit, run_id) + log_summary + _COMMENT_FLAKY_TRACKER
-  
+
   if build_against==_BUILD_AGAINST_REPO:
     comment = prefix + _COMMENT_HIDDEN_DIVIDER + comment + _COMMENT_HIDDEN_DIVIDER + previous_comment_sdk + _COMMENT_HIDDEN_DIVIDER + previous_comment_tip
   elif build_against==_BUILD_AGAINST_SDK:
@@ -267,7 +282,7 @@ def test_report(token, actor, commit, run_id, build_against, build_apis):
     github.close_issue(token, issue_number)
   else:
     github.open_issue(token, issue_number)
-    
+
   github.update_issue_comment(token, issue_number, comment)
 
 
@@ -276,7 +291,13 @@ def _get_issue_number(token, title, label):
   for issue in issues:
     if issue["title"] == title:
       return issue["number"]
-  empty_comment = _COMMENT_HIDDEN_DIVIDER + " " + _COMMENT_HIDDEN_DIVIDER + " " + _COMMENT_HIDDEN_DIVIDER 
+  empty_comment = (" " +
+                   _COMMENT_DASHBOARD_START + " " +
+                   _COMMENT_DASHBOARD_END + " " +
+                   _COMMENT_HIDDEN_DIVIDER + " " +
+                   _COMMENT_HIDDEN_DIVIDER + " " +
+                   _COMMENT_HIDDEN_DIVIDER + " "
+                   )
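+  # The seed above is an empty dashboard block followed by three hidden
+  # dividers, matching the four-way split at the top of test_report().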
   return github.create_issue(token, title, label, empty_comment)["number"]
 
 
@@ -287,7 +308,7 @@ def _update_comment(token, issue_number, comment):
   else:
     github.update_comment(token, comment_id, comment)
 
-  
+
 def _get_comment_id(token, issue_number, comment_identifier):
   comments = github.list_comments(token, issue_number)
   for comment in comments:

From bc5e2af969dd32ed2e5efa094b40fc2fe98db005 Mon Sep 17 00:00:00 2001
From: Jon Simantov 
Date: Mon, 5 Jun 2023 15:42:40 -0700
Subject: [PATCH 16/22] Finish script to update issue comment.
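
Notes: the update works by splitting the existing issue body on the hidden
start/end tags and splicing the new stdin text back in between them. A
minimal sketch of the intended round trip (tag values are placeholders):

    start = '\r\n<hidden value="my-tag-start"></hidden>\r\n'
    end = '\r\n<hidden value="my-tag-end"></hidden>\r\n'
    body = 'intro' + start + 'old' + end + 'outro'
    [prefix, remainder] = body.split(start)
    [_, suffix] = remainder.split(end)
    assert prefix + start + 'new' + end + suffix == \
        'intro' + start + 'new' + end + 'outro'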

---
 scripts/gha/update_issue_comment.py | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/scripts/gha/update_issue_comment.py b/scripts/gha/update_issue_comment.py
index 45b2927d07..7a19dd0264 100644
--- a/scripts/gha/update_issue_comment.py
+++ b/scripts/gha/update_issue_comment.py
@@ -29,6 +29,7 @@
     --end_tag "hidden-tag-end" < updated-comment-section.md
 """
 
+import sys
 
 from absl import app
 from absl import flags
@@ -70,25 +71,39 @@ def get_issue_number(token, title, label):
 def main(argv):
   if len(argv) > 1:
     raise app.UsageError("Too many command-line arguments.")
-  if not FLAGS.verbosity:
-    logging.set_verbosity(logging.WARN)
+  #if not FLAGS.verbosity:
+  #  logging.set_verbosity(logging.WARN)
 
-  comment_start = "\r\n<hidden value=\"%s\"></hidden>\r\n" % FLAGS.comment_start
-  comment_end = "\r\n<hidden value=\"%s\"></hidden>\r\n" % FLAGS.comment_end
+  comment_start = "\r\n<hidden value=\"%s\"></hidden>\r\n" % FLAGS.start_tag
+  comment_end = "\r\n<hidden value=\"%s\"></hidden>\r\n" % FLAGS.end_tag
 
   issue_number = get_issue_number(FLAGS.token, FLAGS.issue_title, FLAGS.issue_label)
   if not issue_number:
     logging.fatal("Couldn't find a '%s' issue matching '%s'",
                   FLAGS.issue_label,
                   FLAGS.issue_title)
+  logging.info("Got issue number: %d", issue_number)
   
   previous_comment = github.get_issue_body(FLAGS.token, issue_number)
   if comment_start not in previous_comment:
     logging.fatal("Couldn't find start tag '%s' in previous comment", comment_start)
   if comment_end not in previous_comment:
     logging.fatal("Couldn't find end tag '%s' in previous comment", comment_end)
-  
 
+  logging.info("Got previous comment (%d bytes)", len(previous_comment))
+
+  if comment_start == comment_end:
+    [prefix, _, suffix] = previous_comment.split(comment_start)
+  else:
+    [prefix, remainder] = previous_comment.split(comment_start)
+    [_, suffix] = remainder.split(comment_end)
+
+  logging.info("Prefix is %d bytes, suffix is %d bytes", len(prefix), len(suffix))
+
+  new_text = sys.stdin.read()
+  new_comment = prefix + comment_start + new_text + comment_end + suffix
+
+  print(new_comment)
 
 
 if __name__ == "__main__":

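The splice above generalizes: any issue body whose replaceable section is bracketed by start/end marker strings can be updated in place without touching the surrounding text. A standalone sketch of the distinct-tags path, with illustrative marker values (the script's real tags come from --start_tag/--end_tag):

def splice_between_tags(body, start_tag, end_tag, new_text):
    """Replace whatever currently sits between start_tag and end_tag."""
    prefix, remainder = body.split(start_tag, 1)
    _, suffix = remainder.split(end_tag, 1)
    return prefix + start_tag + new_text + end_tag + suffix

body = "intro <!-- s -->stale table<!-- e --> outro"
print(splice_between_tags(body, "<!-- s -->", "<!-- e -->", "fresh table"))
# intro <!-- s -->fresh table<!-- e --> outro
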
From fbbdc4e0bd971737182961b6eba06a0dd3218891 Mon Sep 17 00:00:00 2001
From: Jon Simantov 
Date: Mon, 5 Jun 2023 16:01:18 -0700
Subject: [PATCH 17/22] Add issue updating to the workflow.

---
 .github/workflows/build-report.yml  | 15 ++++-----------
 scripts/gha/update_issue_comment.py | 14 +++++---------
 2 files changed, 9 insertions(+), 20 deletions(-)

diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml
index e563929522..f3edbe9d94 100644
--- a/.github/workflows/build-report.yml
+++ b/.github/workflows/build-report.yml
@@ -37,7 +37,7 @@ jobs:
       run: |
         python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --output_markdown --read_cache build_status_short.cache > report_short.md
         python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDaysExtended }} --output_markdown --read_cache build_status.cache > report_extended.md
-        python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --read_cache build_status_short.cache > report.txt
+        python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --nooutput-header --read_cache build_status_short.cache > report.txt
     - name: Generate comment string
       run: |
         echo -n > comment.md
@@ -65,13 +65,6 @@ jobs:
     - name: Show comment string
       run: |
         cat comment.md
-    - name: Upload comment file artifact
-      uses: actions/upload-artifact@v3
-      with:
-        name: comment.md
-        path: comment.md
-    - name: Upload build status cache (debugging)
-      uses: actions/upload-artifact@v3
-      with:
-        name: build_status.cache
-        path: build_status.cache
+    - name: Update GitHub issue
+      run: |
+        python3 scripts/gha/update_issue_comment.py --token ${{ github.token }} --issue_title '[C++] Nightly Integration Testing Report' --start_tag build-dashboard-comment-start --end_tag build-dashboard-comment-end < comment.md
diff --git a/scripts/gha/update_issue_comment.py b/scripts/gha/update_issue_comment.py
index 7a19dd0264..381375cc4c 100644
--- a/scripts/gha/update_issue_comment.py
+++ b/scripts/gha/update_issue_comment.py
@@ -61,6 +61,7 @@
 
 
 def get_issue_number(token, title, label):
+  """Get the GitHub isssue number for a given issue title and label"""
   issues = github.search_issues_by_label(label)
   for issue in issues:
     if issue["title"] == title:
@@ -71,8 +72,6 @@ def get_issue_number(token, title, label):
 def main(argv):
   if len(argv) > 1:
     raise app.UsageError("Too many command-line arguments.")
-  #if not FLAGS.verbosity:
-  #  logging.set_verbosity(logging.WARN)
 
   comment_start = "\r\n\r\n" % FLAGS.start_tag
   comment_end = "\r\n\r\n" % FLAGS.end_tag
@@ -82,7 +81,7 @@ def main(argv):
     logging.fatal("Couldn't find a '%s' issue matching '%s'",
                   FLAGS.issue_label,
                   FLAGS.issue_title)
-  logging.info("Got issue number: %d", issue_number)
+  logging.info("Found issue number: %d", issue_number)
   
   previous_comment = github.get_issue_body(FLAGS.token, issue_number)
   if comment_start not in previous_comment:
@@ -90,20 +89,17 @@ def main(argv):
   if comment_end not in previous_comment:
     logging.fatal("Couldn't find end tag '%s' in previous comment", comment_end)
 
-  logging.info("Got previous comment (%d bytes)", len(previous_comment))
-
   if comment_start == comment_end:
     [prefix, _, suffix] = previous_comment.split(comment_start)
   else:
     [prefix, remainder] = previous_comment.split(comment_start)
     [_, suffix] = remainder.split(comment_end)
 
-  logging.info("Prefix is %d bytes, suffix is %d bytes", len(prefix), len(suffix))
-
   new_text = sys.stdin.read()
-  new_comment = prefix + comment_start + new_text + comment_end + suffix
+  comment = prefix + comment_start + new_text + comment_end + suffix
+
+  github.update_issue_comment(FLAGS.token, issue_number, comment)
 
-  print(new_comment)
 
 
 if __name__ == "__main__":

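The comment_start == comment_end branch kept above exists because a single repeated tag splits the body into exactly three pieces in one call, so no second split is needed. A sketch under an assumed tag value:

tag = "\r\n<!-- dashboard -->\r\n"  # assumed: --start_tag and --end_tag resolve to the same marker
body = "before" + tag + "old section" + tag + "after"
prefix, _, suffix = body.split(tag)
new_body = prefix + tag + "new section" + tag + suffix
assert new_body == "before" + tag + "new section" + tag + "after"
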
From fa08f62355eae230d52969e6e0acdaf72c49a743 Mon Sep 17 00:00:00 2001
From: Jon Simantov 
Date: Mon, 5 Jun 2023 16:12:16 -0700
Subject: [PATCH 18/22] Add comments.

---
 scripts/gha/report_build_status.py | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/scripts/gha/report_build_status.py b/scripts/gha/report_build_status.py
index b86268f35d..72b894ccd4 100644
--- a/scripts/gha/report_build_status.py
+++ b/scripts/gha/report_build_status.py
@@ -109,6 +109,7 @@
 firestore_test_time = ' 10:0'
 
 def rename_key(old_dict,old_name,new_name):
+    """Rename a key in a dictionary, preserving the order."""
     new_dict = {}
     for key,value in zip(old_dict.keys(),old_dict.values()):
         new_key = key if key != old_name else new_name
@@ -117,6 +118,9 @@ def rename_key(old_dict,old_name,new_name):
 
 
 def english_list(items, sep=','):
+  """Format a list in English. If there are two items, separate with "and".
+     If more than 2 items, separate with commas as well.
+  """
   if len(items) == 2:
     return items[0] + " and " + items[1]
   else:
@@ -126,12 +130,18 @@ def english_list(items, sep=','):
 
 
 def decorate_url(text, url):
+  """Put the text in a URL and replace spaces with nonbreaking spaces.
+     If not outputting Markdown, this does nothing.
+  """
   if not FLAGS.output_markdown:
     return text
   return ("[%s](%s)" % (text.replace(" ", " "), url))
 
 
 def analyze_log(text, url):
+  """Do a simple analysis of the log summary text to determine if the build
+     or test succeeded, flaked, or failed.
+  """
   build_status = decorate_url(_PASS_TEXT, url)
   test_status = decorate_url(_PASS_TEXT, url)
   if '[BUILD] [ERROR]' in text:
@@ -146,6 +156,7 @@ def analyze_log(text, url):
 
 
 def format_errors(all_errors, severity, event):
+  """Return a list of English-language formatted errors."""
   product_errors = []
   if severity not in all_errors: return None
   if event not in all_errors[severity]: return None
@@ -214,6 +225,8 @@ def format_errors(all_errors, severity, event):
 
 
 def create_notes(text):
+  """Combine the sets of errors into a single string.
+  """
   if not text: return ''
   errors = {}
   text += '\n'
@@ -272,6 +285,7 @@ def create_notes(text):
 def get_message_from_github_log(logs_zip,
                                 regex_filename,
                                 regex_line, debug=False):
+  """Find a specific line inside a single file from a GitHub run's logs."""
   for log in logs_zip.namelist():
     if re.search(regex_filename, log):
       log_text = logs_zip.read(log).decode()
@@ -363,7 +377,7 @@ def main(argv):
         if run['triggering_actor']['login'] != _TRIGGER_USER: continue
         package_tests_all.append(run)
   
-    # For each run in pack
+    # For each workflow_trigger run of the tests, determine which packaging run it goes with.
     package_tests = {}
   
     logging.info("Source tests: %s %s", list(source_tests.keys()),  [source_tests[r]['id'] for r in source_tests.keys()])
@@ -415,8 +429,11 @@ def main(argv):
                 run['log_success'] = success
                 run['log_results'] = results
             else:
+              # Artifacts expire after some time, so if they are gone, we
+              # need to read the GitHub logs instead. This is much slower,
+              # so we prefer the artifacts whenever possible.
               logging.info("Reading github logs for run %s instead", run['id'])
-              # artifact_contents is empty, get the github logs which is much slower
+
               logs_url = run['logs_url']
               headers = {'Accept': 'application/vnd.github.v3+json', 'Authorization': 'Bearer %s' % FLAGS.token}
               with requests.get(logs_url, headers=headers, stream=True) as response:

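Aside on rename_key, documented above: because Python 3.7+ dicts preserve insertion order, the same order-preserving rename can be written as a comprehension. A behavior-equivalent sketch, not the script's actual implementation:

def rename_key(old_dict, old_name, new_name):
    """Rename a key in a dictionary, preserving key order."""
    return {(new_name if key == old_name else key): value
            for key, value in old_dict.items()}

d = {'Android': 1, 'iOS': 2, 'Desktop': 3}
print(rename_key(d, 'iOS', 'iOS simulator'))
# {'Android': 1, 'iOS simulator': 2, 'Desktop': 3}
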
From 8b86e41c57469f4ced18b6c4022e9242f1f7d47a Mon Sep 17 00:00:00 2001
From: Jon Simantov 
Date: Mon, 5 Jun 2023 16:35:32 -0700
Subject: [PATCH 19/22] Schedule build matrix earlier.

---
 .github/workflows/build-report.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml
index f3edbe9d94..221d9d7c09 100644
--- a/.github/workflows/build-report.yml
+++ b/.github/workflows/build-report.yml
@@ -3,7 +3,7 @@ name: Generate Test Report Table
 on:
   workflow_dispatch:
   schedule:
-    - cron: "0 16 * * *"  # 4pm UTC = 8am PST / 9am PDT, 7 hours after testapps run
+    - cron: "0 15 * * *"  # 3pm UTC = 7am PST / 8am PDT, 6 hours after testapps run
 
 env:
   GITHUB_TOKEN: ${{ github.token }}

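Sanity check for the schedule comment above (15:00 UTC = 8am Pacific on a June date, when PDT is in effect), using only the standard library:

from datetime import datetime, timezone
from zoneinfo import ZoneInfo  # Python 3.9+

utc_run = datetime(2023, 6, 5, 15, 0, tzinfo=timezone.utc)
print(utc_run.astimezone(ZoneInfo("America/Los_Angeles")))
# 2023-06-05 08:00:00-07:00
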
From 7dff43d85907aa6164c98be3fde5cf1cade52194 Mon Sep 17 00:00:00 2001
From: Jon Simantov 
Date: Mon, 5 Jun 2023 17:24:51 -0700
Subject: [PATCH 20/22] Don't include header if --nooutput_header is specified.

---
 .github/workflows/build-report.yml | 2 +-
 scripts/gha/report_build_status.py | 9 +++++----
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml
index 221d9d7c09..46f37a1be7 100644
--- a/.github/workflows/build-report.yml
+++ b/.github/workflows/build-report.yml
@@ -37,7 +37,7 @@ jobs:
       run: |
         python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --output_markdown --read_cache build_status_short.cache > report_short.md
         python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDaysExtended }} --output_markdown --read_cache build_status.cache > report_extended.md
-        python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --nooutput-header --read_cache build_status_short.cache > report.txt
+        python3 scripts/gha/report_build_status.py --token ${{ github.token }} --days ${{ env.numDays }} --nooutput_header --read_cache build_status_short.cache > report.txt
     - name: Generate comment string
       run: |
         echo -n > comment.md
diff --git a/scripts/gha/report_build_status.py b/scripts/gha/report_build_status.py
index 72b894ccd4..49d6bc71c7 100644
--- a/scripts/gha/report_build_status.py
+++ b/scripts/gha/report_build_status.py
@@ -491,10 +491,11 @@ def main(argv):
 
   table_header_string = row_prefix + row_separator.join(table_fields) + row_suffix
   table_row_fmt = row_prefix + row_separator.join(["%s" for f in table_fields]) + row_suffix
-  print(table_header_string)
-  
-  if FLAGS.output_header and FLAGS.output_markdown:
-    print(table_row_fmt.replace("%s", "---").replace(" ", ""))
+
+  if FLAGS.output_header:
+    print(table_header_string)
+    if FLAGS.output_markdown:
+      print(table_row_fmt.replace("%s", "---").replace(" ", ""))
 
   days_sorted = sorted(all_days)
   if FLAGS.reverse: days_sorted = reversed(days_sorted)

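The gating above also shows the separator-row trick: the Markdown divider row is derived from the same format string as the data rows. A reduced sketch; the field list and the pipe-delimited row pieces are assumptions about the surrounding code:

table_fields = ["Date", "Username", "Build vs Source Repo"]
row_prefix, row_separator, row_suffix = "| ", " | ", " |"

table_header_string = row_prefix + row_separator.join(table_fields) + row_suffix
table_row_fmt = row_prefix + row_separator.join(["%s" for _ in table_fields]) + row_suffix

output_header, output_markdown = True, True  # stand-ins for the absl flags
if output_header:
    print(table_header_string)  # | Date | Username | Build vs Source Repo |
    if output_markdown:
        print(table_row_fmt.replace("%s", "---").replace(" ", ""))  # |---|---|---|
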
From 8846e089eaebc78424113850c755461c000f2eff Mon Sep 17 00:00:00 2001
From: Jon Simantov 
Date: Mon, 5 Jun 2023 17:27:19 -0700
Subject: [PATCH 21/22] Change heading to Username

---
 scripts/gha/report_build_status.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/gha/report_build_status.py b/scripts/gha/report_build_status.py
index 49d6bc71c7..f997e1d8c7 100644
--- a/scripts/gha/report_build_status.py
+++ b/scripts/gha/report_build_status.py
@@ -475,7 +475,7 @@ def main(argv):
 
   table_fields = (
       ["Date"] +
-      (["Build Bulbasaur"] if FLAGS.output_username else ([] if FLAGS.output_markdown else [""])) +
+      (["Username"] if FLAGS.output_username else ([] if FLAGS.output_markdown else [""])) +
       ([""] if FLAGS.include_blank_column and not FLAGS.output_markdown else []) +
       ["Build vs Source Repo", "Test vs Source Repo",
        "SDK Packaging", "Build vs SDK Package", "Test vs SDK Package",

From c25f51be93701a5fd430b68d6f54624b5c70e6d1 Mon Sep 17 00:00:00 2001
From: Jon Simantov 
Date: Mon, 5 Jun 2023 23:27:47 -0700
Subject: [PATCH 22/22] Remove extraneous newline causing bad formatting.

---
 .github/workflows/build-report.yml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml
index 46f37a1be7..40fd23c90d 100644
--- a/.github/workflows/build-report.yml
+++ b/.github/workflows/build-report.yml
@@ -56,7 +56,6 @@ jobs:
         cat >> comment.md <<EOF
         
📄
-
         EOF
         cat report.txt >> comment.md
         cat >> comment.md <<EOF