1 change: 1 addition & 0 deletions README.md
@@ -157,6 +157,7 @@ This action can be configured to authenticate with GitHub App Installation or Pe
| `HIDE_STATUS` | False | True | If set to `true`, the status column will not be shown |
| `HIDE_CREATED_AT` | False | True | If set to `true`, the creation timestamp will not be displayed in the generated Markdown file. |
| `HIDE_PR_STATISTICS` | False | True | If set to `true`, PR comment statistics (mean, median, 90th percentile, and individual PR comment counts) will not be displayed in the generated Markdown file. |
| `HIDE_ITEMS_LIST` | False | False | If set to `true`, the table listing individual pull requests, issues, and discussions will not be displayed in the generated Markdown file. Only the summary metrics will be shown. |
| `DRAFT_PR_TRACKING` | False | False | If set to `true`, draft PRs will be included in the metrics as a new column and in the summary stats. |
| `IGNORE_USERS` | False | False | A comma separated list of users to ignore when calculating metrics. (ie. `IGNORE_USERS: 'user1,user2'`). To ignore bots, append `[bot]` to the user (ie. `IGNORE_USERS: 'github-actions[bot]'`) Users in this list will also have their authored issues and pull requests removed from the Markdown table. |
| `ENABLE_MENTOR_COUNT` | False | False | If set to 'TRUE' count number of comments users left on discussions, issues and PRs and display number of active mentors |
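For context on how these `true`/`false` strings become booleans, the new option is read through the `get_bool_env_var` helper called in `config.py` below. The helper's body is not part of this diff, so the following is only a minimal sketch of the assumed behavior (case-insensitive `true` enables a flag, anything else falls back to the default):

```python
import os


def get_bool_env_var(env_var_name: str, default: bool = False) -> bool:
    """Sketch of the assumed parsing: case-insensitive 'true' means True,
    a missing or empty value falls back to the default."""
    value = os.environ.get(env_var_name, "").strip()
    if not value:
        return default
    return value.lower() == "true"


# HIDE_ITEMS_LIST defaults to False, so the items table is shown unless explicitly disabled.
hide_items_list = get_bool_env_var("HIDE_ITEMS_LIST", False)
```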
6 changes: 6 additions & 0 deletions config.py
@@ -58,6 +58,7 @@ class EnvVars:
draft_pr_tracking (bool): If set to TRUE, track PR time in draft state
in addition to other metrics
hide_pr_statistics (bool): If set to TRUE, hide PR comment statistics in the output
hide_items_list (bool): If set to TRUE, hide the list of individual items in the report
"""

def __init__(
@@ -90,6 +91,7 @@ def __init__(
rate_limit_bypass: bool = False,
draft_pr_tracking: bool = False,
hide_pr_statistics: bool = True,
hide_items_list: bool = False,
):
self.gh_app_id = gh_app_id
self.gh_app_installation_id = gh_app_installation_id
@@ -119,6 +121,7 @@ def __init__(
self.rate_limit_bypass = rate_limit_bypass
self.draft_pr_tracking = draft_pr_tracking
self.hide_pr_statistics = hide_pr_statistics
self.hide_items_list = hide_items_list

def __repr__(self):
return (
@@ -151,6 +154,7 @@ def __repr__(self):
f"{self.rate_limit_bypass}"
f"{self.draft_pr_tracking}"
f"{self.hide_pr_statistics}"
f"{self.hide_items_list}"
)


@@ -249,6 +253,7 @@ def get_env_vars(test: bool = False) -> EnvVars:
hide_created_at = get_bool_env_var("HIDE_CREATED_AT", True)
hide_status = get_bool_env_var("HIDE_STATUS", True)
hide_pr_statistics = get_bool_env_var("HIDE_PR_STATISTICS", True)
hide_items_list = get_bool_env_var("HIDE_ITEMS_LIST", False)
enable_mentor_count = get_bool_env_var("ENABLE_MENTOR_COUNT", False)
min_mentor_comments = os.getenv("MIN_MENTOR_COMMENTS", "10")
max_comments_eval = os.getenv("MAX_COMMENTS_EVAL", "20")
@@ -284,4 +289,5 @@ def get_env_vars(test: bool = False) -> EnvVars:
rate_limit_bypass,
draft_pr_tracking,
hide_pr_statistics,
hide_items_list,
)
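A minimal end-to-end sketch of reading the new flag, assuming (as in the `test_config.py` changes below) that `GH_TOKEN` and `SEARCH_QUERY` are the only required variables and that the module is importable as `config`; the token and query values are placeholders:

```python
import os

from config import get_env_vars

# Placeholder values for illustration only.
os.environ["GH_TOKEN"] = "ghp_example_token"
os.environ["SEARCH_QUERY"] = "repo:owner/repo is:issue is:open"
os.environ["HIDE_ITEMS_LIST"] = "true"

env_vars = get_env_vars(test=True)
print(env_vars.hide_items_list)  # True: markdown_writer will skip the per-item table
```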
115 changes: 60 additions & 55 deletions markdown_writer.py
@@ -189,68 +189,73 @@ def write_to_markdown(
)

# Write second table with individual issue/pr/discussion metrics
# First write the header
file.write("|")
for column in columns:
file.write(f" {column} |")
file.write("\n")
# Skip this table if hide_items_list is True
if not env_vars.hide_items_list:
# First write the header
file.write("|")
for column in columns:
file.write(f" {column} |")
file.write("\n")

# Then write the column dividers
file.write("|")
for _ in columns:
file.write(" --- |")
file.write("\n")
# Then write the column dividers
file.write("|")
for _ in columns:
file.write(" --- |")
file.write("\n")

# Then write the issues/pr/discussions row by row
for issue in issues_with_metrics:
# Replace the vertical bar with the HTML entity
issue.title = issue.title.replace("|", "&#124;")
# Replace any whitespace
issue.title = issue.title.strip()
# Then write the issues/pr/discussions row by row
for issue in issues_with_metrics:
# Replace the vertical bar with the HTML entity
issue.title = issue.title.replace("|", "&#124;")
# Replace any whitespace
issue.title = issue.title.strip()

endpoint = ghe.removeprefix("https://") if ghe else "github.com"
if non_mentioning_links:
file.write(
f"| {issue.title} | "
f"{issue.html_url}".replace(
f"https://{endpoint}", f"https://www.{endpoint}"
endpoint = ghe.removeprefix("https://") if ghe else "github.com"
if non_mentioning_links:
file.write(
f"| {issue.title} | "
f"{issue.html_url}".replace(
f"https://{endpoint}", f"https://www.{endpoint}"
)
+ " |"
)
+ " |"
)
else:
file.write(f"| {issue.title} | {issue.html_url} |")
if "Assignee" in columns:
if issue.assignees:
assignee_links = [
f"[{assignee}](https://{endpoint}/{assignee})"
for assignee in issue.assignees
]
file.write(f" {', '.join(assignee_links)} |")
else:
file.write(" None |")
if "Author" in columns:
file.write(f" [{issue.author}](https://{endpoint}/{issue.author}) |")
if "Time to first response" in columns:
file.write(f" {issue.time_to_first_response} |")
if "Time to close" in columns:
file.write(f" {issue.time_to_close} |")
if "Time to answer" in columns:
file.write(f" {issue.time_to_answer} |")
if "Time in draft" in columns:
file.write(f" {issue.time_in_draft} |")
if labels and issue.label_metrics:
for label in labels:
if f"Time spent in {label}" in columns:
file.write(f" {issue.label_metrics[label]} |")
if "Created At" in columns:
file.write(f" {issue.created_at} |")
if "Status" in columns:
file.write(f" {issue.status} |")
if "PR Comments" in columns:
file.write(f" {issue.pr_comment_count or 'N/A'} |")
file.write(f"| {issue.title} | {issue.html_url} |")
if "Assignee" in columns:
if issue.assignees:
assignee_links = [
f"[{assignee}](https://{endpoint}/{assignee})"
for assignee in issue.assignees
]
file.write(f" {', '.join(assignee_links)} |")
else:
file.write(" None |")
if "Author" in columns:
file.write(
f" [{issue.author}](https://{endpoint}/{issue.author}) |"
)
if "Time to first response" in columns:
file.write(f" {issue.time_to_first_response} |")
if "Time to close" in columns:
file.write(f" {issue.time_to_close} |")
if "Time to answer" in columns:
file.write(f" {issue.time_to_answer} |")
if "Time in draft" in columns:
file.write(f" {issue.time_in_draft} |")
if labels and issue.label_metrics:
for label in labels:
if f"Time spent in {label}" in columns:
file.write(f" {issue.label_metrics[label]} |")
if "Created At" in columns:
file.write(f" {issue.created_at} |")
if "Status" in columns:
file.write(f" {issue.status} |")
if "PR Comments" in columns:
file.write(f" {issue.pr_comment_count or 'N/A'} |")
file.write("\n")
file.write("\n")
file.write(
"\n_This report was generated with the \
"_This report was generated with the \
[Issue Metrics Action](https://github.com/github/issue-metrics)_\n"
)
if search_query:
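Because the diff above interleaves the previous and re-indented versions of the table-writing block, the net change is easier to read as a condensed sketch: the summary metrics are written as before, and the per-item table is emitted only when the flag is off. The helper name and the trimmed column handling here are illustrative only; in the actual file this logic is inline in `write_to_markdown`:

```python
# Condensed sketch of the new control flow (illustrative helper, not the real function layout).
def write_items_table(file, env_vars, issues_with_metrics, columns):
    # Summary metrics have already been written; they appear regardless of the flag.
    if env_vars.hide_items_list:
        # Skip the individual issue/PR/discussion table entirely.
        return

    # Header row and divider row.
    file.write("|" + "".join(f" {column} |" for column in columns) + "\n")
    file.write("|" + " --- |" * len(columns) + "\n")

    # One row per issue/PR/discussion; the per-column cells (assignee, author,
    # timing metrics, labels, status, PR comments) are omitted here.
    for issue in issues_with_metrics:
        file.write(f"| {issue.title} | {issue.html_url} |")
        file.write("\n")
```

Gating only this second table leaves the summary rows and the trailing report footer unchanged, which is what the new `test_markdown_writer.py` case below asserts.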
16 changes: 16 additions & 0 deletions test_config.py
@@ -238,6 +238,20 @@ def test_get_env_vars_missing_query(self):
with self.assertRaises(ValueError):
get_env_vars(True)

@patch.dict(
os.environ,
{
"GH_TOKEN": TOKEN,
"SEARCH_QUERY": SEARCH_QUERY,
"HIDE_ITEMS_LIST": "true",
},
clear=True,
)
def test_get_env_vars_hide_items_list(self):
"""Test that HIDE_ITEMS_LIST environment variable is properly read."""
result = get_env_vars(True)
self.assertTrue(result.hide_items_list)

@patch.dict(
os.environ,
{
@@ -293,6 +307,7 @@ def test_get_env_vars_optional_values(self):
rate_limit_bypass=True,
draft_pr_tracking=True,
hide_pr_statistics=True,
hide_items_list=False,
)
result = get_env_vars(True)
self.assertEqual(str(result), str(expected_result))
@@ -339,6 +354,7 @@ def test_get_env_vars_optionals_are_defaulted(self):
rate_limit_bypass=False,
draft_pr_tracking=False,
hide_pr_statistics=True,
hide_items_list=False,
)
result = get_env_vars(True)
self.assertEqual(str(result), str(expected_result))
107 changes: 107 additions & 0 deletions test_markdown_writer.py
@@ -531,6 +531,113 @@ def test_writes_markdown_file_with_hidden_status_column(self):
self.assertEqual(content, expected_content)
os.remove("issue_metrics.md")

@patch.dict(
os.environ,
{
"SEARCH_QUERY": "is:open repo:user/repo",
"GH_TOKEN": "test_token",
"HIDE_CREATED_AT": "False",
"HIDE_TIME_TO_FIRST_RESPONSE": "True",
"HIDE_TIME_TO_CLOSE": "True",
"HIDE_TIME_TO_ANSWER": "True",
"HIDE_LABEL_METRICS": "True",
"NON_MENTIONING_LINKS": "True",
"GH_ENTERPRISE_URL": "https://ghe.com",
"HIDE_STATUS": "True", # Status column should be hidden
"HIDE_ITEMS_LIST": "True", # Hide the items list table
},
)
def test_writes_markdown_file_with_hidden_items_list(self):
"""
Test that write_to_markdown writes the correct markdown file
when HIDE_ITEMS_LIST is set to True, ensuring the individual
items table is not present in the output.
"""
# Create mock data
issues_with_metrics = [
IssueWithMetrics(
title="Issue 1",
html_url="https://ghe.com/user/repo/issues/1",
author="alice",
assignee="charlie",
assignees=["charlie"],
created_at=timedelta(days=-5),
time_to_first_response=timedelta(minutes=10),
time_to_close=timedelta(days=1),
time_to_answer=timedelta(hours=2),
time_in_draft=timedelta(days=1),
labels_metrics={
"label1": timedelta(days=1),
},
),
IssueWithMetrics(
title="Issue 2",
html_url="https://ghe.com/user/repo/issues/2",
author="bob",
assignee=None,
assignees=[],
created_at=timedelta(days=-5),
time_to_first_response=timedelta(minutes=20),
time_to_close=timedelta(days=2),
time_to_answer=timedelta(hours=4),
labels_metrics={
"label1": timedelta(days=1),
},
),
]
average_time_to_first_response = timedelta(minutes=15)
average_time_to_close = timedelta(days=1.5)
average_time_to_answer = timedelta(hours=3)
average_time_in_draft = timedelta(days=1)
average_time_in_labels = {
"label1": timedelta(days=1),
}
num_issues_opened = 2
num_issues_closed = 2
num_mentor_count = 5
ghe = "https://ghe.com"

# Call the function
write_to_markdown(
issues_with_metrics=issues_with_metrics,
average_time_to_first_response=average_time_to_first_response,
average_time_to_close=average_time_to_close,
average_time_to_answer=average_time_to_answer,
average_time_in_draft=average_time_in_draft,
average_time_in_labels=average_time_in_labels,
stats_pr_comments=None,
num_issues_opened=num_issues_opened,
num_issues_closed=num_issues_closed,
num_mentor_count=num_mentor_count,
labels=["label1"],
search_query="repo:user/repo is:issue",
hide_label_metrics=True,
hide_items_closed_count=True,
enable_mentor_count=True,
non_mentioning_links=True,
report_title="Issue Metrics",
output_file="issue_metrics.md",
ghe=ghe,
)

# Check that the function writes the correct markdown file
with open("issue_metrics.md", "r", encoding="utf-8") as file:
content = file.read()

# Expected content should not include the individual items table
expected_content = (
"# Issue Metrics\n\n"
"| Metric | Count |\n"
"| --- | ---: |\n"
"| Number of items that remain open | 2 |\n"
"| Number of most active mentors | 5 |\n"
"| Total number of items created | 2 |\n\n"
"_This report was generated with the [Issue Metrics Action](https://github.com/github/issue-metrics)_\n"
"Search query used to find these items: `repo:user/repo is:issue`\n"
)
self.assertEqual(content, expected_content)
os.remove("issue_metrics.md")


if __name__ == "__main__":
unittest.main()