diff --git a/README.md b/README.md index d429178..603bcda 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,7 @@ Feel free to inquire about its usage by creating an issue in this repository. | Time to Close | The period from creation to closure.\* | | Time to Answer (Discussions Only) | The time from creation to an answer. | | Time in Label | The duration from label application to removal, requires `LABELS_TO_MEASURE` env variable. | +| Time in Draft (PRs Only) | The duration from creation to the PR being marked as ready for review. | \*For pull requests, these metrics exclude the time the PR was in draft mode. @@ -151,6 +152,7 @@ This action can be configured to authenticate with GitHub App Installation or Pe | `HIDE_TIME_TO_ANSWER` | False | False | If set to `true`, the time to answer a discussion will not be displayed in the generated Markdown file. | | `HIDE_TIME_TO_CLOSE` | False | False | If set to `true`, the time to close will not be displayed in the generated Markdown file. | | `HIDE_TIME_TO_FIRST_RESPONSE` | False | False | If set to `true`, the time to first response will not be displayed in the generated Markdown file. | +| `DRAFT_PR_TRACKING` | False | False | If set to `true`, draft PRs will be included in the metrics as a new column and in the summary stats. | | `IGNORE_USERS` | False | False | A comma separated list of users to ignore when calculating metrics. (ie. `IGNORE_USERS: 'user1,user2'`). To ignore bots, append `[bot]` to the user (ie. `IGNORE_USERS: 'github-actions[bot]'`) Users in this list will also have their authored issues and pull requests removed from the Markdown table. | | `ENABLE_MENTOR_COUNT` | False | False | If set to 'TRUE' count number of comments users left on discussions, issues and PRs and display number of active mentors | | `MIN_MENTOR_COMMENTS` | False | 10 | Minimum number of comments to count as a mentor | diff --git a/classes.py b/classes.py index e1e6684..7428d31 100644 --- a/classes.py +++ b/classes.py @@ -18,6 +18,7 @@ class IssueWithMetrics: time_to_close (timedelta, optional): The time it took to close the issue. time_to_answer (timedelta, optional): The time it took to answer the discussions in the issue. + time_in_draft (timedelta, optional): The time the PR was in draft state. label_metrics (dict, optional): A dictionary containing the label metrics mentor_activity (dict, optional): A dictionary containing active mentors @@ -33,6 +34,7 @@ def __init__( time_to_first_response=None, time_to_close=None, time_to_answer=None, + time_in_draft=None, labels_metrics=None, mentor_activity=None, ): @@ -42,5 +44,6 @@ def __init__( self.time_to_first_response = time_to_first_response self.time_to_close = time_to_close self.time_to_answer = time_to_answer + self.time_in_draft = time_in_draft self.label_metrics = labels_metrics self.mentor_activity = mentor_activity diff --git a/config.py b/config.py index 4d544e4..5de753f 100644 --- a/config.py +++ b/config.py @@ -1,6 +1,7 @@ """A module for managing environment variables used in GitHub metrics calculation. -This module defines a class for encapsulating environment variables and a function to retrieve these variables. +This module defines a class for encapsulating environment variables +and a function to retrieve these variables. Classes: EnvVars: Represents the collection of environment variables used in the script. 
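The `DRAFT_PR_TRACKING` flag documented in the README hunk above is read like the repository's other boolean options, via the `get_bool_env_var` helper that `get_env_vars()` calls further down in this diff. The helper's body is not shown here, so the sketch below is an illustrative assumption of its behavior, not the actual implementation:

```python
import os


def get_bool_env_var(env_var_name: str, default: bool = False) -> bool:
    """Hypothetical stand-in for the config.py helper (not shown in this diff):
    treat the value "true" in any casing as truthy, everything else as falsy."""
    return os.environ.get(env_var_name, str(default)).strip().lower() == "true"


# Mirrors the call added to get_env_vars() in the config.py hunk below.
draft_pr_tracking = get_bool_env_var("DRAFT_PR_TRACKING", False)
```

When the flag is left at its default of `False`, the "Time in draft" column and its summary rows are omitted from the generated Markdown report entirely.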
@@ -23,27 +24,36 @@ class EnvVars: Attributes: gh_app_id (int | None): The GitHub App ID to use for authentication - gh_app_installation_id (int | None): The GitHub App Installation ID to use for authentication - gh_app_private_key_bytes (bytes): The GitHub App Private Key as bytes to use for authentication + gh_app_installation_id (int | None): The GitHub App Installation ID to use for + authentication + gh_app_private_key_bytes (bytes): The GitHub App Private Key as bytes to use for + authentication gh_token (str | None): GitHub personal access token (PAT) for API authentication ghe (str): The GitHub Enterprise URL to use for authentication hide_author (bool): If true, the author's information is hidden in the output - hide_items_closed_count (bool): If true, the number of items closed metric is hidden in the output + hide_items_closed_count (bool): If true, the number of items closed metric is hidden + in the output hide_label_metrics (bool): If true, the label metrics are hidden in the output hide_time_to_answer (bool): If true, the time to answer discussions is hidden in the output hide_time_to_close (bool): If true, the time to close metric is hidden in the output - hide_time_to_first_response (bool): If true, the time to first response metric is hidden in the output + hide_time_to_first_response (bool): If true, the time to first response metric is hidden + in the output ignore_users (List[str]): List of usernames to ignore when calculating metrics labels_to_measure (List[str]): List of labels to measure how much time the lable is applied enable_mentor_count (bool): If set to TRUE, compute number of mentors min_mentor_comments (str): If set, defines the minimum number of comments for mentors - max_comments_eval (str): If set, defines the maximum number of comments to look at for mentor evaluation - heavily_involved_cutoff (str): If set, defines the cutoff after which heavily involved commentors in + max_comments_eval (str): If set, defines the maximum number of comments to look + at for mentor evaluation + heavily_involved_cutoff (str): If set, defines the cutoff after which heavily + involved commenters in search_query (str): Search query used to filter issues/prs/discussions on GitHub - non_mentioning_links (bool): If set to TRUE, links do not cause a notification in the desitnation repository + non_mentioning_links (bool): If set to TRUE, links do not cause a notification + in the destination repository report_title (str): The title of the report output_file (str): The name of the file to write the report to rate_limit_bypass (bool): If set to TRUE, bypass the rate limit for the GitHub API + draft_pr_tracking (bool): If set to TRUE, track PR time in draft state + in addition to other metrics """ def __init__( @@ -70,6 +80,7 @@ def __init__( report_title: str, output_file: str, rate_limit_bypass: bool = False, + draft_pr_tracking: bool = False, ): self.gh_app_id = gh_app_id self.gh_app_installation_id = gh_app_installation_id @@ -93,6 +104,7 @@ def __init__( self.report_title = report_title self.output_file = output_file self.rate_limit_bypass = rate_limit_bypass + self.draft_pr_tracking = draft_pr_tracking def __repr__(self): return ( @@ -119,6 +131,7 @@ def __repr__(self): f"{self.report_title}" f"{self.output_file}" f"{self.rate_limit_bypass}" + f"{self.draft_pr_tracking}" ) @@ -203,6 +216,7 @@ def get_env_vars(test: bool = False) -> EnvVars: report_title = os.getenv("REPORT_TITLE", "Issue Metrics") output_file = os.getenv("OUTPUT_FILE", "") rate_limit_bypass =
get_bool_env_var("RATE_LIMIT_BYPASS", False) + draft_pr_tracking = get_bool_env_var("DRAFT_PR_TRACKING", False) # Hidden columns hide_author = get_bool_env_var("HIDE_AUTHOR", False) @@ -240,4 +254,5 @@ def get_env_vars(test: bool = False) -> EnvVars: report_title, output_file, rate_limit_bypass, + draft_pr_tracking, ) diff --git a/issue_metrics.py b/issue_metrics.py index a33042b..1228654 100644 --- a/issue_metrics.py +++ b/issue_metrics.py @@ -30,6 +30,7 @@ from markdown_writer import write_to_markdown from most_active_mentors import count_comments_per_user, get_mentor_count from search import get_owners_and_repositories, search_issues +from time_in_draft import get_stats_time_in_draft, measure_time_in_draft from time_to_answer import get_stats_time_to_answer, measure_time_to_answer from time_to_close import get_stats_time_to_close, measure_time_to_close from time_to_first_response import ( @@ -112,13 +113,9 @@ def get_per_issue_metrics( continue issue_with_metrics = IssueWithMetrics( - issue.title, # type: ignore - issue.html_url, # type: ignore - issue.user["login"], # type: ignore - None, - None, - None, - None, + title=issue.title, # type: ignore + html_url=issue.html_url, # type: ignore + author=issue.user["login"], # type: ignore ) # Check if issue is actually a pull request @@ -126,6 +123,11 @@ def get_per_issue_metrics( if issue.issue.pull_request_urls: # type: ignore pull_request = issue.issue.pull_request() # type: ignore ready_for_review_at = get_time_to_ready_for_review(issue, pull_request) + if env_vars.draft_pr_tracking: + issue_with_metrics.time_in_draft = measure_time_in_draft( + issue=issue, + ready_for_review_at=ready_for_review_at, + ) if env_vars.hide_time_to_first_response is False: issue_with_metrics.time_to_first_response = ( @@ -242,6 +244,7 @@ def main(): # pragma: no cover average_time_to_first_response=None, average_time_to_close=None, average_time_to_answer=None, + average_time_in_draft=None, average_time_in_labels=None, num_issues_opened=None, num_issues_closed=None, @@ -266,6 +269,7 @@ def main(): # pragma: no cover average_time_to_first_response=None, average_time_to_close=None, average_time_to_answer=None, + average_time_in_draft=None, average_time_in_labels=None, num_issues_opened=None, num_issues_closed=None, @@ -297,6 +301,7 @@ def main(): # pragma: no cover stats_time_to_close = get_stats_time_to_close(issues_with_metrics) stats_time_to_answer = get_stats_time_to_answer(issues_with_metrics) + stats_time_in_draft = get_stats_time_in_draft(issues_with_metrics) num_mentor_count = 0 if enable_mentor_count: @@ -308,16 +313,17 @@ def main(): # pragma: no cover # Write the results to json and a markdown file write_to_json( - issues_with_metrics, - stats_time_to_first_response, - stats_time_to_close, - stats_time_to_answer, - stats_time_in_labels, - num_issues_open, - num_issues_closed, - num_mentor_count, - search_query, - output_file, + issues_with_metrics=issues_with_metrics, + stats_time_to_first_response=stats_time_to_first_response, + stats_time_to_close=stats_time_to_close, + stats_time_to_answer=stats_time_to_answer, + stats_time_in_draft=stats_time_in_draft, + stats_time_in_labels=stats_time_in_labels, + num_issues_opened=num_issues_open, + num_issues_closed=num_issues_closed, + num_mentor_count=num_mentor_count, + search_query=search_query, + output_file=output_file, ) write_to_markdown( @@ -325,6 +331,7 @@ def main(): # pragma: no cover average_time_to_first_response=stats_time_to_first_response, average_time_to_close=stats_time_to_close, 
average_time_to_answer=stats_time_to_answer, + average_time_in_draft=stats_time_in_draft, average_time_in_labels=stats_time_in_labels, num_issues_opened=num_issues_open, num_issues_closed=num_issues_closed, @@ -345,9 +352,9 @@ def main(): # pragma: no cover shutil.move("issue_metrics_0.md", "issue_metrics.md") print( "Issue metrics markdown file is too large for GitHub issue body and has been \ - split into multiple files. ie. issue_metrics.md, issue_metrics_1.md, etc. \ - The full file is saved as issue_metrics_full.md\n\ - See https://github.com/github/issue-metrics/blob/main/docs/dealing-with-large-issue-metrics.md" +split into multiple files. ie. issue_metrics.md, issue_metrics_1.md, etc. \ +The full file is saved as issue_metrics_full.md\n\ +See https://github.com/github/issue-metrics/blob/main/docs/dealing-with-large-issue-metrics.md" ) diff --git a/json_writer.py b/json_writer.py index 0b191dc..015dbe4 100644 --- a/json_writer.py +++ b/json_writer.py @@ -2,12 +2,15 @@ Functions: write_to_json( - issues_with_metrics: List[IssueWithMetrics], - average_time_to_first_response: timedelta, - average_time_to_close: timedelta, - average_time_to_answer: timedelta, - num_issues_opened: int, - num_issues_closed: int, + issues_with_metrics: Union[List[IssueWithMetrics], None], + stats_time_to_first_response: Union[dict[str, timedelta], None], + stats_time_to_close: Union[dict[str, timedelta], None], + stats_time_to_answer: Union[dict[str, timedelta], None], + stats_time_in_draft: Union[dict[str, timedelta], None], + stats_time_in_labels: Union[dict[str, dict[str, timedelta]], None], + num_issues_opened: Union[int, None], + num_issues_closed: Union[int, None], + num_mentor_count: Union[int, None], search_query: str, output_file: str, ) -> str: @@ -28,6 +31,7 @@ def write_to_json( stats_time_to_first_response: Union[dict[str, timedelta], None], stats_time_to_close: Union[dict[str, timedelta], None], stats_time_to_answer: Union[dict[str, timedelta], None], + stats_time_in_draft: Union[dict[str, timedelta], None], stats_time_in_labels: Union[dict[str, dict[str, timedelta]], None], num_issues_opened: Union[int, None], num_issues_closed: Union[int, None], @@ -40,37 +44,48 @@ def write_to_json( json structure is like following { - "average_time_to_first_response": "2 days, 12:00:00", - "average_time_to_close": "5 days, 0:00:00", - "average_time_to_answer": "1 day, 0:00:00", + "average_time_to_first_response": "None", + "average_time_to_close": "None", + "average_time_to_answer": "None", + "average_time_in_draft": "None", + "average_time_in_labels": {}, + "median_time_to_first_response": "None", + "median_time_to_close": "None", + "median_time_to_answer": "None", + "median_time_in_draft": "None", + "median_time_in_labels": {}, + "90_percentile_time_to_first_response": "None", + "90_percentile_time_to_close": "None", + "90_percentile_time_to_answer": "None", + "90_percentile_time_in_draft": "None", + "90_percentile_time_in_labels": {}, "num_items_opened": 2, - "num_items_closed": 1, + "num_items_closed": 0, "num_mentor_count": 5, "total_item_count": 2, "issues": [ { "title": "Issue 1", "html_url": "https://github.com/owner/repo/issues/1", - "author": "author", - "time_to_first_response": "3 days, 0:00:00", - "time_to_close": "6 days, 0:00:00", + "author": "alice", + "time_to_first_response": "None", + "time_to_close": "None", "time_to_answer": "None", - "label_metrics": { - "bug": "1 day, 16:24:12" - } + "time_in_draft": "None", + "label_metrics": {} }, { "title": "Issue 2", "html_url": 
"https://github.com/owner/repo/issues/2", - "author": "author", - "time_to_first_response": "2 days, 0:00:00", - "time_to_close": "4 days, 0:00:00", - "time_to_answer": "1 day, 0:00:00", - "label_metrics": { - } - }, + "author": "bob", + "time_to_first_response": "None", + "time_to_close": "None", + "time_to_answer": "None", + "time_in_draft": "None", + "label_metrics": {} + } ], - "search_query": "is:issue is:open repo:owner/repo" + "search_query": "is:issue repo:owner/repo" } """ @@ -106,6 +121,16 @@ def write_to_json( med_time_to_answer = stats_time_to_answer["med"] p90_time_to_answer = stats_time_to_answer["90p"] + # time in draft + average_time_in_draft = None + med_time_in_draft = None + p90_time_in_draft = None + if stats_time_in_draft is not None: + average_time_in_draft = stats_time_in_draft["avg"] + med_time_in_draft = stats_time_in_draft["med"] + p90_time_in_draft = stats_time_in_draft["90p"] + + # time in labels average_time_in_labels = {} med_time_in_labels = {} p90_time_in_labels = {} @@ -122,14 +147,17 @@ def write_to_json( "average_time_to_first_response": str(average_time_to_first_response), "average_time_to_close": str(average_time_to_close), "average_time_to_answer": str(average_time_to_answer), + "average_time_in_draft": str(average_time_in_draft), "average_time_in_labels": average_time_in_labels, "median_time_to_first_response": str(med_time_to_first_response), "median_time_to_close": str(med_time_to_close), "median_time_to_answer": str(med_time_to_answer), + "median_time_in_draft": str(med_time_in_draft), "median_time_in_labels": med_time_in_labels, "90_percentile_time_to_first_response": str(p90_time_to_first_response), "90_percentile_time_to_close": str(p90_time_to_close), "90_percentile_time_to_answer": str(p90_time_to_answer), + "90_percentile_time_in_draft": str(p90_time_in_draft), "90_percentile_time_in_labels": p90_time_in_labels, "num_items_opened": num_issues_opened, "num_items_closed": num_issues_closed, @@ -152,6 +180,7 @@ def write_to_json( "time_to_first_response": str(issue.time_to_first_response), "time_to_close": str(issue.time_to_close), "time_to_answer": str(issue.time_to_answer), + "time_in_draft": str(issue.time_in_draft), "label_metrics": formatted_label_metrics, } ) diff --git a/markdown_writer.py b/markdown_writer.py index e8cb5e1..bcebc7e 100644 --- a/markdown_writer.py +++ b/markdown_writer.py @@ -71,6 +71,10 @@ def get_non_hidden_columns(labels) -> List[str]: if not hide_time_to_answer: columns.append("Time to answer") + enable_time_in_draft = env_vars.draft_pr_tracking + if enable_time_in_draft: + columns.append("Time in draft") + hide_label_metrics = env_vars.hide_label_metrics if not hide_label_metrics and labels: for label in labels: @@ -84,6 +88,7 @@ def write_to_markdown( average_time_to_first_response: Union[dict[str, timedelta], None], average_time_to_close: Union[dict[str, timedelta], None], average_time_to_answer: Union[dict[str, timedelta], None], + average_time_in_draft: Union[dict[str, timedelta], None], average_time_in_labels: Union[dict, None], num_issues_opened: Union[int, None], num_issues_closed: Union[int, None], @@ -104,6 +109,7 @@ def write_to_markdown( response for the issues. average_time_to_close (datetime.timedelta): The average time to close for the issues. average_time_to_answer (datetime.timedelta): The average time to answer the discussions. + average_time_in_draft (datetime.timedelta): The average time spent in draft for the issues. 
average_time_in_labels (dict): A dictionary containing the average time spent in each label. file (file object, optional): The file object to write to. If not provided, a file named "issue_metrics.md" will be created. @@ -112,9 +118,12 @@ def write_to_markdown( num_mentor_count (int): The number of very active commentors. labels (List[str]): A list of the labels that are used in the issues. search_query (str): The search query used to find the issues. - hide_label_metrics (bool): Represents whether the user has chosen to hide label metrics in the output - hide_items_closed_count (bool): Represents whether the user has chosen to hide the number of items closed - non_mentioning_links (bool): Represents whether links do not cause a notification in the desitnation repository + hide_label_metrics (bool): Represents whether the user has chosen to hide label + metrics in the output + hide_items_closed_count (bool): Represents whether the user has chosen to hide + the number of items closed + non_mentioning_links (bool): Represents whether links do not cause a notification + in the destination repository report_title (str): The title of the report output_file (str): The name of the file to write the report to @@ -131,7 +140,8 @@ def write_to_markdown( if not issues_with_metrics or len(issues_with_metrics) == 0: file.write("no issues found for the given search criteria\n\n") file.write( - "\n_This report was generated with the [Issue Metrics Action](https://github.com/github/issue-metrics)_\n" + "\n_This report was generated with the \ +[Issue Metrics Action](https://github.com/github/issue-metrics)_\n" ) if search_query: file.write(f"Search query used to find these items: `{search_query}`\n") @@ -143,6 +153,7 @@ def write_to_markdown( average_time_to_first_response, average_time_to_close, average_time_to_answer, + average_time_in_draft, average_time_in_labels, num_issues_opened, num_issues_closed, @@ -189,13 +200,16 @@ def write_to_markdown( file.write(f" {issue.time_to_close} |") if "Time to answer" in columns: file.write(f" {issue.time_to_answer} |") + if "Time in draft" in columns: + file.write(f" {issue.time_in_draft} |") if labels and issue.label_metrics: for label in labels: if f"Time spent in {label}" in columns: file.write(f" {issue.label_metrics[label]} |") file.write("\n") file.write( - "\n_This report was generated with the [Issue Metrics Action](https://github.com/github/issue-metrics)_\n" + "\n_This report was generated with the \ +[Issue Metrics Action](https://github.com/github/issue-metrics)_\n" ) if search_query: file.write(f"Search query used to find these items: `{search_query}`\n") @@ -208,6 +222,7 @@ def write_overall_metrics_tables( stats_time_to_first_response, stats_time_to_close, stats_time_to_answer, + average_time_in_draft, stats_time_in_labels, num_issues_opened, num_issues_closed, @@ -219,12 +234,15 @@ def write_overall_metrics_tables( hide_items_closed_count=False, ): """Write the overall metrics tables to the markdown file.""" - if ( - "Time to first response" in columns - or "Time to close" in columns - or "Time to answer" in columns - or (hide_label_metrics is False and len(labels) > 0) - ): + if any( + column in columns + for column in [ + "Time to first response", + "Time to close", + "Time to answer", + "Time in draft", + ] + ) or (hide_label_metrics is False and len(labels) > 0): file.write("| Metric | Average | Median | 90th percentile |\n") file.write("| --- | --- | --- | ---: |\n") if "Time to first response" in columns: @@ -257,6 +275,16 @@ def 
write_overall_metrics_tables( ) else: file.write("| Time to answer | None | None | None |\n") + if "Time in draft" in columns: + if average_time_in_draft is not None: + file.write( + f"| Time in draft " + f"| {average_time_in_draft['avg']} " + f"| {average_time_in_draft['med']} " + f"| {average_time_in_draft['90p']} |\n" + ) + else: + file.write("| Time in draft | None | None | None |\n") if labels and stats_time_in_labels: for label in labels: if ( diff --git a/test_config.py b/test_config.py index ea7ddb2..dab4e7d 100644 --- a/test_config.py +++ b/test_config.py @@ -116,28 +116,28 @@ def setUp(self): def test_get_env_vars_with_github_app(self): """Test that all environment variables are set correctly using GitHub App""" expected_result = EnvVars( - 12345, - 678910, - b"hello", - "", - "", - False, - False, - False, - False, - False, - False, - [], - [], - False, - "10", - "20", - "3", - SEARCH_QUERY, - False, - "", - "", - False, + gh_app_id=12345, + gh_app_installation_id=678910, + gh_app_private_key_bytes=b"hello", + gh_token="", + ghe="", + hide_author=False, + hide_items_closed_count=False, + hide_label_metrics=False, + hide_time_to_answer=False, + hide_time_to_close=False, + hide_time_to_first_response=False, + ignore_user=[], + labels_to_measure=[], + enable_mentor_count=False, + min_mentor_comments="10", + max_comments_eval="20", + heavily_involved_cutoff="3", + search_query=SEARCH_QUERY, + non_mentioning_links=False, + report_title="", + output_file="", + draft_pr_tracking=False, ) result = get_env_vars(True) self.assertEqual(str(result), str(expected_result)) @@ -168,27 +168,27 @@ def test_get_env_vars_with_github_app(self): def test_get_env_vars_with_token(self): """Test that all environment variables are set correctly using a list of repositories""" expected_result = EnvVars( - None, - None, - b"", - TOKEN, - "", - False, - False, - False, - False, - False, - False, - [], - [], - False, - "10", - "20", - "3", - SEARCH_QUERY, - False, - "", - "", + gh_app_id=None, + gh_app_installation_id=None, + gh_app_private_key_bytes=b"", + gh_token=TOKEN, + ghe="", + hide_author=False, + hide_items_closed_count=False, + hide_label_metrics=False, + hide_time_to_answer=False, + hide_time_to_close=False, + hide_time_to_first_response=False, + ignore_user=[], + labels_to_measure=[], + enable_mentor_count=False, + min_mentor_comments="10", + max_comments_eval="20", + heavily_involved_cutoff="3", + search_query=SEARCH_QUERY, + non_mentioning_links=False, + report_title="", + output_file="", ) result = get_env_vars(True) self.assertEqual(str(result), str(expected_result)) @@ -248,32 +248,36 @@ def test_get_env_vars_missing_query(self): "OUTPUT_FILE": "issue_metrics.md", "REPORT_TITLE": "Issue Metrics", "SEARCH_QUERY": SEARCH_QUERY, + "RATE_LIMIT_BYPASS": "true", + "DRAFT_PR_TRACKING": "True", }, ) def test_get_env_vars_optional_values(self): """Test that optional values are set to their default values if not provided""" expected_result = EnvVars( - None, - None, - b"", - TOKEN, - "", - True, - True, - True, - True, - True, - True, - [], - ["waiting-for-review", "waiting-for-manager"], - False, - 10, - 20, - 3, - SEARCH_QUERY, - True, - "Issue Metrics", - "issue_metrics.md", + gh_app_id=None, + gh_app_installation_id=None, + gh_app_private_key_bytes=b"", + gh_token=TOKEN, + ghe="", + hide_author=True, + hide_items_closed_count=True, + hide_label_metrics=True, + hide_time_to_answer=True, + hide_time_to_close=True, + hide_time_to_first_response=True, + ignore_user=[], + 
labels_to_measure=["waiting-for-review", "waiting-for-manager"], + enable_mentor_count=False, + min_mentor_comments=10, + max_comments_eval=20, + heavily_involved_cutoff=3, + search_query=SEARCH_QUERY, + non_mentioning_links=True, + report_title="Issue Metrics", + output_file="issue_metrics.md", + rate_limit_bypass=True, + draft_pr_tracking=True, ) result = get_env_vars(True) self.assertEqual(str(result), str(expected_result)) @@ -292,27 +296,29 @@ def test_get_env_vars_optional_values(self): def test_get_env_vars_optionals_are_defaulted(self): """Test that optional values are set to their default values if not provided""" expected_result = EnvVars( - None, - None, - b"", - "TOKEN", - "", - False, - False, - False, - False, - False, - False, - [], - [], - False, - "10", - "20", - "3", - SEARCH_QUERY, - False, - "Issue Metrics", - "", + gh_app_id=None, + gh_app_installation_id=None, + gh_app_private_key_bytes=b"", + gh_token="TOKEN", + ghe="", + hide_author=False, + hide_items_closed_count=False, + hide_label_metrics=False, + hide_time_to_answer=False, + hide_time_to_close=False, + hide_time_to_first_response=False, + ignore_user=[], + labels_to_measure=[], + enable_mentor_count=False, + min_mentor_comments="10", + max_comments_eval="20", + heavily_involved_cutoff="3", + search_query=SEARCH_QUERY, + non_mentioning_links=False, + report_title="Issue Metrics", + output_file="", + rate_limit_bypass=False, + draft_pr_tracking=False, ) result = get_env_vars(True) self.assertEqual(str(result), str(expected_result)) diff --git a/test_json_writer.py b/test_json_writer.py index 5dae93c..1525d6c 100644 --- a/test_json_writer.py +++ b/test_json_writer.py @@ -24,6 +24,7 @@ def test_write_to_json(self): time_to_first_response=timedelta(days=3), time_to_close=timedelta(days=6), time_to_answer=None, + time_in_draft=timedelta(days=1), labels_metrics={ "bug": timedelta(days=1, hours=16, minutes=24, seconds=12) }, @@ -54,6 +55,11 @@ def test_write_to_json(self): "med": timedelta(days=2), "90p": timedelta(days=3), } + stats_time_in_draft = { + "avg": timedelta(days=1), + "med": timedelta(days=1), + "90p": timedelta(days=1), + } stats_time_in_labels = { "avg": {"bug": timedelta(days=1, hours=16, minutes=24, seconds=12)}, "med": {"bug": timedelta(days=1, hours=16, minutes=24, seconds=12)}, @@ -67,14 +73,17 @@ def test_write_to_json(self): "average_time_to_first_response": "2 days, 12:00:00", "average_time_to_close": "5 days, 0:00:00", "average_time_to_answer": "1 day, 0:00:00", + "average_time_in_draft": "1 day, 0:00:00", "average_time_in_labels": {"bug": "1 day, 16:24:12"}, "median_time_to_first_response": "2 days, 12:00:00", "median_time_to_close": "4 days, 0:00:00", "median_time_to_answer": "2 days, 0:00:00", + "median_time_in_draft": "1 day, 0:00:00", "median_time_in_labels": {"bug": "1 day, 16:24:12"}, "90_percentile_time_to_first_response": "1 day, 12:00:00", "90_percentile_time_to_close": "3 days, 0:00:00", "90_percentile_time_to_answer": "3 days, 0:00:00", + "90_percentile_time_in_draft": "1 day, 0:00:00", "90_percentile_time_in_labels": {"bug": "1 day, 16:24:12"}, "num_items_opened": 2, "num_items_closed": 1, @@ -88,6 +97,7 @@ def test_write_to_json(self): "time_to_first_response": "3 days, 0:00:00", "time_to_close": "6 days, 0:00:00", "time_to_answer": "None", + "time_in_draft": "1 day, 0:00:00", "label_metrics": {"bug": "1 day, 16:24:12"}, }, { @@ -97,6 +107,7 @@ def test_write_to_json(self): "time_to_first_response": "2 days, 0:00:00", "time_to_close": "4 days, 0:00:00", "time_to_answer": "1 day, 
0:00:00", + "time_in_draft": "None", "label_metrics": {}, }, ], @@ -110,6 +121,7 @@ def test_write_to_json(self): stats_time_to_first_response=stats_time_to_first_response, stats_time_to_close=stats_time_to_close, stats_time_to_answer=stats_time_to_answer, + stats_time_in_draft=stats_time_in_draft, stats_time_in_labels=stats_time_in_labels, num_issues_opened=num_issues_opened, num_issues_closed=num_issues_closed, @@ -151,6 +163,7 @@ def test_write_to_json_with_no_response(self): "med": {}, "90p": {}, } + stats_time_in_draft = None num_issues_opened = 2 num_issues_closed = 0 num_mentor_count = 5 @@ -159,14 +172,17 @@ def test_write_to_json_with_no_response(self): "average_time_to_first_response": "None", "average_time_to_close": "None", "average_time_to_answer": "None", + "average_time_in_draft": "None", "average_time_in_labels": {}, "median_time_to_first_response": "None", "median_time_to_close": "None", "median_time_to_answer": "None", + "median_time_in_draft": "None", "median_time_in_labels": {}, "90_percentile_time_to_first_response": "None", "90_percentile_time_to_close": "None", "90_percentile_time_to_answer": "None", + "90_percentile_time_in_draft": "None", "90_percentile_time_in_labels": {}, "num_items_opened": 2, "num_items_closed": 0, @@ -180,6 +196,7 @@ def test_write_to_json_with_no_response(self): "time_to_first_response": "None", "time_to_close": "None", "time_to_answer": "None", + "time_in_draft": "None", "label_metrics": {}, }, { @@ -189,6 +206,7 @@ def test_write_to_json_with_no_response(self): "time_to_first_response": "None", "time_to_close": "None", "time_to_answer": "None", + "time_in_draft": "None", "label_metrics": {}, }, ], @@ -202,6 +220,7 @@ def test_write_to_json_with_no_response(self): stats_time_to_first_response=stats_time_to_first_response, stats_time_to_close=stats_time_to_close, stats_time_to_answer=stats_time_to_answer, + stats_time_in_draft=stats_time_in_draft, stats_time_in_labels=stats_time_in_labels, num_issues_opened=num_issues_opened, num_issues_closed=num_issues_closed, diff --git a/test_labels.py b/test_labels.py index e7e8b5f..2fb8212 100644 --- a/test_labels.py +++ b/test_labels.py @@ -101,7 +101,13 @@ def setUp(self): self.issues_with_metrics = MagicMock() self.issues_with_metrics = [ IssueWithMetrics( - "issue1", "url1", "alice", None, None, None, {"bug": timedelta(days=2)} + title="issue1", + html_url="url1", + author="alice", + time_to_first_response=None, + time_to_close=None, + time_to_answer=None, + labels_metrics={"bug": timedelta(days=2)}, ), ] diff --git a/test_markdown_writer.py b/test_markdown_writer.py index 3678d0b..7f69abd 100644 --- a/test_markdown_writer.py +++ b/test_markdown_writer.py @@ -18,7 +18,11 @@ @patch.dict( os.environ, - {"SEARCH_QUERY": "is:open repo:user/repo", "GH_TOKEN": "test_token"}, + { + "SEARCH_QUERY": "is:open repo:user/repo", + "GH_TOKEN": "test_token", + "DRAFT_PR_TRACKING": "True", + }, ) class TestWriteToMarkdown(unittest.TestCase): """Test the write_to_markdown function.""" @@ -37,22 +41,24 @@ def test_write_to_markdown(self): # Create mock data issues_with_metrics = [ IssueWithMetrics( - "Issue 1", - "https://github.com/user/repo/issues/1", - "alice", - timedelta(days=1), - timedelta(days=2), - timedelta(days=3), - {"bug": timedelta(days=1)}, + title="Issue 1", + html_url="https://github.com/user/repo/issues/1", + author="alice", + time_to_first_response=timedelta(days=1), + time_to_close=timedelta(days=2), + time_to_answer=timedelta(days=3), + time_in_draft=timedelta(days=1), + labels_metrics={"bug": 
timedelta(days=4)}, ), IssueWithMetrics( - "Issue 2\r", - "https://github.com/user/repo/issues/2", - "bob", - timedelta(days=3), - timedelta(days=4), - timedelta(days=5), - {"bug": timedelta(days=2)}, + title="Issue 2\r", + html_url="https://github.com/user/repo/issues/2", + author="bob", + time_to_first_response=timedelta(days=3), + time_to_close=timedelta(days=4), + time_to_answer=timedelta(days=5), + time_in_draft=timedelta(days=1), + labels_metrics={"bug": timedelta(days=2)}, ), ] time_to_first_response = { @@ -70,6 +76,11 @@ def test_write_to_markdown(self): "med": timedelta(days=4), "90p": timedelta(days=4), } + time_in_draft = { + "avg": timedelta(days=1), + "med": timedelta(days=1), + "90p": timedelta(days=1), + } time_in_labels = { "avg": {"bug": "1 day, 12:00:00"}, "med": {"bug": "1 day, 12:00:00"}, @@ -86,6 +97,7 @@ def test_write_to_markdown(self): average_time_to_first_response=time_to_first_response, average_time_to_close=time_to_close, average_time_to_answer=time_to_answer, + average_time_in_draft=time_in_draft, average_time_in_labels=time_in_labels, num_issues_opened=num_issues_opened, num_issues_closed=num_issues_closed, @@ -106,6 +118,7 @@ def test_write_to_markdown(self): "| Time to first response | 2 days, 0:00:00 | 2 days, 0:00:00 | 2 days, 0:00:00 |\n" "| Time to close | 3 days, 0:00:00 | 3 days, 0:00:00 | 3 days, 0:00:00 |\n" "| Time to answer | 4 days, 0:00:00 | 4 days, 0:00:00 | 4 days, 0:00:00 |\n" + "| Time in draft | 1 day, 0:00:00 | 1 day, 0:00:00 | 1 day, 0:00:00 |\n" "| Time spent in bug | 1 day, 12:00:00 | 1 day, 12:00:00 | 1 day, 12:00:00 |\n" "\n" "| Metric | Count |\n" @@ -115,12 +128,12 @@ def test_write_to_markdown(self): "| Number of most active mentors | 5 |\n" "| Total number of items created | 2 |\n\n" "| Title | URL | Author | Time to first response | Time to close |" - " Time to answer | Time spent in bug |\n" - "| --- | --- | --- | --- | --- | --- | --- |\n" + " Time to answer | Time in draft | Time spent in bug |\n" + "| --- | --- | --- | --- | --- | --- | --- | --- |\n" "| Issue 1 | https://github.com/user/repo/issues/1 | [alice](https://github.com/alice) | 1 day, 0:00:00 | " - "2 days, 0:00:00 | 3 days, 0:00:00 | 1 day, 0:00:00 |\n" + "2 days, 0:00:00 | 3 days, 0:00:00 | 1 day, 0:00:00 | 4 days, 0:00:00 |\n" "| Issue 2 | https://github.com/user/repo/issues/2 | [bob](https://github.com/bob) | 3 days, 0:00:00 | " - "4 days, 0:00:00 | 5 days, 0:00:00 | 2 days, 0:00:00 |\n\n" + "4 days, 0:00:00 | 5 days, 0:00:00 | 1 day, 0:00:00 | 2 days, 0:00:00 |\n\n" "_This report was generated with the [Issue Metrics Action](https://github.com/github/issue-metrics)_\n" "Search query used to find these items: `is:issue is:open label:bug`\n" ) @@ -139,22 +152,23 @@ def test_write_to_markdown_with_vertical_bar_in_title(self): # Create mock data issues_with_metrics = [ IssueWithMetrics( - "Issue 1", - "https://github.com/user/repo/issues/1", - "alice", - timedelta(days=1), - timedelta(days=2), - timedelta(days=3), - {"bug": timedelta(days=1)}, + title="Issue 1", + html_url="https://github.com/user/repo/issues/1", + author="alice", + time_to_first_response=timedelta(days=1), + time_to_close=timedelta(days=2), + time_to_answer=timedelta(days=3), + time_in_draft=timedelta(days=1), + labels_metrics={"bug": timedelta(days=1)}, ), IssueWithMetrics( - "feat| Issue 2", # title contains a vertical bar - "https://github.com/user/repo/issues/2", - "bob", - timedelta(days=3), - timedelta(days=4), - timedelta(days=5), - {"bug": timedelta(days=2)}, + title="feat| Issue 2", # 
title contains a vertical bar + html_url="https://github.com/user/repo/issues/2", + author="bob", + time_to_first_response=timedelta(days=3), + time_to_close=timedelta(days=4), + time_to_answer=timedelta(days=5), + labels_metrics={"bug": timedelta(days=2)}, ), ] average_time_to_first_response = { @@ -172,6 +186,11 @@ def test_write_to_markdown_with_vertical_bar_in_title(self): "med": timedelta(days=4), "90p": timedelta(days=4), } + average_time_in_draft = { + "avg": timedelta(days=1), + "med": timedelta(days=1), + "90p": timedelta(days=1), + } average_time_in_labels = { "avg": {"bug": "1 day, 12:00:00"}, "med": {"bug": "1 day, 12:00:00"}, @@ -188,6 +207,7 @@ def test_write_to_markdown_with_vertical_bar_in_title(self): average_time_to_first_response=average_time_to_first_response, average_time_to_close=average_time_to_close, average_time_to_answer=average_time_to_answer, + average_time_in_draft=average_time_in_draft, average_time_in_labels=average_time_in_labels, num_issues_opened=num_issues_opened, num_issues_closed=num_issues_closed, @@ -207,6 +227,7 @@ def test_write_to_markdown_with_vertical_bar_in_title(self): "| Time to first response | 2 days, 0:00:00 | 2 days, 0:00:00 | 2 days, 0:00:00 |\n" "| Time to close | 3 days, 0:00:00 | 3 days, 0:00:00 | 3 days, 0:00:00 |\n" "| Time to answer | 4 days, 0:00:00 | 4 days, 0:00:00 | 4 days, 0:00:00 |\n" + "| Time in draft | 1 day, 0:00:00 | 1 day, 0:00:00 | 1 day, 0:00:00 |\n" "| Time spent in bug | 1 day, 12:00:00 | 1 day, 12:00:00 | 1 day, 12:00:00 |\n" "\n" "| Metric | Count |\n" @@ -216,12 +237,12 @@ def test_write_to_markdown_with_vertical_bar_in_title(self): "| Number of most active mentors | 5 |\n" "| Total number of items created | 2 |\n\n" "| Title | URL | Author | Time to first response | Time to close |" - " Time to answer | Time spent in bug |\n" - "| --- | --- | --- | --- | --- | --- | --- |\n" + " Time to answer | Time in draft | Time spent in bug |\n" + "| --- | --- | --- | --- | --- | --- | --- | --- |\n" "| Issue 1 | https://github.com/user/repo/issues/1 | [alice](https://github.com/alice) | 1 day, 0:00:00 | " - "2 days, 0:00:00 | 3 days, 0:00:00 | 1 day, 0:00:00 |\n" + "2 days, 0:00:00 | 3 days, 0:00:00 | 1 day, 0:00:00 | 1 day, 0:00:00 |\n" "| feat| Issue 2 | https://github.com/user/repo/issues/2 | [bob](https://github.com/bob) | 3 days, 0:00:00 | " - "4 days, 0:00:00 | 5 days, 0:00:00 | 2 days, 0:00:00 |\n\n" + "4 days, 0:00:00 | 5 days, 0:00:00 | None | 2 days, 0:00:00 |\n\n" "_This report was generated with the [Issue Metrics Action](https://github.com/github/issue-metrics)_\n" ) self.assertEqual(content, expected_content) @@ -240,6 +261,7 @@ def test_write_to_markdown_no_issues(self): None, None, None, + None, report_title="Issue Metrics", ) @@ -289,6 +311,7 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self): time_to_first_response=timedelta(minutes=10), time_to_close=timedelta(days=1), time_to_answer=timedelta(hours=2), + time_in_draft=timedelta(days=1), labels_metrics={ "label1": timedelta(days=1), }, @@ -308,6 +331,7 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self): average_time_to_first_response = timedelta(minutes=15) average_time_to_close = timedelta(days=1.5) average_time_to_answer = timedelta(hours=3) + average_time_in_draft = timedelta(days=1) average_time_in_labels = { "label1": timedelta(days=1), } @@ -322,6 +346,7 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self): average_time_to_close=average_time_to_close, average_time_to_answer=average_time_to_answer, 
average_time_in_labels=average_time_in_labels, + average_time_in_draft=average_time_in_draft, num_issues_opened=num_issues_opened, num_issues_closed=num_issues_closed, num_mentor_count=num_mentor_count, diff --git a/test_time_in_draft.py b/test_time_in_draft.py new file mode 100644 index 0000000..486cb9b --- /dev/null +++ b/test_time_in_draft.py @@ -0,0 +1,108 @@ +"""A test suite for the measure_time_in_draft and get_stats_time_in_draft functions.""" + +import unittest +from datetime import datetime, timedelta +from unittest.mock import MagicMock + +import pytz +from time_in_draft import get_stats_time_in_draft, measure_time_in_draft + + +class TestMeasureTimeInDraft(unittest.TestCase): + """ + Unit tests for the measure_time_in_draft function. + """ + + def setUp(self): + """ + Set up common test data and mocks. + """ + self.issue = MagicMock() + self.issue.issue.created_at = datetime(2021, 1, 1, tzinfo=pytz.utc) + self.issue.issue.state = "open" + + def test_time_in_draft_with_ready_for_review(self): + """ + Test measure_time_in_draft when ready_for_review_at is provided. + """ + ready_for_review_at = datetime(2021, 1, 3, tzinfo=pytz.utc) + result = measure_time_in_draft(self.issue, ready_for_review_at) + expected = timedelta(days=2) + self.assertEqual(result, expected, "The time in draft should be 2 days.") + + def test_time_in_draft_without_ready_for_review(self): + """ + Test measure_time_in_draft when ready_for_review_at is not provided and issue is still open. + """ + now = datetime(2021, 1, 4, tzinfo=pytz.utc) + with unittest.mock.patch("time_in_draft.datetime") as mock_datetime: + mock_datetime.now.return_value = now + result = measure_time_in_draft(self.issue, None) + expected = timedelta(days=3) + self.assertEqual(result, expected, "The time in draft should be 3 days.") + + def test_time_in_draft_without_ready_for_review_and_closed(self): + """ + Test measure_time_in_draft when ready_for_review_at is not provided and issue is closed. + """ + self.issue.issue.state = "closed" + result = measure_time_in_draft(self.issue, None) + self.assertIsNone( + result, "The result should be None when draft was never used." + ) + + +class TestGetStatsTimeInDraft(unittest.TestCase): + """ + Unit tests for the get_stats_time_in_draft function. + """ + + def test_get_stats_time_in_draft_with_data(self): + """ + Test get_stats_time_in_draft with valid draft times. + """ + issues = [ + MagicMock(time_in_draft=timedelta(days=1)), + MagicMock(time_in_draft=timedelta(days=2)), + MagicMock(time_in_draft=timedelta(days=3)), + ] + + result = get_stats_time_in_draft(issues) + expected = { + "avg": timedelta(days=2), + "med": timedelta(days=2), + "90p": timedelta(days=2, seconds=69120), + } + + self.assertEqual( + result, expected, "The statistics for time in draft are incorrect." + ) + + def test_get_stats_time_in_draft_no_data(self): + """ + Test get_stats_time_in_draft with no draft times. + """ + issues = [ + MagicMock(time_in_draft=None), + MagicMock(time_in_draft=None), + ] + + result = get_stats_time_in_draft(issues) + self.assertIsNone( + result, "The result should be None when there are no draft times." + ) + + def test_get_stats_time_in_draft_empty_list(self): + """ + Test get_stats_time_in_draft with an empty list of issues. + """ + issues = [] + + result = get_stats_time_in_draft(issues) + self.assertIsNone( + result, "The result should be None when the list of issues is empty."
+ ) + + +if __name__ == "__main__": + unittest.main() diff --git a/time_in_draft.py b/time_in_draft.py new file mode 100644 index 0000000..59fe24a --- /dev/null +++ b/time_in_draft.py @@ -0,0 +1,72 @@ +""" +This module contains functions that measure the time a pull request has been in draft state. +""" + +from datetime import datetime, timedelta +from typing import List, Union + +import github3 +import numpy +import pytz +from classes import IssueWithMetrics + + +def measure_time_in_draft( + issue: github3.issues.Issue, + ready_for_review_at: Union[datetime, None], +) -> Union[timedelta, None]: + """If a pull request has had time in the draft state, return the amount of time it was in draft. + + args: + issue (github3.issues.Issue): A GitHub issue which has been pre-qualified as a pull request. + ready_for_review_at (datetime | None): The time the pull request was marked as + ready for review. + + returns: + Union[timedelta, None]: The time the pull request was in draft state. + """ + if ready_for_review_at: + return ready_for_review_at - issue.issue.created_at + if issue.issue.state == "open": + return datetime.now(pytz.utc) - issue.issue.created_at + return None + + +def get_stats_time_in_draft( + issues_with_metrics: List[IssueWithMetrics], +) -> Union[dict[str, timedelta], None]: + """ + Calculate stats describing the time in draft for a list of issues. + """ + # Filter out issues with no time in draft + issues_with_time_to_draft = [ + issue for issue in issues_with_metrics if issue.time_in_draft is not None + ] + + # Calculate the total time in draft for all issues + draft_times = [] + if issues_with_time_to_draft: + for issue in issues_with_time_to_draft: + if issue.time_in_draft: + draft_times.append(issue.time_in_draft.total_seconds()) + + # Calculate stats describing time in draft + num_issues_with_time_in_draft = len(issues_with_time_to_draft) + if num_issues_with_time_in_draft > 0: + average_time_in_draft = numpy.round(numpy.average(draft_times)) + med_time_in_draft = numpy.round(numpy.median(draft_times)) + ninety_percentile_time_in_draft = numpy.round( + numpy.percentile(draft_times, 90, axis=0) + ) + else: + return None + + stats = { + "avg": timedelta(seconds=average_time_in_draft), + "med": timedelta(seconds=med_time_in_draft), + "90p": timedelta(seconds=ninety_percentile_time_in_draft), + } + + # Print the average time in draft converting seconds to a readable time format + print(f"Average time in draft: {timedelta(seconds=average_time_in_draft)}") + return stats diff --git a/time_to_ready_for_review.py b/time_to_ready_for_review.py index f95ffc3..5c5d772 100644 --- a/time_to_ready_for_review.py +++ b/time_to_ready_for_review.py @@ -21,7 +21,8 @@ def get_time_to_ready_for_review( - issue: github3.issues.Issue, pull_request: github3.pulls.PullRequest + issue: github3.issues.Issue, + pull_request: github3.pulls.PullRequest, ) -> Union[datetime, None]: """If a pull request was formerly a draft, get the time it was marked as ready for review
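Taken together, the new `time_in_draft.py` helpers can be exercised in isolation. Below is a minimal sketch, assuming the repository's dependencies (`numpy`, `pytz`, `github3`) are installed and the snippet runs from the repo root; `FakeIssue` is a hypothetical stand-in for `IssueWithMetrics` that exposes only the one attribute `get_stats_time_in_draft` reads:

```python
from datetime import timedelta

from time_in_draft import get_stats_time_in_draft


class FakeIssue:
    """Hypothetical stand-in for IssueWithMetrics; only time_in_draft is read."""

    def __init__(self, time_in_draft):
        self.time_in_draft = time_in_draft


issues = [
    FakeIssue(timedelta(days=1)),
    FakeIssue(timedelta(days=3)),
    FakeIssue(None),  # never in draft, so it is filtered out of the stats
]

# Only the two non-None draft times are counted: avg and med are 2 days, and
# numpy's linear interpolation puts the 90th percentile at 2.8 days (2 days, 19:12:00).
stats = get_stats_time_in_draft(issues)
print(stats)
```

Note that, as the tests above confirm, an open pull request with no `ready_for_review_at` timestamp is treated by `measure_time_in_draft` as still in draft, so its reported time in draft keeps growing until it is either marked ready for review or closed; the whole computation is gated behind the `DRAFT_PR_TRACKING` flag.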