diff --git a/issue_metrics.py b/issue_metrics.py
index d9102a6..ef3c81b 100644
--- a/issue_metrics.py
+++ b/issue_metrics.py
@@ -32,14 +32,14 @@
 from classes import IssueWithMetrics
 from discussions import get_discussions
 from json_writer import write_to_json
-from labels import get_average_time_in_labels, get_label_metrics
+from labels import get_stats_time_in_labels, get_label_metrics
 from markdown_writer import write_to_markdown
-from time_to_answer import get_average_time_to_answer, measure_time_to_answer
-from time_to_close import get_average_time_to_close, measure_time_to_close
+from time_to_answer import get_stats_time_to_answer, measure_time_to_answer
+from time_to_close import get_stats_time_to_close, measure_time_to_close
 from time_to_ready_for_review import get_time_to_ready_for_review
 from time_to_merge import measure_time_to_merge
 from time_to_first_response import (
-    get_average_time_to_first_response,
+    get_stats_time_to_first_response,
     measure_time_to_first_response,
 )
@@ -317,36 +317,36 @@ def main():
         ignore_users=ignore_users,
     )

-    average_time_to_first_response = get_average_time_to_first_response(
+    stats_time_to_first_response = get_stats_time_to_first_response(
        issues_with_metrics
     )
-    average_time_to_close = None
+    stats_time_to_close = None
     if num_issues_closed > 0:
-        average_time_to_close = get_average_time_to_close(issues_with_metrics)
+        stats_time_to_close = get_stats_time_to_close(issues_with_metrics)

-    average_time_to_answer = get_average_time_to_answer(issues_with_metrics)
+    stats_time_to_answer = get_stats_time_to_answer(issues_with_metrics)

-    # Get the average time in label for each label and store it in a dictionary
-    # where the key is the label and the value is the average time
-    average_time_in_labels = get_average_time_in_labels(issues_with_metrics, labels)
+    # Get stats describing the time in label for each label and store them in a
+    # dictionary where the key is the label and the value is a dict of stats
+    stats_time_in_labels = get_stats_time_in_labels(issues_with_metrics, labels)

     # Write the results to json and a markdown file
     write_to_json(
         issues_with_metrics,
-        average_time_to_first_response,
-        average_time_to_close,
-        average_time_to_answer,
-        average_time_in_labels,
+        stats_time_to_first_response,
+        stats_time_to_close,
+        stats_time_to_answer,
+        stats_time_in_labels,
         num_issues_open,
         num_issues_closed,
         search_query,
     )
     write_to_markdown(
         issues_with_metrics,
-        average_time_to_first_response,
-        average_time_to_close,
-        average_time_to_answer,
-        average_time_in_labels,
+        stats_time_to_first_response,
+        stats_time_to_close,
+        stats_time_to_answer,
+        stats_time_in_labels,
         num_issues_open,
         num_issues_closed,
         labels,
diff --git a/labels.py b/labels.py
index 6af1d5b..8544ee4 100644
--- a/labels.py
+++ b/labels.py
@@ -3,6 +3,7 @@
 from typing import List

 import github3
+import numpy
 import pytz

 from classes import IssueWithMetrics
@@ -88,32 +89,39 @@ def get_label_metrics(issue: github3.issues.Issue, labels: List[str]) -> dict:
     return label_metrics


-def get_average_time_in_labels(
+def get_stats_time_in_labels(
     issues_with_metrics: List[IssueWithMetrics],
     labels: List[str],
-) -> dict[str, timedelta]:
-    """Calculate the average time spent in each label."""
-    average_time_in_labels = {}
-    number_of_issues_in_labels = {}
+) -> dict[str, dict]:
+    """Calculate stats describing time spent in each label."""
+    time_in_labels = {}
     for issue in issues_with_metrics:
         if issue.label_metrics:
             for label in issue.label_metrics:
                 if issue.label_metrics[label] is None:
                     continue
-                if label not in average_time_in_labels:
-                    average_time_in_labels[label] = issue.label_metrics[label]
-                    number_of_issues_in_labels[label] = 1
+                if label not in time_in_labels:
+                    time_in_labels[label] = [issue.label_metrics[label]]
                 else:
-                    average_time_in_labels[label] += issue.label_metrics[label]
-                    number_of_issues_in_labels[label] += 1
+                    time_in_labels[label].append(issue.label_metrics[label])

-    for label in average_time_in_labels:
-        average_time_in_labels[label] = (
-            average_time_in_labels[label] / number_of_issues_in_labels[label]
-        )
+    average_time_in_labels = {}
+    med_time_in_labels = {}
+    ninety_percentile_in_labels = {}
+    for label, time_list in time_in_labels.items():
+        average_time_in_labels[label] = numpy.average(time_list)
+        med_time_in_labels[label] = numpy.median(time_list)
+        ninety_percentile_in_labels[label] = numpy.percentile(time_list, 90, axis=0)

     for label in labels:
         if label not in average_time_in_labels:
             average_time_in_labels[label] = None
-
-    return average_time_in_labels
+            med_time_in_labels[label] = None
+            ninety_percentile_in_labels[label] = None
+
+    stats = {
+        'avg': average_time_in_labels,
+        'med': med_time_in_labels,
+        '90p': ninety_percentile_in_labels
+    }
+    return stats
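Note: the labels.py change above hands datetime.timedelta values straight to numpy, while the time_to_* modules later in this diff convert to seconds first. A minimal sanity check of the same avg/med/90p math in the seconds-based style; the describe_times helper is illustrative only, not part of this change:

    from datetime import timedelta

    import numpy

    def describe_times(times):
        """Summarize a list of timedeltas as avg/med/90p, via total seconds."""
        seconds = [t.total_seconds() for t in times]
        return {
            "avg": timedelta(seconds=numpy.average(seconds)),
            "med": timedelta(seconds=numpy.median(seconds)),
            "90p": timedelta(seconds=numpy.percentile(seconds, 90)),
        }

    # [1, 2, 10] days -> avg 4 days, 8:00:00; med 2 days; 90p 8 days, 9:36:00
    print(describe_times([timedelta(days=d) for d in (1, 2, 10)]))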
diff --git a/markdown_writer.py b/markdown_writer.py
index 82ed856..dc2d317 100644
--- a/markdown_writer.py
+++ b/markdown_writer.py
@@ -170,10 +170,10 @@ def write_to_markdown(

 def write_overall_metrics_table(
     issues_with_metrics,
-    average_time_to_first_response,
-    average_time_to_close,
-    average_time_to_answer,
-    average_time_in_labels,
+    stats_time_to_first_response,
+    stats_time_to_close,
+    stats_time_to_answer,
+    stats_time_in_labels,
     num_issues_opened,
     num_issues_closed,
     labels,
@@ -181,21 +181,46 @@
     file,
 ):
     """Write the overall metrics table to the markdown file."""
-    file.write("| Metric | Value |\n")
-    file.write("| --- | ---: |\n")
+    file.write("| Metric | Average | Median | 90th percentile |\n")
+    file.write("| --- | --- | --- | ---: |\n")
     if "Time to first response" in columns:
-        file.write(
-            f"| Average time to first response | {average_time_to_first_response} |\n"
-        )
+        if stats_time_to_first_response is not None:
+            file.write(
+                f"| Time to first response "
+                f"| {stats_time_to_first_response['avg']} "
+                f"| {stats_time_to_first_response['med']} "
+                f"| {stats_time_to_first_response['90p']} |\n"
+            )
+        else:
+            file.write("| Time to first response | None | None | None |\n")
     if "Time to close" in columns:
-        file.write(f"| Average time to close | {average_time_to_close} |\n")
+        if stats_time_to_close is not None:
+            file.write(
+                f"| Time to close "
+                f"| {stats_time_to_close['avg']} "
+                f"| {stats_time_to_close['med']} "
+                f"| {stats_time_to_close['90p']} |\n"
+            )
+        else:
+            file.write("| Time to close | None | None | None |\n")
     if "Time to answer" in columns:
-        file.write(f"| Average time to answer | {average_time_to_answer} |\n")
-    if labels and average_time_in_labels:
+        if stats_time_to_answer is not None:
+            file.write(
+                f"| Time to answer "
+                f"| {stats_time_to_answer['avg']} "
+                f"| {stats_time_to_answer['med']} "
+                f"| {stats_time_to_answer['90p']} |\n"
+            )
+        else:
+            file.write("| Time to answer | None | None | None |\n")
+    if labels and stats_time_in_labels:
         for label in labels:
-            if f"Time spent in {label}" in columns and label in average_time_in_labels:
+            if f"Time spent in {label}" in columns and label in stats_time_in_labels['avg']:
                 file.write(
-                    f"| Average time spent in {label} | {average_time_in_labels[label]} |\n"
+                    f"| Time spent in {label} "
+                    f"| {stats_time_in_labels['avg'][label]} "
+                    f"| {stats_time_in_labels['med'][label]} "
+                    f"| {stats_time_in_labels['90p'][label]} |\n"
                 )
     file.write(f"| Number of items that remain open | {num_issues_opened} |\n")
     file.write(f"| Number of items closed | {num_issues_closed} |\n")
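For illustration, with the placeholder durations used in the updated tests, the overall metrics table now renders as shown below (the count rows keep a single value column):

    | Metric | Average | Median | 90th percentile |
    | --- | --- | --- | ---: |
    | Time to first response | 2 days, 0:00:00 | 2 days, 0:00:00 | 2 days, 0:00:00 |
    | Time to close | 3 days, 0:00:00 | 3 days, 0:00:00 | 3 days, 0:00:00 |
    | Time to answer | 4 days, 0:00:00 | 4 days, 0:00:00 | 4 days, 0:00:00 |
    | Time spent in bug | 1 day, 12:00:00 | 1 day, 12:00:00 | 1 day, 12:00:00 |
    | Number of items that remain open | 2 |
    | Number of items closed | 1 |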
diff --git a/requirements.txt b/requirements.txt
index fcfd2df..f7bbb5d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,3 +6,4 @@ pytest==7.4.2
 pytest-cov==4.1.0
 flake8==6.1.0
 pylint==3.0.1
+numpy==1.26.0
diff --git a/test_issue_metrics.py b/test_issue_metrics.py
index 7b6d965..fdedb9c 100644
--- a/test_issue_metrics.py
+++ b/test_issue_metrics.py
@@ -133,7 +133,7 @@ class TestMain(unittest.TestCase):
     @patch("issue_metrics.auth_to_github")
     @patch("issue_metrics.search_issues")
     @patch("issue_metrics.measure_time_to_first_response")
-    @patch("issue_metrics.get_average_time_to_first_response")
+    @patch("issue_metrics.get_stats_time_to_first_response")
     @patch.dict(
         os.environ,
         {
@@ -142,7 +142,7 @@
     )
     def test_main(
         self,
-        mock_get_average_time_to_first_response,
+        mock_get_stats_time_to_first_response,
         mock_measure_time_to_first_response,
         mock_search_issues,
         mock_auth_to_github,
@@ -179,10 +179,10 @@ def test_main(
         ]
         mock_measure_time_to_first_response.return_value = mock_issues_with_ttfr

-        # Set up the mock get_average_time_to_first_response function
-        mock_average_time_to_first_response = 15
-        mock_get_average_time_to_first_response.return_value = (
-            mock_average_time_to_first_response
+        # Set up the mock get_stats_time_to_first_response function
+        mock_stats_time_to_first_response = 15
+        mock_get_stats_time_to_first_response.return_value = (
+            mock_stats_time_to_first_response
         )

         # Call main and check that it runs without errors
diff --git a/test_labels.py b/test_labels.py
index a266647..35c07e0 100644
--- a/test_labels.py
+++ b/test_labels.py
@@ -7,7 +7,7 @@
 import pytz

 from classes import IssueWithMetrics
-from labels import get_average_time_in_labels, get_label_events, get_label_metrics
+from labels import get_stats_time_in_labels, get_label_events, get_label_metrics


 class TestLabels(unittest.TestCase):
@@ -69,7 +69,7 @@ def test_get_label_metrics_open_issue(self):


 class TestGetAverageTimeInLabels(unittest.TestCase):
-    """Unit tests for get_average_time_in_labels"""
+    """Unit tests for get_stats_time_in_labels"""

     def setUp(self):
         self.issues_with_metrics = MagicMock()
@@ -79,13 +79,13 @@ def setUp(self):
         ),
     ]

-    def test_get_average_time_in_labels(self):
-        """Test get_average_time_in_labels"""
+    def test_get_stats_time_in_labels(self):
+        """Test get_stats_time_in_labels"""
         labels = ["bug", "feature"]
-        metrics = get_average_time_in_labels(self.issues_with_metrics, labels)
-        self.assertEqual(len(metrics), 2)
-        self.assertEqual(metrics["bug"], timedelta(days=2))
-        self.assertIsNone(metrics.get("feature"))
+        metrics = get_stats_time_in_labels(self.issues_with_metrics, labels)
+        self.assertEqual(len(metrics['avg']), 2)
+        self.assertEqual(metrics['avg']["bug"], timedelta(days=2))
+        self.assertIsNone(metrics['avg'].get("feature"))


 if __name__ == "__main__":
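For reference, a sketch of the shape get_stats_time_in_labels returns for the test above, where only "bug" carries data (values illustrative; a label with no recorded time maps to None under every stat key):

    {
        'avg': {'bug': timedelta(days=2), 'feature': None},
        'med': {'bug': timedelta(days=2), 'feature': None},
        '90p': {'bug': timedelta(days=2), 'feature': None},
    }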
diff --git a/test_markdown_writer.py b/test_markdown_writer.py
index 11efeef..152233f 100644
--- a/test_markdown_writer.py
+++ b/test_markdown_writer.py
@@ -17,6 +17,7 @@
 class TestWriteToMarkdown(unittest.TestCase):
     """Test the write_to_markdown function."""
+    maxDiff = None

     def test_write_to_markdown(self):
         """Test that write_to_markdown writes the correct markdown file.

@@ -48,20 +49,33 @@ def test_write_to_markdown(self):
                 {"bug": timedelta(days=2)},
             ),
         ]
-        average_time_to_first_response = timedelta(days=2)
-        average_time_to_close = timedelta(days=3)
-        average_time_to_answer = timedelta(days=4)
-        average_time_in_labels = {"bug": "1 day, 12:00:00"}
+        time_to_first_response = {
+            'avg': timedelta(days=2),
+            'med': timedelta(days=2),
+            '90p': timedelta(days=2)}
+        time_to_close = {
+            'avg': timedelta(days=3),
+            'med': timedelta(days=3),
+            '90p': timedelta(days=3)}
+        time_to_answer = {
+            'avg': timedelta(days=4),
+            'med': timedelta(days=4),
+            '90p': timedelta(days=4)}
+        time_in_labels = {
+            'avg': {"bug": "1 day, 12:00:00"},
+            'med': {"bug": "1 day, 12:00:00"},
+            '90p': {"bug": "1 day, 12:00:00"}}
+
         num_issues_opened = 2
         num_issues_closed = 1

         # Call the function
         write_to_markdown(
             issues_with_metrics=issues_with_metrics,
-            average_time_to_first_response=average_time_to_first_response,
-            average_time_to_close=average_time_to_close,
-            average_time_to_answer=average_time_to_answer,
-            average_time_in_labels=average_time_in_labels,
+            average_time_to_first_response=time_to_first_response,
+            average_time_to_close=time_to_close,
+            average_time_to_answer=time_to_answer,
+            average_time_in_labels=time_in_labels,
             num_issues_opened=num_issues_opened,
             num_issues_closed=num_issues_closed,
             labels=["bug"],
@@ -73,12 +87,12 @@ def test_write_to_markdown(self):
             content = file.read()
         expected_content = (
             "# Issue Metrics\n\n"
-            "| Metric | Value |\n"
-            "| --- | ---: |\n"
-            "| Average time to first response | 2 days, 0:00:00 |\n"
-            "| Average time to close | 3 days, 0:00:00 |\n"
-            "| Average time to answer | 4 days, 0:00:00 |\n"
-            "| Average time spent in bug | 1 day, 12:00:00 |\n"
+            "| Metric | Average | Median | 90th percentile |\n"
+            "| --- | --- | --- | ---: |\n"
+            "| Time to first response | 2 days, 0:00:00 | 2 days, 0:00:00 | 2 days, 0:00:00 |\n"
+            "| Time to close | 3 days, 0:00:00 | 3 days, 0:00:00 | 3 days, 0:00:00 |\n"
+            "| Time to answer | 4 days, 0:00:00 | 4 days, 0:00:00 | 4 days, 0:00:00 |\n"
+            "| Time spent in bug | 1 day, 12:00:00 | 1 day, 12:00:00 | 1 day, 12:00:00 |\n"
             "| Number of items that remain open | 2 |\n"
             "| Number of items closed | 1 |\n"
             "| Total number of items created | 2 |\n\n"
@@ -125,10 +139,23 @@ def test_write_to_markdown_with_vertical_bar_in_title(self):
                 {"bug": timedelta(days=2)},
             ),
         ]
-        average_time_to_first_response = timedelta(days=2)
-        average_time_to_close = timedelta(days=3)
-        average_time_to_answer = timedelta(days=4)
-        average_time_in_labels = {"bug": "1 day, 12:00:00"}
+        average_time_to_first_response = {
+            'avg': timedelta(days=2),
+            'med': timedelta(days=2),
+            '90p': timedelta(days=2)}
+        average_time_to_close = {
+            'avg': timedelta(days=3),
+            'med': timedelta(days=3),
+            '90p': timedelta(days=3)}
+        average_time_to_answer = {
+            'avg': timedelta(days=4),
+            'med': timedelta(days=4),
+            '90p': timedelta(days=4)}
+        average_time_in_labels = {
+            'avg': {"bug": "1 day, 12:00:00"},
+            'med': {"bug": "1 day, 12:00:00"},
+            '90p': {"bug": "1 day, 12:00:00"}}
+
         num_issues_opened = 2
         num_issues_closed = 1

@@ -149,12 +176,12 @@ def test_write_to_markdown_with_vertical_bar_in_title(self):
             content = file.read()
         expected_content = (
             "# Issue Metrics\n\n"
-            "| Metric | Value |\n"
-            "| --- | ---: |\n"
-            "| Average time to first response | 2 days, 0:00:00 |\n"
-            "| Average time to close | 3 days, 0:00:00 |\n"
-            "| Average time to answer | 4 days, 0:00:00 |\n"
-            "| Average time spent in bug | 1 day, 12:00:00 |\n"
+            "| Metric | Average | Median | 90th percentile |\n"
+            "| --- | --- | --- | ---: |\n"
+            "| Time to first response | 2 days, 0:00:00 | 2 days, 0:00:00 | 2 days, 0:00:00 |\n"
+            "| Time to close | 3 days, 0:00:00 | 3 days, 0:00:00 | 3 days, 0:00:00 |\n"
+            "| Time to answer | 4 days, 0:00:00 | 4 days, 0:00:00 | 4 days, 0:00:00 |\n"
+            "| Time spent in bug | 1 day, 12:00:00 | 1 day, 12:00:00 | 1 day, 12:00:00 |\n"
             "| Number of items that remain open | 2 |\n"
             "| Number of items closed | 1 |\n"
             "| Total number of items created | 2 |\n\n"
@@ -259,8 +286,8 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self):
             content = file.read()
         expected_content = (
             "# Issue Metrics\n\n"
-            "| Metric | Value |\n"
-            "| --- | ---: |\n"
+            "| Metric | Average | Median | 90th percentile |\n"
+            "| --- | --- | --- | ---: |\n"
             "| Number of items that remain open | 2 |\n"
             "| Number of items closed | 1 |\n"
             "| Total number of items created | 2 |\n\n"
diff --git a/test_time_to_answer.py b/test_time_to_answer.py
index 7097f95..700079d 100644
--- a/test_time_to_answer.py
+++ b/test_time_to_answer.py
@@ -5,16 +5,16 @@
 from typing import List

 from classes import IssueWithMetrics
-from time_to_answer import get_average_time_to_answer, measure_time_to_answer
+from time_to_answer import get_stats_time_to_answer, measure_time_to_answer


 class TestGetAverageTimeToAnswer(unittest.TestCase):
-    """A test case for the get_average_time_to_answer function.
+    """A test case for the get_stats_time_to_answer function.

     This test case includes three test methods:
     - test_returns_none_for_empty_list
     - test_returns_none_for_list_with_no_time_to_answer
-    - test_returns_average_time_to_answer
+    - test_returns_stats_time_to_answer
     """

     def test_returns_none_for_empty_list(self):
@@ -23,7 +23,7 @@
         issues_with_metrics: List[IssueWithMetrics] = []

         # Act
-        result = get_average_time_to_answer(issues_with_metrics)
+        result = get_stats_time_to_answer(issues_with_metrics)

         # Assert
         self.assertIsNone(result)
@@ -40,12 +40,12 @@
         ]

         # Act
-        result = get_average_time_to_answer(issues_with_metrics)
+        result = get_stats_time_to_answer(issues_with_metrics)

         # Assert
         self.assertIsNone(result)

-    def test_returns_average_time_to_answer(self):
+    def test_returns_stats_time_to_answer(self):
         """
         Tests that the function correctly calculates the average time to
         answer for a list of issues with time to answer.
         """
@@ -59,7 +59,7 @@
         ]

         # Act
-        result = get_average_time_to_answer(issues_with_metrics)
+        result = get_stats_time_to_answer(issues_with_metrics)['avg']

         # Assert
         self.assertEqual(result, timedelta(seconds=20))
diff --git a/test_time_to_close.py b/test_time_to_close.py
index d4f1bea..c459615 100644
--- a/test_time_to_close.py
+++ b/test_time_to_close.py
@@ -1,12 +1,12 @@
 """A module containing unit tests for the time_to_close module.

 This module contains unit tests for the measure_time_to_close and
-get_average_time_to_close functions in the time_to_close module.
+get_stats_time_to_close functions in the time_to_close module.
 The tests use mock GitHub issues to test the functions' behavior.

 Classes:
     TestMeasureTimeToClose: A class to test the measure_time_to_close function.
-    TestGetAverageTimeToClose: A class to test the get_average_time_to_close function.
+    TestGetStatsTimeToClose: A class to test the get_stats_time_to_close function.

 """
 from datetime import timedelta
@@ -14,13 +14,13 @@
 from unittest.mock import MagicMock

 from classes import IssueWithMetrics
-from time_to_close import get_average_time_to_close, measure_time_to_close
+from time_to_close import get_stats_time_to_close, measure_time_to_close


 class TestGetAverageTimeToClose(unittest.TestCase):
-    """Test suite for the get_average_time_to_close function."""
+    """Test suite for the get_stats_time_to_close function."""

-    def test_get_average_time_to_close(self):
+    def test_get_stats_time_to_close(self):
         """Test that the function correctly calculates the average time to close."""
         # Create mock data
         issues_with_metrics = [
@@ -44,11 +44,11 @@
         ]

         # Call the function and check the result
-        result = get_average_time_to_close(issues_with_metrics)
+        result = get_stats_time_to_close(issues_with_metrics)['avg']
         expected_result = timedelta(days=3)
         self.assertEqual(result, expected_result)

-    def test_get_average_time_to_close_no_issues(self):
+    def test_get_stats_time_to_close_no_issues(self):
         """Test that the function returns None if there are no issues with time to close."""
         # Create mock data
         issues_with_metrics = [
@@ -64,7 +64,7 @@
         ]

         # Call the function and check the result
-        result = get_average_time_to_close(issues_with_metrics)
+        result = get_stats_time_to_close(issues_with_metrics)
         expected_result = None
         self.assertEqual(result, expected_result)
""" from datetime import timedelta @@ -14,13 +14,13 @@ from unittest.mock import MagicMock from classes import IssueWithMetrics -from time_to_close import get_average_time_to_close, measure_time_to_close +from time_to_close import get_stats_time_to_close, measure_time_to_close class TestGetAverageTimeToClose(unittest.TestCase): - """Test suite for the get_average_time_to_close function.""" + """Test suite for the get_stats_time_to_close function.""" - def test_get_average_time_to_close(self): + def test_get_stats_time_to_close(self): """Test that the function correctly calculates the average time to close.""" # Create mock data issues_with_metrics = [ @@ -44,11 +44,11 @@ def test_get_average_time_to_close(self): ] # Call the function and check the result - result = get_average_time_to_close(issues_with_metrics) + result = get_stats_time_to_close(issues_with_metrics)['avg'] expected_result = timedelta(days=3) self.assertEqual(result, expected_result) - def test_get_average_time_to_close_no_issues(self): + def test_get_stats_time_to_close_no_issues(self): """Test that the function returns None if there are no issues with time to close.""" # Create mock data issues_with_metrics = [ @@ -64,7 +64,7 @@ def test_get_average_time_to_close_no_issues(self): ] # Call the function and check the result - result = get_average_time_to_close(issues_with_metrics) + result = get_stats_time_to_close(issues_with_metrics) expected_result = None self.assertEqual(result, expected_result) diff --git a/test_time_to_first_response.py b/test_time_to_first_response.py index a5132bb..a1a124a 100644 --- a/test_time_to_first_response.py +++ b/test_time_to_first_response.py @@ -1,13 +1,13 @@ """A module containing unit tests for the time_to_first_response module. This module contains unit tests for the measure_time_to_first_response and -get_average_time_to_first_response functions in the time_to_first_response module. +get_stats_time_to_first_response functions in the time_to_first_response module. The tests use mock GitHub issues and comments to test the functions' behavior. Classes: TestMeasureTimeToFirstResponse: A class to test the measure_time_to_first_response function. TestGetAverageTimeToFirstResponse: A class to test the - get_average_time_to_first_response function. + get_stats_time_to_first_response function. """ import unittest @@ -16,7 +16,7 @@ from classes import IssueWithMetrics from time_to_first_response import ( - get_average_time_to_first_response, + get_stats_time_to_first_response, measure_time_to_first_response, ) @@ -311,14 +311,14 @@ def test_measure_time_to_first_response_ignore_bot(self): self.assertEqual(result, expected_result) -class TestGetAverageTimeToFirstResponse(unittest.TestCase): - """Test the get_average_time_to_first_response function.""" +class TestGetStatsTimeToFirstResponse(unittest.TestCase): + """Test the get_stats_time_to_first_response function.""" - def test_get_average_time_to_first_response(self): - """Test that get_average_time_to_first_response calculates the correct average. + def test_get_stats_time_to_first_response(self): + """Test that get_stats_time_to_first_response calculates the correct average. This test creates a list of mock GitHub issues with time to first response - attributes, calls get_average_time_to_first_response with the list, and + attributes, calls get_stats_time_to_first_response with the list, and checks that the function returns the correct average time to first response. 
""" @@ -334,12 +334,12 @@ def test_get_average_time_to_first_response(self): ] # Call the function and check the result - result = get_average_time_to_first_response(issues_with_metrics) + result = get_stats_time_to_first_response(issues_with_metrics)['avg'] expected_result = timedelta(days=1.5) self.assertEqual(result, expected_result) - def test_get_average_time_to_first_response_with_all_none(self): - """Test that get_average_time_to_first_response with all None data.""" + def test_get_stats_time_to_first_response_with_all_none(self): + """Test that get_stats_time_to_first_response with all None data.""" # Create mock data with all None issues_with_metrics = [ @@ -348,6 +348,6 @@ def test_get_average_time_to_first_response_with_all_none(self): ] # Call the function and check the result - result = get_average_time_to_first_response(issues_with_metrics) + result = get_stats_time_to_first_response(issues_with_metrics) expected_result = None self.assertEqual(result, expected_result) diff --git a/time_to_answer.py b/time_to_answer.py index 6d41756..c33b414 100644 --- a/time_to_answer.py +++ b/time_to_answer.py @@ -1,13 +1,13 @@ """A module for measuring the time it takes to answer a GitHub discussion. This module provides functions for measuring the time it takes to answer a GitHub -discussion, as well as calculating the average time to answer for a list of discussions. +discussion, as well as calculating stats describing the time to answer for a list of discussions. Functions: - get_average_time_to_answer( + get_stats_time_to_answer( issues_with_metrics: List[IssueWithMetrics] ) -> Union[timedelta, None]: - Calculate the average time to answer for a list of issues with metrics. + Calculate stats describing the time to answer for a list of issues with metrics. measure_time_to_answer( discussion: dict ) -> Union[timedelta, None]: @@ -17,14 +17,16 @@ from datetime import datetime, timedelta from typing import List, Union +import numpy + from classes import IssueWithMetrics -def get_average_time_to_answer( +def get_stats_time_to_answer( issues_with_metrics: List[IssueWithMetrics], ) -> Union[timedelta, None]: """ - Calculate the average time to answer for a list of issues. + Calculate stats describing the time to answer for a list of issues. 
""" # Filter out issues with no time to answer issues_with_time_to_answer = [ @@ -32,23 +34,29 @@ def get_average_time_to_answer( ] # Calculate the total time to answer for all issues - total_time_to_answer = None + answer_times = [] if issues_with_time_to_answer: - total_time_to_answer = 0 for issue in issues_with_time_to_answer: if issue.time_to_answer: - total_time_to_answer += issue.time_to_answer.total_seconds() + answer_times.append(issue.time_to_answer.total_seconds()) - # Calculate the average time to answer + # Calculate stats describing time to answer num_issues_with_time_to_answer = len(issues_with_time_to_answer) - if num_issues_with_time_to_answer > 0 and total_time_to_answer is not None: - average_time_to_answer = total_time_to_answer / num_issues_with_time_to_answer + if num_issues_with_time_to_answer > 0: + average_time_to_answer = numpy.average(answer_times) + med_time_to_answer = numpy.median(answer_times) + ninety_percentile_time_to_answer = numpy.percentile(answer_times, 90, axis=0) else: return None + stats = { + 'avg': timedelta(seconds=average_time_to_answer), + 'med': timedelta(seconds=med_time_to_answer), + '90p': timedelta(seconds=ninety_percentile_time_to_answer)} + # Print the average time to answer converting seconds to a readable time format print(f"Average time to answer: {timedelta(seconds=average_time_to_answer)}") - return timedelta(seconds=average_time_to_answer) + return stats def measure_time_to_answer(discussion: dict) -> Union[timedelta, None]: diff --git a/time_to_close.py b/time_to_close.py index c8c6c38..aead394 100644 --- a/time_to_close.py +++ b/time_to_close.py @@ -1,7 +1,7 @@ """A module for measuring the time it takes to close a GitHub issue or discussion. This module provides functions for measuring the time it takes to close a GitHub issue -or discussion, as well as calculating the average time to close for a list of issues. +or discussion, as well as calculating stats describing the time to close for a list of issues. Functions: measure_time_to_close( @@ -9,16 +9,17 @@ discussion: Union[dict, None] ) -> Union[timedelta, None]: Measure the time it takes to close an issue or discussion. - get_average_time_to_close( + get_stats_time_to_close( issues_with_metrics: List[IssueWithMetrics] ) -> Union[timedelta, None]: - Calculate the average time to close for a list of issues with metrics. + Calculate stats describing the time to close for a list of issues with metrics. """ from datetime import datetime, timedelta from typing import List, Union import github3 +import numpy from classes import IssueWithMetrics @@ -54,17 +55,17 @@ def measure_time_to_close( return None -def get_average_time_to_close( +def get_stats_time_to_close( issues_with_metrics: List[IssueWithMetrics], ) -> Union[timedelta, None]: - """Calculate the average time to close for a list of issues. + """Calculate stats describing the time to close for a list of issues. Args: issues_with_metrics (List[IssueWithMetrics]): A list of issues with metrics. Each issue should be a issue_with_metrics tuple. Returns: - Union[float, None]: The average time to close for the issues. + Union[Dict{string: float}, None]: Stats describing the time to close for the issues. 
""" # Filter out issues with no time to close @@ -73,20 +74,27 @@ def get_average_time_to_close( ] # Calculate the total time to close for all issues - total_time_to_close = None + close_times = [] if issues_with_time_to_close: total_time_to_close = 0 for issue in issues_with_time_to_close: if issue.time_to_close: - total_time_to_close += issue.time_to_close.total_seconds() + close_times.append(issue.time_to_close.total_seconds()) - # Calculate the average time to close + # Calculate stats describing time to close num_issues_with_time_to_close = len(issues_with_time_to_close) if num_issues_with_time_to_close > 0 and total_time_to_close is not None: - average_time_to_close = total_time_to_close / num_issues_with_time_to_close + average_time_to_close = numpy.average(close_times) + med_time_to_close = numpy.median(close_times) + ninety_percentile_time_to_close = numpy.percentile(close_times, 90, axis=0) else: return None + stats = { + 'avg': timedelta(seconds=average_time_to_close), + 'med': timedelta(seconds=med_time_to_close), + '90p': timedelta(seconds=ninety_percentile_time_to_close)} + # Print the average time to close converting seconds to a readable time format - print(f"Average time to close: {timedelta(seconds=average_time_to_close)}") - return timedelta(seconds=average_time_to_close) + print(f"Time to close: {timedelta(seconds=average_time_to_close)}") + return stats diff --git a/time_to_first_response.py b/time_to_first_response.py index ab84975..e764be5 100644 --- a/time_to_first_response.py +++ b/time_to_first_response.py @@ -11,16 +11,17 @@ pull_request: Union[github3.pulls.PullRequest, None], ) -> Union[timedelta, None]: Measure the time to first response for a single issue or a discussion. - get_average_time_to_first_response( + get_stats_time_to_first_response( issues: List[IssueWithMetrics] ) -> Union[timedelta, None]: - Calculate the average time to first response for a list of issues with metrics. + Calculate stats describing time to first response for a list of issues with metrics. """ from datetime import datetime, timedelta from typing import List, Union import github3 +import numpy from classes import IssueWithMetrics @@ -121,36 +122,41 @@ def ignore_comment( or (ready_for_review_at and comment_created_at < ready_for_review_at)) -def get_average_time_to_first_response( +def get_stats_time_to_first_response( issues: List[IssueWithMetrics], ) -> Union[timedelta, None]: - """Calculate the average time to first response for a list of issues. + """Calculate the stats describing time to first response for a list of issues. Args: issues (List[IssueWithMetrics]): A list of GitHub issues with metrics attached. Returns: - datetime.timedelta: The average time to first response for the issues in seconds. + Union[Dict{String: datetime.timedelta}, None]: The stats describing time to first response for the issues in seconds. 
""" - total_time_to_first_response = 0 + response_times = [] none_count = 0 for issue in issues: if issue.time_to_first_response: - total_time_to_first_response += issue.time_to_first_response.total_seconds() + response_times.append(issue.time_to_first_response.total_seconds()) else: none_count += 1 if len(issues) - none_count <= 0: return None - average_seconds_to_first_response = total_time_to_first_response / ( - len(issues) - none_count - ) # type: ignore + average_seconds_to_first_response = numpy.average(response_times) + med_seconds_to_first_response = numpy.median(response_times) + ninety_percentile_seconds_to_first_response = numpy.percentile(response_times, 90, axis=0) + + stats = { + 'avg': timedelta(seconds=average_seconds_to_first_response), + 'med': timedelta(seconds=med_seconds_to_first_response), + '90p': timedelta(seconds=ninety_percentile_seconds_to_first_response)} # Print the average time to first response converting seconds to a readable time format print( f"Average time to first response: {timedelta(seconds=average_seconds_to_first_response)}" ) - return timedelta(seconds=average_seconds_to_first_response) + return stats