diff --git a/.github/scripts/env_vars_check.sh b/.github/scripts/env_vars_check.sh
new file mode 100755
index 0000000..ecee7be
--- /dev/null
+++ b/.github/scripts/env_vars_check.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Find all test_*.py files
+files=$(find . -name "test_*.py")
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+NC='\033[0m' # No Color
+
+# Loop through each file
+for file in $files; do
+    # Search for instances of get_env_vars() with no arguments
+    result=$(grep -n "get_env_vars()" "$file")
+
+    # If any instances are found, print the file name and line number
+    if [ -n "$result" ]; then
+        echo "Found in $file:"
+        echo "$result"
+        echo -e "${RED}ERROR: get_env_vars() should always set test=True in test*.py files.${NC}"
+        exit 1
+    fi
+done
+echo -e " ${GREEN}PASS:${NC} All test*.py files call get_env_vars() with test=True."
diff --git a/Makefile b/Makefile
index 58d7479..c94fa87 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,7 @@
 .PHONY: test
 test:
 	pytest -v --cov=. --cov-config=.coveragerc --cov-fail-under=80 --cov-report term-missing
+	.github/scripts/env_vars_check.sh
 
 .PHONY: clean
 clean:
diff --git a/issue_metrics.py b/issue_metrics.py
index f5c750a..15f58ee 100644
--- a/issue_metrics.py
+++ b/issue_metrics.py
@@ -26,7 +26,7 @@
 import github3
 
 from classes import IssueWithMetrics
-from config import get_env_vars
+from config import EnvVars, get_env_vars
 from discussions import get_discussions
 from json_writer import write_to_json
 from labels import get_label_metrics, get_stats_time_in_labels
@@ -122,6 +122,7 @@ def auth_to_github(
 
 def get_per_issue_metrics(
     issues: Union[List[dict], List[github3.search.IssueSearchResult]],  # type: ignore
+    env_vars: EnvVars,
     discussions: bool = False,
     labels: Union[List[str], None] = None,
     ignore_users: Union[List[str], None] = None,
@@ -138,6 +139,7 @@
             Defaults to False.
         labels (List[str]): A list of labels to measure time spent in. Defaults to empty list.
         ignore_users (List[str]): A list of users to ignore when calculating metrics.
+        env_vars (EnvVars): The environment variables for the script.
 
     Returns:
         tuple[List[IssueWithMetrics], int, int]: A tuple containing a
@@ -161,24 +163,30 @@
                 None,
                 None,
             )
-            issue_with_metrics.time_to_first_response = measure_time_to_first_response(
-                None, issue, ignore_users
-            )
-            issue_with_metrics.mentor_activity = count_comments_per_user(
-                None,
-                issue,
-                ignore_users,
-                None,
-                None,
-                max_comments_to_eval,
-                heavily_involved,
-            )
-            issue_with_metrics.time_to_answer = measure_time_to_answer(issue)
-            if issue["closedAt"]:
-                issue_with_metrics.time_to_close = measure_time_to_close(None, issue)
-                num_issues_closed += 1
-            else:
-                num_issues_open += 1
+            if env_vars.hide_time_to_first_response is False:
+                issue_with_metrics.time_to_first_response = (
+                    measure_time_to_first_response(None, issue, ignore_users)
+                )
+            if env_vars.enable_mentor_count:
+                issue_with_metrics.mentor_activity = count_comments_per_user(
+                    None,
+                    issue,
+                    ignore_users,
+                    None,
+                    None,
+                    max_comments_to_eval,
+                    heavily_involved,
+                )
+            if env_vars.hide_time_to_answer is False:
+                issue_with_metrics.time_to_answer = measure_time_to_answer(issue)
+            if env_vars.hide_time_to_close is False:
+                if issue["closedAt"]:
+                    issue_with_metrics.time_to_close = measure_time_to_close(
+                        None, issue
+                    )
+                    num_issues_closed += 1
+                else:
+                    num_issues_open += 1
         else:
             issue_with_metrics = IssueWithMetrics(
                 issue.title,  # type: ignore
@@ -196,32 +204,37 @@
 
             pull_request = issue.issue.pull_request()  # type: ignore
             ready_for_review_at = get_time_to_ready_for_review(issue, pull_request)
-            issue_with_metrics.time_to_first_response = measure_time_to_first_response(
-                issue, None, pull_request, ready_for_review_at, ignore_users
-            )
-            issue_with_metrics.mentor_activity = count_comments_per_user(
-                issue,
-                None,
-                pull_request,
-                ready_for_review_at,
-                ignore_users,
-                max_comments_to_eval,
-                heavily_involved,
-            )
-            if labels:
-                issue_with_metrics.label_metrics = get_label_metrics(issue, labels)
-            if issue.state == "closed":  # type: ignore
-                if pull_request:
-                    issue_with_metrics.time_to_close = measure_time_to_merge(
-                        pull_request, ready_for_review_at
+            if env_vars.hide_time_to_first_response is False:
+                issue_with_metrics.time_to_first_response = (
+                    measure_time_to_first_response(
+                        issue, None, pull_request, ready_for_review_at, ignore_users
                     )
-                else:
-                    issue_with_metrics.time_to_close = measure_time_to_close(
-                        issue, None
-                    )
-                num_issues_closed += 1
-            elif issue.state == "open":  # type: ignore
-                num_issues_open += 1
+                )
+            if env_vars.enable_mentor_count:
+                issue_with_metrics.mentor_activity = count_comments_per_user(
+                    issue,
+                    None,
+                    pull_request,
+                    ready_for_review_at,
+                    ignore_users,
+                    max_comments_to_eval,
+                    heavily_involved,
+                )
+            if labels and env_vars.hide_label_metrics is False:
+                issue_with_metrics.label_metrics = get_label_metrics(issue, labels)
+            if env_vars.hide_time_to_close is False:
+                if issue.state == "closed":  # type: ignore
+                    if pull_request:
+                        issue_with_metrics.time_to_close = measure_time_to_merge(
+                            pull_request, ready_for_review_at
+                        )
+                    else:
+                        issue_with_metrics.time_to_close = measure_time_to_close(
+                            issue, None
+                        )
+                    num_issues_closed += 1
+                elif issue.state == "open":  # type: ignore
+                    num_issues_open += 1
 
         issues_with_metrics.append(issue_with_metrics)
 
     return issues_with_metrics, num_issues_open, num_issues_closed
@@ -324,6 +337,7 @@ def main():
         ignore_users=ignore_users,
         max_comments_to_eval=max_comments_eval,
         heavily_involved=heavily_involved_cutoff,
+        env_vars=env_vars,
     )
 
     stats_time_to_first_response = get_stats_time_to_first_response(issues_with_metrics)
diff --git a/test_issue_metrics.py b/test_issue_metrics.py
index d192157..582dc2e 100644
--- a/test_issue_metrics.py
+++ b/test_issue_metrics.py
@@ -108,8 +108,8 @@ def test_get_env_vars(self):
         """Test that the function correctly retrieves the environment variables."""
 
         # Call the function and check the result
-        search_query = get_env_vars().search_query
-        gh_token = get_env_vars().gh_token
+        search_query = get_env_vars(test=True).search_query
+        gh_token = get_env_vars(test=True).gh_token
         gh_token_expected_result = "test_token"
         search_query_expected_result = "is:issue is:open repo:user/repo"
         self.assertEqual(gh_token, gh_token_expected_result)
@@ -238,6 +238,10 @@ def test_main_no_issues_found(
 class TestGetPerIssueMetrics(unittest.TestCase):
     """Test suite for the get_per_issue_metrics function."""
 
+    @patch.dict(
+        os.environ,
+        {"GH_TOKEN": "test_token", "SEARCH_QUERY": "is:issue is:open repo:user/repo"},
+    )
     def test_get_per_issue_metrics(self):
         """Test that the function correctly calculates the metrics for a list of GitHub issues."""
         # Create mock data
@@ -286,7 +290,7 @@ def test_get_per_issue_metrics(self):
             result_issues_with_metrics,
             result_num_issues_open,
             result_num_issues_closed,
-        ) = get_per_issue_metrics(issues)
+        ) = get_per_issue_metrics(issues, env_vars=get_env_vars(test=True))
         expected_issues_with_metrics = [
             IssueWithMetrics(
                 "Issue 1",
@@ -364,6 +368,10 @@ def setUp(self):
             "closedAt": "2023-01-07T00:00:00Z",
         }
 
+    @patch.dict(
+        os.environ,
+        {"GH_TOKEN": "test_token", "SEARCH_QUERY": "is:issue is:open repo:user/repo"},
+    )
     def test_get_per_issue_metrics_with_discussion(self):
         """
         Test that the function correctly calculates
@@ -371,7 +379,9 @@
         the metrics for a list of GitHub issues with discussions.
         """
         issues = [self.issue1, self.issue2]
-        metrics = get_per_issue_metrics(issues, discussions=True)
+        metrics = get_per_issue_metrics(
+            issues, discussions=True, env_vars=get_env_vars(test=True)
+        )
 
         # get_per_issue_metrics returns a tuple of
         # (issues_with_metrics, num_issues_open, num_issues_closed)
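
Note (not part of the patch): taken together, these changes thread the parsed environment configuration through metric collection explicitly instead of having each helper read it implicitly. The minimal sketch below shows how a caller is expected to exercise the new signature. It assumes config.get_env_vars and issue_metrics.get_per_issue_metrics are importable as in this repository; the two environment variables mirror the values the tests patch in, and the flag names come from the diff above.

    import os

    from config import get_env_vars
    from issue_metrics import get_per_issue_metrics

    # get_env_vars(test=True) is the form the new env_vars_check.sh guard
    # requires in every test_*.py file; `make test` now runs that guard
    # after pytest.
    os.environ["GH_TOKEN"] = "test_token"
    os.environ["SEARCH_QUERY"] = "is:issue is:open repo:user/repo"
    env_vars = get_env_vars(test=True)

    # Flags on env_vars (hide_time_to_first_response, enable_mentor_count,
    # hide_time_to_answer, hide_time_to_close, hide_label_metrics) now gate
    # which metrics get_per_issue_metrics computes for each issue.
    issues_with_metrics, num_open, num_closed = get_per_issue_metrics(
        [],  # e.g. discussion dicts or github3 IssueSearchResult objects
        discussions=True,
        env_vars=env_vars,
    )

Passing env_vars as an explicit parameter, rather than calling get_env_vars() inside get_per_issue_metrics, keeps the function testable with patched environments, which is why every call site in test_issue_metrics.py now supplies env_vars=get_env_vars(test=True).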