Adds mentor counting to json output and adds missing config.
This adds mentor counting output to the JSON format. In addition, this change makes
the maximum number of comments to evaluate configurable, as well as the cutoff for
heavily involved mentors.
MaineC committed Mar 28, 2024
1 parent f041169 commit 0837e49
Showing 7 changed files with 60 additions and 24 deletions.
2 changes: 2 additions & 0 deletions README.md
@@ -143,6 +143,8 @@ This action can be configured to authenticate with GitHub App Installation or Pe
| `IGNORE_USERS` | False | False | A comma separated list of users to ignore when calculating metrics. (ie. `IGNORE_USERS: 'user1,user2'`). To ignore bots, append `[bot]` to the user (ie. `IGNORE_USERS: 'github-actions[bot]'`) |
| `ENABLE_MENTOR_COUNT` | False | False | If set to 'TRUE', count the number of comments users left on discussions, issues, and PRs, and display the number of active mentors |
| `MIN_MENTOR_COMMENTS` | False | 10 | Minimum number of comments to count as a mentor |
| `MAX_COMMENTS_EVAL` | False | 20 | Maximum number of comments per thread to evaluate for mentor stats |
| `HEAVILY_INVOLVED_CUTOFF` | False | 3 | Cutoff after which a mentor's comments in one issue are no longer counted toward their total score |
| `LABELS_TO_MEASURE` | False | `""` | A comma separated list of labels to measure how much time the label is applied. If not provided, no labels durations will be measured. Not compatible with discussions at this time. |
| `SEARCH_QUERY` | True | `""` | The query by which you can filter issues/PRs which must contain a `repo:`, `org:`, `owner:`, or a `user:` entry. For discussions, include `type:discussions` in the query. |

23 changes: 11 additions & 12 deletions config.py
@@ -36,6 +36,8 @@ class EnvVars:
labels_to_measure (List[str]): List of labels to measure how much time the label is applied
enable_mentor_count (str): If set to TRUE, compute number of mentors
min_mentor_comments (str): If set, defines the minimum number of comments for mentors
max_comments_eval (str): If set, defines the maximum number of comments to look at for mentor evaluation
heavily_involved_cutoff (str): If set, defines the cutoff after which a heavily involved commenter's comments in a single issue no longer count toward their total score
search_query (str): Search query used to filter issues/prs/discussions on GitHub
"""

@@ -58,6 +60,7 @@ def __init__(
self.gh_app_id = gh_app_id
self.gh_app_installation_id = gh_app_installation_id
self.gh_app_private_key_bytes = gh_app_private_key_bytes
self.search_query = search_query
self.gh_token = gh_token
self.ghe = ghe
self.ignore_users = ignore_user
@@ -69,6 +72,8 @@ def __init__(
self.hide_time_to_first_response = hide_time_to_first_response
self.enable_mentor_count = enable_mentor_count
self.min_mentor_comments = min_mentor_comments
self.max_comments_eval = max_comments_eval
self.heavily_involved_cutoff = heavily_involved_cutoff
self.search_query = search_query

def __repr__(self):
@@ -88,6 +93,8 @@ def __repr__(self):
f"{self.labels_to_measure},"
f"{self.enable_mentor_count},"
f"{self.min_mentor_comments},"
f"{self.max_comments_eval},"
f"{self.heavily_involved_cutoff},"
f"{self.search_query})"
)

@@ -166,21 +173,15 @@ def get_env_vars(test: bool = False) -> EnvVars:
ignore_users_list = ignore_users.split(",")

# Hidden columns
hide_author = get_bool_env_var("HIDE_AUTHOR")
hide_label_metrics = get_bool_env_var("HIDE_LABEL_METRICS")
hide_time_to_answer = get_bool_env_var("HIDE_TIME_TO_ANSWER")
hide_time_to_close = get_bool_env_var("HIDE_TIME_TO_CLOSE")
hide_time_to_first_response = get_bool_env_var("HIDE_TIME_TO_FIRST_RESPONSE")
enable_mentor_count = os.getenv("ENABLE_MENTOR_COUNT", "FALSE")
min_mentor_comments = os.getenv("MIN_MENTOR_COMMENTS", "10")
max_comments_eval = os.getenv("MAX_COMMENTS_EVAL", "20")
heavily_involved_cutoff = os.getenv("HEAVILY_INVOLVED_CUTOFF", "3")

return EnvVars(
gh_app_id,
@@ -191,15 +192,13 @@ def get_env_vars(test: bool = False) -> EnvVars:
hide_author,
hide_label_metrics,
hide_time_to_answer,
hide_time_to_close,
hide_time_to_first_response,
ignore_users_list,
labels_to_measure_list,
enable_mentor_count,
min_mentor_comments,
max_comments_eval,
heavily_involved_cutoff,
search_query,
)
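
For illustration, the new settings are kept as strings on `EnvVars` and only converted to integers where they are consumed in `issue_metrics.main()`. A minimal standalone sketch of that read-then-convert pattern (the environment values below are illustrative, not defaults required by the action):

```python
import os

# Illustrative values; in the action these come from the workflow environment.
os.environ["ENABLE_MENTOR_COUNT"] = "TRUE"
os.environ["MIN_MENTOR_COMMENTS"] = "12"

# Read as strings, falling back to the same defaults used in config.py.
enable_mentor_count = os.getenv("ENABLE_MENTOR_COUNT", "FALSE")
min_mentor_comments = os.getenv("MIN_MENTOR_COMMENTS", "10")
max_comments_eval = os.getenv("MAX_COMMENTS_EVAL", "20")
heavily_involved_cutoff = os.getenv("HEAVILY_INVOLVED_CUTOFF", "3")

# Numeric settings are converted where they are used, as main() does.
print(enable_mentor_count == "TRUE")   # True
print(int(min_mentor_comments))        # 12
print(int(max_comments_eval))          # 20
print(int(heavily_involved_cutoff))    # 3
```
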
18 changes: 12 additions & 6 deletions issue_metrics.py
@@ -130,6 +130,8 @@ def get_per_issue_metrics(
discussions: bool = False,
labels: Union[List[str], None] = None,
ignore_users: Union[List[str], None] = None,
max_comments_to_eval: int = 20,
heavily_involved: int = 3
) -> tuple[List, int, int]:
"""
Calculate the metrics for each issue/pr/discussion in a list provided.
@@ -168,8 +170,8 @@ def get_per_issue_metrics(
None, issue, ignore_users
)
issue_with_metrics.mentor_activity = count_comments_per_user(
None, issue, ignore_users
# TODO review arguments max_comments_to_eval, heavily_involved
None, issue, ignore_users, None, None,
max_comments_to_eval, heavily_involved
)
issue_with_metrics.time_to_answer = measure_time_to_answer(issue)
if issue["closedAt"]:
@@ -198,9 +200,9 @@ def get_per_issue_metrics(
issue, None, pull_request, ready_for_review_at, ignore_users
)
issue_with_metrics.mentor_activity = count_comments_per_user(
issue, None, pull_request, ready_for_review_at, ignore_users
issue, None, pull_request, ready_for_review_at, ignore_users,
max_comments_to_eval, heavily_involved
)
# TODO review arguments max_comments_to_eval, heavily_involved
if labels:
issue_with_metrics.label_metrics = get_label_metrics(issue, labels)
if issue.state == "closed": # type: ignore
@@ -263,7 +265,6 @@ def main():
search_query = env_vars.search_query
token = env_vars.gh_token
ignore_users = env_vars.ignore_users
enable_mentor_count = env_vars.enable_mentor_count

# Auth to GitHub.com
github_connection = auth_to_github(
@@ -273,7 +274,10 @@
token,
env_vars.ghe,
)
enable_mentor_count = env_vars.enable_mentor_count
min_mentor_count = int(env_vars.min_mentor_comments)
max_comments_eval = int(env_vars.max_comments_eval)
heavily_involved_cutoff = int(env_vars.heavily_involved_cutoff)

# Get the repository owner and name from the search query
owner = get_owner(search_query)
@@ -313,6 +317,8 @@ def main():
discussions="type:discussions" in search_query,
labels=labels,
ignore_users=ignore_users,
max_comments_to_eval=max_comments_eval,
heavily_involved=heavily_involved_cutoff,
)

stats_time_to_first_response = get_stats_time_to_first_response(issues_with_metrics)
@@ -324,7 +330,7 @@

num_mentor_count = 0
if enable_mentor_count == "TRUE":
num_mentor_count = get_mentor_count(issues_with_metrics, min_mentor_comments)
num_mentor_count = get_mentor_count(issues_with_metrics, min_mentor_count)

# Get stats describing the time in label for each label and store it in a dictionary
# where the key is the label and the value is the average time
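
For context, the README above describes `MAX_COMMENTS_EVAL` as a per-thread cap on how many comments are inspected, and `HEAVILY_INVOLVED_CUTOFF` as the point after which one user's comments in a single issue stop counting. A self-contained sketch of one plausible reading of that rule (the function name and exact rule are hypothetical; the real logic lives in `count_comments_per_user`, which this diff only shows in part):

```python
from collections import Counter
from typing import Iterable


def capped_comment_counts(
    comment_authors: Iterable[str],
    max_comments_eval: int = 20,
    heavily_involved_cutoff: int = 3,
) -> Counter:
    """Count comments per author for one thread, honoring both limits.

    Only the first max_comments_eval comments are inspected, and a single
    author is credited with at most heavily_involved_cutoff comments, so one
    very active participant does not dominate the mentor statistics.
    """
    counts: Counter = Counter()
    for author in list(comment_authors)[:max_comments_eval]:
        if counts[author] < heavily_involved_cutoff:
            counts[author] += 1
    return counts


# "alice" wrote five of the first six comments but is only credited three.
print(capped_comment_counts(["alice", "alice", "bob", "alice", "alice", "alice"]))
# Counter({'alice': 3, 'bob': 1})
```
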
3 changes: 3 additions & 0 deletions json_writer.py
@@ -30,6 +30,7 @@ def write_to_json(
stats_time_in_labels: Union[dict[str, dict[str, timedelta]], None],
num_issues_opened: Union[int, None],
num_issues_closed: Union[int, None],
num_mentor_count: Union[int, None],
search_query: str,
) -> str:
"""
@@ -42,6 +43,7 @@
"average_time_to_answer": "1 day, 0:00:00",
"num_items_opened": 2,
"num_items_closed": 1,
"num_mentor_count": 5,
"total_item_count": 2,
"issues": [
{
@@ -129,6 +131,7 @@ def write_to_json(
"90_percentile_time_in_labels": p90_time_in_labels,
"num_items_opened": num_issues_opened,
"num_items_closed": num_issues_closed,
"num_mentor_count": num_mentor_count,
"total_item_count": len(issues_with_metrics),
}

22 changes: 20 additions & 2 deletions most_active_mentors.py
@@ -42,6 +42,7 @@

from classes import IssueWithMetrics


def count_comments_per_user(
issue: Union[github3.issues.Issue, None], # type: ignore
discussion: Union[dict, None] = None,
@@ -112,6 +113,23 @@ def count_comments_per_user(
else:
mentor_count[review_comment.user.login] = 1

if discussion and len(discussion["comments"]["nodes"]) > 0:
for comment in discussion["comments"]["nodes"]:
if ignore_comment(
comment.user,
comment.user,
ignore_users,
comment.submitted_at,
comment.ready_for_review_at
):
continue

# increase the number of comments left by current user by 1
if comment.user.login in mentor_count:
mentor_count[comment.user.login] += 1
else:
mentor_count[comment.user.login] = 1

return mentor_count


@@ -153,8 +171,8 @@ def get_mentor_count(
"""

mentor_count = Counter({})
for issueWithMetrics in issues_with_metrics:
current_counter = Counter(issueWithMetrics.mentor_activity)
for issue_with_metrics in issues_with_metrics:
current_counter = Counter(issue_with_metrics.mentor_activity)
mentor_count = mentor_count + current_counter

active_mentor_count = 0
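
The `get_mentor_count` excerpt above sums per-issue activity via `Counter` addition and then counts users who clear the `MIN_MENTOR_COMMENTS` bar. A small usage sketch of that aggregation (the data values are illustrative, and whether the threshold comparison is strict is not visible in this excerpt):

```python
from collections import Counter

# Per-issue mentor activity, e.g. as stored on IssueWithMetrics.mentor_activity.
per_issue_activity = [
    {"alice": 6, "bob": 2},
    {"alice": 5, "carol": 9},
]

total = Counter()
for activity in per_issue_activity:
    total = total + Counter(activity)

min_mentor_comments = 10
active_mentor_count = sum(
    1 for count in total.values() if count >= min_mentor_comments
)
print(total)                # Counter({'alice': 11, 'carol': 9, 'bob': 2})
print(active_mentor_count)  # 1
```
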
6 changes: 6 additions & 0 deletions test_json_writer.py
@@ -61,6 +61,7 @@ def test_write_to_json(self):
}
num_issues_opened = 2
num_issues_closed = 1
num_mentor_count = 5

expected_output = {
"average_time_to_first_response": "2 days, 12:00:00",
@@ -77,6 +78,7 @@ def test_write_to_json(self):
"90_percentile_time_in_labels": {"bug": "1 day, 16:24:12"},
"num_items_opened": 2,
"num_items_closed": 1,
"num_mentor_count": 5,
"total_item_count": 2,
"issues": [
{
@@ -111,6 +113,7 @@ def test_write_to_json(self):
stats_time_in_labels=stats_time_in_labels,
num_issues_opened=num_issues_opened,
num_issues_closed=num_issues_closed,
num_mentor_count=num_mentor_count,
search_query="is:issue repo:owner/repo",
),
json.dumps(expected_output),
@@ -149,6 +152,7 @@ def test_write_to_json_with_no_response(self):
}
num_issues_opened = 2
num_issues_closed = 0
num_mentor_count = 5

expected_output = {
"average_time_to_first_response": "None",
@@ -165,6 +169,7 @@ def test_write_to_json_with_no_response(self):
"90_percentile_time_in_labels": {},
"num_items_opened": 2,
"num_items_closed": 0,
"num_mentor_count": 5,
"total_item_count": 2,
"issues": [
{
@@ -199,6 +204,7 @@ def test_write_to_json_with_no_response(self):
stats_time_in_labels=stats_time_in_labels,
num_issues_opened=num_issues_opened,
num_issues_closed=num_issues_closed,
num_mentor_count=num_mentor_count,
search_query="is:issue repo:owner/repo",
),
json.dumps(expected_output),
10 changes: 6 additions & 4 deletions test_most_active_mentors.py
@@ -61,10 +61,12 @@ def test_get_mentor_count(self):

# Create mock data
issues_with_metrics = [
IssueWithMetrics("Issue 1", "https://github.com/user/repo/issues/1",
"alice", None, mentor_activity=mentor_activity),
IssueWithMetrics("Issue 2", "https://github.com/user/repo/issues/2",
"bob", None, mentor_activity=mentor_activity),
IssueWithMetrics(
"Issue 1", "https://github.com/user/repo/issues/1",
"alice", None, mentor_activity=mentor_activity),
IssueWithMetrics(
"Issue 2", "https://github.com/user/repo/issues/2",
"bob", None, mentor_activity=mentor_activity),
]

# Call the function and check the result
Expand Down
