From eb98ac51058b593311088c451c727f4bbe77eed1 Mon Sep 17 00:00:00 2001
From: Isabel Drost-Fromm
Date: Thu, 11 Apr 2024 07:02:44 +0200
Subject: [PATCH] First stab at figuring out the number of very active mentors
 (#206)

* First stab at figuring out the number of very active mentors in a project.

* Fix format and linter errors.

* Add flake to requirements.

* Adds call to mentor counter and output in markdown.

This adds the call to the mentor counter and displays the results in markdown,
including first tests for this functionality.

* Make mentor counting configurable.

This adds two configuration options: one to enable mentor counting and one to
configure how many comments a user needs to leave on discussions, PRs, and
issues to be counted as an active mentor.

* Adds mentor counting to json output and adds missing config.

This adds mentor counting output to json format. In addition, this change
makes the maximum number of comments to evaluate configurable, as well as the
cutoff for heavily involved mentors.

* Fix merge conflicts.

* Fix linting errors.

* fix: linting fixes

Signed-off-by: Zack Koppert

* 8 is reasonable number of attrs

Signed-off-by: Zack Koppert

* Update test_most_active_mentors.py

Co-authored-by: Jason Meridth

* Update config.py

Co-authored-by: Jason Meridth

* Update config.py

Remove merge residual

* Update requirements.txt

Remove lib only needed for testing.

* Update issue_metrics.py

Co-authored-by: Jason Meridth

* Update config.py

* Update config.py

Set type of `enable_mentor_count` to `bool`.

* Update test_config.py

Change tests to handle the boolean change of `enable_mentor_count`.

---------

Signed-off-by: Zack Koppert
Co-authored-by: Drost-Fromm
Co-authored-by: Zack Koppert
Co-authored-by: Jason Meridth
Co-authored-by: Jason Meridth
---
 README.md                   |   4 +
 classes.py                  |   5 +
 config.py                   |  24 +++++
 issue_metrics.py            |  40 +++++++-
 json_writer.py              |   3 +
 markdown_writer.py          |   8 +-
 most_active_mentors.py      | 179 ++++++++++++++++++++++++++++++++++++
 test_config.py              |  12 +++
 test_issue_metrics.py       |   4 +-
 test_json_writer.py         |   6 ++
 test_markdown_writer.py     |  11 ++-
 test_most_active_mentors.py |  82 +++++++++++++++++
 12 files changed, 370 insertions(+), 8 deletions(-)
 create mode 100755 most_active_mentors.py
 create mode 100755 test_most_active_mentors.py

diff --git a/README.md b/README.md
index 028d0e7..801ea85 100644
--- a/README.md
+++ b/README.md
@@ -145,6 +145,10 @@ This action can be configured to authenticate with GitHub App Installation or Personal Access Token (PAT)
 | `HIDE_TIME_TO_CLOSE` | False | False | If set to `true`, the time to close will not be displayed in the generated Markdown file. |
 | `HIDE_TIME_TO_FIRST_RESPONSE` | False | False | If set to `true`, the time to first response will not be displayed in the generated Markdown file. |
 | `IGNORE_USERS` | False | False | A comma separated list of users to ignore when calculating metrics. (ie. `IGNORE_USERS: 'user1,user2'`). To ignore bots, append `[bot]` to the user (ie. `IGNORE_USERS: 'github-actions[bot]'`) |
+| `ENABLE_MENTOR_COUNT` | False | False | If set to `true`, counts the number of comments users left on discussions, issues, and PRs and displays the number of active mentors |
+| `MIN_MENTOR_COMMENTS` | False | 10 | Minimum number of comments a user must leave to count as a mentor |
+| `MAX_COMMENTS_EVAL` | False | 20 | Maximum number of comments per thread to evaluate for mentor stats |
+| `HEAVILY_INVOLVED_CUTOFF` | False | 3 | Cutoff after which a mentor's comments in one issue are no longer counted against their total score |
 | `LABELS_TO_MEASURE` | False | `""` | A comma separated list of labels to measure how much time the label is applied. If not provided, no labels durations will be measured. Not compatible with discussions at this time. |
 | `SEARCH_QUERY` | True | `""` | The query by which you can filter issues/PRs which must contain a `repo:`, `org:`, `owner:`, or a `user:` entry. For discussions, include `type:discussions` in the query. |
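The three numeric options interact: `MAX_COMMENTS_EVAL` bounds how many comments are read per thread, `HEAVILY_INVOLVED_CUTOFF` caps how many of those may come from a single user, and `MIN_MENTOR_COMMENTS` is the total a user must reach across threads. A minimal sketch of that arithmetic with the defaults, assuming the per-thread capping that `count_comments_per_user` applies (data hypothetical):

```python
from collections import Counter

# Hypothetical per-thread counts for one user, already capped at
# HEAVILY_INVOLVED_CUTOFF = 3 by count_comments_per_user().
per_thread = [{"mentor": 3}, {"mentor": 3}, {"mentor": 3}, {"mentor": 3}]

# get_mentor_count() aggregates per-thread dicts via Counter addition.
total = sum((Counter(c) for c in per_thread), Counter())

# With MIN_MENTOR_COMMENTS = 10, capped activity on at least four separate
# threads is needed to qualify as an active mentor.
print(total["mentor"] >= 10)  # True (12 counted comments)
```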
diff --git a/classes.py b/classes.py
index 3867f26..e1e6684 100644
--- a/classes.py
+++ b/classes.py
@@ -19,9 +19,12 @@ class IssueWithMetrics:
         time_to_answer (timedelta, optional): The time it took to answer the
             discussions in the issue.
         label_metrics (dict, optional): A dictionary containing the label metrics
+        mentor_activity (dict, optional): A dictionary containing active mentors
 
     """
 
+    # pylint: disable=too-many-instance-attributes
+
     def __init__(
         self,
         title,
@@ -31,6 +34,7 @@ def __init__(
         time_to_close=None,
         time_to_answer=None,
         labels_metrics=None,
+        mentor_activity=None,
     ):
         self.title = title
         self.html_url = html_url
@@ -39,3 +43,4 @@ def __init__(
         self.time_to_close = time_to_close
         self.time_to_answer = time_to_answer
         self.label_metrics = labels_metrics
+        self.mentor_activity = mentor_activity
diff --git a/config.py b/config.py
index e7d87be..30e91aa 100644
--- a/config.py
+++ b/config.py
@@ -34,6 +34,10 @@ class EnvVars:
         hide_time_to_first_response (bool): If true, the time to first response metric is hidden in the output
         ignore_users (List[str]): List of usernames to ignore when calculating metrics
         labels_to_measure (List[str]): List of labels to measure how much time the label is applied
+        enable_mentor_count (bool): If set to TRUE, compute the number of mentors
+        min_mentor_comments (str): If set, defines the minimum number of comments a user must leave to count as a mentor
+        max_comments_eval (str): If set, defines the maximum number of comments to look at for mentor evaluation
+        heavily_involved_cutoff (str): If set, defines the cutoff after which a heavily involved commenter's comments in one issue are no longer counted
         search_query (str): Search query used to filter issues/prs/discussions on GitHub
     """
 
@@ -51,6 +55,10 @@ def __init__(
         hide_time_to_first_response: bool,
         ignore_user: List[str],
         labels_to_measure: List[str],
+        enable_mentor_count: bool,
+        min_mentor_comments: str,
+        max_comments_eval: str,
+        heavily_involved_cutoff: str,
         search_query: str,
     ):
         self.gh_app_id = gh_app_id
@@ -65,6 +73,10 @@ def __init__(
         self.hide_time_to_answer = hide_time_to_answer
         self.hide_time_to_close = hide_time_to_close
         self.hide_time_to_first_response = hide_time_to_first_response
+        self.enable_mentor_count = enable_mentor_count
+        self.min_mentor_comments = min_mentor_comments
+        self.max_comments_eval = max_comments_eval
+        self.heavily_involved_cutoff = heavily_involved_cutoff
         self.search_query = search_query
 
     def __repr__(self):
@@ -82,6 +94,10 @@ def __repr__(self):
             f"{self.hide_time_to_first_response},"
             f"{self.ignore_users},"
             f"{self.labels_to_measure},"
+            f"{self.enable_mentor_count},"
+            f"{self.min_mentor_comments},"
+            f"{self.max_comments_eval},"
+            f"{self.heavily_involved_cutoff},"
             f"{self.search_query})"
         )
 
@@ -166,6 +182,10 @@ def get_env_vars(test: bool = False) -> EnvVars:
     hide_time_to_answer = get_bool_env_var("HIDE_TIME_TO_ANSWER")
     hide_time_to_close = get_bool_env_var("HIDE_TIME_TO_CLOSE")
     hide_time_to_first_response = get_bool_env_var("HIDE_TIME_TO_FIRST_RESPONSE")
+    enable_mentor_count = get_bool_env_var("ENABLE_MENTOR_COUNT")
+    min_mentor_comments = os.getenv("MIN_MENTOR_COMMENTS", "10")
+    max_comments_eval = os.getenv("MAX_COMMENTS_EVAL", "20")
+    heavily_involved_cutoff = os.getenv("HEAVILY_INVOLVED_CUTOFF", "3")
 
     return EnvVars(
         gh_app_id,
@@ -180,5 +200,9 @@ def get_env_vars(test: bool = False) -> EnvVars:
         hide_time_to_first_response,
         ignore_users_list,
         labels_to_measure_list,
+        enable_mentor_count,
+        min_mentor_comments,
+        max_comments_eval,
+        heavily_involved_cutoff,
         search_query,
     )
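Note that `min_mentor_comments`, `max_comments_eval`, and `heavily_involved_cutoff` stay strings in `EnvVars`; the `int` conversion happens later in `issue_metrics.main()`. A rough sketch of that flow, with a hedge that the boolean parsing shown inline only approximates `get_bool_env_var` (environment values hypothetical):

```python
import os

# Hypothetical workflow-provided environment; everything arrives as a string.
os.environ["ENABLE_MENTOR_COUNT"] = "true"
os.environ["MIN_MENTOR_COMMENTS"] = "12"

# The flag goes through get_bool_env_var(); approximated inline here.
enable_mentor_count = os.environ.get("ENABLE_MENTOR_COUNT", "").strip().lower() == "true"

# The numeric options are read with string defaults and cast at the call site.
min_mentor_count = int(os.getenv("MIN_MENTOR_COMMENTS", "10"))

print(enable_mentor_count, min_mentor_count)  # True 12
```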
f"{self.min_mentor_comments}," + f"{self.max_comments_eval}," + f"{self.heavily_involved_cutoff}," f"{self.search_query})" ) @@ -166,6 +182,10 @@ def get_env_vars(test: bool = False) -> EnvVars: hide_time_to_answer = get_bool_env_var("HIDE_TIME_TO_ANSWER") hide_time_to_close = get_bool_env_var("HIDE_TIME_TO_CLOSE") hide_time_to_first_response = get_bool_env_var("HIDE_TIME_TO_FIRST_RESPONSE") + enable_mentor_count = get_bool_env_var("ENABLE_MENTOR_COUNT") + min_mentor_comments = os.getenv("MIN_MENTOR_COMMENTS", "10") + max_comments_eval = os.getenv("MAX_COMMENTS_EVAL", "20") + heavily_involved_cutoff = os.getenv("HEAVILY_INVOLVED_CUTOFF", "3") return EnvVars( gh_app_id, @@ -180,5 +200,9 @@ def get_env_vars(test: bool = False) -> EnvVars: hide_time_to_first_response, ignore_users_list, labels_to_measure_list, + enable_mentor_count, + min_mentor_comments, + max_comments_eval, + heavily_involved_cutoff, search_query, ) diff --git a/issue_metrics.py b/issue_metrics.py index bd4d040..f5c750a 100644 --- a/issue_metrics.py +++ b/issue_metrics.py @@ -31,6 +31,7 @@ from json_writer import write_to_json from labels import get_label_metrics, get_stats_time_in_labels from markdown_writer import write_to_markdown +from most_active_mentors import count_comments_per_user, get_mentor_count from time_to_answer import get_stats_time_to_answer, measure_time_to_answer from time_to_close import get_stats_time_to_close, measure_time_to_close from time_to_first_response import ( @@ -40,8 +41,6 @@ from time_to_merge import measure_time_to_merge from time_to_ready_for_review import get_time_to_ready_for_review -GITHUB_BASE_URL = "https://github.com" - def search_issues( search_query: str, github_connection: github3.GitHub @@ -126,6 +125,8 @@ def get_per_issue_metrics( discussions: bool = False, labels: Union[List[str], None] = None, ignore_users: Union[List[str], None] = None, + max_comments_to_eval: int = 20, + heavily_involved: int = 3, ) -> tuple[List, int, int]: """ Calculate the metrics for each issue/pr/discussion in a list provided. 
@@ -158,10 +159,20 @@ def get_per_issue_metrics(
                 None,
                 None,
                 None,
+                None,
             )
             issue_with_metrics.time_to_first_response = measure_time_to_first_response(
                 None, issue, ignore_users
             )
+            issue_with_metrics.mentor_activity = count_comments_per_user(
+                None,
+                issue,
+                None,
+                None,
+                ignore_users,
+                max_comments_to_eval,
+                heavily_involved,
+            )
             issue_with_metrics.time_to_answer = measure_time_to_answer(issue)
             if issue["closedAt"]:
                 issue_with_metrics.time_to_close = measure_time_to_close(None, issue)
@@ -188,6 +199,15 @@ def get_per_issue_metrics(
             issue_with_metrics.time_to_first_response = measure_time_to_first_response(
                 issue, None, pull_request, ready_for_review_at, ignore_users
             )
+            issue_with_metrics.mentor_activity = count_comments_per_user(
+                issue,
+                None,
+                pull_request,
+                ready_for_review_at,
+                ignore_users,
+                max_comments_to_eval,
+                heavily_involved,
+            )
             if labels:
                 issue_with_metrics.label_metrics = get_label_metrics(issue, labels)
             if issue.state == "closed":  # type: ignore
@@ -259,6 +279,10 @@ def main():
         token,
         env_vars.ghe,
     )
+    enable_mentor_count = env_vars.enable_mentor_count
+    min_mentor_count = int(env_vars.min_mentor_comments)
+    max_comments_eval = int(env_vars.max_comments_eval)
+    heavily_involved_cutoff = int(env_vars.heavily_involved_cutoff)
 
     # Get the repository owner and name from the search query
     owner = get_owner(search_query)
@@ -283,13 +307,13 @@ def main():
         issues = get_discussions(token, search_query)
         if len(issues) <= 0:
             print("No discussions found")
-            write_to_markdown(None, None, None, None, None, None, None)
+            write_to_markdown(None, None, None, None, None, None, None, None)
             return
     else:
         issues = search_issues(search_query, github_connection)
         if len(issues) <= 0:
             print("No issues found")
-            write_to_markdown(None, None, None, None, None, None, None)
+            write_to_markdown(None, None, None, None, None, None, None, None)
             return
 
     # Get all the metrics
@@ -298,6 +322,8 @@ def main():
         discussions="type:discussions" in search_query,
         labels=labels,
         ignore_users=ignore_users,
+        max_comments_to_eval=max_comments_eval,
+        heavily_involved=heavily_involved_cutoff,
     )
 
     stats_time_to_first_response = get_stats_time_to_first_response(issues_with_metrics)
@@ -307,6 +333,10 @@ def main():
 
     stats_time_to_answer = get_stats_time_to_answer(issues_with_metrics)
 
+    num_mentor_count = 0
+    if enable_mentor_count:
+        num_mentor_count = get_mentor_count(issues_with_metrics, min_mentor_count)
+
     # Get stats describing the time in label for each label and store it in a dictionary
     # where the key is the label and the value is the average time
     stats_time_in_labels = get_stats_time_in_labels(issues_with_metrics, labels)
@@ -320,6 +350,7 @@ def main():
         stats_time_in_labels,
         num_issues_open,
         num_issues_closed,
+        num_mentor_count,
         search_query,
     )
     write_to_markdown(
@@ -330,6 +361,7 @@ def main():
         stats_time_in_labels,
         num_issues_open,
         num_issues_closed,
+        num_mentor_count,
         labels,
         search_query,
     )
diff --git a/json_writer.py b/json_writer.py
index 1955ef3..1b40e01 100644
--- a/json_writer.py
+++ b/json_writer.py
@@ -30,6 +30,7 @@ def write_to_json(
     stats_time_in_labels: Union[dict[str, dict[str, timedelta]], None],
     num_issues_opened: Union[int, None],
     num_issues_closed: Union[int, None],
+    num_mentor_count: Union[int, None],
     search_query: str,
 ) -> str:
     """
@@ -42,6 +43,7 @@ def write_to_json(
         "average_time_to_answer": "1 day, 0:00:00",
         "num_items_opened": 2,
         "num_items_closed": 1,
+        "num_mentor_count": 5,
         "total_item_count": 2,
         "issues": [
             {
@@ -129,6 +131,7 @@ def write_to_json(
"90_percentile_time_in_labels": p90_time_in_labels, "num_items_opened": num_issues_opened, "num_items_closed": num_issues_closed, + "num_mentor_count": num_mentor_count, "total_item_count": len(issues_with_metrics), } diff --git a/markdown_writer.py b/markdown_writer.py index ba49622..0b587ae 100644 --- a/markdown_writer.py +++ b/markdown_writer.py @@ -13,6 +13,7 @@ average_time_to_answer: timedelta, num_issues_opened: int, num_issues_closed: int, + num_mentor_count: int, file: file object = None ) -> None: Write the issues with metrics to a markdown file. @@ -79,6 +80,7 @@ def write_to_markdown( average_time_in_labels: Union[dict, None], num_issues_opened: Union[int, None], num_issues_closed: Union[int, None], + num_mentor_count: Union[int, None], labels=None, search_query=None, hide_label_metrics=False, @@ -95,7 +97,8 @@ def write_to_markdown( file (file object, optional): The file object to write to. If not provided, a file named "issue_metrics.md" will be created. num_issues_opened (int): The Number of items that remain opened. - num_issues_closed (int): The number of issues that were closed. + num_issues_closed (int): The number of issues that were closedi. + num_mentor_count (int): The number of very active commentors. labels (List[str]): A list of the labels that are used in the issues. search_query (str): The search query used to find the issues. hide_label_metrics (bool): Represents whether the user has chosen to hide label metrics in the output @@ -127,6 +130,7 @@ def write_to_markdown( average_time_in_labels, num_issues_opened, num_issues_closed, + num_mentor_count, labels, columns, file, @@ -184,6 +188,7 @@ def write_overall_metrics_tables( stats_time_in_labels, num_issues_opened, num_issues_closed, + num_mentor_count, labels, columns, file, @@ -246,4 +251,5 @@ def write_overall_metrics_tables( file.write("| --- | ---: |\n") file.write(f"| Number of items that remain open | {num_issues_opened} |\n") file.write(f"| Number of items closed | {num_issues_closed} |\n") + file.write(f"| Number of most active mentors | {num_mentor_count} |\n") file.write(f"| Total number of items created | {len(issues_with_metrics)} |\n\n") diff --git a/most_active_mentors.py b/most_active_mentors.py new file mode 100755 index 0000000..99bdfae --- /dev/null +++ b/most_active_mentors.py @@ -0,0 +1,179 @@ +"""A module for measuring the number of very active mentors + +This module provides functions for measuring the number of active mentors on a +project. + +This is measured by number of PR comments. We are working under the assumption +that PR comments are left in good faith to move contributors further instead of +nitpicking and discouraging them. + +Open questions: + - should there be an option to limit this to certain users, e.g. core + maintainers? + - should there be a limit to how many comments per PR we consider to avoid + having the statistic dominated by contested PRs? + - should this metric count consecutive comments coming from the same user as + only one to avoid people unnessesarily splitting their comments to game the + metric? + - instead of PR comments should we count PRs on which a username was seen as + commenter? + +Functions: + collect_response_usernames( + issue: Union[github3.issues.Issue, None], + discussion: Union[dict, None], + pull_request: Union[github3.pulls.PullRequest, None], + max_comments_to_evaluate, + ) -> ____________ + Collect the number of responses per username for single item. Take only + top n comments (max_comments_to_evaluate) into consideration. 
+    get_mentor_count(
+        issues_with_metrics: List[IssueWithMetrics],
+        cutoff: int,
+    ) -> int
+        Count the number of mentors active at least `cutoff` times.
+
+"""
+
+from collections import Counter
+from datetime import datetime
+from typing import Dict, List, Union
+
+import github3
+from classes import IssueWithMetrics
+
+
+def count_comments_per_user(
+    issue: Union[github3.issues.Issue, None],  # type: ignore
+    discussion: Union[dict, None] = None,
+    pull_request: Union[github3.pulls.PullRequest, None] = None,
+    ready_for_review_at: Union[datetime, None] = None,
+    ignore_users: List[str] | None = None,
+    max_comments_to_eval=20,
+    heavily_involved=3,
+) -> dict:
+    """Count the number of times a user was seen commenting on a single item.
+
+    Args:
+        issue (Union[github3.issues.Issue, None]): A GitHub issue.
+        discussion (Union[dict, None]): A GitHub discussion.
+        pull_request (Union[github3.pulls.PullRequest, None]): A GitHub pull
+            request.
+        ready_for_review_at (Union[datetime, None]): When the item was marked
+            as ready for review.
+        ignore_users (List[str]): A list of GitHub usernames to ignore.
+        max_comments_to_eval: Maximum number of comments per item to look at.
+        heavily_involved: Maximum number of comments to count for one user
+            per issue.
+
+    Returns:
+        dict: A dictionary of usernames seen and the number of comments they
+            left.
+
+    """
+    if ignore_users is None:
+        ignore_users = []
+    mentor_count: Dict[str, int] = {}
+
+    # Get the first comments
+    if issue:
+        comments = issue.issue.comments(
+            number=max_comments_to_eval, sort="created", direction="asc"
+        )  # type: ignore
+        for comment in comments:
+            if ignore_comment(
+                issue.issue.user,
+                comment.user,
+                ignore_users,
+                comment.created_at,
+                ready_for_review_at,
+            ):
+                continue
+            # increase the number of comments left by the current user by 1,
+            # capping at heavily_involved per issue
+            if comment.user.login in mentor_count:
+                if mentor_count[comment.user.login] < heavily_involved:
+                    mentor_count[comment.user.login] += 1
+            else:
+                mentor_count[comment.user.login] = 1
+
+    # Check if the issue is actually a pull request
+    # so we may also get the first review comment time
+    if pull_request:
+        review_comments = pull_request.reviews(number=max_comments_to_eval)
+        # type: ignore
+        for review_comment in review_comments:
+            if ignore_comment(
+                issue.issue.user,
+                review_comment.user,
+                ignore_users,
+                review_comment.submitted_at,
+                ready_for_review_at,
+            ):
+                continue
+
+            # increase the number of comments left by the current user by 1
+            if review_comment.user.login in mentor_count:
+                mentor_count[review_comment.user.login] += 1
+            else:
+                mentor_count[review_comment.user.login] = 1
+
+    if discussion and len(discussion["comments"]["nodes"]) > 0:
+        for comment in discussion["comments"]["nodes"]:
+            if ignore_comment(
+                comment.user,
+                comment.user,
+                ignore_users,
+                comment.submitted_at,
+                comment.ready_for_review_at,
+            ):
+                continue
+
+            # increase the number of comments left by the current user by 1
+            if comment.user.login in mentor_count:
+                mentor_count[comment.user.login] += 1
+            else:
+                mentor_count[comment.user.login] = 1
+
+    return mentor_count
+
+
+def ignore_comment(
+    issue_user: github3.users.User,
+    comment_user: github3.users.User,
+    ignore_users: List[str],
+    comment_created_at: datetime,
+    ready_for_review_at: Union[datetime, None],
+) -> bool:
+    """Check if a comment should be ignored."""
+    return bool(
+        # ignore comments by IGNORE_USERS
+        comment_user.login in ignore_users
+        # ignore comments by bots
+        or comment_user.type == "Bot"
+        # ignore comments by the issue creator
+        or comment_user.login == issue_user.login
+        # ignore comments created before the issue was ready for review
+        or (ready_for_review_at and comment_created_at < ready_for_review_at)
+    )
+
+
+def get_mentor_count(issues_with_metrics: List[IssueWithMetrics], cutoff: int) -> int:
+    """Calculate the number of active mentors on the project.
+
+    Args:
+        issues_with_metrics (List[IssueWithMetrics]): A list of issues with
+            metrics.
+        cutoff (int): The minimum number of comments a user has to leave to
+            count as an active mentor.
+
+    Returns:
+        int: Number of active mentors
+
+    """
+    mentor_count: Counter[str] = Counter({})
+    for issue_with_metrics in issues_with_metrics:
+        current_counter = Counter(issue_with_metrics.mentor_activity)
+        mentor_count = mentor_count + current_counter
+
+    active_mentor_count = 0
+    for count in mentor_count.values():
+        if count >= cutoff:
+            active_mentor_count += 1
+
+    return active_mentor_count
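For a sense of how the two functions compose, here is a small usage sketch that substitutes hand-written activity dicts for real GitHub objects (titles, URLs, and usernames are made up):

```python
from classes import IssueWithMetrics
from most_active_mentors import get_mentor_count

# Hypothetical per-item results, shaped like count_comments_per_user() output:
# username -> number of counted comments on that item.
issues = [
    IssueWithMetrics(
        "Add docs", "https://github.com/org/repo/issues/1", "alice",
        None, mentor_activity={"sue": 3, "bob": 1},
    ),
    IssueWithMetrics(
        "Fix CI", "https://github.com/org/repo/issues/2", "carol",
        None, mentor_activity={"sue": 2},
    ),
]

# sue totals 5 counted comments, bob totals 1; with cutoff=2 only sue qualifies.
print(get_mentor_count(issues, 2))  # 1
```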
diff --git a/test_config.py b/test_config.py
index e4a00bb..d2f1bd1 100644
--- a/test_config.py
+++ b/test_config.py
@@ -118,6 +118,10 @@ def test_get_env_vars_with_github_app(self):
             False,
             [],
             [],
+            False,
+            "10",
+            "20",
+            "3",
             SEARCH_QUERY,
         )
         result = get_env_vars(True)
@@ -157,6 +161,10 @@ def test_get_env_vars_with_token(self):
             False,
             [],
             [],
+            False,
+            "10",
+            "20",
+            "3",
             SEARCH_QUERY,
         )
         result = get_env_vars(True)
@@ -195,6 +203,10 @@ def test_get_env_vars_optional_values(self):
             True,
             [],
             ["waiting-for-review", "waiting-for-manager"],
+            False,
+            "10",
+            "20",
+            "3",
             SEARCH_QUERY,
         )
         result = get_env_vars(True)
diff --git a/test_issue_metrics.py b/test_issue_metrics.py
index 34b2e38..d192157 100644
--- a/test_issue_metrics.py
+++ b/test_issue_metrics.py
@@ -123,7 +123,7 @@ def test_get_env_vars_missing_query(self):
 
         # Call the function and check that it raises a ValueError
         with self.assertRaises(ValueError):
-            get_env_vars()
+            get_env_vars(test=True)
 
 
 class TestMain(unittest.TestCase):
@@ -231,7 +231,7 @@ def test_main_no_issues_found(
         # Call main and check that it writes 'No issues found'
         issue_metrics.main()
         mock_write_to_markdown.assert_called_once_with(
-            None, None, None, None, None, None, None
+            None, None, None, None, None, None, None, None
         )
diff --git a/test_json_writer.py b/test_json_writer.py
index b7b18ac..fe1fe1b 100644
--- a/test_json_writer.py
+++ b/test_json_writer.py
@@ -61,6 +61,7 @@ def test_write_to_json(self):
         }
         num_issues_opened = 2
         num_issues_closed = 1
+        num_mentor_count = 5
 
         expected_output = {
             "average_time_to_first_response": "2 days, 12:00:00",
@@ -77,6 +78,7 @@ def test_write_to_json(self):
             "90_percentile_time_in_labels": {"bug": "1 day, 16:24:12"},
             "num_items_opened": 2,
             "num_items_closed": 1,
+            "num_mentor_count": 5,
             "total_item_count": 2,
             "issues": [
                 {
@@ -111,6 +113,7 @@ def test_write_to_json(self):
                 stats_time_in_labels=stats_time_in_labels,
                 num_issues_opened=num_issues_opened,
                 num_issues_closed=num_issues_closed,
+                num_mentor_count=num_mentor_count,
                 search_query="is:issue repo:owner/repo",
             ),
             json.dumps(expected_output),
@@ -149,6 +152,7 @@ def test_write_to_json_with_no_response(self):
         }
         num_issues_opened = 2
         num_issues_closed = 0
+        num_mentor_count = 5
 
         expected_output = {
             "average_time_to_first_response": "None",
@@ -165,6 +169,7 @@ def test_write_to_json_with_no_response(self):
             "90_percentile_time_in_labels": {},
             "num_items_opened": 2,
             "num_items_closed": 0,
+            "num_mentor_count": 5,
             "total_item_count": 2,
             "issues": [
                 {
@@ -199,6 +204,7 @@ def test_write_to_json_with_no_response(self):
                 stats_time_in_labels=stats_time_in_labels,
                 num_issues_opened=num_issues_opened,
                 num_issues_closed=num_issues_closed,
+                num_mentor_count=num_mentor_count,
                 search_query="is:issue repo:owner/repo",
             ),
             json.dumps(expected_output),
diff --git a/test_markdown_writer.py b/test_markdown_writer.py
index 00b585c..053dc0a 100644
--- a/test_markdown_writer.py
+++ b/test_markdown_writer.py
@@ -78,6 +78,7 @@ def test_write_to_markdown(self):
 
         num_issues_opened = 2
         num_issues_closed = 1
+        num_mentor_count = 5
 
         # Call the function
         write_to_markdown(
@@ -88,6 +89,7 @@ def test_write_to_markdown(self):
             average_time_in_labels=time_in_labels,
             num_issues_opened=num_issues_opened,
             num_issues_closed=num_issues_closed,
+            num_mentor_count=num_mentor_count,
             labels=["bug"],
             search_query="is:issue is:open label:bug",
         )
@@ -108,6 +110,7 @@ def test_write_to_markdown(self):
             "| --- | ---: |\n"
             "| Number of items that remain open | 2 |\n"
             "| Number of items closed | 1 |\n"
+            "| Number of most active mentors | 5 |\n"
             "| Total number of items created | 2 |\n\n"
             "| Title | URL | Author | Time to first response | Time to close |"
             " Time to answer | Time spent in bug |\n"
@@ -175,6 +178,7 @@ def test_write_to_markdown_with_vertical_bar_in_title(self):
 
         num_issues_opened = 2
         num_issues_closed = 1
+        num_mentor_count = 5
 
         # Call the function
         write_to_markdown(
@@ -185,6 +189,7 @@ def test_write_to_markdown_with_vertical_bar_in_title(self):
             average_time_in_labels=average_time_in_labels,
             num_issues_opened=num_issues_opened,
             num_issues_closed=num_issues_closed,
+            num_mentor_count=num_mentor_count,
             labels=["bug"],
         )
@@ -204,6 +209,7 @@ def test_write_to_markdown_with_vertical_bar_in_title(self):
             "| --- | ---: |\n"
             "| Number of items that remain open | 2 |\n"
             "| Number of items closed | 1 |\n"
+            "| Number of most active mentors | 5 |\n"
             "| Total number of items created | 2 |\n\n"
             "| Title | URL | Author | Time to first response | Time to close |"
             " Time to answer | Time spent in bug |\n"
@@ -221,7 +227,7 @@ def test_write_to_markdown_no_issues(self):
         """Test that write_to_markdown writes the correct markdown file when no issues are found."""
         # Call the function with no issues
         with patch("builtins.open", mock_open()) as mock_open_file:
-            write_to_markdown(None, None, None, None, None, None, None)
+            write_to_markdown(None, None, None, None, None, None, None, None)
 
         # Check that the file was written correctly
         expected_output = [
@@ -292,6 +298,7 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self):
         }
         num_issues_opened = 2
         num_issues_closed = 1
+        num_mentor_count = 5
 
         # Call the function
         write_to_markdown(
@@ -302,6 +309,7 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self):
             average_time_in_labels=average_time_in_labels,
             num_issues_opened=num_issues_opened,
             num_issues_closed=num_issues_closed,
+            num_mentor_count=num_mentor_count,
             labels=["label1"],
             search_query="repo:user/repo is:issue",
             hide_label_metrics=True,
         )
@@ -316,6 +324,7 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self):
             "| --- | ---: |\n"
             "| Number of items that remain open | 2 |\n"
             "| Number of items closed | 1 |\n"
+            "| Number of most active mentors | 5 |\n"
             "| Total number of items created | 2 |\n\n"
             "| Title | URL | Author |\n"
             "| --- | --- | --- |\n"
diff --git a/test_most_active_mentors.py b/test_most_active_mentors.py
new file mode 100755
index 0000000..9fc5db5
--- /dev/null
+++ b/test_most_active_mentors.py
@@ -0,0 +1,82 @@
+"""A module containing unit tests for the most_active_mentors module.
+
+This module contains unit tests for the count_comments_per_user and
+get_mentor_count functions in the most_active_mentors module.
+The tests use mock GitHub issues and comments to test the functions'
+behavior.
+
+Classes:
+    TestCountCommentsPerUser: A class testing the count_comments_per_user
+        and get_mentor_count functions.
+
+"""
+
+import unittest
+from datetime import datetime
+from unittest.mock import MagicMock
+
+from classes import IssueWithMetrics
+from most_active_mentors import count_comments_per_user, get_mentor_count
+
+
+class TestCountCommentsPerUser(unittest.TestCase):
+    """Test the count_comments_per_user function."""
+
+    def test_count_comments_per_user(self):
+        """Test that count_comments_per_user correctly counts user comments.
+
+        This test mocks the GitHub connection and issue comments, and checks
+        that count_comments_per_user correctly considers user comments for
+        counting.
+
+        """
+        # Set up the mock GitHub issues
+        mock_issue1 = MagicMock()
+        mock_issue1.comments = 2
+        mock_issue1.issue.user.login = "issue_owner"
+        mock_issue1.created_at = "2023-01-01T00:00:00Z"
+
+        # Set up 22 mock GitHub issue comments - at most 20 are evaluated
+        # and at most 3 per user are counted
+        mock_issue1.issue.comments.return_value = []
+        for i in range(22):
+            mock_comment1 = MagicMock()
+            mock_comment1.user.login = "very_active_user"
+            mock_comment1.created_at = datetime.fromisoformat(
+                f"2023-01-02T{i:02d}:00:00Z"
+            )
+            # pylint: disable=maybe-no-member
+            mock_issue1.issue.comments.return_value.append(mock_comment1)
+
+        # Call the function
+        result = count_comments_per_user(mock_issue1)
+        expected_result = {"very_active_user": 3}
+
+        # Check the results
+        self.assertEqual(result, expected_result)
+
+    def test_get_mentor_count(self):
+        """Test that get_mentor_count correctly counts active mentors."""
+        mentor_activity = {"sue": 15, "bob": 10}
+
+        # Create mock data
+        issues_with_metrics = [
+            IssueWithMetrics(
+                "Issue 1",
+                "https://github.com/user/repo/issues/1",
+                "alice",
+                None,
+                mentor_activity=mentor_activity,
+            ),
+            IssueWithMetrics(
+                "Issue 2",
+                "https://github.com/user/repo/issues/2",
+                "bob",
+                None,
+                mentor_activity=mentor_activity,
+            ),
+        ]
+
+        # Call the function and check the result
+        result = get_mentor_count(issues_with_metrics, 2)
+        expected_result = 2
+        self.assertEqual(result, expected_result)
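The expected result of 3 in the first test comes from the per-user cap (`heavily_involved`) rather than from `max_comments_to_eval`; a standalone sketch of just that counting loop, with hypothetical data in place of the mocks:

```python
# 22 comments from one user; at most 20 are evaluated and at most
# heavily_involved = 3 are counted per user, mirroring the cap applied by
# count_comments_per_user().
logins = ["very_active_user"] * 22
max_comments_to_eval, heavily_involved = 20, 3

mentor_count: dict = {}
for login in logins[:max_comments_to_eval]:
    if login in mentor_count:
        if mentor_count[login] < heavily_involved:
            mentor_count[login] += 1
    else:
        mentor_count[login] = 1

print(mentor_count)  # {'very_active_user': 3}
```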