diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 0000000..fd51988 --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,30 @@ +# Copilot Instructions + +This is a GitHub Action that searches for issues/pull requests/discussions in a repository, measures several metrics, and generates a report in the form of a GitHub issue. The issues/pull requests/discussions to search for can be filtered using a search query. + +## Code Standards + +### Required Before Each Commit + +- Run `make lint` before committing any changes to ensure proper code linting and formatting. + +### Development Flow + +- Lint: `make lint` +- Test: `make test` + +## Repository Structure + +- `Makefile`: Contains commands for linting, testing, and other tasks +- `requirements.txt`: Python dependencies for the project +- `requirements-test.txt`: Python dependencies for testing +- `README.md`: Project documentation and setup instructions +- `setup.py`: Python package setup configuration +- `test_*.py`: Python test files matching the naming convention for test discovery + +## Key Guidelines + +1. Follow Python best practices and idiomatic patterns. +2. Maintain existing code structure and organization. +3. Write unit tests for new functionality. +4. Document changes to environment variables in the `README.md` file. diff --git a/.github/workflows/auto-labeler.yml b/.github/workflows/auto-labeler.yml index 43fe779..48d4f70 100644 --- a/.github/workflows/auto-labeler.yml +++ b/.github/workflows/auto-labeler.yml @@ -11,7 +11,7 @@ jobs: permissions: contents: write pull-requests: write - uses: github/ospo-reusable-workflows/.github/workflows/auto-labeler.yaml@6f158f242fe68adb5a2698ef47e06dac07ac7e71 + uses: github/ospo-reusable-workflows/.github/workflows/auto-labeler.yaml@ebb4e218b75c6043139fd69a4c9bb5a465fb696b with: config-name: release-drafter.yml secrets: diff --git a/.github/workflows/contributor_report.yaml b/.github/workflows/contributor_report.yaml index d15c184..123abb1 100644 --- a/.github/workflows/contributor_report.yaml +++ b/.github/workflows/contributor_report.yaml @@ -27,7 +27,7 @@ jobs: echo "END_DATE=$end_date" >> "$GITHUB_ENV" - name: Run contributor action - uses: github/contributors@4d90d92531d4c5775be5a70c119ca7c0be165964 # v1.5.9 + uses: github/contributors@69e531b620b7e5b0fad2e9823681607b54db447a # v1.5.11 env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} START_DATE: ${{ env.START_DATE }} diff --git a/.github/workflows/copilot-setup-steps.yml b/.github/workflows/copilot-setup-steps.yml new file mode 100644 index 0000000..cddb8d9 --- /dev/null +++ b/.github/workflows/copilot-setup-steps.yml @@ -0,0 +1,38 @@ +name: "Copilot Setup Steps" + +# Automatically run the setup steps when they are changed to allow for easy validation, and +# allow manual testing through the repository's "Actions" tab +on: + workflow_dispatch: + push: + paths: + - .github/workflows/copilot-setup-steps.yml + pull_request: + paths: + - .github/workflows/copilot-setup-steps.yml + +# Set the permissions to the lowest permissions possible needed for your steps. +# Copilot will be given its own token for its operations. +permissions: + # If you want to clone the repository as part of your setup steps, for example to install dependencies, you'll need the `contents: read` permission. If you don't clone the repository in your setup steps, Copilot will do this for you automatically after the steps complete.
+ contents: read + +jobs: + # The job MUST be called `copilot-setup-steps` or it will not be picked up by Copilot. + copilot-setup-steps: + runs-on: ubuntu-latest + + # You can define any steps you want, and they will run before the agent starts. + # If you do not check out your code, Copilot will do this for you. + steps: + - name: Checkout code + uses: actions/checkout@v5.0.0 + + - name: Set up Python + uses: actions/setup-python@v5.6.0 + with: + python-version: 3.12 + + - name: Install dependencies + run: | + pip install -r requirements.txt -r requirements-test.txt diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 3c2f8ec..9f65a35 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -14,6 +14,6 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4.2.2 + - uses: actions/checkout@v5.0.0 - name: Build the Docker image run: docker build . --file Dockerfile --platform linux/amd64 --tag issue-metrics:"$(date +%s)" diff --git a/.github/workflows/linter.yaml b/.github/workflows/linter.yaml index 9a7a3df..fe5ec0d 100644 --- a/.github/workflows/linter.yaml +++ b/.github/workflows/linter.yaml @@ -18,7 +18,7 @@ jobs: statuses: write steps: - name: Checkout Code - uses: actions/checkout@v4.2.2 + uses: actions/checkout@v5.0.0 with: # Full git history is needed to get a proper # list of changed files within `super-linter` @@ -30,7 +30,7 @@ jobs: run: | pip install -r requirements.txt -r requirements-test.txt - name: Lint Code Base - uses: super-linter/super-linter@12150456a73e248bdc94d0794898f94e23127c88 # v7.4.0 + uses: super-linter/super-linter@5119dcd8011e92182ce8219d9e9efc82f16fddb6 # v8.0.0 env: DEFAULT_BRANCH: main GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/pr-title.yml b/.github/workflows/pr-title.yml index e06a1fb..77afc54 100644 --- a/.github/workflows/pr-title.yml +++ b/.github/workflows/pr-title.yml @@ -12,6 +12,6 @@ jobs: contents: read pull-requests: read statuses: write - uses: github/ospo-reusable-workflows/.github/workflows/pr-title.yaml@6f158f242fe68adb5a2698ef47e06dac07ac7e71 + uses: github/ospo-reusable-workflows/.github/workflows/pr-title.yaml@ebb4e218b75c6043139fd69a4c9bb5a465fb696b secrets: github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 2853bb1..07a7ff7 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -21,7 +21,7 @@ jobs: python-version: [3.11, 3.12] steps: - - uses: actions/checkout@v4.2.2 + - uses: actions/checkout@v5.0.0 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5.6.0 with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2f89b0a..5cc0ca0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,7 +12,7 @@ jobs: permissions: contents: write pull-requests: read - uses: github/ospo-reusable-workflows/.github/workflows/release.yaml@6f158f242fe68adb5a2698ef47e06dac07ac7e71 + uses: github/ospo-reusable-workflows/.github/workflows/release.yaml@ebb4e218b75c6043139fd69a4c9bb5a465fb696b with: publish: true release-config-name: release-drafter.yml @@ -25,7 +25,7 @@ jobs: packages: write id-token: write attestations: write - uses: github/ospo-reusable-workflows/.github/workflows/release-image.yaml@6f158f242fe68adb5a2698ef47e06dac07ac7e71 + uses: 
github/ospo-reusable-workflows/.github/workflows/release-image.yaml@ebb4e218b75c6043139fd69a4c9bb5a465fb696b with: image-name: ${{ github.repository_owner }}/issue_metrics full-tag: ${{ needs.release.outputs.full-tag }} @@ -40,7 +40,7 @@ jobs: permissions: contents: read discussions: write - uses: github/ospo-reusable-workflows/.github/workflows/release-discussion.yaml@6f158f242fe68adb5a2698ef47e06dac07ac7e71 + uses: github/ospo-reusable-workflows/.github/workflows/release-discussion.yaml@ebb4e218b75c6043139fd69a4c9bb5a465fb696b with: full-tag: ${{ needs.release.outputs.full-tag }} body: ${{ needs.release.outputs.body }} diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index b1ce6af..c9ca2c6 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -25,7 +25,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@v4.2.2 + uses: actions/checkout@v5.0.0 with: persist-credentials: false @@ -42,6 +42,6 @@ jobs: path: results.sarif retention-days: 5 - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.24.9 + uses: github/codeql-action/upload-sarif@96f518a34f7a870018057716cc4d7a5c014bd61c # v3.24.9 with: sarif_file: results.sarif diff --git a/Dockerfile b/Dockerfile index f7112a5..b12d6ce 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ #checkov:skip=CKV_DOCKER_2 #checkov:skip=CKV_DOCKER_3 -FROM python:3.13-slim@sha256:f2fdaec50160418e0c2867ba3e254755edd067171725886d5d303fd7057bbf81 +FROM python:3.13-slim@sha256:27f90d79cc85e9b7b2560063ef44fa0e9eaae7a7c3f5a9f74563065c5477cc24 LABEL com.github.actions.name="issue-metrics" \ com.github.actions.description="Gather metrics on issues/prs/discussions such as time to first response, count of issues opened, closed, etc." \ com.github.actions.icon="check-square" \ @@ -17,7 +17,7 @@ COPY requirements.txt *.py /action/workspace/ RUN python3 -m pip install --no-cache-dir -r requirements.txt \ && apt-get -y update \ - && apt-get -y install --no-install-recommends git=1:2.39.5-0+deb12u2 \ + && apt-get -y install --no-install-recommends git=1:2.47.2-0.2 \ && rm -rf /var/lib/apt/lists/* CMD ["/action/workspace/issue_metrics.py"] diff --git a/README.md b/README.md index e63018c..baddaba 100644 --- a/README.md +++ b/README.md @@ -112,9 +112,9 @@ All feedback regarding our GitHub Actions, as a whole, should be communicated th - Do this by creating a [GitHub API token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token-classic) with permissions to read the repository and write issues. - Then take the value of the API token you just created, and [create a repository secret](https://docs.github.com/en/actions/security-guides/encrypted-secrets) where the name of the secret is `GH_TOKEN` and the value of the secret the API token. - Then finally update the workflow file to use that repository secret by changing `GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}` to `GH_TOKEN: ${{ secrets.GH_TOKEN }}`. The name of the secret can really be anything. It just needs to match between when you create the secret name and when you refer to it in the workflow file. - - Help on verifying your token's access to your repository [here](docs/verify-token-access-to-repository.md) + - Help on verifying your token's access to your repository [in the docs directory](docs/verify-token-access-to-repository.md) 6. 
If you want the resulting issue with the metrics in it to appear in a different repository other than the one the workflow file runs in, update the line `token: ${{ secrets.GITHUB_TOKEN }}` with your own GitHub API token stored as a repository secret. - - This process is the same as described in the step above. More info on creating secrets can be found [here](https://docs.github.com/en/actions/security-guides/encrypted-secrets). + - This process is the same as described in the step above. More info on creating secrets can be found [in the GitHub docs security guide on encrypted secrets](https://docs.github.com/en/actions/security-guides/encrypted-secrets). 7. Commit the workflow file to the default branch (often `master` or `main`) 8. Wait for the action to trigger based on the `schedule` entry or manually trigger the workflow as shown in the [documentation](https://docs.github.com/en/actions/using-workflows/manually-running-a-workflow). @@ -154,7 +154,8 @@ This action can be configured to authenticate with GitHub App Installation or Pe | `HIDE_TIME_TO_ANSWER` | False | False | If set to `true`, the time to answer a discussion will not be displayed in the generated Markdown file. | | `HIDE_TIME_TO_CLOSE` | False | False | If set to `true`, the time to close will not be displayed in the generated Markdown file. | | `HIDE_TIME_TO_FIRST_RESPONSE` | False | False | If set to `true`, the time to first response will not be displayed in the generated Markdown file. | -| `HIDE_CREATED_AT` | False | True | If set to `true`, the creation timestmap will not be displayed in the generated Markdown file. | +| `HIDE_STATUS` | False | True | If set to `true`, the status column will not be displayed in the generated Markdown file. | +| `HIDE_CREATED_AT` | False | True | If set to `true`, the creation timestamp will not be displayed in the generated Markdown file. | | `DRAFT_PR_TRACKING` | False | False | If set to `true`, draft PRs will be included in the metrics as a new column and in the summary stats. | | `IGNORE_USERS` | False | False | A comma separated list of users to ignore when calculating metrics. (ie. `IGNORE_USERS: 'user1,user2'`). To ignore bots, append `[bot]` to the user (ie. `IGNORE_USERS: 'github-actions[bot]'`) Users in this list will also have their authored issues and pull requests removed from the Markdown table. | | `ENABLE_MENTOR_COUNT` | False | False | If set to 'TRUE' count number of comments users left on discussions, issues and PRs and display number of active mentors | diff --git a/classes.py b/classes.py index 20ab9b3..d24f430 100644 --- a/classes.py +++ b/classes.py @@ -24,6 +24,7 @@ class IssueWithMetrics: label_metrics (dict, optional): A dictionary containing the label metrics mentor_activity (dict, optional): A dictionary containing active mentors created_at (datetime, optional): The time the issue was created.
+ status (str, optional): The status of the issue, e.g., "open", "closed as completed", """ # pylint: disable=too-many-instance-attributes @@ -42,6 +43,7 @@ def __init__( created_at=None, assignee=None, assignees=None, + status=None, ): self.title = title self.html_url = html_url @@ -55,3 +57,4 @@ def __init__( self.label_metrics = labels_metrics self.mentor_activity = mentor_activity self.created_at = created_at + self.status = status diff --git a/config.py b/config.py index 55768dc..475aa34 100644 --- a/config.py +++ b/config.py @@ -39,6 +39,8 @@ class EnvVars: hide_time_to_close (bool): If true, the time to close metric is hidden in the output hide_time_to_first_response (bool): If true, the time to first response metric is hidden in the output + hide_created_at (bool): If true, the created at timestamp is hidden in the output + hide_status (bool): If true, the status column is hidden in the output ignore_users (List[str]): List of usernames to ignore when calculating metrics labels_to_measure (List[str]): List of labels to measure how much time the label is applied enable_mentor_count (bool): If set to TRUE, compute number of mentors @@ -73,6 +75,7 @@ def __init__( hide_time_to_close: bool, hide_time_to_first_response: bool, hide_created_at: bool, + hide_status: bool, ignore_user: List[str], labels_to_measure: List[str], enable_mentor_count: bool, @@ -102,6 +105,7 @@ def __init__( self.hide_time_to_close = hide_time_to_close self.hide_time_to_first_response = hide_time_to_first_response self.hide_created_at = hide_created_at + self.hide_status = hide_status self.enable_mentor_count = enable_mentor_count self.min_mentor_comments = min_mentor_comments self.max_comments_eval = max_comments_eval @@ -130,6 +134,7 @@ def __repr__(self): f"{self.hide_time_to_close}," f"{self.hide_time_to_first_response}," f"{self.hide_created_at}," + f"{self.hide_status}," f"{self.ignore_users}," f"{self.labels_to_measure}," f"{self.enable_mentor_count}," @@ -238,6 +243,7 @@ def get_env_vars(test: bool = False) -> EnvVars: hide_time_to_close = get_bool_env_var("HIDE_TIME_TO_CLOSE", False) hide_time_to_first_response = get_bool_env_var("HIDE_TIME_TO_FIRST_RESPONSE", False) hide_created_at = get_bool_env_var("HIDE_CREATED_AT", True) + hide_status = get_bool_env_var("HIDE_STATUS", True) enable_mentor_count = get_bool_env_var("ENABLE_MENTOR_COUNT", False) min_mentor_comments = os.getenv("MIN_MENTOR_COMMENTS", "10") max_comments_eval = os.getenv("MAX_COMMENTS_EVAL", "20") @@ -259,6 +265,7 @@ def get_env_vars(test: bool = False) -> EnvVars: hide_time_to_close, hide_time_to_first_response, hide_created_at, + hide_status, ignore_users_list, labels_to_measure_list, enable_mentor_count, diff --git a/docs/verify-token-access-to-repository.md b/docs/verify-token-access-to-repository.md index 25a4f1b..e6d7d54 100644 --- a/docs/verify-token-access-to-repository.md +++ b/docs/verify-token-access-to-repository.md @@ -4,7 +4,7 @@ GitHub PAT token access can be confusing. Here's a quick way to test if the toke **Remove this snippet after you've verified your token.** -- Make sure you follow the token setup instructions [here](https://github.com/github/issue-metrics/tree/main?tab=readme-ov-file#use-as-a-github-action) first. +- Make sure you follow the token setup instructions [in the `README.md`](https://github.com/github/issue-metrics/tree/main?tab=readme-ov-file#use-as-a-github-action) first. - Replace `{owner/repo}` with your own repository information. 
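Note for reviewers (illustrative, not part of the patch): the `classes.py` and `config.py` hunks above add an optional `status` attribute to `IssueWithMetrics` plus a `HIDE_STATUS` flag that defaults to hidden, and the `issue_metrics.py` hunk that follows fills the field from the item's state. A minimal sketch of how those pieces fit together, assuming the repository's own modules are importable as shown in the diff:

```python
# Illustrative sketch only; mirrors the names introduced in this patch.
from config import get_bool_env_var
from classes import IssueWithMetrics

# HIDE_STATUS defaults to True, so the Status column stays hidden unless the
# workflow explicitly sets HIDE_STATUS to "false".
hide_status = get_bool_env_var("HIDE_STATUS", True)

issue = IssueWithMetrics(
    title="Example issue",
    html_url="https://github.com/user/repo/issues/1",
    author="octocat",
)
if not hide_status:
    # issue_metrics.py formats closed items as "<state> as <state_reason>",
    # e.g. "closed as completed"; open items are simply "open".
    issue.status = "closed as completed"
```

To surface the column in a report, a workflow would set `HIDE_STATUS: 'false'` in its `env` block, mirroring how the other `HIDE_*` variables are configured.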
diff --git a/issue_metrics.py b/issue_metrics.py index e7f5982..0c86912 100644 --- a/issue_metrics.py +++ b/issue_metrics.py @@ -175,8 +175,12 @@ def get_per_issue_metrics( issue_with_metrics.time_to_close = measure_time_to_close( issue, None ) + if not env_vars.hide_status: + issue_with_metrics.status = f"{issue.issue.state} as {issue.issue.state_reason}" # type: ignore elif issue.state == "open": # type: ignore num_issues_open += 1 + if not env_vars.hide_status: + issue_with_metrics.status = f"{issue.issue.state}" # type: ignore if not env_vars.hide_created_at: if isinstance(issue, github3.search.IssueSearchResult): # type: ignore issue_with_metrics.created_at = issue.issue.created_at # type: ignore diff --git a/labels.py b/labels.py index b9502b4..ee8656b 100644 --- a/labels.py +++ b/labels.py @@ -85,11 +85,14 @@ def get_label_metrics(issue: github3.issues.Issue, labels: List[str]) -> dict: if label in labeled: # if the issue is closed, add the time from the issue creation to the closed_at time if issue.state == "closed": + # Only add the final (closed_at - created_at) span if the label was still applied at closure. + if label_last_event_type.get(label) != "labeled": + continue label_metrics[label] += datetime.fromisoformat( issue.closed_at ) - datetime.fromisoformat(issue.created_at) else: - # skip label if last labeling event is 'unlabled' and issue is still open + # skip label if last labeling event is 'unlabeled' and issue is still open if label_last_event_type[label] == "unlabeled": continue diff --git a/markdown_writer.py b/markdown_writer.py index efaf0ac..67cc241 100644 --- a/markdown_writer.py +++ b/markdown_writer.py @@ -87,6 +87,10 @@ def get_non_hidden_columns(labels) -> List[str]: if not hide_created_at: columns.append("Created At") + hide_status = env_vars.hide_status + if not hide_status: + columns.append("Status") + return columns @@ -232,6 +236,8 @@ def write_to_markdown( file.write(f" {issue.label_metrics[label]} |") if "Created At" in columns: file.write(f" {issue.created_at} |") + if "Status" in columns: + file.write(f" {issue.status} |") file.write("\n") file.write( "\n_This report was generated with the \ @@ -324,6 +330,8 @@ def write_overall_metrics_tables( f"| {stats_time_in_labels['med'][label]} " f"| {stats_time_in_labels['90p'][label]} |\n" ) + if "Status" in columns: # Add logic for the 'status' column + file.write("| Status | | | |\n") file.write("\n") # Write count stats to a separate table diff --git a/requirements-test.txt b/requirements-test.txt index b69e850..c0be31e 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -1,9 +1,9 @@ black==25.1.0 -flake8==7.2.0 -mypy==1.16.1 +flake8==7.3.0 +mypy==1.17.1 mypy-extensions==1.1.0 -pylint==3.3.7 -pytest==8.4.0 +pylint==3.3.8 +pytest==8.4.1 pytest-cov==6.2.1 -types-pytz==2025.2.0.20250516 -types-requests==2.32.4.20250611 +types-pytz==2025.2.0.20250809 +types-requests==2.32.4.20250809 diff --git a/requirements.txt b/requirements.txt index efe5a96..5ba9ab5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ github3.py==4.0.1 numpy==2.2.4 -python-dotenv==1.1.0 +python-dotenv==1.1.1 pytz==2025.2 -requests==2.32.4 +requests==2.32.5 diff --git a/test_assignee_functionality.py b/test_assignee_functionality.py index 1c12a9b..890fc62 100644 --- a/test_assignee_functionality.py +++ b/test_assignee_functionality.py @@ -75,6 +75,34 @@ def test_get_non_hidden_columns_hides_both_assignee_and_author(self): self.assertNotIn("Assignee", columns) self.assertNotIn("Author", columns) + @patch.dict( + 
os.environ, + { + "GH_TOKEN": "test_token", + "SEARCH_QUERY": "is:issue is:open repo:user/repo", + "HIDE_STATUS": "false", + }, + clear=True, + ) + def test_get_non_hidden_columns_includes_status_when_enabled(self): + """Test that the status column is included when HIDE_STATUS is false.""" + columns = get_non_hidden_columns(labels=None) + self.assertIn("Status", columns) + + @patch.dict( + os.environ, + { + "GH_TOKEN": "test_token", + "SEARCH_QUERY": "is:issue is:open repo:user/repo", + "HIDE_STATUS": "true", + }, + clear=True, + ) + def test_get_non_hidden_columns_hides_status_when_env_set(self): + """Test that the status column is hidden when HIDE_STATUS is true.""" + columns = get_non_hidden_columns(labels=None) + self.assertNotIn("Status", columns) + def test_assignee_column_position(self): """Test that assignee column appears before author column.""" with patch.dict( diff --git a/test_column_order_fix.py b/test_column_order_fix.py new file mode 100644 index 0000000..c186ad7 --- /dev/null +++ b/test_column_order_fix.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python3 + +""" +Test to verify that the Status and Created At columns have their content aligned with headers. + +This test specifically validates the fix for issue #568 where the Status and Created At +columns had their data swapped. +""" + +import os +import unittest +from datetime import timedelta +from unittest.mock import patch + +from classes import IssueWithMetrics +from markdown_writer import get_non_hidden_columns, write_to_markdown + + +@patch.dict( + os.environ, + { + "SEARCH_QUERY": "is:open repo:user/repo", + "GH_TOKEN": "test_token", + "HIDE_CREATED_AT": "False", + "HIDE_STATUS": "False", + }, +) +class TestColumnOrderFix(unittest.TestCase): + """Test that Status and Created At columns have correct data.""" + + def test_status_and_created_at_columns_alignment(self): + """Test that Status and Created At columns show correct data values. + + This test specifically validates that: + 1. The Status column contains actual status values (not dates) + 2.
The Created At column contains actual date values (not status) + """ + # Create test data with clearly distinguishable Status and Created At values + issues_with_metrics = [ + IssueWithMetrics( + title="Test Issue", + html_url="https://github.com/user/repo/issues/1", + author="testuser", + assignee="assignee1", + assignees=["assignee1"], + created_at="2023-01-01T00:00:00Z", # This should appear in Created At column + status="open", # This should appear in Status column + time_to_first_response=timedelta(days=1), + time_to_close=timedelta(days=2), + time_to_answer=timedelta(days=3), + ) + ] + + # Call the function + write_to_markdown( + issues_with_metrics=issues_with_metrics, + average_time_to_first_response=None, + average_time_to_close=None, + average_time_to_answer=None, + average_time_in_draft=None, + average_time_in_labels=None, + num_issues_opened=1, + num_issues_closed=0, + num_mentor_count=0, + labels=None, + search_query="is:issue is:open repo:user/repo", + hide_label_metrics=True, + hide_items_closed_count=False, + enable_mentor_count=False, + non_mentioning_links=False, + report_title="Test Report", + output_file="test_column_order.md", + ) + + # Read the generated markdown + with open("test_column_order.md", "r", encoding="utf-8") as file: + content = file.read() + + # The table should have the columns in the correct order + # and the data should be properly aligned + expected_header = ( + "| Title | URL | Assignee | Author | Time to first response | " + "Time to close | Time to answer | Created At | Status |" + ) + self.assertIn(expected_header, content) + + # Verify the data row has correct values in correct positions + # The Created At column should contain the date value + # The Status column should contain the status value + expected_row = ( + "| Test Issue | https://github.com/user/repo/issues/1 | " + "[assignee1](https://github.com/assignee1) | " + "[testuser](https://github.com/testuser) | 1 day, 0:00:00 | " + "2 days, 0:00:00 | 3 days, 0:00:00 | 2023-01-01T00:00:00Z | open |" + ) + self.assertIn(expected_row, content) + + # Clean up + os.remove("test_column_order.md") + + def test_get_non_hidden_columns_order(self): + """Test that get_non_hidden_columns returns columns in the correct order.""" + columns = get_non_hidden_columns(labels=None) + + # Find the indices of the Status and Created At columns + try: + created_at_index = columns.index("Created At") + status_index = columns.index("Status") + + # Status should come after Created At + self.assertGreater( + status_index, + created_at_index, + "Status column should come after Created At column", + ) + except ValueError: + # If one of the columns is hidden, that's fine, but we shouldn't get here + # given our environment variables + self.fail("Both Status and Created At columns should be present") + + +if __name__ == "__main__": + unittest.main() diff --git a/test_config.py b/test_config.py index 537d157..49435fa 100644 --- a/test_config.py +++ b/test_config.py @@ -131,6 +131,7 @@ def test_get_env_vars_with_github_app(self): hide_time_to_close=False, hide_time_to_first_response=False, hide_created_at=True, + hide_status=True, ignore_user=[], labels_to_measure=[], enable_mentor_count=False, @@ -186,6 +187,7 @@ def test_get_env_vars_with_token(self): hide_time_to_close=False, hide_time_to_first_response=False, hide_created_at=True, + hide_status=True, ignore_user=[], labels_to_measure=[], enable_mentor_count=False, @@ -276,6 +278,7 @@ def test_get_env_vars_optional_values(self): hide_time_to_close=True, 
hide_time_to_first_response=True, hide_created_at=True, + hide_status=True, ignore_user=[], labels_to_measure=["waiting-for-review", "waiting-for-manager"], enable_mentor_count=False, @@ -320,6 +323,7 @@ def test_get_env_vars_optionals_are_defaulted(self): hide_time_to_close=False, hide_time_to_first_response=False, hide_created_at=True, + hide_status=True, ignore_user=[], labels_to_measure=[], enable_mentor_count=False, diff --git a/test_labels.py b/test_labels.py index 9c49b2b..bea6edf 100644 --- a/test_labels.py +++ b/test_labels.py @@ -93,6 +93,181 @@ def test_get_label_metrics_closed_issue_labeled_past_closed_at(self): metrics = get_label_metrics(self.issue, labels) self.assertEqual(metrics["foo"], None) + def test_get_label_metrics_closed_issue_label_removed_before_closure(self): + """Test get_label_metrics for a closed issue where label was removed before closure""" + # Create a mock issue that reproduces the problem scenario: + # Issue created: day 0 (2021-01-01) + # Label added: day 5 (2021-01-06) + # Label removed: day 10 (2021-01-11) + # Issue closed: day 15 (2021-01-16) + # Expected duration: 5 days (from day 5 to day 10) + + issue = MagicMock() + issue.issue = MagicMock(spec=github3.issues.Issue) + issue.created_at = "2021-01-01T00:00:00Z" + issue.closed_at = "2021-01-16T00:00:00Z" # 15 days after creation + issue.state = "closed" + issue.issue.events.return_value = [ + MagicMock( + event="labeled", + label={"name": "test-label"}, + created_at=datetime(2021, 1, 6, tzinfo=pytz.UTC), # day 5 + ), + MagicMock( + event="unlabeled", + label={"name": "test-label"}, + created_at=datetime(2021, 1, 11, tzinfo=pytz.UTC), # day 10 + ), + ] + + labels = ["test-label"] + metrics = get_label_metrics(issue, labels) + + # Should be 5 days (from day 5 to day 10), not 15 days (full issue duration) + expected_duration = timedelta(days=5) + self.assertEqual(metrics["test-label"], expected_duration) + + def test_get_label_metrics_closed_issue_label_remains_through_closure(self): + """Test get_label_metrics for a closed issue where label remains applied through closure""" + # Test scenario where label is applied and never removed: + # Issue created: day 0 (2021-01-01) + # Label added: day 2 (2021-01-03) + # Issue closed: day 10 (2021-01-11) + # Expected duration: 8 days (from label application to closure) + + issue = MagicMock() + issue.issue = MagicMock(spec=github3.issues.Issue) + issue.created_at = "2021-01-01T00:00:00Z" + issue.closed_at = "2021-01-11T00:00:00Z" # 10 days after creation + issue.state = "closed" + issue.issue.events.return_value = [ + MagicMock( + event="labeled", + label={"name": "stays-applied"}, + created_at=datetime(2021, 1, 3, tzinfo=pytz.UTC), # day 2 + ), + # No unlabel event - label remains applied + ] + + labels = ["stays-applied"] + metrics = get_label_metrics(issue, labels) + + # Should be 8 days (from day 2 when label was applied to day 10 when issue closed) + expected_duration = timedelta(days=8) + self.assertEqual(metrics["stays-applied"], expected_duration) + + def test_get_label_metrics_label_applied_at_creation_and_removed_before_closure( + self, + ): + """Test get_label_metrics for a label applied at issue creation and removed before closure""" + # Test scenario where label is applied at creation and later removed: + # Issue created: day 0 (2021-01-01) with label applied + # Label removed: day 7 (2021-01-08) + # Issue closed: day 20 (2021-01-21) + # Expected duration: 7 days (from creation to removal) + + issue = MagicMock() + issue.issue =
MagicMock(spec=github3.issues.Issue) + issue.created_at = "2021-01-01T00:00:00Z" + issue.closed_at = "2021-01-21T00:00:00Z" # 20 days after creation + issue.state = "closed" + issue.issue.events.return_value = [ + MagicMock( + event="labeled", + label={"name": "creation-label"}, + created_at=datetime(2021, 1, 1, tzinfo=pytz.UTC), # day 0 - at creation + ), + MagicMock( + event="unlabeled", + label={"name": "creation-label"}, + created_at=datetime(2021, 1, 8, tzinfo=pytz.UTC), # day 7 + ), + ] + + labels = ["creation-label"] + metrics = get_label_metrics(issue, labels) + + # Should be 7 days (from creation to removal), not 20 days (full issue duration) + expected_duration = timedelta(days=7) + self.assertEqual(metrics["creation-label"], expected_duration) + + def test_get_label_metrics_label_applied_at_creation_remains_through_closure(self): + """Test get_label_metrics for a label applied at creation and kept through closure""" + # Test scenario where label is applied at creation and never removed: + # Issue created: day 0 (2021-01-01) with label applied + # Issue closed: day 30 (2021-01-31) + # Expected duration: 30 days (full issue duration) + + issue = MagicMock() + issue.issue = MagicMock(spec=github3.issues.Issue) + issue.created_at = "2021-01-01T00:00:00Z" + issue.closed_at = "2021-01-31T00:00:00Z" # 30 days after creation + issue.state = "closed" + issue.issue.events.return_value = [ + MagicMock( + event="labeled", + label={"name": "permanent-label"}, + created_at=datetime(2021, 1, 1, tzinfo=pytz.UTC), # day 0 - at creation + ), + # No unlabel event - label remains applied + ] + + labels = ["permanent-label"] + metrics = get_label_metrics(issue, labels) + + # Should be 30 days (full issue duration since label was applied at creation) + expected_duration = timedelta(days=30) + self.assertEqual(metrics["permanent-label"], expected_duration) + + def test_get_label_metrics_multiple_labels_different_timeframes(self): + """Test get_label_metrics with multiple labels having different application patterns and longer timeframes""" + # Test scenario with multiple labels and longer timeframes: + # Issue created: day 0 (2021-01-01) + # Label A applied: day 0 (at creation) + # Label B applied: day 14 (2021-01-15) + # Label A removed: day 21 (2021-01-22) + # Label B removed: day 35 (2021-02-05) + # Issue closed: day 60 (2021-03-02) + # Expected: Label A = 21 days, Label B = 21 days + + issue = MagicMock() + issue.issue = MagicMock(spec=github3.issues.Issue) + issue.created_at = "2021-01-01T00:00:00Z" + issue.closed_at = "2021-03-02T00:00:00Z" # 60 days after creation + issue.state = "closed" + issue.issue.events.return_value = [ + MagicMock( + event="labeled", + label={"name": "label-a"}, + created_at=datetime(2021, 1, 1, tzinfo=pytz.UTC), # day 0 - at creation + ), + MagicMock( + event="labeled", + label={"name": "label-b"}, + created_at=datetime(2021, 1, 15, tzinfo=pytz.UTC), # day 14 + ), + MagicMock( + event="unlabeled", + label={"name": "label-a"}, + created_at=datetime(2021, 1, 22, tzinfo=pytz.UTC), # day 21 + ), + MagicMock( + event="unlabeled", + label={"name": "label-b"}, + created_at=datetime(2021, 2, 5, tzinfo=pytz.UTC), # day 35 + ), + ] + + labels = ["label-a", "label-b"] + metrics = get_label_metrics(issue, labels) + + # Label A: 21 days (from day 0 to day 21) + # Label B: 21 days (from day 14 to day 35) + expected_duration_a = timedelta(days=21) + expected_duration_b = timedelta(days=21) + self.assertEqual(metrics["label-a"], expected_duration_a) + 
self.assertEqual(metrics["label-b"], expected_duration_b) + class TestGetAverageTimeInLabels(unittest.TestCase): """Unit tests for get_stats_time_in_labels""" diff --git a/test_markdown_writer.py b/test_markdown_writer.py index bf3612c..c79536b 100644 --- a/test_markdown_writer.py +++ b/test_markdown_writer.py @@ -23,6 +23,7 @@ "GH_TOKEN": "test_token", "DRAFT_PR_TRACKING": "True", "HIDE_CREATED_AT": "False", + "HIDE_STATUS": "False", }, ) class TestWriteToMarkdown(unittest.TestCase): @@ -128,20 +129,21 @@ def test_write_to_markdown(self): "| Time to answer | 4 days, 0:00:00 | 4 days, 0:00:00 | 4 days, 0:00:00 |\n" "| Time in draft | 1 day, 0:00:00 | 1 day, 0:00:00 | 1 day, 0:00:00 |\n" "| Time spent in bug | 1 day, 12:00:00 | 1 day, 12:00:00 | 1 day, 12:00:00 |\n" + "| Status | | | |\n" "\n" "| Metric | Count |\n" "| --- | ---: |\n" "| Number of items that remain open | 2 |\n" "| Number of items closed | 1 |\n" "| Total number of items created | 2 |\n\n" - "| Title | URL | Assignee | Author | Time to first response | Time to close |" - " Time to answer | Time in draft | Time spent in bug | Created At |\n" - "| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |\n" + "| Title | URL | Assignee | Author | Time to first response | Time to close | " + "Time to answer | Time in draft | Time spent in bug | Created At | Status |\n" + "| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |\n" "| Issue 1 | https://github.com/user/repo/issues/1 | [charlie](https://github.com/charlie) | " "[alice](https://github.com/alice) | 1 day, 0:00:00 | 2 days, 0:00:00 | 3 days, 0:00:00 | " - "1 day, 0:00:00 | 4 days, 0:00:00 | -5 days, 0:00:00 |\n" + "1 day, 0:00:00 | 4 days, 0:00:00 | -5 days, 0:00:00 | None |\n" "| Issue 2 | https://github.com/user/repo/issues/2 | None | [bob](https://github.com/bob) | 3 days, 0:00:00 | " - "4 days, 0:00:00 | 5 days, 0:00:00 | 1 day, 0:00:00 | 2 days, 0:00:00 | -5 days, 0:00:00 |\n\n" + "4 days, 0:00:00 | 5 days, 0:00:00 | 1 day, 0:00:00 | 2 days, 0:00:00 | -5 days, 0:00:00 | None |\n\n" "_This report was generated with the [Issue Metrics Action](https://github.com/github/issue-metrics)_\n" "Search query used to find these items: `is:issue is:open label:bug`\n" ) @@ -182,6 +184,7 @@ def test_write_to_markdown_with_vertical_bar_in_title(self): time_to_first_response=timedelta(days=3), time_to_close=timedelta(days=4), time_to_answer=timedelta(days=5), + time_in_draft=None, labels_metrics={"bug": timedelta(days=2)}, ), ] @@ -243,21 +246,22 @@ def test_write_to_markdown_with_vertical_bar_in_title(self): "| Time to answer | 4 days, 0:00:00 | 4 days, 0:00:00 | 4 days, 0:00:00 |\n" "| Time in draft | 1 day, 0:00:00 | 1 day, 0:00:00 | 1 day, 0:00:00 |\n" "| Time spent in bug | 1 day, 12:00:00 | 1 day, 12:00:00 | 1 day, 12:00:00 |\n" + "| Status | | | |\n" "\n" "| Metric | Count |\n" "| --- | ---: |\n" "| Number of items that remain open | 2 |\n" "| Number of items closed | 1 |\n" "| Total number of items created | 2 |\n\n" - "| Title | URL | Assignee | Author | Time to first response | Time to close |" - " Time to answer | Time in draft | Time spent in bug | Created At |\n" - "| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |\n" + "| Title | URL | Assignee | Author | Time to first response | Time to close | " + "Time to answer | Time in draft | Time spent in bug | Created At | Status |\n" + "| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |\n" "| Issue 1 | https://github.com/user/repo/issues/1 | [charlie](https://github.com/charlie) 
| " "[alice](https://github.com/alice) | 1 day, 0:00:00 | 2 days, 0:00:00 | 3 days, 0:00:00 | " - "1 day, 0:00:00 | 1 day, 0:00:00 | -5 days, 0:00:00 |\n" + "1 day, 0:00:00 | 1 day, 0:00:00 | -5 days, 0:00:00 | None |\n" "| feat| Issue 2 | https://github.com/user/repo/issues/2 | None | " "[bob](https://github.com/bob) | 3 days, 0:00:00 | " - "4 days, 0:00:00 | 5 days, 0:00:00 | None | 2 days, 0:00:00 | -5 days, 0:00:00 |\n\n" + "4 days, 0:00:00 | 5 days, 0:00:00 | None | 2 days, 0:00:00 | -5 days, 0:00:00 | None |\n\n" "_This report was generated with the [Issue Metrics Action](https://github.com/github/issue-metrics)_\n" ) self.assertEqual(content, expected_content) @@ -308,6 +312,7 @@ def test_write_to_markdown_no_issues(self): "HIDE_LABEL_METRICS": "True", "NON_MENTIONING_LINKS": "True", "GH_ENTERPRISE_URL": "https://ghe.com", + "HIDE_STATUS": "False", }, ) class TestWriteToMarkdownWithEnv(unittest.TestCase): @@ -400,7 +405,116 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self): "| Number of items that remain open | 2 |\n" "| Number of most active mentors | 5 |\n" "| Total number of items created | 2 |\n\n" - "| Title | URL | Assignee | Author | Created At |\n" + "| Title | URL | Assignee | Author | Created At | Status |\n" + "| --- | --- | --- | --- | --- | --- |\n" + "| Issue 1 | https://www.ghe.com/user/repo/issues/1 | [charlie](https://ghe.com/charlie) | " + "[alice](https://ghe.com/alice) | -5 days, 0:00:00 | None |\n" + "| Issue 2 | https://www.ghe.com/user/repo/issues/2 | None | [bob](https://ghe.com/bob) | -5 days, 0:00:00 | None |\n\n" + "_This report was generated with the [Issue Metrics Action](https://github.com/github/issue-metrics)_\n" + "Search query used to find these items: `repo:user/repo is:issue`\n" + ) + self.assertEqual(content, expected_content) + os.remove("issue_metrics.md") + + @patch.dict( + os.environ, + { + "SEARCH_QUERY": "is:open repo:user/repo", + "GH_TOKEN": "test_token", + "HIDE_CREATED_AT": "False", + "HIDE_TIME_TO_FIRST_RESPONSE": "True", + "HIDE_TIME_TO_CLOSE": "True", + "HIDE_TIME_TO_ANSWER": "True", + "HIDE_LABEL_METRICS": "True", + "NON_MENTIONING_LINKS": "True", + "GH_ENTERPRISE_URL": "https://ghe.com", + "HIDE_STATUS": "True", # Status column should be hidden + }, + ) + def test_writes_markdown_file_with_hidden_status_column(self): + """ + Test that write_to_markdown writes the correct markdown file + when HIDE_STATUS is set to True, ensuring the Status column + is not present in the output. 
+ """ + # Create mock data + issues_with_metrics = [ + IssueWithMetrics( + title="Issue 1", + html_url="https://ghe.com/user/repo/issues/1", + author="alice", + assignee="charlie", + assignees=["charlie"], + created_at=timedelta(days=-5), + time_to_first_response=timedelta(minutes=10), + time_to_close=timedelta(days=1), + time_to_answer=timedelta(hours=2), + time_in_draft=timedelta(days=1), + labels_metrics={ + "label1": timedelta(days=1), + }, + ), + IssueWithMetrics( + title="Issue 2", + html_url="https://ghe.com/user/repo/issues/2", + author="bob", + assignee=None, + assignees=[], + created_at=timedelta(days=-5), + time_to_first_response=timedelta(minutes=20), + time_to_close=timedelta(days=2), + time_to_answer=timedelta(hours=4), + labels_metrics={ + "label1": timedelta(days=1), + }, + ), + ] + average_time_to_first_response = timedelta(minutes=15) + average_time_to_close = timedelta(days=1.5) + average_time_to_answer = timedelta(hours=3) + average_time_in_draft = timedelta(days=1) + average_time_in_labels = { + "label1": timedelta(days=1), + } + num_issues_opened = 2 + num_issues_closed = 2 + num_mentor_count = 5 + ghe = "https://ghe.com" + + # Call the function + write_to_markdown( + issues_with_metrics=issues_with_metrics, + average_time_to_first_response=average_time_to_first_response, + average_time_to_close=average_time_to_close, + average_time_to_answer=average_time_to_answer, + average_time_in_labels=average_time_in_labels, + average_time_in_draft=average_time_in_draft, + num_issues_opened=num_issues_opened, + num_issues_closed=num_issues_closed, + num_mentor_count=num_mentor_count, + labels=["label1"], + search_query="repo:user/repo is:issue", + hide_label_metrics=True, + hide_items_closed_count=True, + enable_mentor_count=True, + non_mentioning_links=True, + report_title="Issue Metrics", + output_file="issue_metrics.md", + ghe=ghe, + ) + + # Check that the function writes the correct markdown file + with open("issue_metrics.md", "r", encoding="utf-8") as file: + content = file.read() + + expected_content = ( + "# Issue Metrics\n\n" + "| Metric | Count |\n" + "| --- | ---: |\n" + "| Number of items that remain open | 2 |\n" + "| Number of most active mentors | 5 |\n" + "| Total number of items created | 2 |\n\n" + "| Title | URL | Assignee | Author | Created At |\n" # Status column should be missing "| --- | --- | --- | --- | --- |\n" "| Issue 1 | https://www.ghe.com/user/repo/issues/1 | [charlie](https://ghe.com/charlie) | " "[alice](https://ghe.com/alice) | -5 days, 0:00:00 |\n" @@ -410,3 +524,7 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self): ) self.assertEqual(content, expected_content) os.remove("issue_metrics.md") + + +if __name__ == "__main__": + unittest.main() diff --git a/test_time_in_draft.py b/test_time_in_draft.py index 2475147..8a442cc 100644 --- a/test_time_in_draft.py +++ b/test_time_in_draft.py @@ -4,6 +4,7 @@ from datetime import datetime, timedelta from unittest.mock import MagicMock +import github3 import pytz from time_in_draft import get_stats_time_in_draft, measure_time_in_draft @@ -18,13 +19,14 @@ def setUp(self): Setup common test data and mocks. """ self.issue = MagicMock() + self.issue.issue = MagicMock(spec=github3.issues.Issue) self.issue.issue.state = "open" def test_time_in_draft_with_ready_for_review(self): """ Test measure_time_in_draft with one draft and review interval. 
""" - self.issue.events.return_value = [ + self.issue.issue.events.return_value = [ MagicMock( event="converted_to_draft", created_at=datetime(2021, 1, 1, tzinfo=pytz.utc), @@ -42,7 +44,7 @@ def test_time_in_draft_without_ready_for_review(self): """ Test measure_time_in_draft when ready_for_review_at is not provided and issue is still open. """ - self.issue.events.return_value = [ + self.issue.issue.events.return_value = [ MagicMock( event="converted_to_draft", created_at=datetime(2021, 1, 1, tzinfo=pytz.utc), @@ -59,7 +61,7 @@ def test_time_in_draft_multiple_intervals(self): """ Test measure_time_in_draft with multiple draft intervals. """ - self.issue.events.return_value = [ + self.issue.issue.events.return_value = [ MagicMock( event="converted_to_draft", created_at=datetime(2021, 1, 1, tzinfo=pytz.utc), @@ -85,7 +87,7 @@ def test_time_in_draft_ongoing_draft(self): """ Test measure_time_in_draft with an ongoing draft interval. """ - self.issue.events.return_value = [ + self.issue.issue.events.return_value = [ MagicMock( event="converted_to_draft", created_at=datetime(2021, 1, 1, tzinfo=pytz.utc), @@ -103,7 +105,7 @@ def test_time_in_draft_no_draft_events(self): """ Test measure_time_in_draft with no draft-related events. """ - self.issue.events.return_value = [] + self.issue.issue.events.return_value = [] result = measure_time_in_draft(self.issue) self.assertIsNone( result, "The result should be None when there are no draft events." @@ -113,7 +115,7 @@ def test_time_in_draft_without_ready_for_review_and_closed(self): """ Test measure_time_in_draft for a closed issue with an ongoing draft and ready_for_review_at is not provided. """ - self.issue.events.return_value = [ + self.issue.issue.events.return_value = [ MagicMock( event="converted_to_draft", created_at=datetime(2021, 1, 1, tzinfo=pytz.utc), @@ -126,6 +128,30 @@ def test_time_in_draft_without_ready_for_review_and_closed(self): "The result should be None for a closed issue with an ongoing draft.", ) + def test_time_in_draft_with_attribute_error_scenario(self): + """ + Test measure_time_in_draft to ensure it doesn't raise AttributeError when called + with issue structure similar to what get_per_issue_metrics passes. + This test reproduces the original bug scenario. + """ + # This simulates the actual issue structure passed from get_per_issue_metrics + issue_search_result = MagicMock() + issue_search_result.issue = MagicMock(spec=github3.issues.Issue) + issue_search_result.issue.state = "open" + issue_search_result.issue.events.return_value = [ + MagicMock( + event="converted_to_draft", + created_at=datetime(2021, 1, 1, tzinfo=pytz.utc), + ), + ] + + # This should NOT raise AttributeError: events + with unittest.mock.patch("time_in_draft.datetime") as mock_datetime: + mock_datetime.now.return_value = datetime(2021, 1, 4, tzinfo=pytz.utc) + result = measure_time_in_draft(issue_search_result) + expected = timedelta(days=3) + self.assertEqual(result, expected, "The time in draft should be 3 days.") + class TestGetStatsTimeInDraft(unittest.TestCase): """ diff --git a/time_in_draft.py b/time_in_draft.py index 9244145..98f01e7 100644 --- a/time_in_draft.py +++ b/time_in_draft.py @@ -22,7 +22,7 @@ def measure_time_in_draft( returns: Union[timedelta, None]: Total time the pull request has spent in draft state. """ - events = issue.events() + events = issue.issue.events() draft_start = None total_draft_time = timedelta(0)