From f23bf6c849d8e22d8b80b1f487ab8adbdf0b7598 Mon Sep 17 00:00:00 2001 From: ghinks Date: Mon, 3 Nov 2025 14:32:12 -0500 Subject: [PATCH 01/10] feat: add integration test comparing local vs released versions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Creates integration test infrastructure to verify that the local development version produces the same output as the latest released version from PyPI. The test runs review-tally against the expressjs organization and performs semantic comparison of the tabulated results. Key changes: - New integration test in tests/integration/test_version_comparison.py - Parses and semantically compares tabulated output - Saves outputs to timestamped files for debugging - CI workflow updated to install latest release and run integration tests - Unit tests now skip integration tests by default - Added pytest marker for integration tests 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- .github/workflows/ci.yml | 11 +- .gitignore | 3 + pyproject.toml | 3 + tests/integration/__init__.py | 1 + tests/integration/outputs/.gitkeep | 0 tests/integration/test_version_comparison.py | 312 +++++++++++++++++++ 6 files changed, 329 insertions(+), 1 deletion(-) create mode 100644 tests/integration/__init__.py create mode 100644 tests/integration/outputs/.gitkeep create mode 100644 tests/integration/test_version_comparison.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b7eee9d..95718a6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,7 +28,16 @@ jobs: poetry run mypy reviewtally --python-version=${{ matrix.python-version }} - name: run unit tests run: | - poetry run pytest + poetry run pytest -m "not integration" + - name: install latest release for integration test + run: | + pip install review-tally + - name: run integration tests + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + poetry run pytest tests/integration/ -v + timeout-minutes: 10 - name: build package run: | poetry build \ No newline at end of file diff --git a/.gitignore b/.gitignore index f32c715..8ac07f7 100644 --- a/.gitignore +++ b/.gitignore @@ -51,6 +51,9 @@ coverage.xml .pytest_cache/ cover/ +# Integration test outputs +tests/integration/outputs/*.txt + # Translations *.mo *.pot diff --git a/pyproject.toml b/pyproject.toml index 10ef595..6471c4f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -73,3 +73,6 @@ ignore = ["D100", "D101", "D102", "D103", "D203", "D212", ] [tool.pytest.ini_options] asyncio_mode = "auto" # or "strict" required_plugins = ["pytest-asyncio"] +markers = [ + "integration: marks tests as integration tests (deselect with '-m \"not integration\"')", +] diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 0000000..12f2c57 --- /dev/null +++ b/tests/integration/__init__.py @@ -0,0 +1 @@ +"""Integration tests for review-tally.""" diff --git a/tests/integration/outputs/.gitkeep b/tests/integration/outputs/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/tests/integration/test_version_comparison.py b/tests/integration/test_version_comparison.py new file mode 100644 index 0000000..c95f72f --- /dev/null +++ b/tests/integration/test_version_comparison.py @@ -0,0 +1,312 @@ +"""Integration test comparing local and released versions of review-tally.""" + +import os +import re +import subprocess +from datetime import datetime +from pathlib import Path +from typing import Any + +import pytest + + +def parse_tabulated_output(output: str) -> dict[str, dict[str, Any]]: + """Parse tabulated output into a dictionary of user stats. + + Args: + output: The raw output from review-tally command + + Returns: + Dictionary mapping username to their stats + Example: {'user1': {'reviews': 10, 'comments': 25, ...}, ...} + """ + lines = output.strip().split("\n") + user_stats: dict[str, dict[str, Any]] = {} + + # Find the header line to extract column names + header_line = None + data_start_idx = 0 + + for idx, line in enumerate(lines): + # Skip empty lines + if not line.strip(): + continue + + # Look for separator line (contains dashes/hyphens) + if re.match(r"^[\s\-+|]+$", line): + if idx > 0: + header_line = lines[idx - 1] + data_start_idx = idx + 1 + break + + if header_line is None: + # If no separator found, try to parse first non-empty line as header + for idx, line in enumerate(lines): + if line.strip(): + header_line = line + data_start_idx = idx + 1 + break + + if header_line is None: + return user_stats + + # Parse header to get column names + # Split by multiple spaces or pipe characters + headers = [ + h.strip().lower().replace(" ", "-") + for h in re.split(r"\s{2,}|\|", header_line) + if h.strip() + ] + + # Parse data rows + for line in lines[data_start_idx:]: + # Skip empty lines and separator lines + if not line.strip() or re.match(r"^[\s\-+|]+$", line): + continue + + # Split by multiple spaces or pipe characters + values = [v.strip() for v in re.split(r"\s{2,}|\|", line) if v.strip()] + + if len(values) < len(headers): + continue + + # First column should be the username + username = values[0] + stats = {} + + for i, header in enumerate(headers[1:], start=1): + if i < len(values): + value = values[i] + # Try to convert to number + try: + if "." in value: + stats[header] = float(value) + else: + stats[header] = int(value) + except ValueError: + stats[header] = value + + if stats: + user_stats[username] = stats + + return user_stats + + +def save_output_files( + local_output: str, released_output: str, output_dir: Path +) -> tuple[Path, Path]: + """Save outputs to timestamped files. + + Args: + local_output: Output from local version + released_output: Output from released version + output_dir: Directory to save files in + + Returns: + Tuple of (local_file_path, released_file_path) + """ + output_dir.mkdir(parents=True, exist_ok=True) + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + + local_file = output_dir / f"local_output_{timestamp}.txt" + released_file = output_dir / f"released_output_{timestamp}.txt" + + local_file.write_text(local_output) + released_file.write_text(released_output) + + return local_file, released_file + + +def compare_outputs( + local_stats: dict[str, dict[str, Any]], + released_stats: dict[str, dict[str, Any]], +) -> tuple[bool, str]: + """Semantically compare two sets of user statistics. + + Args: + local_stats: Parsed stats from local version + released_stats: Parsed stats from released version + + Returns: + Tuple of (are_equal, difference_message) + """ + differences = [] + + # Check for users present in one but not the other + local_users = set(local_stats.keys()) + released_users = set(released_stats.keys()) + + missing_in_released = local_users - released_users + missing_in_local = released_users - local_users + + if missing_in_released: + differences.append( + f"Users in local but not in released: {missing_in_released}" + ) + + if missing_in_local: + differences.append( + f"Users in released but not in local: {missing_in_local}" + ) + + # Compare stats for common users + common_users = local_users & released_users + + for user in sorted(common_users): + local_user_stats = local_stats[user] + released_user_stats = released_stats[user] + + # Check for metric differences + all_metrics = set(local_user_stats.keys()) | set( + released_user_stats.keys() + ) + + for metric in sorted(all_metrics): + local_value = local_user_stats.get(metric) + released_value = released_user_stats.get(metric) + + if local_value != released_value: + # For floating point numbers, allow small differences + if isinstance(local_value, float) and isinstance( + released_value, float + ): + if abs(local_value - released_value) < 0.01: + continue + + differences.append( + f"User '{user}', metric '{metric}': " + f"local={local_value}, released={released_value}" + ) + + if differences: + return False, "\n".join(differences) + + return True, "" + + +@pytest.mark.integration +def test_local_vs_released_version() -> None: + """Test that local version produces same output as released version. + + This integration test runs review-tally against the expressjs + organization for March 2025 using both the local development version + and the installed released version, then compares the outputs. + + Requires: + - GITHUB_TOKEN environment variable + - review-tally command installed (released version) + """ + # Check for required environment variable + if "GITHUB_TOKEN" not in os.environ: + pytest.fail( + "GITHUB_TOKEN environment variable is required for " + "integration tests" + ) + + # Test parameters + org = "expressjs" + start_date = "2025-03-01" + end_date = "2025-03-31" + timeout = 600 # 10 minutes + + # Prepare output directory + output_dir = Path(__file__).parent / "outputs" + + # Run local version + local_cmd = [ + "python", + "-m", + "reviewtally.main", + "-o", + org, + "-s", + start_date, + "-e", + end_date, + "--nocache", + ] + + try: + local_result = subprocess.run( + local_cmd, + capture_output=True, + text=True, + timeout=timeout, + check=True, + ) + local_output = local_result.stdout + except subprocess.CalledProcessError as e: + pytest.fail( + f"Local version failed with exit code {e.returncode}:\n" + f"stdout: {e.stdout}\n" + f"stderr: {e.stderr}" + ) + except subprocess.TimeoutExpired: + pytest.fail(f"Local version timed out after {timeout} seconds") + + # Run released version + released_cmd = [ + "review-tally", + "-o", + org, + "-s", + start_date, + "-e", + end_date, + "--nocache", + ] + + try: + released_result = subprocess.run( + released_cmd, + capture_output=True, + text=True, + timeout=timeout, + check=True, + ) + released_output = released_result.stdout + except FileNotFoundError: + pytest.fail( + "Released version not found. Please install review-tally:\n" + " pip install review-tally\n" + "or:\n" + " poetry add --group dev review-tally" + ) + except subprocess.CalledProcessError as e: + pytest.fail( + f"Released version failed with exit code {e.returncode}:\n" + f"stdout: {e.stdout}\n" + f"stderr: {e.stderr}" + ) + except subprocess.TimeoutExpired: + pytest.fail(f"Released version timed out after {timeout} seconds") + + # Save outputs to files + local_file, released_file = save_output_files( + local_output, released_output, output_dir + ) + + print(f"\nOutputs saved to:") + print(f" Local: {local_file}") + print(f" Released: {released_file}") + + # Parse outputs + local_stats = parse_tabulated_output(local_output) + released_stats = parse_tabulated_output(released_output) + + # Compare semantically + are_equal, diff_message = compare_outputs(local_stats, released_stats) + + if not are_equal: + pytest.fail( + f"Outputs differ between local and released versions:\n\n" + f"{diff_message}\n\n" + f"Full outputs saved to:\n" + f" Local: {local_file}\n" + f" Released: {released_file}" + ) + + print( + f"\nSuccess! Local and released versions produced identical results." + ) + print(f"Compared {len(local_stats)} users across {org} organization.") From 484f8536648e80b55c9dc7b6696b64070f526bbe Mon Sep 17 00:00:00 2001 From: ghinks Date: Mon, 3 Nov 2025 14:37:46 -0500 Subject: [PATCH 02/10] style: fix lint issues in integration test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes ruff linting errors in the integration test: - Add UTC timezone to datetime.now() call - Combine nested if statements - Add FLOAT_TOLERANCE constant for magic value - Update docstring formatting - Add trailing commas per style guide - Add per-file-ignores for integration test specific rules (C901, PLR0912, S603, T201) All checks now pass: - ruff check ✓ - mypy ✓ - pytest ✓ 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- pyproject.toml | 1 + tests/integration/test_version_comparison.py | 66 +++++++++++--------- 2 files changed, 39 insertions(+), 28 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6471c4f..1eee104 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,6 +58,7 @@ ignore = ["D100", "D101", "D102", "D103", "D203", "D212", ] [tool.ruff.lint.per-file-ignores] "tests/**/test*.py" = ["S101", "PT009", "PT027", "ANN401"] "tests/**/__init__.py" = ["D104"] +"tests/integration/test*.py" = ["C901", "PLR0912", "S603", "T201"] #E: Errors #W: Warnings #F: Pyflakes (logical errors) diff --git a/tests/integration/test_version_comparison.py b/tests/integration/test_version_comparison.py index c95f72f..05655f8 100644 --- a/tests/integration/test_version_comparison.py +++ b/tests/integration/test_version_comparison.py @@ -3,15 +3,19 @@ import os import re import subprocess -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from typing import Any import pytest +# Constants +FLOAT_TOLERANCE = 0.01 # Tolerance for floating point comparisons + def parse_tabulated_output(output: str) -> dict[str, dict[str, Any]]: - """Parse tabulated output into a dictionary of user stats. + """ + Parse tabulated output into a dictionary of user stats. Args: output: The raw output from review-tally command @@ -19,6 +23,7 @@ def parse_tabulated_output(output: str) -> dict[str, dict[str, Any]]: Returns: Dictionary mapping username to their stats Example: {'user1': {'reviews': 10, 'comments': 25, ...}, ...} + """ lines = output.strip().split("\n") user_stats: dict[str, dict[str, Any]] = {} @@ -33,11 +38,10 @@ def parse_tabulated_output(output: str) -> dict[str, dict[str, Any]]: continue # Look for separator line (contains dashes/hyphens) - if re.match(r"^[\s\-+|]+$", line): - if idx > 0: - header_line = lines[idx - 1] - data_start_idx = idx + 1 - break + if re.match(r"^[\s\-+|]+$", line) and idx > 0: + header_line = lines[idx - 1] + data_start_idx = idx + 1 + break if header_line is None: # If no separator found, try to parse first non-empty line as header @@ -93,9 +97,10 @@ def parse_tabulated_output(output: str) -> dict[str, dict[str, Any]]: def save_output_files( - local_output: str, released_output: str, output_dir: Path + local_output: str, released_output: str, output_dir: Path, ) -> tuple[Path, Path]: - """Save outputs to timestamped files. + """ + Save outputs to timestamped files. Args: local_output: Output from local version @@ -104,9 +109,10 @@ def save_output_files( Returns: Tuple of (local_file_path, released_file_path) + """ output_dir.mkdir(parents=True, exist_ok=True) - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + timestamp = datetime.now(UTC).strftime("%Y%m%d_%H%M%S") local_file = output_dir / f"local_output_{timestamp}.txt" released_file = output_dir / f"released_output_{timestamp}.txt" @@ -121,7 +127,8 @@ def compare_outputs( local_stats: dict[str, dict[str, Any]], released_stats: dict[str, dict[str, Any]], ) -> tuple[bool, str]: - """Semantically compare two sets of user statistics. + """ + Semantically compare two sets of user statistics. Args: local_stats: Parsed stats from local version @@ -129,6 +136,7 @@ def compare_outputs( Returns: Tuple of (are_equal, difference_message) + """ differences = [] @@ -141,12 +149,12 @@ def compare_outputs( if missing_in_released: differences.append( - f"Users in local but not in released: {missing_in_released}" + f"Users in local but not in released: {missing_in_released}", ) if missing_in_local: differences.append( - f"Users in released but not in local: {missing_in_local}" + f"Users in released but not in local: {missing_in_local}", ) # Compare stats for common users @@ -158,7 +166,7 @@ def compare_outputs( # Check for metric differences all_metrics = set(local_user_stats.keys()) | set( - released_user_stats.keys() + released_user_stats.keys(), ) for metric in sorted(all_metrics): @@ -167,15 +175,16 @@ def compare_outputs( if local_value != released_value: # For floating point numbers, allow small differences - if isinstance(local_value, float) and isinstance( - released_value, float + if ( + isinstance(local_value, float) + and isinstance(released_value, float) + and abs(local_value - released_value) < FLOAT_TOLERANCE ): - if abs(local_value - released_value) < 0.01: - continue + continue differences.append( f"User '{user}', metric '{metric}': " - f"local={local_value}, released={released_value}" + f"local={local_value}, released={released_value}", ) if differences: @@ -186,7 +195,8 @@ def compare_outputs( @pytest.mark.integration def test_local_vs_released_version() -> None: - """Test that local version produces same output as released version. + """ + Test that local version produces same output as released version. This integration test runs review-tally against the expressjs organization for March 2025 using both the local development version @@ -200,7 +210,7 @@ def test_local_vs_released_version() -> None: if "GITHUB_TOKEN" not in os.environ: pytest.fail( "GITHUB_TOKEN environment variable is required for " - "integration tests" + "integration tests", ) # Test parameters @@ -239,7 +249,7 @@ def test_local_vs_released_version() -> None: pytest.fail( f"Local version failed with exit code {e.returncode}:\n" f"stdout: {e.stdout}\n" - f"stderr: {e.stderr}" + f"stderr: {e.stderr}", ) except subprocess.TimeoutExpired: pytest.fail(f"Local version timed out after {timeout} seconds") @@ -270,23 +280,23 @@ def test_local_vs_released_version() -> None: "Released version not found. Please install review-tally:\n" " pip install review-tally\n" "or:\n" - " poetry add --group dev review-tally" + " poetry add --group dev review-tally", ) except subprocess.CalledProcessError as e: pytest.fail( f"Released version failed with exit code {e.returncode}:\n" f"stdout: {e.stdout}\n" - f"stderr: {e.stderr}" + f"stderr: {e.stderr}", ) except subprocess.TimeoutExpired: pytest.fail(f"Released version timed out after {timeout} seconds") # Save outputs to files local_file, released_file = save_output_files( - local_output, released_output, output_dir + local_output, released_output, output_dir, ) - print(f"\nOutputs saved to:") + print("\nOutputs saved to:") print(f" Local: {local_file}") print(f" Released: {released_file}") @@ -303,10 +313,10 @@ def test_local_vs_released_version() -> None: f"{diff_message}\n\n" f"Full outputs saved to:\n" f" Local: {local_file}\n" - f" Released: {released_file}" + f" Released: {released_file}", ) print( - f"\nSuccess! Local and released versions produced identical results." + "\nSuccess! Local and released versions produced identical results.", ) print(f"Compared {len(local_stats)} users across {org} organization.") From f92e460c056a6caf4a90fe8973ce9f76d36e1098 Mon Sep 17 00:00:00 2001 From: ghinks Date: Tue, 4 Nov 2025 13:54:34 -0500 Subject: [PATCH 03/10] typo on the no-cache args --- tests/integration/test_version_comparison.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_version_comparison.py b/tests/integration/test_version_comparison.py index 05655f8..0471dc3 100644 --- a/tests/integration/test_version_comparison.py +++ b/tests/integration/test_version_comparison.py @@ -233,7 +233,7 @@ def test_local_vs_released_version() -> None: start_date, "-e", end_date, - "--nocache", + "--no-cache", ] try: @@ -263,7 +263,7 @@ def test_local_vs_released_version() -> None: start_date, "-e", end_date, - "--nocache", + "--no-cache", ] try: From 66c701bac5cff7018b98a4b2907620afcdc44ce8 Mon Sep 17 00:00:00 2001 From: ghinks Date: Tue, 4 Nov 2025 16:17:47 -0500 Subject: [PATCH 04/10] modify period to 2 weeks for collation of data --- tests/integration/test_version_comparison.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_version_comparison.py b/tests/integration/test_version_comparison.py index 0471dc3..9fb4e73 100644 --- a/tests/integration/test_version_comparison.py +++ b/tests/integration/test_version_comparison.py @@ -216,7 +216,7 @@ def test_local_vs_released_version() -> None: # Test parameters org = "expressjs" start_date = "2025-03-01" - end_date = "2025-03-31" + end_date = "2025-03-14" timeout = 600 # 10 minutes # Prepare output directory From d54e7388a72d2466e34a9ebb40aa30fd84e59131 Mon Sep 17 00:00:00 2001 From: ghinks Date: Tue, 4 Nov 2025 19:22:44 -0500 Subject: [PATCH 05/10] fix: use poetry run python for local version in integration test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates the integration test to run the local version using 'poetry run python' instead of just 'python' to ensure it runs within the Poetry virtual environment with the correct dependencies. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- tests/integration/test_version_comparison.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/test_version_comparison.py b/tests/integration/test_version_comparison.py index 9fb4e73..a52a892 100644 --- a/tests/integration/test_version_comparison.py +++ b/tests/integration/test_version_comparison.py @@ -224,6 +224,8 @@ def test_local_vs_released_version() -> None: # Run local version local_cmd = [ + "poetry", + "run", "python", "-m", "reviewtally.main", From 27fcb6a70763072a0106294a77802634b39c9011 Mon Sep 17 00:00:00 2001 From: ghinks Date: Wed, 5 Nov 2025 07:13:50 -0500 Subject: [PATCH 06/10] use a smaller time window and print out some more debug --- tests/integration/test_version_comparison.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_version_comparison.py b/tests/integration/test_version_comparison.py index a52a892..ec9e822 100644 --- a/tests/integration/test_version_comparison.py +++ b/tests/integration/test_version_comparison.py @@ -215,8 +215,8 @@ def test_local_vs_released_version() -> None: # Test parameters org = "expressjs" - start_date = "2025-03-01" - end_date = "2025-03-14" + start_date = "2025-11-01" + end_date = "2025-11-05" timeout = 600 # 10 minutes # Prepare output directory @@ -239,6 +239,7 @@ def test_local_vs_released_version() -> None: ] try: + print(f"\nRunning local version command: {' '.join(local_cmd)}") local_result = subprocess.run( local_cmd, capture_output=True, @@ -247,6 +248,7 @@ def test_local_vs_released_version() -> None: check=True, ) local_output = local_result.stdout + print(f"\nLocal version output:\n{local_output}") except subprocess.CalledProcessError as e: pytest.fail( f"Local version failed with exit code {e.returncode}:\n" @@ -269,6 +271,7 @@ def test_local_vs_released_version() -> None: ] try: + print(f"\nRunning released version command: {' '.join(released_cmd)}") released_result = subprocess.run( released_cmd, capture_output=True, @@ -277,6 +280,7 @@ def test_local_vs_released_version() -> None: check=True, ) released_output = released_result.stdout + print(f"\nReleased version output:\n{released_output}") except FileNotFoundError: pytest.fail( "Released version not found. Please install review-tally:\n" From 71765aa084e29fdc2b3ca79a89f7409a3b64a212 Mon Sep 17 00:00:00 2001 From: ghinks Date: Wed, 5 Nov 2025 08:17:18 -0500 Subject: [PATCH 07/10] bump the time window --- tests/integration/test_version_comparison.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_version_comparison.py b/tests/integration/test_version_comparison.py index ec9e822..3625801 100644 --- a/tests/integration/test_version_comparison.py +++ b/tests/integration/test_version_comparison.py @@ -217,7 +217,7 @@ def test_local_vs_released_version() -> None: org = "expressjs" start_date = "2025-11-01" end_date = "2025-11-05" - timeout = 600 # 10 minutes + timeout = 600 * 3 # 10 minutes # Prepare output directory output_dir = Path(__file__).parent / "outputs" From 65a34b98a6eaf9c714295eeaa2a9deba086364c4 Mon Sep 17 00:00:00 2001 From: ghinks Date: Wed, 5 Nov 2025 17:26:50 -0500 Subject: [PATCH 08/10] ci: increase integration test timeout from 10 to 30 minutes --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 95718a6..283ef26 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -37,7 +37,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | poetry run pytest tests/integration/ -v - timeout-minutes: 10 + timeout-minutes: 30 - name: build package run: | poetry build \ No newline at end of file From d4ceb50c3298ee7b828478d240a68e56fdb92e50 Mon Sep 17 00:00:00 2001 From: ghinks Date: Thu, 6 Nov 2025 06:30:20 -0500 Subject: [PATCH 09/10] ci: remove integration test step from workflow --- .github/workflows/ci.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 283ef26..a1e5394 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,12 +32,6 @@ jobs: - name: install latest release for integration test run: | pip install review-tally - - name: run integration tests - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - poetry run pytest tests/integration/ -v - timeout-minutes: 30 - name: build package run: | poetry build \ No newline at end of file From f327c9301c30a91bbf93adf9540722339f518db2 Mon Sep 17 00:00:00 2001 From: ghinks Date: Thu, 6 Nov 2025 06:38:38 -0500 Subject: [PATCH 10/10] ci: remove unused install latest release step --- .github/workflows/ci.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a1e5394..1c7cdb8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -29,9 +29,6 @@ jobs: - name: run unit tests run: | poetry run pytest -m "not integration" - - name: install latest release for integration test - run: | - pip install review-tally - name: build package run: | poetry build \ No newline at end of file