From 8b46353c253505e905d5ff4f6f94865f7e9326b2 Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 12 Mar 2026 13:49:52 +0000 Subject: [PATCH 1/3] feat: add FT lint rules for test conventions Add a custom linter (flagsmith-lint-tests) enforcing Flagsmith test conventions: function-only tests (FT001), no unittest imports (FT002), test_{subject}__{condition}__{expected} naming (FT003), and Given/When/Then comments (FT004). --- .pre-commit-hooks.yaml | 8 + pyproject.toml | 2 + src/common/lint_tests.py | 279 +++++++++++++ tests/unit/common/lint_tests/__init__.py | 0 .../unit/common/lint_tests/test_lint_tests.py | 377 ++++++++++++++++++ 5 files changed, 666 insertions(+) create mode 100644 .pre-commit-hooks.yaml create mode 100644 src/common/lint_tests.py create mode 100644 tests/unit/common/lint_tests/__init__.py create mode 100644 tests/unit/common/lint_tests/test_lint_tests.py diff --git a/.pre-commit-hooks.yaml b/.pre-commit-hooks.yaml new file mode 100644 index 00000000..db1a9d88 --- /dev/null +++ b/.pre-commit-hooks.yaml @@ -0,0 +1,8 @@ +- id: flagsmith-lint-tests + name: flagsmith-lint-tests + description: Lint Flagsmith test conventions (function-only, naming, GWT comments) + entry: flagsmith-lint-tests + language: python + types: [python] + files: test_.*\.py$ + args: [] diff --git a/pyproject.toml b/pyproject.toml index 8cc66c7d..0cbc8bb2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -60,12 +60,14 @@ Repository = "https://github.com/flagsmith/flagsmith-common" [project.scripts] flagsmith = "common.core.main:main" +flagsmith-lint-tests = "common.lint_tests:main" [project.entry-points.pytest11] flagsmith-test-tools = "common.test_tools.plugin" [dependency-groups] dev = [ + "diff-cover>=10.2.0", "dj-database-url (>=2.3.0, <3.0.0)", "django-stubs (>=5.1.3, <6.0.0)", "djangorestframework-stubs (>=3.15.3, <4.0.0)", diff --git a/src/common/lint_tests.py b/src/common/lint_tests.py new file mode 100644 index 00000000..5c43f33a --- /dev/null +++ 
b/src/common/lint_tests.py @@ -0,0 +1,279 @@ +"""Linter for Flagsmith test conventions. + +Enforces: +- FT001: No module-level class Test* (function-only tests) +- FT002: No `import unittest` / `from unittest import TestCase` (unittest.mock is fine) +- FT003: Test name must have exactly 2 `__` separators: test_{subject}__{condition}__{expected} +- FT004: Test body must contain # Given, # When, and # Then comments + +Output format matches ruff/flake8/mypy: {file}:{line}:{col}: {code} {message} +""" + +from __future__ import annotations + +import argparse +import ast +import re +import sys +from pathlib import Path +from typing import NamedTuple + +UNITTEST_BANNED_IMPORTS = frozenset( + {"TestCase", "TestSuite", "TestLoader", "TextTestRunner"} +) + + +class Violation(NamedTuple): + file: str + line: int + col: int + code: str + message: str + + def __str__(self) -> str: + return f"{self.file}:{self.line}:{self.col}: {self.code} {self.message}" + + +def _has_fixture_decorator(node: ast.FunctionDef) -> bool: + for decorator in node.decorator_list: + if isinstance(decorator, ast.Attribute) and decorator.attr == "fixture": + return True + if isinstance(decorator, ast.Name) and decorator.id == "fixture": + return True + # Handle @pytest.fixture(...) 
+        if (
+            isinstance(decorator, ast.Call)
+            and isinstance(decorator.func, ast.Attribute)
+            and decorator.func.attr == "fixture"
+        ):
+            return True
+        if (
+            isinstance(decorator, ast.Call)
+            and isinstance(decorator.func, ast.Name)
+            and decorator.func.id == "fixture"
+        ):
+            return True
+    return False
+
+
+_COMMENT_RE = re.compile(r"#(.*)$")
+
+
+def _extract_comments(source: str) -> dict[int, str]:
+    """Return a mapping of line number (1-based) -> comment text."""
+    comments: dict[int, str] = {}
+    for lineno, line in enumerate(source.splitlines(), start=1):
+        match = _COMMENT_RE.search(line)
+        if match:
+            comments[lineno] = "#" + match.group(1)
+    return comments
+
+
+_NOQA_RE = re.compile(r"#\s*noqa\b(?::\s*(?P<codes>[A-Z0-9,\s]+))?")
+
+
+def _is_noqa_suppressed(comment: str, code: str) -> bool:
+    """Check if a comment contains a noqa directive that suppresses the given code."""
+    match = _NOQA_RE.search(comment)
+    if not match:
+        return False
+    codes_str = match.group("codes")
+    # Bare noqa (without specific codes) suppresses everything
+    if codes_str is None:
+        return True
+    codes = {c.strip() for c in codes_str.split(",")}
+    return code in codes
+
+
+def check_ft001(tree: ast.Module, filepath: str) -> list[Violation]:
+    """FT001: Module-level class Test* detected."""
+    violations = []
+    for node in ast.iter_child_nodes(tree):
+        if isinstance(node, ast.ClassDef) and node.name.startswith("Test"):
+            violations.append(
+                Violation(
+                    file=filepath,
+                    line=node.lineno,
+                    col=node.col_offset + 1,
+                    code="FT001",
+                    message=f"Module-level test class `{node.name}` detected; use function-based tests",
+                )
+            )
+    return violations
+
+
+def check_ft002(tree: ast.Module, filepath: str) -> list[Violation]:
+    """FT002: import unittest / from unittest import TestCase etc. 
(NOT unittest.mock).""" + violations = [] + for node in ast.walk(tree): + if isinstance(node, ast.Import): + for alias in node.names: + # Flag `import unittest` but not `import unittest.mock` + if alias.name == "unittest": + violations.append( + Violation( + file=filepath, + line=node.lineno, + col=node.col_offset + 1, + code="FT002", + message="`import unittest` is not allowed; use pytest instead", + ) + ) + elif isinstance(node, ast.ImportFrom): + if node.module == "unittest": + for alias in node.names: + if alias.name in UNITTEST_BANNED_IMPORTS: + violations.append( + Violation( + file=filepath, + line=node.lineno, + col=node.col_offset + 1, + code="FT002", + message=f"`from unittest import {alias.name}` is not allowed; use pytest instead", + ) + ) + return violations + + +def check_ft003(tree: ast.Module, filepath: str) -> list[Violation]: + """FT003: Test name doesn't follow test_{subject}__{condition}__{expected} convention.""" + violations = [] + for node in ast.iter_child_nodes(tree): + if ( + isinstance(node, ast.FunctionDef) + and node.name.startswith("test_") + and not _has_fixture_decorator(node) + ): + # Strip `test_` prefix and count `__` separators + after_prefix = node.name[5:] + parts = after_prefix.split("__") + if len(parts) != 3: + violations.append( + Violation( + file=filepath, + line=node.lineno, + col=node.col_offset + 1, + code="FT003", + message=f"Test name `{node.name}` doesn't match `test_{{subject}}__{{condition}}__{{expected}}` (found {len(parts)} parts, expected 3)", + ) + ) + return violations + + +def _find_missing_gwt(func_comments: list[str]) -> list[str]: + """Return list of missing Given/When/Then keywords from comments.""" + has_given = False + has_when = False + has_then = False + for text in func_comments: + normalized = text.lstrip("#").strip().lower() + if normalized.startswith("given"): + has_given = True + # "Given / When" satisfies both + if "when" in normalized: + has_when = True + if normalized.startswith("when"): + 
has_when = True + # "When / Then" satisfies both + if "then" in normalized: + has_then = True + if normalized.startswith("then"): + has_then = True + + missing = [] + if not has_given: + missing.append("Given") + if not has_when: + missing.append("When") + if not has_then: + missing.append("Then") + return missing + + +def check_ft004( + tree: ast.Module, filepath: str, comments: dict[int, str] +) -> list[Violation]: + """FT004: Missing # Given, # When, or # Then comments in test body.""" + violations = [] + for node in ast.iter_child_nodes(tree): + if ( + isinstance(node, ast.FunctionDef) + and node.name.startswith("test_") + and not _has_fixture_decorator(node) + ): + func_comments = [ + text + for line_no, text in comments.items() + if node.lineno <= line_no <= (node.end_lineno or node.lineno) + ] + missing = _find_missing_gwt(func_comments) + if missing: + violations.append( + Violation( + file=filepath, + line=node.lineno, + col=node.col_offset + 1, + code="FT004", + message=f"Test `{node.name}` is missing GWT comments: {', '.join(missing)}", + ) + ) + return violations + + +def lint_file(filepath: str) -> list[Violation]: + """Run all checks on a single file.""" + path = Path(filepath) + + # Only check test_*.py files + if not (path.name.startswith("test_") and path.suffix == ".py"): + return [] + + source = path.read_text(encoding="utf-8") + try: + tree = ast.parse(source, filename=filepath) + except SyntaxError: + return [ + Violation( + file=filepath, + line=1, + col=1, + code="FT000", + message="Could not parse file (SyntaxError)", + ) + ] + + comments = _extract_comments(source) + + violations = [] + violations.extend(check_ft001(tree, filepath)) + violations.extend(check_ft002(tree, filepath)) + violations.extend(check_ft003(tree, filepath)) + violations.extend(check_ft004(tree, filepath, comments)) + + # Filter out violations suppressed by noqa comments + return [ + v + for v in violations + if v.line not in comments or not 
_is_noqa_suppressed(comments[v.line], v.code) + ] + + +def main(argv: list[str] | None = None) -> int: + parser = argparse.ArgumentParser( + description="Lint Flagsmith test conventions", + ) + parser.add_argument("files", nargs="*", help="Files to check") + args = parser.parse_args(argv) + + has_errors = False + for filepath in args.files: + violations = lint_file(filepath) + for v in violations: + has_errors = True + print(v) + + return 1 if has_errors else 0 + + +if __name__ == "__main__": # pragma: no cover + sys.exit(main()) diff --git a/tests/unit/common/lint_tests/__init__.py b/tests/unit/common/lint_tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/common/lint_tests/test_lint_tests.py b/tests/unit/common/lint_tests/test_lint_tests.py new file mode 100644 index 00000000..37dfb365 --- /dev/null +++ b/tests/unit/common/lint_tests/test_lint_tests.py @@ -0,0 +1,377 @@ +import textwrap +from collections.abc import Callable +from pathlib import Path +from unittest.mock import ANY + +import pytest + +from common.lint_tests import Violation, lint_file, main + +WriteTestFileFixture = Callable[..., str] + + +@pytest.fixture() +def write_test_file(tmp_path: Path) -> WriteTestFileFixture: + """Write source to a test_*.py file and return its path.""" + + def _write(source: str, filename: str = "test_example.py") -> str: + p = tmp_path / filename + p.write_text(textwrap.dedent(source)) + return str(p) + + return _write + + +@pytest.mark.parametrize( + "source, expected_violations", + [ + pytest.param( + """\ + class TestFoo: + pass + """, + [ + Violation( + file=ANY, + line=1, + col=1, + code="FT001", + message="Module-level test class `TestFoo` detected; use function-based tests", + ), + ], + id="FT001-class-at-module-level", + ), + pytest.param( + """\ + def test_something__with_nested__works(): + # Given + class TestStub: + pass + + # When + result = TestStub() + + # Then + assert result is not None + """, + [], + 
id="FT001-class-nested-in-function-ok", + ), + pytest.param( + """\ + class MyHelper: + pass + """, + [], + id="FT001-non-test-class-ok", + ), + pytest.param( + """\ + import unittest + """, + [ + Violation( + file=ANY, + line=1, + col=1, + code="FT002", + message="`import unittest` is not allowed; use pytest instead", + ), + ], + id="FT002-import-unittest", + ), + pytest.param( + """\ + from unittest import TestCase + """, + [ + Violation( + file=ANY, + line=1, + col=1, + code="FT002", + message="`from unittest import TestCase` is not allowed; use pytest instead", + ), + ], + id="FT002-from-unittest-import-TestCase", + ), + pytest.param( + """\ + from unittest.mock import MagicMock + """, + [], + id="FT002-unittest-mock-ok", + ), + pytest.param( + """\ + import unittest.mock + """, + [], + id="FT002-import-unittest-mock-ok", + ), + pytest.param( + """\ + def test_subject__condition__expected(): + # Given / When + result = 1 + 1 + # Then + assert result == 2 + """, + [], + id="FT003-correct-name-ok", + ), + pytest.param( + """\ + def test_something(): + # Given / When + result = 1 + 1 + # Then + assert result == 2 + """, + [ + Violation( + file=ANY, + line=1, + col=1, + code="FT003", + message="Test name `test_something` doesn't match `test_{subject}__{condition}__{expected}` (found 1 parts, expected 3)", + ), + ], + id="FT003-missing-separators", + ), + pytest.param( + """\ + import pytest + + @pytest.fixture + def test_data(): + return 42 + """, + [], + id="FT003-FT004-fixture-skipped", + ), + pytest.param( + """\ + import pytest + + @pytest.fixture() + def test_data(): + return 42 + """, + [], + id="FT003-FT004-fixture-with-call-skipped", + ), + pytest.param( + """\ + from pytest import fixture + + @fixture + def test_data(): + return 42 + """, + [], + id="FT003-FT004-bare-fixture-skipped", + ), + pytest.param( + """\ + from pytest import fixture + + @fixture() + def test_data(): + return 42 + """, + [], + id="FT003-FT004-bare-fixture-call-skipped", + ), + 
pytest.param( + """\ + def test_subject__condition__expected(): + # Given + x = 1 + + # When + result = x + 1 + + # Then + assert result == 2 + """, + [], + id="FT004-all-gwt-present-ok", + ), + pytest.param( + """\ + def test_subject__condition__expected(): + # Given + x = 1 + + # Then + assert x == 1 + """, + [ + Violation( + file=ANY, + line=1, + col=1, + code="FT004", + message="Test `test_subject__condition__expected` is missing GWT comments: When", + ), + ], + id="FT004-missing-when", + ), + pytest.param( + """\ + def test_subject__condition__expected(): + # Given / When + result = 1 + 1 + + # Then + assert result == 2 + """, + [], + id="FT004-combined-given-when-ok", + ), + pytest.param( + """\ + def test_subject__condition__expected(): + # Given + x = 1 + + # When / Then + assert x == 1 + """, + [], + id="FT004-combined-when-then-ok", + ), + pytest.param( + """\ + def test_something(): # a regular comment, not noqa + pass + """, + [ + Violation( + file=ANY, + line=1, + col=1, + code="FT003", + message="Test name `test_something` doesn't match `test_{subject}__{condition}__{expected}` (found 1 parts, expected 3)", + ), + Violation( + file=ANY, + line=1, + col=1, + code="FT004", + message="Test `test_something` is missing GWT comments: Given, When, Then", + ), + ], + id="regular-comment-does-not-suppress", + ), + pytest.param( + """\ + class TestFoo: # noqa + pass + """, + [], + id="noqa-bare-suppresses-all", + ), + pytest.param( + """\ + def test_bad_name(): # noqa: FT003, FT004 + pass + """, + [], + id="noqa-specific-codes-suppressed", + ), + pytest.param( + """\ + def test_bad_name(): # noqa: FT001 + pass + """, + [ + Violation( + file=ANY, + line=1, + col=1, + code="FT003", + message="Test name `test_bad_name` doesn't match `test_{subject}__{condition}__{expected}` (found 1 parts, expected 3)", + ), + Violation( + file=ANY, + line=1, + col=1, + code="FT004", + message="Test `test_bad_name` is missing GWT comments: Given, When, Then", + ), + ], + 
id="noqa-wrong-code-not-suppressed", + ), + pytest.param( + """\ + import unittest # noqa: FT002 + """, + [], + id="noqa-import-suppressed", + ), + pytest.param( + "def broken(:\n", + [ + Violation( + file=ANY, + line=1, + col=1, + code="FT000", + message="Could not parse file (SyntaxError)", + ), + ], + id="FT000-syntax-error", + ), + ], +) +def test_lint_file__given_source__returns_expected_violations( + write_test_file: WriteTestFileFixture, + source: str, + expected_violations: list[Violation], +) -> None: + # Given + path = write_test_file(source) + + # When + violations = lint_file(path) + + # Then + assert violations == expected_violations + + +@pytest.mark.parametrize( + "filename", + [ + pytest.param("conftest.py", id="conftest"), + pytest.param("helpers.py", id="non-test-file"), + ], +) +def test_lint_file__non_test_file__skipped( + write_test_file: WriteTestFileFixture, + filename: str, +) -> None: + # Given + path = write_test_file("class TestFoo: pass", filename=filename) + + # When + violations = lint_file(path) + + # Then + assert violations == [] + + +def test_cli__error__exits_one(write_test_file: WriteTestFileFixture) -> None: + # Given + path = write_test_file( + """\ + import unittest + """ + ) + + # When + exit_code = main([path]) + + # Then + assert exit_code == 1 From 86dd5c98181f9f80c28d64d74ec8922f0b1f024a Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 12 Mar 2026 13:50:48 +0000 Subject: [PATCH 2/3] fix: resolve all FT lint violations and install pre-commit hook Install the flagsmith-lint-tests hook in pre-commit config and fix all existing FT003/FT004 violations across the test suite by renaming tests to test_{subject}__{condition}__{expected} and adding Given/When/Then comments. 
--- .pre-commit-config.yaml | 6 ++ ...cs__default_request__returns_expected.txt} | 2 +- tests/integration/core/test_main.py | 10 ++-- tests/integration/core/test_views.py | 22 ++++--- .../flagsmith_schemas/test_dynamodb.py | 4 +- tests/unit/common/core/test_logging.py | 2 +- tests/unit/common/core/test_middleware.py | 2 +- tests/unit/common/core/test_utils.py | 23 +++++--- tests/unit/common/gunicorn/test_conf.py | 2 +- tests/unit/common/gunicorn/test_logging.py | 6 +- tests/unit/common/gunicorn/test_utils.py | 2 +- tests/unit/common/test_tools/test_plugin.py | 33 +++++++---- .../test_unit_task_processor_apps.py | 6 +- .../test_unit_task_processor_decorators.py | 24 ++++---- .../test_unit_task_processor_health.py | 4 +- .../test_unit_task_processor_models.py | 28 +++++---- .../test_unit_task_processor_monitoring.py | 2 +- .../test_unit_task_processor_processor.py | 51 ++++++++-------- .../test_unit_task_processor_routers.py | 8 +-- .../test_unit_task_processor_tasks.py | 16 ++--- .../test_unit_task_processor_threads.py | 2 +- uv.lock | 58 +++++++++++++++++++ 22 files changed, 206 insertions(+), 107 deletions(-) rename tests/integration/core/snapshots/{test_metrics__return_expected.txt => test_metrics__default_request__returns_expected.txt} (54%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dc982620..74de66d5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -20,6 +20,12 @@ repos: - id: check-toml - repo: local hooks: + - id: flagsmith-lint-tests + name: flagsmith-lint-tests + language: system + entry: python -P src/common/lint_tests.py + types: [python] + files: ^tests/.*test_.*\.py$ - id: python-typecheck name: python-typecheck language: system diff --git a/tests/integration/core/snapshots/test_metrics__return_expected.txt b/tests/integration/core/snapshots/test_metrics__default_request__returns_expected.txt similarity index 54% rename from tests/integration/core/snapshots/test_metrics__return_expected.txt rename to 
tests/integration/core/snapshots/test_metrics__default_request__returns_expected.txt index d60a83fc..c7ab0315 100644 --- a/tests/integration/core/snapshots/test_metrics__return_expected.txt +++ b/tests/integration/core/snapshots/test_metrics__default_request__returns_expected.txt @@ -1,3 +1,3 @@ # HELP pytest_tests_run_total Total number of tests run by pytest. # TYPE pytest_tests_run_total counter -pytest_tests_run_total{test_name="test_metrics__return_expected"} 1.0 +pytest_tests_run_total{test_name="test_metrics__default_request__returns_expected"} 1.0 diff --git a/tests/integration/core/test_main.py b/tests/integration/core/test_main.py index fbe5885b..341a77df 100644 --- a/tests/integration/core/test_main.py +++ b/tests/integration/core/test_main.py @@ -35,7 +35,7 @@ def test_main__non_overridden_args__defaults_to_django( ["flagsmith", "healthcheck", "tcp"], ), ) -def test_main__healthcheck_tcp__no_server__runs_expected( +def test_main__healthcheck_tcp_no_server__runs_expected( argv: list[str], capsys: pytest.CaptureFixture[str], ) -> None: @@ -48,7 +48,7 @@ def test_main__healthcheck_tcp__no_server__runs_expected( assert exc_info.value.code == 1 -def test_main__healthcheck_tcp__server__runs_expected( +def test_main__healthcheck_tcp_server__runs_expected( unused_tcp_port: int, http_server: HTTPServer, ) -> None: @@ -63,7 +63,7 @@ def test_main__healthcheck_tcp__server__runs_expected( assert exc_info.value.code == 0 -def test_main__healthcheck_http__no_server__runs_expected() -> None: +def test_main__healthcheck_http_no_server__runs_expected() -> None: # Given argv = ["flagsmith", "healthcheck", "http"] @@ -79,7 +79,7 @@ def test_main__healthcheck_http__no_server__runs_expected() -> None: (["flagsmith", "healthcheck", "http", "health/readiness"], "/health/readiness"), ), ) -def test_main__healthcheck_http__server__runs_expected( +def test_main__healthcheck_http_server__runs_expected( unused_tcp_port: int, http_server: HTTPServer, argv: list[str], @@ -94,7 +94,7 
@@ def test_main__healthcheck_http__server__runs_expected( main(argv) -def test_main__healthcheck_http__server_invalid_response__runs_expected( +def test_main__healthcheck_http_server_invalid_response__runs_expected( unused_tcp_port: int, http_server: HTTPServer, ) -> None: diff --git a/tests/integration/core/test_views.py b/tests/integration/core/test_views.py index e14504e5..de6ee3b0 100644 --- a/tests/integration/core/test_views.py +++ b/tests/integration/core/test_views.py @@ -5,26 +5,34 @@ from common.test_tools import SnapshotFixture -def test_liveness_probe__return_expected( +def test_liveness_probe__default_request__returns_expected( client: APIClient, ) -> None: - response = client.get("/health/liveness/") + # Given + url = "/health/liveness/" + + # When + response = client.get(url) + + # Then assert response.status_code == 200 assert response.json() == {"status": "ok"} @pytest.mark.prometheus_multiprocess_mode -def test_metrics__return_expected( +def test_metrics__default_request__returns_expected( test_metric: prometheus_client.Counter, client: APIClient, snapshot: SnapshotFixture, ) -> None: - # Arrange - test_metric.labels(test_name="test_metrics__return_expected").inc() + # Given + test_metric.labels( + test_name="test_metrics__default_request__returns_expected" + ).inc() - # Act + # When response = client.get("/metrics", follow=True) - # Assert + # Then assert response.status_code == 200 assert response.content.decode() == snapshot() diff --git a/tests/integration/flagsmith_schemas/test_dynamodb.py b/tests/integration/flagsmith_schemas/test_dynamodb.py index f6f05817..b9a8da14 100644 --- a/tests/integration/flagsmith_schemas/test_dynamodb.py +++ b/tests/integration/flagsmith_schemas/test_dynamodb.py @@ -966,7 +966,7 @@ def test_document__validate_json__expected_result( assert document == expected_result -def test_type_adapter__identity__duplicate_features__raises_expected( +def test_type_adapter__identity_duplicate_features__raises_expected( mocker: 
MockerFixture, ) -> None: # Given @@ -1009,7 +1009,7 @@ def test_type_adapter__identity__duplicate_features__raises_expected( ) -def test_type_adapter__environment__multivariate_feature_states_percentage_allocation_exceeds_100__raises_expected() -> ( +def test_type_adapter__environment_multivariate_feature_states_percentage_allocation_exceeds_100__raises_expected() -> ( None ): # Given diff --git a/tests/unit/common/core/test_logging.py b/tests/unit/common/core/test_logging.py index a587d069..e42bd70e 100644 --- a/tests/unit/common/core/test_logging.py +++ b/tests/unit/common/core/test_logging.py @@ -8,7 +8,7 @@ @pytest.mark.freeze_time("2023-12-08T06:05:47+00:00") -def test_json_formatter__outputs_expected( +def test_json_formatter__format_log__outputs_expected( caplog: pytest.LogCaptureFixture, request: pytest.FixtureRequest, ) -> None: diff --git a/tests/unit/common/core/test_middleware.py b/tests/unit/common/core/test_middleware.py index d98d8bc3..cde3c362 100644 --- a/tests/unit/common/core/test_middleware.py +++ b/tests/unit/common/core/test_middleware.py @@ -3,7 +3,7 @@ from common.core import middleware as middleware_module -def test_APIResponseVersionHeaderMiddleware__adds_version_header( +def test_api_response_version_header_middleware__request_processed__adds_version_header( mocker: MockerFixture, ) -> None: # Given diff --git a/tests/unit/common/core/test_utils.py b/tests/unit/common/core/test_utils.py index 777c2862..a5b72a10 100644 --- a/tests/unit/common/core/test_utils.py +++ b/tests/unit/common/core/test_utils.py @@ -49,28 +49,33 @@ def bad_replica(mocker: MockerFixture) -> MockType: return replica -def test__is_oss_for_enterprise_returns_false(fs: FakeFilesystem) -> None: +def test_is_oss__enterprise_version_exists__returns_false(fs: FakeFilesystem) -> None: # Given fs.create_file("./ENTERPRISE_VERSION") - # Then + # When / Then assert is_oss() is False -def test__is_oss_for_saas_returns_false(fs: FakeFilesystem) -> None: +def 
test_is_oss__saas_deployment_exists__returns_false(fs: FakeFilesystem) -> None: # Given fs.create_file("./SAAS_DEPLOYMENT") - # Then + # When / Then assert is_oss() is False -def test__is_oss_for_oss_returns_true(fs: FakeFilesystem) -> None: +def test_is_oss__no_version_files__returns_true(fs: FakeFilesystem) -> None: + # Given / When + result = is_oss() + # Then - assert is_oss() is True + assert result is True -def test_get_version_info(fs: FakeFilesystem) -> None: +def test_get_version_info__all_files_present__returns_expected( + fs: FakeFilesystem, +) -> None: # Given expected_manifest_contents = { ".": "2.66.2", @@ -98,7 +103,7 @@ def test_get_version_info(fs: FakeFilesystem) -> None: } -def test_get_version_info_with_missing_files(fs: FakeFilesystem) -> None: +def test_get_version_info__missing_files__returns_unknown(fs: FakeFilesystem) -> None: # Given fs.create_file("./ENTERPRISE_VERSION") @@ -217,7 +222,7 @@ def test_get_version__invalid_file_contents__returns_unknown( ({"default", "replica_1", "cross_region_replica_1"}, True), ], ) -def test_is_database_replica_setup__tells_whether_any_replica_is_present( +def test_is_database_replica_setup__various_database_configs__returns_expected( database_names: list[str], expected: bool, mocker: MockerFixture, diff --git a/tests/unit/common/gunicorn/test_conf.py b/tests/unit/common/gunicorn/test_conf.py index 0229b42d..27bae805 100644 --- a/tests/unit/common/gunicorn/test_conf.py +++ b/tests/unit/common/gunicorn/test_conf.py @@ -6,7 +6,7 @@ from common.gunicorn.conf import child_exit, when_ready -def test_child_exit__calls_mark_process_dead_with_worker_pid( +def test_child_exit__worker_exits__marks_process_dead_with_worker_pid( mocker: MockerFixture, ) -> None: # Given diff --git a/tests/unit/common/gunicorn/test_logging.py b/tests/unit/common/gunicorn/test_logging.py index 8a336780..f501b52d 100644 --- a/tests/unit/common/gunicorn/test_logging.py +++ b/tests/unit/common/gunicorn/test_logging.py @@ -17,7 +17,7 @@ 
@pytest.mark.freeze_time("2023-12-08T06:05:47+00:00") -def test_gunicorn_access_log_json_formatter__outputs_expected( +def test_gunicorn_access_log_json_formatter__format_log__outputs_expected( settings: SettingsWrapper, ) -> None: # Given @@ -97,7 +97,7 @@ def test_gunicorn_access_log_json_formatter__outputs_expected( } -def test_gunicorn_prometheus_gunicorn_logger__expected_metrics( +def test_gunicorn_prometheus_gunicorn_logger__access_logged__expected_metrics( mocker: MockerFixture, assert_metric: AssertMetricFixture, ) -> None: @@ -136,7 +136,7 @@ def test_gunicorn_prometheus_gunicorn_logger__expected_metrics( ) -def test_gunicorn_json_capable_logger__sets_expected_formatters( +def test_gunicorn_json_capable_logger__json_log_format__sets_expected_formatters( settings: SettingsWrapper, ) -> None: # Given diff --git a/tests/unit/common/gunicorn/test_utils.py b/tests/unit/common/gunicorn/test_utils.py index 9740f80c..6aa84986 100644 --- a/tests/unit/common/gunicorn/test_utils.py +++ b/tests/unit/common/gunicorn/test_utils.py @@ -53,7 +53,7 @@ def delay_kill(pid: int = pid) -> None: threading.Thread(target=delay_kill).start() - # When + # When / Then with pytest.raises(SystemExit): run_server({"bind": f"0.0.0.0:{unused_tcp_port}"}) diff --git a/tests/unit/common/test_tools/test_plugin.py b/tests/unit/common/test_tools/test_plugin.py index 15bf77d5..1ac164d4 100644 --- a/tests/unit/common/test_tools/test_plugin.py +++ b/tests/unit/common/test_tools/test_plugin.py @@ -10,7 +10,7 @@ from task_processor.models import Task -def test_assert_metrics__asserts_expected( +def test_assert_metrics__metric_incremented__asserts_expected( assert_metric: AssertMetricFixture, test_metric: prometheus_client.Counter, ) -> None: @@ -25,7 +25,7 @@ def test_assert_metrics__asserts_expected( ) -def test_assert_metrics__registry_reset_expected( +def test_assert_metrics__after_registry_reset__raises_assertion( test_metric: prometheus_client.Counter, ) -> None: # Given @@ -44,24 +44,33 @@ 
def test_assert_metrics__registry_reset_expected( ) -def test_no_marker__oss_edition_expected() -> None: - # When & Then - assert edition_printer() == "oss!" +def test_edition_printer__no_marker__returns_oss() -> None: + # Given / When + result = edition_printer() + + # Then + assert result == "oss!" @pytest.mark.saas_mode -def test_saas_mode_marker__is_saas_returns_expected() -> None: - # When & Then - assert edition_printer() == "saas!" +def test_edition_printer__saas_mode__returns_saas() -> None: + # Given / When + result = edition_printer() + + # Then + assert result == "saas!" @pytest.mark.enterprise_mode -def test_enterprise_mode_marker__is_enterprise_returns_expected() -> None: - # When & Then - assert edition_printer() == "enterprise!" +def test_edition_printer__enterprise_mode__returns_enterprise() -> None: + # Given / When + result = edition_printer() + + # Then + assert result == "enterprise!" -def test_run_tasks__runs_expected_tasks( +def test_run_tasks__single_task_delayed__runs_expected( run_tasks: RunTasksFixture, ) -> None: # Given diff --git a/tests/unit/task_processor/test_unit_task_processor_apps.py b/tests/unit/task_processor/test_unit_task_processor_apps.py index b071d326..61ee3873 100644 --- a/tests/unit/task_processor/test_unit_task_processor_apps.py +++ b/tests/unit/task_processor/test_unit_task_processor_apps.py @@ -10,7 +10,7 @@ from task_processor.task_run_method import TaskRunMethod -def test_skips_validation_if_task_run_method_is_not_task_processor( +def test_task_processor_app__not_task_processor_mode__skips_validation( mocker: MockerFixture, settings: SettingsWrapper, ) -> None: @@ -33,7 +33,7 @@ def test_skips_validation_if_task_run_method_is_not_task_processor( (False, None), ], ) -def test_registers_health_check_if_enabled( +def test_task_processor_app__health_check_toggle__registers_expected( enabled: bool, mocker: MockerFixture, expected_call: tuple[typing.Any, ...], @@ -94,7 +94,7 @@ def test_registers_health_check_if_enabled( 
), ], ) -def test_validates_django_settings_are_compatible_with_multi_database_setup( +def test_task_processor_app__invalid_multi_database_setup__raises_improperly_configured( expected_error: str, mocker: MockerFixture, settings: SettingsWrapper, diff --git a/tests/unit/task_processor/test_unit_task_processor_decorators.py b/tests/unit/task_processor/test_unit_task_processor_decorators.py index 7b528da2..27c577bf 100644 --- a/tests/unit/task_processor/test_unit_task_processor_decorators.py +++ b/tests/unit/task_processor/test_unit_task_processor_decorators.py @@ -32,7 +32,7 @@ def mock_thread_class( @pytest.mark.django_db -def test_register_task_handler_run_in_thread__transaction_commit__true__default( +def test_register_task_handler_run_in_thread__transaction_commit_true__default( caplog: pytest.LogCaptureFixture, mock_thread_class: MagicMock, django_capture_on_commit_callbacks: DjangoCaptureOnCommitCallbacks, @@ -98,7 +98,7 @@ def my_function(*args: typing.Any, **kwargs: typing.Any) -> None: @pytest.mark.django_db @pytest.mark.task_processor_mode -def test_register_recurring_task( +def test_register_recurring_task__task_processor_mode__creates_task( mocker: MockerFixture, ) -> None: # Given @@ -126,7 +126,7 @@ def test_register_recurring_task( @pytest.mark.django_db -def test_register_recurring_task_does_nothing_if_not_run_by_processor() -> None: +def test_register_recurring_task__not_run_by_processor__does_nothing() -> None: # Given task_kwargs = {"first_arg": "foo", "second_arg": "bar"} @@ -147,7 +147,9 @@ def some_function(first_arg: str, second_arg: str) -> None: assert get_task(task_identifier) -def test_register_task_handler_validates_inputs() -> None: +def test_register_task_handler__non_serializable_input__raises_invalid_arguments() -> ( + None +): # Given @register_task_handler() def my_function(*args: typing.Any, **kwargs: typing.Any) -> None: @@ -156,7 +158,7 @@ def my_function(*args: typing.Any, **kwargs: typing.Any) -> None: class NonSerializableObj: 
pass - # When + # When / Then with pytest.raises(InvalidArgumentsError): my_function(NonSerializableObj()) @@ -164,7 +166,7 @@ class NonSerializableObj: @pytest.mark.parametrize( "task_run_method", (TaskRunMethod.SEPARATE_THREAD, TaskRunMethod.SYNCHRONOUSLY) ) -def test_inputs_are_validated_when_run_without_task_processor( +def test_inputs_validation__run_without_task_processor__raises_invalid_arguments( settings: SettingsWrapper, task_run_method: TaskRunMethod ) -> None: # Given @@ -177,13 +179,13 @@ def my_function(*args: typing.Any, **kwargs: typing.Any) -> None: class NonSerializableObj: pass - # When + # When / Then with pytest.raises(InvalidArgumentsError): my_function.delay(args=(NonSerializableObj(),)) @pytest.mark.django_db -def test_delay_returns_none_if_task_queue_is_full(settings: SettingsWrapper) -> None: +def test_delay__task_queue_full__returns_none(settings: SettingsWrapper) -> None: # Given settings.TASK_RUN_METHOD = TaskRunMethod.TASK_PROCESSOR @@ -204,7 +206,7 @@ def my_function(*args: typing.Any, **kwargs: typing.Any) -> None: @pytest.mark.django_db -def test_delay__expected_metrics( +def test_delay__task_enqueued__expected_metrics( settings: SettingsWrapper, assert_metric: AssertMetricFixture, ) -> None: @@ -227,7 +229,9 @@ def my_function(*args: typing.Any, **kwargs: typing.Any) -> None: @pytest.mark.django_db -def test_can_create_task_with_priority(settings: SettingsWrapper) -> None: +def test_create_task__with_priority__sets_expected_priority( + settings: SettingsWrapper, +) -> None: # Given settings.TASK_RUN_METHOD = TaskRunMethod.TASK_PROCESSOR diff --git a/tests/unit/task_processor/test_unit_task_processor_health.py b/tests/unit/task_processor/test_unit_task_processor_health.py index 86de292b..b0b17b10 100644 --- a/tests/unit/task_processor/test_unit_task_processor_health.py +++ b/tests/unit/task_processor/test_unit_task_processor_health.py @@ -7,7 +7,7 @@ from task_processor.task_run_method import TaskRunMethod -def 
test_is_processor_healthy_returns_false_if_task_not_processed( +def test_is_processor_healthy__task_not_processed__returns_false( mocker: MockerFixture, ) -> None: # Given @@ -27,7 +27,7 @@ def test_is_processor_healthy_returns_false_if_task_not_processed( @pytest.mark.django_db -def test_is_processor_healthy_returns_true_if_task_processed( +def test_is_processor_healthy__task_processed__returns_true( settings: SettingsWrapper, ) -> None: # Given diff --git a/tests/unit/task_processor/test_unit_task_processor_models.py b/tests/unit/task_processor/test_unit_task_processor_models.py index 0fb6a0c0..fd82ad31 100644 --- a/tests/unit/task_processor/test_unit_task_processor_models.py +++ b/tests/unit/task_processor/test_unit_task_processor_models.py @@ -15,7 +15,7 @@ one_hour_from_now = now + timedelta(hours=1) -def test_task_run(mocker: MockerFixture) -> None: +def test_task__run__calls_handler_with_args(mocker: MockerFixture) -> None: # Given mock = mocker.Mock() mock.__name__ = "test_task" @@ -59,29 +59,37 @@ def test_task_args__no_data__return_expected() -> None: ({"value": Decimal("10.12345")}, '{"value": 10.12345}'), ), ) -def test_serialize_data_handles_decimal_objects( +def test_serialize_data__decimal_input__returns_expected_json( input: dict[str, Decimal], expected_output: str, ) -> None: - assert Task.serialize_data(input) == expected_output + # Given / When + result = Task.serialize_data(input) + + # Then + assert result == expected_output @pytest.mark.parametrize( "first_run_time, expected", ((one_hour_ago.time(), True), (one_hour_from_now.time(), False)), ) -def test_recurring_task_run_should_execute_first_run_at( +def test_recurring_task__first_run_time_set__should_execute_returns_expected( first_run_time: time, expected: bool, ) -> None: - assert ( - RecurringTask( - first_run_time=first_run_time, - run_every=timedelta(days=1), - ).should_execute - == expected + # Given + task = RecurringTask( + first_run_time=first_run_time, + 
run_every=timedelta(days=1), ) + # When + result = task.should_execute + + # Then + assert result == expected + @freeze_time("2026-01-15 23:05:23") def test_recurring_task_should_execute__first_run_time_after_midnight__returns_false() -> ( diff --git a/tests/unit/task_processor/test_unit_task_processor_monitoring.py b/tests/unit/task_processor/test_unit_task_processor_monitoring.py index eae67a28..7a46cb59 100644 --- a/tests/unit/task_processor/test_unit_task_processor_monitoring.py +++ b/tests/unit/task_processor/test_unit_task_processor_monitoring.py @@ -8,7 +8,7 @@ @pytest.mark.django_db -def test_get_num_waiting_tasks() -> None: +def test_get_num_waiting_tasks__mixed_task_states__returns_only_waiting() -> None: # Given now = timezone.now() diff --git a/tests/unit/task_processor/test_unit_task_processor_processor.py b/tests/unit/task_processor/test_unit_task_processor_processor.py index e6691646..92f2e734 100644 --- a/tests/unit/task_processor/test_unit_task_processor_processor.py +++ b/tests/unit/task_processor/test_unit_task_processor_processor.py @@ -75,7 +75,7 @@ def _sleep_task(seconds: int) -> None: @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_task_runs_task_and_creates_task_run_object_when_success( +def test_run_task__success__creates_task_run_object( current_database: str, dummy_task: TaskHandler[[str, str]], ) -> None: @@ -106,7 +106,7 @@ def test_run_task_runs_task_and_creates_task_run_object_when_success( @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_task_kills_task_after_timeout( +def test_run_task__timeout__kills_task( caplog: pytest.LogCaptureFixture, current_database: str, sleep_task: TaskHandler[[int]], @@ -158,7 +158,7 @@ def test_run_task_kills_task_after_timeout( @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_recurring_task_kills_task_after_timeout( +def test_run_recurring_task__timeout__kills_task( caplog: pytest.LogCaptureFixture, current_database: str, 
settings: SettingsWrapper, @@ -213,7 +213,7 @@ def _dummy_recurring_task() -> None: @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_recurring_tasks_runs_task_and_creates_recurring_task_run_object_when_success( +def test_run_recurring_tasks__success__creates_recurring_task_run_object( current_database: str, settings: SettingsWrapper, ) -> None: @@ -247,7 +247,7 @@ def _dummy_recurring_task() -> None: @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_recurring_tasks_runs_locked_task_after_timeout( +def test_run_recurring_tasks__locked_task_after_timeout__runs_task( current_database: str, settings: SettingsWrapper, ) -> None: @@ -291,7 +291,7 @@ def _dummy_recurring_task() -> None: @pytest.mark.multi_database(transaction=True) @pytest.mark.task_processor_mode -def test_run_recurring_tasks_multiple_runs( +def test_run_recurring_tasks__multiple_runs__executes_expected_times( current_database: str, settings: SettingsWrapper, ) -> None: @@ -343,7 +343,7 @@ def _dummy_recurring_task() -> None: @pytest.mark.multi_database(transaction=True) @pytest.mark.task_processor_mode -def test_run_recurring_tasks_loops_over_all_tasks( +def test_run_recurring_tasks__multiple_tasks__loops_over_all( current_database: str, settings: SettingsWrapper, ) -> None: @@ -381,7 +381,7 @@ def _dummy_recurring_task_3() -> None: @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_recurring_tasks_only_executes_tasks_after_interval_set_by_run_every( +def test_run_recurring_tasks__called_before_interval__executes_only_once( current_database: str, settings: SettingsWrapper, ) -> None: @@ -412,7 +412,7 @@ def _dummy_recurring_task() -> None: @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_recurring_tasks_does_nothing_if_unregistered_task_is_new( +def test_run_recurring_tasks__unregistered_new_task__does_nothing( current_database: str, settings: SettingsWrapper, ) -> None: @@ -444,7 +444,7 @@ def _a_task() 
-> None: @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_recurring_tasks_deletes_the_task_if_unregistered_task_is_old( +def test_run_recurring_tasks__unregistered_old_task__deletes_task( current_database: str, settings: SettingsWrapper, ) -> None: @@ -479,7 +479,7 @@ def _a_task() -> None: @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_task_runs_task_and_creates_task_run_object_when_failure( +def test_run_task__failure__creates_task_run_object( caplog: pytest.LogCaptureFixture, current_database: str, raise_exception_task: TaskHandler[[str]], @@ -533,7 +533,7 @@ def test_run_task_runs_task_and_creates_task_run_object_when_failure( @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_task_runs_failed_task_again( +def test_run_task__failed_task__runs_again( current_database: str, raise_exception_task: TaskHandler[[str]], ) -> None: @@ -557,7 +557,6 @@ def test_run_task_runs_failed_task_again( == 2 ) - # Then for task_run in task_runs: assert task_run.result == TaskResult.FAILURE.value assert task_run.started_at @@ -571,7 +570,7 @@ def test_run_task_runs_failed_task_again( @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_recurring_task_runs_task_and_creates_recurring_task_run_object_when_failure( +def test_run_recurring_task__failure__creates_recurring_task_run_object( current_database: str, ) -> None: # Given @@ -599,7 +598,7 @@ def _raise_exception(organisation_name: str) -> None: @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_task_does_nothing_if_no_tasks(current_database: str) -> None: +def test_run_task__no_tasks__does_nothing(current_database: str) -> None: # Given - no tasks pass @@ -613,7 +612,7 @@ def test_run_task_does_nothing_if_no_tasks(current_database: str) -> None: @pytest.mark.multi_database(transaction=True) @pytest.mark.task_processor_mode -def test_run_task_runs_tasks_in_correct_priority( +def 
test_run_task__multiple_priorities__runs_in_correct_order( current_database: str, dummy_task: TaskHandler[[str, str]], ) -> None: @@ -707,7 +706,7 @@ def backoff_task() -> None: @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_task__backoff__recurring__raises_expected( +def test_run_task__backoff_recurring__raises_expected( current_database: str, ) -> None: # Given @@ -717,7 +716,7 @@ def backoff_task() -> None: initialise() - # When & Then + # When / Then with pytest.raises(AssertionError) as exc_info: run_recurring_tasks(current_database) @@ -729,7 +728,7 @@ def backoff_task() -> None: @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_task__backoff__max_num_failures__noop( +def test_run_task__backoff_max_num_failures__noop( current_database: str, caplog: pytest.LogCaptureFixture, ) -> None: @@ -760,7 +759,7 @@ def backoff_task() -> None: @pytest.mark.multi_database -def test_run_tasks__fails_if_not_in_task_processor_mode( +def test_run_tasks__not_in_task_processor_mode__fails( current_database: str, dummy_task: TaskHandler[[str, str]], ) -> None: @@ -771,14 +770,14 @@ def test_run_tasks__fails_if_not_in_task_processor_mode( args=("arg1", "arg2"), ).save(using=current_database) - # When + # When / Then with pytest.raises(AssertionError): run_tasks(current_database) @pytest.mark.multi_database(transaction=True) @pytest.mark.task_processor_mode -def test_run_tasks__expected_metrics( +def test_run_tasks__tasks_executed__expected_metrics( assert_metric: AssertMetricFixture, current_database: str, dummy_task: TaskHandler[[str, str]], @@ -875,7 +874,7 @@ def _fake_recurring_task() -> None: @pytest.mark.multi_database(transaction=True) @pytest.mark.task_processor_mode -def test_run_tasks_skips_locked_tasks( +def test_run_tasks__locked_task__skips_locked( current_database: str, dummy_task: TaskHandler[[str, str]], sleep_task: TaskHandler[[int]], @@ -919,7 +918,7 @@ def test_run_tasks_skips_locked_tasks( 
@pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_more_than_one_task( +def test_run_tasks__multiple_tasks__runs_all( current_database: str, dummy_task: TaskHandler[[str, str]], ) -> None: @@ -957,7 +956,7 @@ def test_run_more_than_one_task( @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_recurring_tasks_are_unlocked_if_picked_up_but_not_executed( +def test_recurring_tasks__picked_up_but_not_executed__are_unlocked( current_database: str, settings: SettingsWrapper, ) -> None: @@ -993,7 +992,7 @@ def my_task() -> None: @pytest.mark.multi_database @pytest.mark.task_processor_mode -def test_run_task_does_not_block_on_timeout( +def test_run_task__timeout__does_not_block( current_database: str, sleep_task: TaskHandler[[int]], ) -> None: diff --git a/tests/unit/task_processor/test_unit_task_processor_routers.py b/tests/unit/task_processor/test_unit_task_processor_routers.py index 096499e5..b66a888a 100644 --- a/tests/unit/task_processor/test_unit_task_processor_routers.py +++ b/tests/unit/task_processor/test_unit_task_processor_routers.py @@ -10,7 +10,7 @@ @pytest.mark.parametrize("model", apps.get_app_config("task_processor").get_models()) -def test_TaskProcessorRouter_routes_queries_to_task_processor_database( +def test_task_processor_router__query_routing__uses_task_processor_database( mocker: MockerFixture, model: type[Model], ) -> None: @@ -25,7 +25,7 @@ def test_TaskProcessorRouter_routes_queries_to_task_processor_database( assert read_database == write_database == "task_processor" -def test_TaskProcessorRouter_does_not_affect_non_task_processor_models( +def test_task_processor_router__non_task_processor_models__not_affected( mocker: MockerFixture, ) -> None: # Given @@ -53,7 +53,7 @@ def test_TaskProcessorRouter_does_not_affect_non_task_processor_models( for model in apps.get_app_config("task_processor").get_models() ], ) -def test_TaskProcessorRouter_allows_relation_among_task_processor_models( +def 
test_task_processor_router__relation_among_task_processor_models__allowed( mocker: MockerFixture, model1: type[Model], model2: type[Model], @@ -80,7 +80,7 @@ def test_TaskProcessorRouter_allows_relation_among_task_processor_models( ("other", "other", None), ], ) -def test_TaskProcessorRouter_applies_migrations_to_both_databases( +def test_task_processor_router__migration_targeting__applies_to_both_databases( app_label: str, database: str, expected: bool, diff --git a/tests/unit/task_processor/test_unit_task_processor_tasks.py b/tests/unit/task_processor/test_unit_task_processor_tasks.py index 99df3e61..98aebfb7 100644 --- a/tests/unit/task_processor/test_unit_task_processor_tasks.py +++ b/tests/unit/task_processor/test_unit_task_processor_tasks.py @@ -18,7 +18,7 @@ @pytest.mark.django_db -def test_clean_up_old_tasks_does_nothing_when_no_tasks() -> None: +def test_clean_up_old_tasks__no_tasks__does_nothing() -> None: # Given assert Task.objects.count() == 0 @@ -29,7 +29,7 @@ def test_clean_up_old_tasks_does_nothing_when_no_tasks() -> None: assert Task.objects.count() == 0 -def test_clean_up_old_recurring_task_runs_does_nothing_when_no_runs(db: None) -> None: +def test_clean_up_old_recurring_task_runs__no_runs__does_nothing(db: None) -> None: # Given assert RecurringTaskRun.objects.count() == 0 @@ -41,7 +41,7 @@ def test_clean_up_old_recurring_task_runs_does_nothing_when_no_runs(db: None) -> @pytest.mark.django_db -def test_clean_up_old_tasks( +def test_clean_up_old_tasks__tasks_exist__deletes_old_completed( settings: SettingsWrapper, django_assert_num_queries: DjangoAssertNumQueries, ) -> None: @@ -92,7 +92,7 @@ def test_clean_up_old_tasks( @pytest.mark.django_db -def test_clean_up_old_recurring_task_runs( +def test_clean_up_old_recurring_task_runs__runs_exist__deletes_old( settings: SettingsWrapper, django_assert_num_queries: DjangoAssertNumQueries, ) -> None: @@ -128,7 +128,9 @@ def test_clean_up_old_recurring_task_runs( @pytest.mark.django_db -def 
test_clean_up_old_tasks_include_failed_tasks(settings: SettingsWrapper) -> None: +def test_clean_up_old_tasks__include_failed_enabled__deletes_failed( + settings: SettingsWrapper, +) -> None: # Given settings.TASK_DELETE_RETENTION_DAYS = 2 settings.TASK_DELETE_INCLUDE_FAILED_TASKS = True @@ -146,7 +148,7 @@ def test_clean_up_old_tasks_include_failed_tasks(settings: SettingsWrapper) -> N @pytest.mark.django_db -def test_clean_up_old_tasks_does_not_run_if_disabled( +def test_clean_up_old_tasks__disabled__does_not_run( settings: SettingsWrapper, django_assert_num_queries: DjangoAssertNumQueries, ) -> None: @@ -166,7 +168,7 @@ def test_clean_up_old_tasks_does_not_run_if_disabled( @pytest.mark.django_db -def test_clean_up_old_recurring_task_runs_does_not_run_if_disabled( +def test_clean_up_old_recurring_task_runs__disabled__does_not_run( settings: SettingsWrapper, django_assert_num_queries: DjangoAssertNumQueries, ) -> None: diff --git a/tests/unit/task_processor/test_unit_task_processor_threads.py b/tests/unit/task_processor/test_unit_task_processor_threads.py index 301e959f..21759653 100644 --- a/tests/unit/task_processor/test_unit_task_processor_threads.py +++ b/tests/unit/task_processor/test_unit_task_processor_threads.py @@ -22,7 +22,7 @@ ], ) @pytest.mark.django_db -def test_task_runner_is_resilient_to_errors( +def test_task_runner__run_iteration_error__logs_and_closes_connections( mocker: MockerFixture, caplog: pytest.LogCaptureFixture, exception: Exception, diff --git a/uv.lock b/uv.lock index d6e08e7c..a68240aa 100644 --- a/uv.lock +++ b/uv.lock @@ -56,6 +56,35 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/db/3c/33bac158f8ab7f89b2e59426d5fe2e4f63f7ed25df84c036890172b412b5/cfgv-3.5.0-py2.py3-none-any.whl", hash = "sha256:a8dc6b26ad22ff227d2634a65cb388215ce6cc96bbcc5cfde7641ae87e8dacc0", size = 7445, upload-time = "2025-11-19T20:55:50.744Z" }, ] +[[package]] +name = "chardet" +version = "7.1.0" +source = { registry = "https://pypi.org/simple" } 
+sdist = { url = "https://files.pythonhosted.org/packages/0d/84/e72ea5c06e687db591283474b8442ab95665fc6bae7b06043b2a6f0eaf6c/chardet-7.1.0.tar.gz", hash = "sha256:8f47bc4accac17bd9accbb4acc1d563acc024a783806c0a43c3a583f5285690b", size = 505743, upload-time = "2026-03-11T21:39:37.603Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/77/f7084d3b729389b409f1d2db5a967d4cb754f87014e4c6e2bf1eb285c865/chardet-7.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:70adef4a036d39d9b5f6c2702f773118a9985c0dff23b650e1045f20e33c9467", size = 540706, upload-time = "2026-03-11T21:39:01.303Z" }, + { url = "https://files.pythonhosted.org/packages/f5/17/ef3d76d2c2902c66d407bad019bfd65164e40dfbbd4554ee86b8b1a5997f/chardet-7.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e067ee79709ccf9caa5732419df2dbef476bd9b9658ffc929d45cf13fc91ed7", size = 534386, upload-time = "2026-03-11T21:39:03.083Z" }, + { url = "https://files.pythonhosted.org/packages/55/8f/c0d6f2d51c80a3103c2dc876d165478d9c90fedb3f4df1c36bea9cb6358e/chardet-7.1.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eb2a9b4052be006b87a985dbdbb00ab35b4b1b66d2751b0ee12680f8f4e90406", size = 555265, upload-time = "2026-03-11T21:39:05.702Z" }, + { url = "https://files.pythonhosted.org/packages/79/18/9fd9d90901781a2890f69acf4a616a4ea7fb40c79f902afbcf67a1d207a2/chardet-7.1.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49b5edd762751735704d1fec38665ee219da28c66f2a4921d615b12be389735b", size = 557812, upload-time = "2026-03-11T21:39:07.515Z" }, + { url = "https://files.pythonhosted.org/packages/59/19/c90dd953e842392cb16ca385b50e9e88c591333b301e9087c56524c9bd30/chardet-7.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:a02197831a4304eed360559e0ffc58deccc9cdda9f9315c6e7ad978f7d8617d3", size = 526368, upload-time = "2026-03-11T21:39:09.036Z" }, + { url = 
"https://files.pythonhosted.org/packages/f0/b8/415efba024c5d6a3d81609de51598a11a99b9f2ffb916c42b72190da1973/chardet-7.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:43c1e3cba6c41d8958ee4acdab94c151dbe256d7ef8df4ae032dc62a892f294f", size = 542358, upload-time = "2026-03-11T21:39:11.023Z" }, + { url = "https://files.pythonhosted.org/packages/7f/d7/9517de8b58b487d5d05e957efacc8c9af180cb2cc97103b1a1c67120d8c0/chardet-7.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1a3c22672c9502af99e0433b47421d0d72c8803efce2cd4a91a3ae1ab5972243", size = 534566, upload-time = "2026-03-11T21:39:12.462Z" }, + { url = "https://files.pythonhosted.org/packages/c3/33/1286f2a05935a80eaadcc13fc70fb0eaa00805acc756363f0f4aca2ed936/chardet-7.1.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fdfc42dfc44ccd569b84fe6a1fdea1df66dc0c48461bc3899dea5efea8d507f6", size = 556240, upload-time = "2026-03-11T21:39:14.388Z" }, + { url = "https://files.pythonhosted.org/packages/c7/cc/556aeffb4768b258cc461bc1063d3592e411e1744223da8c7fbbf524438e/chardet-7.1.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e096d9c211050fff40e22748e1d09d0cec8348fc13ee6e2e0a1da079345b8a86", size = 559737, upload-time = "2026-03-11T21:39:16.382Z" }, + { url = "https://files.pythonhosted.org/packages/af/4a/147151940ad5ac8bf9f8728a1e46bc63502cd95e93c3a9796f01914188f9/chardet-7.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6492bebaba8882afb3e14c786fb69ed767326b6f514b8e093dcdf6e2a094d33", size = 526574, upload-time = "2026-03-11T21:39:18.311Z" }, + { url = "https://files.pythonhosted.org/packages/b9/79/2c61f33c87d3698f15ca01b0882fbd2fcb95911a783cc615d31adfae025a/chardet-7.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cc8c7520a9736da766f5794bbabb1c6cdfe446676429a5cf691af878631a80bf", size = 542249, upload-time = "2026-03-11T21:39:20.133Z" }, + { url = 
"https://files.pythonhosted.org/packages/eb/0c/2d0c4897e43f1bb1b68dad840551cda224696eda9951524db50721d3bc18/chardet-7.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6f806f325825325e0682226269a2a4859993344cccca14f2463855d4f5a93272", size = 534544, upload-time = "2026-03-11T21:39:21.844Z" }, + { url = "https://files.pythonhosted.org/packages/17/cb/a568eea24adc1a023da266854e9fc9e0eaffa72580d43c45b47f1b62dd2e/chardet-7.1.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bacc8f862998c59e9ee7fe4960538300d1cc3fe2c293b9cc99bbbc7bf3bedf51", size = 555894, upload-time = "2026-03-11T21:39:23.649Z" }, + { url = "https://files.pythonhosted.org/packages/f3/e7/958975ca18c7b5be9b94354c302a7f3d757c02e7c14e88e0c85af1e16c70/chardet-7.1.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c35d17822fc94467b7951adebd897cb01c0e37ac694be18d2cbd2b676d61df4f", size = 559286, upload-time = "2026-03-11T21:39:25.289Z" }, + { url = "https://files.pythonhosted.org/packages/84/0b/1eddfd650e98bb80ec9f74c0bb98fa60cc36f63d9209214cd069b2a27340/chardet-7.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:b951107b254cdc766e52f4b8339dcfa97c7b45ca9f5509075308db2497e7f3af", size = 526406, upload-time = "2026-03-11T21:39:27.103Z" }, + { url = "https://files.pythonhosted.org/packages/49/70/abaaae51228ce2e87f48b90b6c4a9636882d0515bf94adfad494af89bd6f/chardet-7.1.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:619d7ef3187ff1691525a7fdbe8c30f5a519885e1de82f6f57e26a29866bf11b", size = 541725, upload-time = "2026-03-11T21:39:28.664Z" }, + { url = "https://files.pythonhosted.org/packages/72/af/2c494522099248fb44da57c7e4916c8d89f6e21e4dde4978aff15b15bf13/chardet-7.1.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:96e7fe0770cd77361bec21a1dd8524e77aaa567577fa8372368d5fa8dd0ef00b", size = 534654, upload-time = "2026-03-11T21:39:30.537Z" }, + { url = 
"https://files.pythonhosted.org/packages/00/1e/3510e8055b60fa9e14572495975795db856ab4f0982daa41efa64a204afd/chardet-7.1.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bbd4fccf1cf6d92fdd75a1827a478672abb5685e61e92ce863d9380b18cb813f", size = 556568, upload-time = "2026-03-11T21:39:31.922Z" }, + { url = "https://files.pythonhosted.org/packages/32/40/31fbc4c7609e5c94e7eb4917faf3a8a222afa7da2ee11ec003fb3d9f267f/chardet-7.1.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5d86d349f768e6d35f6804013f6643d880ec877b94c453fa40a3fd10d16ddb48", size = 559232, upload-time = "2026-03-11T21:39:33.291Z" }, + { url = "https://files.pythonhosted.org/packages/4a/12/7551df67438d751fd6ce0f74a357cb0ebda998830ea2481bbe40eb83e848/chardet-7.1.0-cp314-cp314-win_amd64.whl", hash = "sha256:38be4c07e016dac37fb6060094f3a720200e3e49dc14f55924a1c230eeffa59f", size = 526135, upload-time = "2026-03-11T21:39:34.87Z" }, + { url = "https://files.pythonhosted.org/packages/87/13/6aa6c9118ce153a806bb0472e27e8f8c24e6925db8a5b9fe99e03e45af15/chardet-7.1.0-py3-none-any.whl", hash = "sha256:7f677725333bf53f84b7f57458f44669a8a5eb2ac4092ac699cdfa9b1af08a5f", size = 411334, upload-time = "2026-03-11T21:39:36.198Z" }, +] + [[package]] name = "charset-normalizer" version = "3.4.4" @@ -230,6 +259,21 @@ toml = [ { name = "tomli", marker = "python_full_version <= '3.11'" }, ] +[[package]] +name = "diff-cover" +version = "10.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "chardet" }, + { name = "jinja2" }, + { name = "pluggy" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/b4/eee71d1e338bc1f9bd3539b46b70e303dac061324b759c9a80fa3c96d90d/diff_cover-10.2.0.tar.gz", hash = "sha256:61bf83025f10510c76ef6a5820680cf61b9b974e8f81de70c57ac926fa63872a", size = 102473, upload-time = "2026-01-09T01:59:07.605Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/b3/2c/61eeb887055a37150db824b6bf830e821a736580769ac2fea4eadb0d613f/diff_cover-10.2.0-py3-none-any.whl", hash = "sha256:59c328595e0b8948617cc5269af9e484c86462e2844bfcafa3fb37f8fca0af87", size = 56748, upload-time = "2026-01-09T01:59:06.028Z" }, +] + [[package]] name = "distlib" version = "0.4.0" @@ -432,6 +476,7 @@ test-tools = [ [package.dev-dependencies] dev = [ + { name = "diff-cover" }, { name = "dj-database-url" }, { name = "django-stubs" }, { name = "djangorestframework-stubs" }, @@ -479,6 +524,7 @@ provides-extras = ["test-tools", "common-core", "task-processor", "flagsmith-sch [package.metadata.requires-dev] dev = [ + { name = "diff-cover", specifier = ">=10.2.0" }, { name = "dj-database-url", specifier = ">=2.3.0,<3.0.0" }, { name = "django-stubs", specifier = ">=5.1.3,<6.0.0" }, { name = "djangorestframework-stubs", specifier = ">=3.15.3,<4.0.0" }, @@ -594,6 +640,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/30/38/b0c081183ad4087c0989fc8ec588d5218cc921908bba4c355fd30a378a19/iregexp_check-0.1.4-cp38-abi3-win_amd64.whl", hash = "sha256:50837bbe9b09abdb7b387d9c7dc2eda470a77e8b29ac315a0e1409b147db14bd", size = 126043, upload-time = "2024-09-24T18:43:53.382Z" }, ] +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + 
[[package]] name = "jsonpath-rfc9535" version = "0.2.0" From 9407e0f084da8465259d229845c285deb049a602 Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 12 Mar 2026 15:19:03 +0000 Subject: [PATCH 3/3] docs: add pre-commit hook usage to README --- README.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/README.md b/README.md index 2fbdec62..bd0dc8df 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,26 @@ This enables the `route` label for Prometheus HTTP metrics. 5. To enable the `/metrics` endpoint, set the `PROMETHEUS_ENABLED` setting to `True`. +### Pre-commit hooks + +This repo provides a [`flagsmith-lint-tests`](.pre-commit-hooks.yaml) hook that enforces test conventions: + +- **FT001**: No module-level `Test*` classes — use function-based tests +- **FT002**: No `import unittest` / `from unittest import TestCase` — use pytest (`unittest.mock` is fine) +- **FT003**: Test names must follow `test_{subject}__{condition}__{expected}` +- **FT004**: Test bodies must contain `# Given`, `# When`, and `# Then` comments + +To use in your repo, add to `.pre-commit-config.yaml`: + +```yaml +- repo: https://github.com/Flagsmith/flagsmith-common + rev: main + hooks: + - id: flagsmith-lint-tests +``` + +Use `# noqa: FT003` (or any code) inline to suppress individual violations. + ### Test tools #### Fixtures