Skip to content

Commit 4f39de2

Browse files
Adding logfile output to consumer_test, instead of outputting to CI (#448)
Changed output of consumers to files to unblock the pipeline. Also added a summary of the consumers to the GitHub summary page. Full logs are printed and downloadable, if wanted, via the GitHub Artifacts.
1 parent 21640ab commit 4f39de2

2 files changed

Lines changed: 77 additions & 42 deletions

File tree

.github/workflows/consumer_test.yml

Lines changed: 21 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -44,8 +44,27 @@ jobs:
4444
- name: Run Consumer tests
4545

4646
run: |
47-
set -o pipefail
48-
.venv_docs/bin/python -m pytest -s -v src/tests/ --repo="$CONSUMER" --junitxml="reports/${{ matrix.consumer }}.xml" | tee "reports/${{ matrix.consumer }}.log"
47+
pytest_rc=0
48+
.venv_docs/bin/python -m pytest -vv src/tests/ \
49+
--repo="$CONSUMER" \
50+
--junitxml="reports/${{ matrix.consumer }}.xml" \
51+
|| pytest_rc=$?
52+
53+
54+
if [ -f "consumer_test.log" ]; then
55+
src_log="consumer_test.log"
56+
else
57+
echo "consumer_test.log not found; expected at ./consumer_test.log"
58+
exit ${pytest_rc:-1}
59+
fi
60+
61+
dest_log="reports/${{ matrix.consumer }}.log"
62+
mv "$src_log" "$dest_log"
63+
64+
tail -n 15 "$dest_log" >> "$GITHUB_STEP_SUMMARY"
65+
66+
cat "$dest_log"
67+
exit $pytest_rc
4968
env:
5069
FORCE_COLOR: "1"
5170
TERM: xterm-256color

src/tests/test_consumer.py

Lines changed: 56 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
import pytest
2323
from _pytest.config import Config
2424
from pytest import TempPathFactory
25-
from rich import print
25+
from rich import box, print
2626
from rich.console import Console
2727
from rich.table import Table
2828

@@ -47,10 +47,14 @@
4747

4848
# Max width of the printout
4949
# Trial and error has shown that 80 is the best value for GH CI output
50-
len_max = 80
50+
len_max = 120
5151
CACHE_DIR = Path.home() / ".cache" / "docs_as_code_consumer_tests"
52-
53-
console = Console(force_terminal=True if os.getenv("CI") else None, width=80)
52+
log_file_name = "consumer_test.log"
53+
# Need to ignore the ruff error here. Due to how the script is written,
54+
# cannot use a context manager to open the log file, even though it would be preferable
55+
# In a future re-write this should be considered.
56+
log_fp = open(log_file_name, "a", encoding="utf-8") # noqa: SIM115
57+
console = Console(file=log_fp, force_terminal=False, width=120, color_system=None)
5458

5559

5660
@dataclass
@@ -125,22 +129,24 @@ def sphinx_base_dir(tmp_path_factory: TempPathFactory, pytestconfig: Config) ->
125129
if disable_cache:
126130
# Use persistent cache directory for local development
127131
temp_dir = tmp_path_factory.mktemp("testing_dir")
128-
print(f"[blue]Using temporary directory: {temp_dir}[/blue]")
132+
console.print(f"[blue]Using temporary directory: {temp_dir}[/blue]")
129133
return temp_dir
130134

131135
CACHE_DIR.mkdir(parents=True, exist_ok=True)
132-
print(f"[green]Using persistent cache directory: {CACHE_DIR}[/green]")
136+
console.print(f"[green]Using persistent cache directory: {CACHE_DIR}[/green]")
133137
return CACHE_DIR
134138

135139

136-
def cleanup():
140+
def cleanup(cmd: str):
137141
"""
138142
Cleanup before tests are run
139143
"""
140144
for p in Path(".").glob("*/ubproject.toml"):
141145
p.unlink()
142146
shutil.rmtree("_build", ignore_errors=True)
143-
cmd = "bazel clean --async"
147+
if cmd == "bazel run //:ide_support":
148+
shutil.rmtree(".venv_docs", ignore_errors=True)
149+
cmd = "bazel clean --async"
144150
subprocess.run(cmd.split(), text=True)
145151

146152

@@ -174,13 +180,15 @@ def filter_repos(repo_filter: str | None) -> list[ConsumerRepo]:
174180
# Warn about any repos that weren't found
175181
if requested_repos:
176182
available_names = [repo.name for repo in REPOS_TO_TEST]
177-
print(f"[yellow]Warning: Unknown repositories: {requested_repos}[/yellow]")
178-
print(f"[yellow]Available repositories: {available_names}[/yellow]")
183+
console.print(
184+
f"[yellow]Warning: Unknown repositories: {requested_repos}[/yellow]"
185+
)
186+
console.print(f"[yellow]Available repositories: {available_names}[/yellow]")
179187

180188
# If no valid repos were found but filter was provided, return all repos
181189
# This prevents accidentally running zero tests due to typos
182190
if not filtered_repos and repo_filter:
183-
print(
191+
console.print(
184192
"[red]No valid repositories found in filter, "
185193
"running all repositories instead[/red]"
186194
)
@@ -254,9 +262,9 @@ def parse_bazel_output(BR: BuildOutput, pytestconfig: Config) -> BuildOutput:
254262
warning_dict: dict[str, list[str]] = defaultdict(list)
255263

256264
if pytestconfig.get_verbosity() >= 2 and os.getenv("CI"):
257-
print("[DEBUG] Raw warnings in CI:")
265+
console.print("[DEBUG] Raw warnings in CI:")
258266
for i, warning in enumerate(split_warnings):
259-
print(f"[DEBUG] Warning {i}: {repr(warning)}")
267+
console.print(f"[DEBUG] Warning {i}: {repr(warning)}")
260268

261269
for raw_warning in split_warnings:
262270
# In the CLI we seem to have some ansi codes in the warnings.
@@ -279,23 +287,23 @@ def parse_bazel_output(BR: BuildOutput, pytestconfig: Config) -> BuildOutput:
279287
def print_overview_logs(BR: BuildOutput):
280288
warning_loggers = list(BR.warnings.keys())
281289
len_left_test_result = len_max - len("TEST RESULTS")
282-
print(
290+
console.print(
283291
f"[blue]{'=' * int(len_left_test_result / 2)}"
284292
f"TEST RESULTS"
285293
f"{'=' * int(len_left_test_result / 2)}[/blue]"
286294
)
287-
print(f"[navy_blue]{'=' * len_max}[/navy_blue]")
295+
console.print(f"[navy_blue]{'=' * len_max}[/navy_blue]")
288296
warning_total_loggers_msg = f"Warning Loggers Total: {len(warning_loggers)}"
289297
len_left_loggers = len_max - len(warning_total_loggers_msg)
290-
print(
298+
console.print(
291299
f"[blue]{'=' * int(len_left_loggers / 2)}"
292300
f"{warning_total_loggers_msg}"
293301
f"{'=' * int(len_left_loggers / 2)}[/blue]"
294302
)
295303
warning_loggers = list(BR.warnings.keys())
296304
warning_total_msg = "Logger Warnings Accumulated"
297305
len_left_loggers_total = len_max - len(warning_total_msg)
298-
print(
306+
console.print(
299307
f"[blue]{'=' * int(len_left_loggers_total / 2)}"
300308
f"{warning_total_msg}"
301309
f"{'=' * int(len_left_loggers_total / 2)}[/blue]"
@@ -306,20 +314,20 @@ def print_overview_logs(BR: BuildOutput):
306314
color = "orange1" if logger == "[NO SPECIFIC LOGGER]" else "red"
307315
warning_logger_msg = f"{logger} has {len(BR.warnings[logger])} warnings"
308316
len_left_logger = len_max - len(warning_logger_msg)
309-
print(
317+
console.print(
310318
f"[{color}]{'=' * int(len_left_logger / 2)}"
311319
f"{warning_logger_msg}"
312320
f"{'=' * int(len_left_logger / 2)}[/{color}]"
313321
)
314-
print(f"[blue]{'=' * len_max}[/blue]")
322+
console.print(f"[blue]{'=' * len_max}[/blue]")
315323

316324

317325
def verbose_printout(BR: BuildOutput):
318326
"""Prints warnings for each logger when '-v' or higher is specified."""
319327
warning_loggers = list(BR.warnings.keys())
320328
for logger in warning_loggers:
321329
len_left_logger = len_max - len(logger)
322-
print(
330+
console.print(
323331
f"[cornflower_blue]{'=' * int(len_left_logger / 2)}"
324332
f"{logger}"
325333
f"{'=' * int(len_left_logger / 2)}[/cornflower_blue]"
@@ -329,36 +337,36 @@ def verbose_printout(BR: BuildOutput):
329337
color = "red"
330338
if logger == "[NO SPECIFIC LOGGER]":
331339
color = "orange1"
332-
print(
340+
console.print(
333341
f"[{color}]{'=' * int(len_left_warnings / 2)}"
334342
f"{f'Warnings Found: {len(warnings)}'}"
335343
f"{'=' * int(len_left_warnings / 2)}[/{color}]"
336344
)
337-
print("\n".join(f"[{color}]{x}[/{color}]" for x in warnings))
345+
console.print("\n".join(f"[{color}]{x}[/{color}]" for x in warnings))
338346

339347

340348
def print_running_cmd(repo: str, cmd: str, local_or_git: str):
341349
"""Prints a 'Title Card' for the current command"""
342350
len_left_cmd = len_max - len(cmd)
343351
len_left_repo = len_max - len(repo)
344352
len_left_local = len_max - len(local_or_git)
345-
print(f"\n[cyan]{'=' * len_max}[/cyan]")
346-
print(
353+
console.print(f"\n[cyan]{'=' * len_max}[/cyan]")
354+
console.print(
347355
f"[cornflower_blue]{'=' * int(len_left_repo / 2)}"
348356
f"{repo}"
349357
f"{'=' * int(len_left_repo / 2)}[/cornflower_blue]"
350358
)
351-
print(
359+
console.print(
352360
f"[cornflower_blue]{'=' * int(len_left_local / 2)}"
353361
f"{local_or_git}"
354362
f"{'=' * int(len_left_local / 2)}[/cornflower_blue]"
355363
)
356-
print(
364+
console.print(
357365
f"[cornflower_blue]{'=' * int(len_left_cmd / 2)}"
358366
f"{cmd}"
359367
f"{'=' * int(len_left_cmd / 2)}[/cornflower_blue]"
360368
)
361-
print(f"[cyan]{'=' * len_max}[/cyan]")
369+
console.print(f"[cyan]{'=' * len_max}[/cyan]")
362370

363371

364372
def analyze_build_success(BR: BuildOutput) -> tuple[bool, str]:
@@ -401,8 +409,8 @@ def print_final_result(BR: BuildOutput, repo_name: str, cmd: str, pytestconfig:
401409
verbose_printout(BR)
402410
if pytestconfig.get_verbosity() >= 2:
403411
# Verbosity Level 2 (-vv)
404-
print("==== STDOUT ====:\n\n", BR.stdout)
405-
print("==== STDERR ====:\n\n", BR.stderr)
412+
console.print("==== STDOUT ====:\n\n", BR.stdout)
413+
console.print("==== STDERR ====:\n\n", BR.stderr)
406414

407415
is_success, reason = analyze_build_success(BR)
408416

@@ -412,20 +420,20 @@ def print_final_result(BR: BuildOutput, repo_name: str, cmd: str, pytestconfig:
412420
# Printing a small 'report' for each cmd.
413421
result_msg = f"{repo_name} - {cmd}: {status}"
414422
len_left = len_max - len(result_msg)
415-
print(
423+
console.print(
416424
f"[{color}]{'=' * int(len_left / 2)}"
417425
f"{result_msg}"
418426
f"{'=' * int(len_left / 2)}[/{color}]"
419427
)
420-
print(f"[{color}]Reason: {reason}[/{color}]")
421-
print(f"[{color}]{'=' * len_max}[/{color}]")
428+
console.print(f"[{color}]Reason: {reason}[/{color}]")
429+
console.print(f"[{color}]{'=' * len_max}[/{color}]")
422430

423431
return is_success, reason
424432

425433

426434
def print_result_table(results: list[Result]):
427435
"""Printing an 'overview' table to show all results."""
428-
table = Table(title="Docs-As-Code Consumer Test Result")
436+
table = Table(title="Docs-As-Code Consumer Test Result", box=box.MARKDOWN)
429437
table.add_column("Repository")
430438
table.add_column("CMD")
431439
table.add_column("LOCAL OR GIT")
@@ -441,12 +449,12 @@ def print_result_table(results: list[Result]):
441449
result.reason,
442450
style=style,
443451
)
444-
print(table)
452+
console.print(table)
445453

446454

447455
def stream_subprocess_output(cmd: str, repo_name: str):
448456
"""Stream subprocess output in real-time for maximum verbosity"""
449-
print(f"[green]Streaming output for: {cmd}[/green]")
457+
console.print(f"[green]Streaming output for: {cmd}[/green]")
450458

451459
process = subprocess.Popen(
452460
cmd.split(),
@@ -461,7 +469,7 @@ def stream_subprocess_output(cmd: str, repo_name: str):
461469
if process.stdout is not None:
462470
for line in iter(process.stdout.readline, ""):
463471
if line:
464-
print(line.rstrip()) # Print immediately
472+
console.print(line.rstrip()) # Print immediately
465473
output_lines.append(line)
466474

467475
process.stdout.close()
@@ -483,7 +491,7 @@ def run_cmd(
483491
) -> tuple[list[Result], bool]:
484492
verbosity: int = pytestconfig.get_verbosity()
485493

486-
cleanup()
494+
cleanup(cmd)
487495

488496
if verbosity >= 3:
489497
# Level 3 (-vvv): Stream output in real-time
@@ -584,7 +592,7 @@ def prepare_repo_overrides(
584592
repo_path = Path(repo_name)
585593

586594
if not use_cache and repo_path.exists():
587-
print(f"[green]Using cached repository: {repo_name}[/green]")
595+
console.print(f"[green]Using cached repository: {repo_name}[/green]")
588596
# Update the existing repo
589597
os.chdir(repo_name)
590598
subprocess.run(["git", "fetch", "origin"], check=True, capture_output=True)
@@ -616,6 +624,7 @@ def prepare_repo_overrides(
616624

617625
# Updated version of your test loop
618626
def test_and_clone_repos_updated(sphinx_base_dir: Path, pytestconfig: Config):
627+
global log_file_name
619628
# Get command line options from pytest config
620629

621630
repo_tests: str | None = cast(str | None, pytestconfig.getoption("--repo"))
@@ -625,10 +634,10 @@ def test_and_clone_repos_updated(sphinx_base_dir: Path, pytestconfig: Config):
625634

626635
# Exit early if we don't find repos to test.
627636
if not repos_to_test:
628-
print("[red]No repositories to test after filtering![/red]")
637+
console.print("[red]No repositories to test after filtering![/red]")
629638
return
630639

631-
print(
640+
console.print(
632641
f"[green]Testing {len(repos_to_test)} repositories: "
633642
f"{[r.name for r in repos_to_test]}[/green]"
634643
)
@@ -642,6 +651,12 @@ def test_and_clone_repos_updated(sphinx_base_dir: Path, pytestconfig: Config):
642651
results: list[Result] = []
643652

644653
for repo in repos_to_test:
654+
len_left_repo = len_max - len(repo.name)
655+
console.print(f"{'=' * len_max}")
656+
console.print(f"{'=' * len_max}")
657+
console.print(
658+
f"{'=' * int(len_left_repo / 2)}{repo.name}{'=' * int(len_left_repo / 2)}"
659+
)
645660
# ┌─────────────────────────────────────────┐
646661
# │ Preparing the Repository for testing │
647662
# └─────────────────────────────────────────┘
@@ -692,3 +707,4 @@ def test_and_clone_repos_updated(sphinx_base_dir: Path, pytestconfig: Config):
692707
pytest.fail(
693708
reason="Consumer Tests failed, see table for which commands specifically. "
694709
)
710+
log_fp.close()

0 commit comments

Comments
 (0)