diff --git a/.talismanrc b/.talismanrc index 6cfd212..c195d6e 100644 --- a/.talismanrc +++ b/.talismanrc @@ -25,4 +25,8 @@ fileignoreconfig: - filename: Contentstack.Core.Tests/RegionHandlerTest.cs checksum: 69899138754908e156aa477d775d12fd6b3fefc1a6c2afec22cb409bd6e6446c - filename: CHANGELOG.md - checksum: bc17fd4cf564e524c686a8271033f8e6e7f5f69de8137007d1c72d5f563fe92a \ No newline at end of file + checksum: bc17fd4cf564e524c686a8271033f8e6e7f5f69de8137007d1c72d5f563fe92a +- filename: Scripts/generate_html_report.py + checksum: 343a6c4a3608e4506cd7c9de04f9246da304ff95d256a3215c2f0a2d37d4e4da +- filename: Scripts/generate_enhanced_html_report.py + checksum: 69de208724714fcb474e41e17c5e67a1f875b96e2cc479c71f03c38b7a8c3be9 \ No newline at end of file diff --git a/Scripts/generate_enhanced_html_report.py b/Scripts/generate_enhanced_html_report.py new file mode 100644 index 0000000..8367b64 --- /dev/null +++ b/Scripts/generate_enhanced_html_report.py @@ -0,0 +1,1192 @@ +#!/usr/bin/env python3 +""" +Enhanced HTML Test Report Generator for .NET Test Results +Converts .trx files to beautiful HTML reports with: +- Expected vs Actual values +- HTTP Request details (including cURL) +- Response details +No external dependencies - uses only Python standard library +""" + +import xml.etree.ElementTree as ET +import os +import sys +import re +import json +from datetime import datetime + +class EnhancedTestReportGenerator: + def __init__(self, trx_file_path): + self.trx_file = trx_file_path + self.results = { + 'total': 0, + 'passed': 0, + 'failed': 0, + 'skipped': 0, + 'duration': '0s', + 'tests': [] + } + + def parse_structured_output(self, output_text): + """Parse structured test output (assertions, requests, responses)""" + if not output_text: + return { + 'assertions': [], + 'requests': [], + 'responses': [], + 'context': [], + 'steps': [] + } + + structured_data = { + 'assertions': [], + 'requests': [], + 'responses': [], + 'context': [], + 'steps': [] + } + + # Find all structured outputs + pattern = r'###TEST_OUTPUT_START###(.+?)###TEST_OUTPUT_END###' + matches = re.findall(pattern, output_text, re.DOTALL) + + for match in matches: + try: + data = json.loads(match) + output_type = data.get('type', '').upper() + + if output_type == 'ASSERTION': + structured_data['assertions'].append({ + 'name': data.get('assertionName', 'Unknown'), + 'expected': data.get('expected', 'N/A'), + 'actual': data.get('actual', 'N/A'), + 'passed': data.get('passed', True) + }) + elif output_type == 'HTTP_REQUEST': + structured_data['requests'].append({ + 'method': data.get('method', 'GET'), + 'url': data.get('url', ''), + 'headers': data.get('headers', {}), + 'body': data.get('body', ''), + 'curl': data.get('curlCommand', ''), + 'sdkMethod': data.get('sdkMethod', '') + }) + elif output_type == 'HTTP_RESPONSE': + structured_data['responses'].append({ + 'statusCode': data.get('statusCode', 0), + 'statusText': data.get('statusText', ''), + 'headers': data.get('headers', {}), + 'body': data.get('body', '') + }) + elif output_type == 'CONTEXT': + structured_data['context'].append({ + 'key': data.get('key', ''), + 'value': data.get('value', '') + }) + elif output_type == 'STEP': + structured_data['steps'].append({ + 'name': data.get('stepName', ''), + 'description': data.get('description', '') + }) + except json.JSONDecodeError: + continue + + return structured_data + + def parse_trx(self): + """Parse .trx XML file and extract test results""" + try: + tree = ET.parse(self.trx_file) + root = tree.getroot() + + # Get namespace 
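+            # NOTE: ElementTree resolves the unprefixed TRX namespace when it
+            # is mapped under the '' key (supported on Python 3.8+).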
+            ns = {'': 'http://microsoft.com/schemas/VisualStudio/TeamTest/2010'}
+
+            # Get summary
+            result_summary = root.find('.//ResultSummary', ns)
+            counters = result_summary.find('Counters', ns) if result_summary is not None else None
+
+            if counters is not None:
+                self.results['total'] = int(counters.get('total', 0))
+                self.results['passed'] = int(counters.get('passed', 0))
+                self.results['failed'] = int(counters.get('failed', 0))
+                self.results['skipped'] = int(counters.get('notExecuted', 0))
+
+            # Get test results
+            test_results = root.findall('.//UnitTestResult', ns)
+
+            for test_result in test_results:
+                test_name = test_result.get('testName', 'Unknown')
+                outcome = test_result.get('outcome', 'Unknown')
+                duration = test_result.get('duration', '0')
+
+                # Parse duration (format: HH:MM:SS.mmmmmmm)
+                try:
+                    parts = duration.split(':')
+                    if len(parts) == 3:
+                        hours = int(parts[0])
+                        minutes = int(parts[1])
+                        seconds = float(parts[2])
+                        total_seconds = hours * 3600 + minutes * 60 + seconds
+                        duration_str = f"{total_seconds:.2f}s"
+                    else:
+                        duration_str = duration
+                except (ValueError, IndexError):
+                    duration_str = duration
+
+                # Get error message if failed
+                error_message = None
+                error_stacktrace = None
+                test_output = None
+                structured_output = None
+
+                output_elem = test_result.find('Output', ns)
+                if output_elem is not None:
+                    # Get error info
+                    error_info = output_elem.find('ErrorInfo', ns)
+                    if error_info is not None:
+                        message_elem = error_info.find('Message', ns)
+                        stacktrace_elem = error_info.find('StackTrace', ns)
+                        if message_elem is not None:
+                            error_message = message_elem.text
+                        if stacktrace_elem is not None:
+                            error_stacktrace = stacktrace_elem.text
+
+                    # Get standard output (contains our structured data)
+                    stdout_elem = output_elem.find('StdOut', ns)
+                    if stdout_elem is not None and stdout_elem.text:
+                        test_output = stdout_elem.text
+                        structured_output = self.parse_structured_output(test_output)
+
+                # Get test category
+                test_def_id = test_result.get('testId', '')
+                test_def = root.find(f".//UnitTest[@id='{test_def_id}']", ns)
+                category = 'General'
+                if test_def is not None:
+                    test_method = test_def.find('.//TestMethod', ns)
+                    if test_method is not None:
+                        class_name = test_method.get('className', '')
+                        # Extract category from namespace
+                        if 'Integration' in class_name:
+                            parts = class_name.split('.')
+                            if len(parts) >= 5:
+                                category = parts[4]  # e.g., "QueryTests", "EntryTests"
+
+                self.results['tests'].append({
+                    'name': test_name,
+                    'outcome': outcome,
+                    'duration': duration_str,
+                    'category': category,
+                    'error_message': error_message,
+                    'error_stacktrace': error_stacktrace,
+                    'structured_output': structured_output
+                })
+
+            return True
+
+        except Exception as e:
+            print(f"Error parsing TRX file: {e}")
+            import traceback
+            traceback.print_exc()
+            return False
+
+    def generate_test_details_html(self, test):
+        """Generate detailed HTML for a single test including assertions, requests, responses"""
+        html = ""
+
+        if not test.get('structured_output'):
+            return html
+
+        structured = test['structured_output']
+
+        # Assertions (Expected vs Actual)
+        if structured.get('assertions'):
+            html += """
+            <div class="detail-section">
+                <div class="detail-title">โœ“ Assertions</div>
+                <div class="assertion-list">
+"""
+            for assertion in structured['assertions']:
+                status_icon = "โœ…" if assertion.get('passed', True) else "โŒ"
+                status_class = "passed" if assertion.get('passed', True) else "failed"
+
+                html += f"""
+                    <div class="assertion-item {status_class}">
+                        <div class="assertion-name">
+                            <span class="assertion-icon">{status_icon}</span>
+                            <span>{self.escape_html(assertion['name'])}</span>
+                        </div>
+                        <div class="assertion-values">
+                            <div class="assertion-value">
+                                <span class="value-label">Expected:</span>
+                                <pre>{self.escape_html(str(assertion['expected']))}</pre>
+                            </div>
+                            <div class="assertion-value">
+                                <span class="value-label">Actual:</span>
+                                <pre>{self.escape_html(str(assertion['actual']))}</pre>
+                            </div>
+                        </div>
+                    </div>
+"""
+            html += """
+                </div>
+            </div>
+"""
+
+        # HTTP Requests (with cURL)
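+        # Each entry mirrors an HTTP_REQUEST payload emitted between the
+        # ###TEST_OUTPUT_START###/###TEST_OUTPUT_END### markers, e.g.
+        # (illustrative values only):
+        #   {"type": "HTTP_REQUEST", "method": "GET",
+        #    "url": "https://cdn.contentstack.io/v3/content_types",
+        #    "curlCommand": "curl -X GET 'https://...'"}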
+        if structured.get('requests'):
+            html += """
+            <div class="detail-section">
+                <div class="detail-title">๐ŸŒ HTTP Requests</div>
+"""
+            for i, request in enumerate(structured['requests']):
+                sdk_method_html = ''
+                if request.get('sdkMethod'):
+                    sdk_method_html = f'<div class="sdk-method">๐Ÿ“ฆ SDK Method: <code>{self.escape_html(request["sdkMethod"])}</code></div>'
+
+                html += f"""
+                <div class="request-card">
+                    {sdk_method_html}
+                    <div class="request-line">
+                        <span class="http-method">{self.escape_html(request['method'])}</span>
+                        <span class="request-url">{self.escape_html(request['url'])}</span>
+                    </div>
+"""
+
+                # Request Headers
+                if request.get('headers'):
+                    html += """
+                    <details class="collapsible">
+                        <summary>๐Ÿ“‹ Request Headers</summary>
+                        <pre>"""
+                    for key, value in request['headers'].items():
+                        html += f"{self.escape_html(key)}: {self.escape_html(value)}\n"
+                    html += """</pre>
+                    </details>
+"""
+
+                # Request Body
+                if request.get('body'):
+                    html += f"""
+                    <details class="collapsible">
+                        <summary>๐Ÿ“ฆ Request Body</summary>
+                        <pre>{self.escape_html(request['body'])}</pre>
+                    </details>
+"""
+
+                # cURL Command
+                if request.get('curl'):
+                    html += f"""
+                    <details class="collapsible">
+                        <summary>๐Ÿ”ง cURL Command</summary>
+                        <pre class="curl-command">{self.escape_html(request['curl'])}</pre>
+                        <button class="copy-btn" onclick="copyCurl(this)">Copy</button>
+                    </details>
+"""
+
+                html += """
+                </div>
+"""
+            html += """
+            </div>
+"""
+
+        # HTTP Responses
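+        # Response entries carry the HTTP_RESPONSE payload fields parsed above,
+        # e.g. (illustrative): {"type": "HTTP_RESPONSE", "statusCode": 200,
+        # "statusText": "OK", "headers": {...}, "body": "{...}"}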
+        if structured.get('responses'):
+            html += """
+            <div class="detail-section">
+                <div class="detail-title">๐Ÿ“ฅ HTTP Responses</div>
+"""
+            for response in structured['responses']:
+                status_class = "success" if 200 <= response.get('statusCode', 0) < 300 else "error"
+                html += f"""
+                <div class="response-card">
+                    <div class="response-status {status_class}">
+                        {response.get('statusCode', 'N/A')} {self.escape_html(response.get('statusText', ''))}
+                    </div>
+"""
+
+                # Response Headers
+                if response.get('headers'):
+                    html += """
+                    <details class="collapsible">
+                        <summary>๐Ÿ“‹ Response Headers</summary>
+                        <pre>"""
+                    for key, value in response['headers'].items():
+                        html += f"{self.escape_html(key)}: {self.escape_html(value)}\n"
+                    html += """</pre>
+                    </details>
+"""
+
+                # Response Body (truncated to keep the report light)
+                if response.get('body'):
+                    html += f"""
+                    <details class="collapsible">
+                        <summary>๐Ÿ“ฆ Response Body</summary>
+                        <pre>{self.escape_html(response['body'][:3000])}</pre>
+                    </details>
+"""
+
+                html += """
+                </div>
+"""
+            html += """
+            </div>
+"""
+
+        # Context Information (collapsible, compact)
+        if structured.get('context'):
+            html += """
+            <details class="collapsible context-block">
+                <summary>โ„น๏ธ Test Context</summary>
+                <table class="context-table">
+"""
+            for ctx in structured['context']:
+                html += f"""
+                    <tr>
+                        <td class="context-key">{self.escape_html(ctx['key'])}</td>
+                        <td>{self.escape_html(str(ctx['value']))}</td>
+                    </tr>
+"""
+            html += """
+                </table>
+            </details>
+"""
+
+        return html
+
+    def escape_html(self, text):
+        """Escape HTML special characters"""
+        if text is None:
+            return ""
+        text = str(text)
+        return (text
+                .replace('&', '&amp;')
+                .replace('<', '&lt;')
+                .replace('>', '&gt;')
+                .replace('"', '&quot;')
+                .replace("'", '&#39;'))
+
+    def generate_html(self, output_file='test-report-enhanced.html'):
+        """Generate enhanced HTML report"""
+
+        # Calculate pass rate
+        pass_rate = (self.results['passed'] / self.results['total'] * 100) if self.results['total'] > 0 else 0
+
+        # Group tests by category
+        tests_by_category = {}
+        for test in self.results['tests']:
+            category = test['category']
+            if category not in tests_by_category:
+                tests_by_category[category] = []
+            tests_by_category[category].append(test)
+
+        # Sort categories
+        sorted_categories = sorted(tests_by_category.keys())
+
+        html = f"""<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>.NET CDA SDK - Enhanced Test Report</title>
+    <style>
+        body {{ font-family: -apple-system, "Segoe UI", sans-serif; margin: 0; background: #f5f6fa; color: #2d3436; }}
+        .container {{ max-width: 1100px; margin: 0 auto; padding: 24px; }}
+        .header {{ background: #2d3436; color: #fff; border-radius: 8px; padding: 24px; }}
+        .summary {{ display: flex; gap: 16px; margin: 24px 0; }}
+        .summary-card {{ flex: 1; background: #fff; border-radius: 8px; padding: 16px; text-align: center; }}
+        .summary-number {{ font-size: 2em; font-weight: 700; }}
+        .passed {{ color: #27ae60; }} .failed {{ color: #e74c3c; }} .skipped {{ color: #f39c12; }}
+        .pass-rate-bar {{ background: #dfe6e9; border-radius: 8px; overflow: hidden; }}
+        .pass-rate-fill {{ background: #27ae60; color: #fff; padding: 6px 12px; white-space: nowrap; }}
+        .category-section {{ background: #fff; border-radius: 8px; margin: 16px 0; overflow: hidden; }}
+        .category-header {{ display: flex; justify-content: space-between; padding: 12px 16px; cursor: pointer; background: #f1f2f6; }}
+        table.test-table {{ width: 100%; border-collapse: collapse; }}
+        .test-table th, .test-table td {{ padding: 8px 16px; border-top: 1px solid #ececec; text-align: left; }}
+        .status-passed {{ color: #27ae60; font-weight: 600; }}
+        .status-failed {{ color: #e74c3c; font-weight: 600; }}
+        .status-skipped {{ color: #f39c12; font-weight: 600; }}
+        .test-name {{ cursor: pointer; }}
+        .test-details {{ display: none; padding: 8px 0; }}
+        .detail-section {{ margin: 12px 0; }}
+        .detail-title {{ font-weight: 700; margin-bottom: 8px; }}
+        .assertion-item {{ border-left: 4px solid #27ae60; padding: 8px; margin: 8px 0; background: #fafafa; }}
+        .assertion-item.failed {{ border-left-color: #e74c3c; }}
+        .request-card, .response-card {{ border: 1px solid #ececec; border-radius: 6px; padding: 12px; margin: 8px 0; }}
+        .http-method {{ background: #0984e3; color: #fff; border-radius: 4px; padding: 2px 8px; font-weight: 700; }}
+        .response-status.success {{ color: #27ae60; }} .response-status.error {{ color: #e74c3c; }}
+        .collapsible summary {{ cursor: pointer; margin: 6px 0; }}
+        pre {{ background: #2d3436; color: #dfe6e9; padding: 10px; border-radius: 6px; overflow-x: auto; }}
+        .error-section pre {{ background: #fdecea; color: #c0392b; }}
+        .footer {{ text-align: center; color: #636e72; padding: 16px; }}
+    </style>
+</head>
+<body>
+    <div class="container">
+        <div class="header">
+            <h1>.NET CDA SDK Test Report</h1>
+            <p>Enhanced Integration Test Results - {datetime.now().strftime('%B %d, %Y at %I:%M %p')}</p>
+        </div>
+
+        <div class="summary">
+            <div class="summary-card">
+                <div class="summary-number">{self.results['total']}</div>
+                <div class="summary-label">Total Tests</div>
+            </div>
+            <div class="summary-card">
+                <div class="summary-number passed">{self.results['passed']}</div>
+                <div class="summary-label">Passed</div>
+            </div>
+            <div class="summary-card">
+                <div class="summary-number failed">{self.results['failed']}</div>
+                <div class="summary-label">Failed</div>
+            </div>
+            <div class="summary-card">
+                <div class="summary-number skipped">{self.results['skipped']}</div>
+                <div class="summary-label">Skipped</div>
+            </div>
+        </div>
+
+        <div class="pass-rate">
+            <h2>Pass Rate</h2>
+            <div class="pass-rate-bar">
+                <div class="pass-rate-fill" style="width: {pass_rate:.1f}%;">{pass_rate:.1f}%</div>
+            </div>
+        </div>
+
+        <div class="results">
+            <h2>Test Results by Category</h2>
+"""
+
+        # Generate category sections
+        for category in sorted_categories:
+            tests = tests_by_category[category]
+            passed = sum(1 for t in tests if t['outcome'] == 'Passed')
+            failed = sum(1 for t in tests if t['outcome'] == 'Failed')
+            skipped = sum(1 for t in tests if t['outcome'] == 'NotExecuted')
+
+            html += f"""
+            <div class="category-section">
+                <div class="category-header" onclick="toggleCategory('cat-{category}')">
+                    <div class="category-name">
+                        <span class="category-arrow" id="arrow-cat-{category}">โ–ถ</span>
+                        <span>{category}</span>
+                    </div>
+                    <div class="category-stats">
+                        <span class="passed">{passed} passed</span> ยท
+                        <span class="failed">{failed} failed</span> ยท
+                        <span class="skipped">{skipped} skipped</span> ยท
+                        <span>{len(tests)} total</span>
+                    </div>
+                </div>
+
+                <div class="category-body" id="cat-{category}" style="display: none;">
+                    <table class="test-table">
+                        <thead>
+                            <tr>
+                                <th>Test Name</th>
+                                <th>Status</th>
+                                <th>Duration</th>
+                            </tr>
+                        </thead>
+                        <tbody>
+"""
+
+            for test_idx, test in enumerate(tests):
+                status_class = 'status-passed' if test['outcome'] == 'Passed' else 'status-failed' if test['outcome'] == 'Failed' else 'status-skipped'
+                test_id = f"test-{category}-{test_idx}"
+
+                html += f"""
+                            <tr>
+                                <td>
+                                    <div class="test-name" onclick="toggleDetails('{test_id}')">{test['name']}</div>
+"""
+
+                # Add enhanced test details (open/close the details div only when
+                # there is something to show, so the markup stays balanced)
+                details_html = self.generate_test_details_html(test)
+                if details_html or test.get('error_message') or test.get('error_stacktrace'):
+                    html += f"""
+                                    <div class="test-details" id="{test_id}">
+"""
+
+                    # Add error details if failed
+                    if test['outcome'] == 'Failed' and (test['error_message'] or test['error_stacktrace']):
+                        html += """
+                                        <div class="error-section">
+"""
+                        if test['error_message']:
+                            html += f"""
+                                            <div class="error-label">Error:</div>
+                                            <pre>{self.escape_html(test['error_message'])}</pre>
+"""
+                        if test['error_stacktrace']:
+                            html += f"""
+                                            <details class="collapsible">
+                                                <summary>Stack Trace</summary>
+                                                <pre>{self.escape_html(test['error_stacktrace'])}</pre>
+                                            </details>
+"""
+                        html += """
+                                        </div>
+"""
+
+                    # Add enhanced details
+                    html += details_html
+
+                    html += """
+                                    </div>
+"""
+
+                html += f"""
+                                </td>
+                                <td class="{status_class}">{test['outcome']}</td>
+                                <td>{test['duration']}</td>
+                            </tr>
+"""
+
+            html += """
+                        </tbody>
+                    </table>
+                </div>
+            </div>
+"""
+
+        html += f"""
+        </div>
+
+        <div class="footer">
+            Generated by generate_enhanced_html_report.py ยท {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
+        </div>
+    </div>
+"""
+
+        html += """
+    <script>
+        function toggleCategory(id) {
+            var body = document.getElementById(id);
+            var arrow = document.getElementById('arrow-' + id);
+            var open = body.style.display !== 'none';
+            body.style.display = open ? 'none' : 'block';
+            if (arrow) arrow.textContent = open ? 'โ–ถ' : 'โ–ผ';
+        }
+        function toggleDetails(id) {
+            var el = document.getElementById(id);
+            if (el) {
+                el.style.display = (el.style.display === 'block') ? 'none' : 'block';
+            }
+        }
+        function copyCurl(btn) {
+            var pre = btn.parentElement.querySelector('pre');
+            if (pre && navigator.clipboard) {
+                navigator.clipboard.writeText(pre.textContent);
+                btn.textContent = 'Copied!';
+                setTimeout(function () { btn.textContent = 'Copy'; }, 1500);
+            }
+        }
+    </script>
+</body>
+</html>
+"""
+
+        # Write HTML file
+        with open(output_file, 'w', encoding='utf-8') as f:
+            f.write(html)
+
+        print(f"โœ… Enhanced HTML report generated: {output_file}")
+        return output_file
+
+
+def main():
+    """Main entry point"""
+    print("="*80)
+    print("๐Ÿงช .NET Enhanced Test Report Generator")
+    print("="*80)
+    print("Features: Expected/Actual, HTTP Requests, cURL, Responses")
+    print("="*80)
+
+    # Check for .trx file
+    trx_file = None
+
+    if len(sys.argv) > 1:
+        trx_file = sys.argv[1]
+    else:
+        # Look for .trx files in TestResults directory
+        test_results_dir = './TestResults'
+        if os.path.exists(test_results_dir):
+            trx_files = [f for f in os.listdir(test_results_dir) if f.endswith('.trx')]
+            if trx_files:
+                trx_file = os.path.join(test_results_dir, trx_files[0])
+
+    if not trx_file or not os.path.exists(trx_file):
+        print("\nโŒ Error: No .trx file found!")
+        print("\nUsage:")
+        print("  python3 generate_enhanced_html_report.py <path-to-trx-file>")
+        print("\nOr run tests first to generate .trx file:")
+        print("  dotnet test --logger 'trx;LogFileName=test-results.trx' --results-directory ./TestResults")
+        sys.exit(1)
+
+    print(f"\n๐Ÿ“„ Input file: {trx_file}")
+
+    # Generate report
+    generator = EnhancedTestReportGenerator(trx_file)
+
+    print("\nโณ Parsing test results...")
+    if not generator.parse_trx():
+        print("โŒ Failed to parse TRX file")
+        sys.exit(1)
+
+    print(f"โœ… Found {generator.results['total']} tests")
+    print(f"   โ€ข Passed: {generator.results['passed']}")
+    print(f"   โ€ข Failed: {generator.results['failed']}")
+    print(f"   โ€ข Skipped: {generator.results['skipped']}")
+
+    print("\nโณ Generating enhanced HTML report...")
+    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
+    output_file = generator.generate_html(f'test-report-enhanced_{timestamp}.html')
+
+    print("\n" + "="*80)
+    print("โœ… SUCCESS! Enhanced HTML report generated")
+    print("="*80)
+    print(f"\n๐Ÿ“Š Open the report: {os.path.abspath(output_file)}")
+    print("\nIn your browser:")
+    print(f"   file://{os.path.abspath(output_file)}")
+
+    # Summary (guard against division by zero when no tests ran)
+    total = generator.results['total']
+    passed_pct = (generator.results['passed'] / total * 100) if total else 0.0
+    print("\n๐Ÿ“‹ Summary:")
+    print(f"   Total Tests: {total}")
+    print(f"   Passed: {generator.results['passed']} ({passed_pct:.1f}%)")
+    print(f"   Failed: {generator.results['failed']}")
+    print(f"   Skipped: {generator.results['skipped']}")
+
+    print("\n๐ŸŽ‰ Done!")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/Scripts/generate_html_report.py b/Scripts/generate_html_report.py
new file mode 100644
index 0000000..ca84a43
--- /dev/null
+++ b/Scripts/generate_html_report.py
@@ -0,0 +1,660 @@
+#!/usr/bin/env python3
+"""
+HTML Test Report Generator for .NET Test Results
+Converts .trx files to beautiful HTML reports
+No external dependencies - uses only Python standard library
+"""
+
+import xml.etree.ElementTree as ET
+import os
+import sys
+from datetime import datetime
+
+class TestReportGenerator:
+    def __init__(self, trx_file_path):
+        self.trx_file = trx_file_path
+        self.results = {
+            'total': 0,
+            'passed': 0,
+            'failed': 0,
+            'skipped': 0,
+            'duration': '0s',
+            'tests': []
+        }
+
+    def parse_trx(self):
+        """Parse .trx XML file and extract test results"""
+        try:
+            tree = ET.parse(self.trx_file)
+            root = tree.getroot()
+
+            # Get namespace
+            ns = {'': 'http://microsoft.com/schemas/VisualStudio/TeamTest/2010'}
+
+            # Get summary
+            result_summary = root.find('.//ResultSummary', ns)
+            counters = result_summary.find('Counters', ns) if result_summary is not None else None
+
+            if counters is not None:
+                self.results['total'] = int(counters.get('total', 0))
+                self.results['passed'] = int(counters.get('passed', 0))
+                self.results['failed'] = int(counters.get('failed', 0))
+                self.results['skipped'] = int(counters.get('notExecuted', 0))
+
+            # Get test results
+            test_results = root.findall('.//UnitTestResult', ns)
+
+            for test_result in test_results:
+                test_name = test_result.get('testName', 'Unknown')
+                outcome = test_result.get('outcome', 'Unknown')
+                duration = test_result.get('duration', '0')
+
+                # Parse duration (format: HH:MM:SS.mmmmmmm)
+                try:
+                    parts = duration.split(':')
+                    if len(parts) == 3:
+                        hours = int(parts[0])
+                        minutes = int(parts[1])
+                        seconds = float(parts[2])
+                        total_seconds = hours * 3600 + minutes * 60 + seconds
+                        duration_str = f"{total_seconds:.2f}s"
+                    else:
+                        duration_str = duration
+                except (ValueError, IndexError):
+                    duration_str = duration
+
+                # Get error message if failed
+                error_message = None
+                error_stacktrace = None
+                output_elem = test_result.find('Output', ns)
+                if output_elem is not None:
+                    error_info = output_elem.find('ErrorInfo', ns)
+                    if error_info is not None:
+                        message_elem = error_info.find('Message', ns)
+                        stacktrace_elem = error_info.find('StackTrace', ns)
+                        if message_elem is not None:
+                            error_message = message_elem.text
+                        if stacktrace_elem is not None:
+                            error_stacktrace = stacktrace_elem.text
+
+                # Get test category
+                test_def_id = test_result.get('testId', '')
+                test_def = root.find(f".//UnitTest[@id='{test_def_id}']", ns)
+                category = 'General'
+                if test_def is not None:
+                    test_method = test_def.find('.//TestMethod', ns)
+                    if test_method is not None:
+                        class_name = test_method.get('className', '')
+                        # Extract category from namespace
+                        if 'Integration' in class_name:
+                            parts = class_name.split('.')
+                            if len(parts) >= 5:
+                                category = parts[4]  # e.g., "QueryTests", "EntryTests"
+
+                self.results['tests'].append({
+                    'name': test_name,
+                    'outcome': outcome,
+                    'duration': duration_str,
+                    'category': category,
+                    'error_message': error_message,
+                    'error_stacktrace': error_stacktrace
+                })
+
+            return True
+
+        except Exception as e:
+            print(f"Error parsing TRX file: {e}")
+            return False
+
+    def generate_html(self, output_file='test-report.html'):
+        """Generate beautiful HTML report"""
+
+        # Calculate pass rate
+        pass_rate = (self.results['passed'] / self.results['total'] * 100) if self.results['total'] > 0 else 0
+
+        # Group tests by category
+        tests_by_category = {}
+        for test in self.results['tests']:
+            category = test['category']
+            if category not in tests_by_category:
+                tests_by_category[category] = []
+            tests_by_category[category].append(test)
+
+        # Sort categories
+        sorted_categories = sorted(tests_by_category.keys())
+
+        html = f"""<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>.NET CDA SDK - Test Report</title>
+    <style>
+        body {{ font-family: -apple-system, "Segoe UI", sans-serif; margin: 0; background: #f5f6fa; color: #2d3436; }}
+        .container {{ max-width: 1100px; margin: 0 auto; padding: 24px; }}
+        .header {{ background: #2d3436; color: #fff; border-radius: 8px; padding: 24px; }}
+        .summary {{ display: flex; gap: 16px; margin: 24px 0; }}
+        .summary-card {{ flex: 1; background: #fff; border-radius: 8px; padding: 16px; text-align: center; }}
+        .summary-number {{ font-size: 2em; font-weight: 700; }}
+        .passed {{ color: #27ae60; }} .failed {{ color: #e74c3c; }} .skipped {{ color: #f39c12; }}
+        .pass-rate-bar {{ background: #dfe6e9; border-radius: 8px; overflow: hidden; }}
+        .pass-rate-fill {{ background: #27ae60; color: #fff; padding: 6px 12px; white-space: nowrap; }}
+        .category-section {{ background: #fff; border-radius: 8px; margin: 16px 0; overflow: hidden; }}
+        .category-header {{ display: flex; justify-content: space-between; padding: 12px 16px; cursor: pointer; background: #f1f2f6; }}
+        table.test-table {{ width: 100%; border-collapse: collapse; }}
+        .test-table th, .test-table td {{ padding: 8px 16px; border-top: 1px solid #ececec; text-align: left; }}
+        .status-passed {{ color: #27ae60; font-weight: 600; }}
+        .status-failed {{ color: #e74c3c; font-weight: 600; }}
+        .status-skipped {{ color: #f39c12; font-weight: 600; }}
+        .error-section pre {{ background: #fdecea; color: #c0392b; padding: 10px; border-radius: 6px; overflow-x: auto; }}
+        .footer {{ text-align: center; color: #636e72; padding: 16px; }}
+    </style>
+</head>
+<body>
+    <div class="container">
+        <div class="header">
+            <h1>.NET CDA SDK Test Report</h1>
+            <p>Integration Test Results - {datetime.now().strftime('%B %d, %Y at %I:%M %p')}</p>
+        </div>
+
+        <div class="summary">
+            <div class="summary-card">
+                <div class="summary-number">{self.results['total']}</div>
+                <div class="summary-label">Total Tests</div>
+            </div>
+            <div class="summary-card">
+                <div class="summary-number passed">{self.results['passed']}</div>
+                <div class="summary-label">Passed</div>
+            </div>
+            <div class="summary-card">
+                <div class="summary-number failed">{self.results['failed']}</div>
+                <div class="summary-label">Failed</div>
+            </div>
+            <div class="summary-card">
+                <div class="summary-number skipped">{self.results['skipped']}</div>
+                <div class="summary-label">Skipped</div>
+            </div>
+        </div>
+
+        <div class="pass-rate">
+            <h2>Pass Rate</h2>
+            <div class="pass-rate-bar">
+                <div class="pass-rate-fill" style="width: {pass_rate:.1f}%;">{pass_rate:.1f}%</div>
+            </div>
+        </div>
+
+        <div class="results">
+            <h2>Test Results by Category</h2>
+"""
+
+        # Generate category sections
+        for category in sorted_categories:
+            tests = tests_by_category[category]
+            passed = sum(1 for t in tests if t['outcome'] == 'Passed')
+            failed = sum(1 for t in tests if t['outcome'] == 'Failed')
+            skipped = sum(1 for t in tests if t['outcome'] == 'NotExecuted')
+
+            html += f"""
+            <div class="category-section">
+                <div class="category-header" onclick="toggleCategory('cat-{category}')">
+                    <div class="category-name">
+                        <span class="category-arrow" id="arrow-cat-{category}">โ–ถ</span>
+                        <span>{category}</span>
+                    </div>
+                    <div class="category-stats">
+                        <span class="passed">{passed} passed</span> ยท
+                        <span class="failed">{failed} failed</span> ยท
+                        <span class="skipped">{skipped} skipped</span> ยท
+                        <span>{len(tests)} total</span>
+                    </div>
+                </div>
+
+                <div class="category-body" id="cat-{category}" style="display: none;">
+                    <table class="test-table">
+                        <thead>
+                            <tr>
+                                <th>Test Name</th>
+                                <th>Status</th>
+                                <th>Duration</th>
+                            </tr>
+                        </thead>
+                        <tbody>
+"""
+
+            for test in tests:
+                status_class = 'status-passed' if test['outcome'] == 'Passed' else 'status-failed' if test['outcome'] == 'Failed' else 'status-skipped'
+
+                html += f"""
+                            <tr>
+                                <td>
+                                    <div class="test-name">{test['name']}</div>
+"""
+
+                # Add error details if failed
+                if test['outcome'] == 'Failed' and (test['error_message'] or test['error_stacktrace']):
+                    html += """
+                                    <div class="error-section">
+"""
+                    if test['error_message']:
+                        html += f"""
+                                        <div class="error-label">Error:</div>
+                                        <pre>{self.escape_html(test['error_message'])}</pre>
+"""
+                    if test['error_stacktrace']:
+                        html += f"""
+                                        <details>
+                                            <summary>Stack Trace</summary>
+                                            <pre>{self.escape_html(test['error_stacktrace'])}</pre>
+                                        </details>
+"""
+                    html += """
+                                    </div>
+"""
+
+                html += f"""
+                                </td>
+                                <td class="{status_class}">{test['outcome']}</td>
+                                <td>{test['duration']}</td>
+                            </tr>
+"""
+
+            html += """
+                        </tbody>
+                    </table>
+                </div>
+            </div>
+"""
+
+        html += f"""
+        </div>
+
+        <div class="footer">
+            Generated by generate_html_report.py ยท {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
+        </div>
+    </div>
+"""
+
+        html += """
+    <script>
+        function toggleCategory(id) {
+            var body = document.getElementById(id);
+            var arrow = document.getElementById('arrow-' + id);
+            var open = body.style.display !== 'none';
+            body.style.display = open ? 'none' : 'block';
+            if (arrow) arrow.textContent = open ? 'โ–ถ' : 'โ–ผ';
+        }
+    </script>
+</body>
+</html>
+"""
+
+        # Write HTML file
+        with open(output_file, 'w', encoding='utf-8') as f:
+            f.write(html)
+
+        print(f"โœ… HTML report generated: {output_file}")
+        return output_file
+
+    def escape_html(self, text):
+        """Escape HTML special characters"""
+        if text is None:
+            return ""
+        return (str(text)
+                .replace('&', '&amp;')
+                .replace('<', '&lt;')
+                .replace('>', '&gt;')
+                .replace('"', '&quot;')
+                .replace("'", '&#39;'))
+
+
+def main():
+    """Main entry point"""
+    print("="*80)
+    print("๐Ÿงช .NET Test Report Generator")
+    print("="*80)
+
+    # Check for .trx file
+    trx_file = None
+
+    if len(sys.argv) > 1:
+        trx_file = sys.argv[1]
+    else:
+        # Look for .trx files in TestResults directory
+        test_results_dir = './TestResults'
+        if os.path.exists(test_results_dir):
+            trx_files = [f for f in os.listdir(test_results_dir) if f.endswith('.trx')]
+            if trx_files:
+                trx_file = os.path.join(test_results_dir, trx_files[0])
+
+    if not trx_file or not os.path.exists(trx_file):
+        print("\nโŒ Error: No .trx file found!")
+        print("\nUsage:")
+        print("  python3 generate_html_report.py <path-to-trx-file>")
+        print("\nOr run tests first to generate .trx file:")
+        print("  dotnet test --logger 'trx;LogFileName=test-results.trx' --results-directory ./TestResults")
+        sys.exit(1)
+
+    print(f"\n๐Ÿ“„ Input file: {trx_file}")
+
+    # Generate report
+    generator = TestReportGenerator(trx_file)
+
+    print("\nโณ Parsing test results...")
+    if not generator.parse_trx():
+        print("โŒ Failed to parse TRX file")
+        sys.exit(1)
+
+    print(f"โœ… Found {generator.results['total']} tests")
+    print(f"   โ€ข Passed: {generator.results['passed']}")
+    print(f"   โ€ข Failed: {generator.results['failed']}")
+    print(f"   โ€ข Skipped: {generator.results['skipped']}")
+
+    print("\nโณ Generating HTML report...")
+    output_file = generator.generate_html('test-report.html')
+
+    print("\n" + "="*80)
+    print("โœ… SUCCESS! HTML report generated")
+    print("="*80)
+    print(f"\n๐Ÿ“Š Open the report: {os.path.abspath(output_file)}")
+    print("\nIn your browser:")
+    print(f"   file://{os.path.abspath(output_file)}")
+
+    # Summary (guard against division by zero when no tests ran)
+    total = generator.results['total']
+    passed_pct = (generator.results['passed'] / total * 100) if total else 0.0
+    print("\n๐Ÿ“‹ Summary:")
+    print(f"   Total Tests: {total}")
+    print(f"   Passed: {generator.results['passed']} ({passed_pct:.1f}%)")
+    print(f"   Failed: {generator.results['failed']}")
+    print(f"   Skipped: {generator.results['skipped']}")
+
+    print("\n๐ŸŽ‰ Done!")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/Scripts/run-tests-with-report.sh b/Scripts/run-tests-with-report.sh
new file mode 100755
index 0000000..b194cbc
--- /dev/null
+++ b/Scripts/run-tests-with-report.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+# Run tests and generate HTML report
+
+set -e
+
+# Resolve project root (works whether run from root or Scripts folder)
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
+
+# Timestamp for unique filenames
+TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
+
+echo "=============================================="
+echo "  Running Tests & Generating HTML Report"
+echo "=============================================="
+echo ""
+echo "Project: $PROJECT_ROOT"
+echo "Run ID:  $TIMESTAMP"
+echo ""
+
+# Step 1: Run tests with .trx logger (timestamped)
+TRX_FILE="test-results_${TIMESTAMP}.trx"
+echo "Step 1: Running tests..."
+dotnet test "$PROJECT_ROOT/Contentstack.Core.Tests/Contentstack.Core.Tests.csproj" \ + --filter "FullyQualifiedName~Integration" \ + --logger "trx;LogFileName=$TRX_FILE" \ + --results-directory "$PROJECT_ROOT/Contentstack.Core.Tests/TestResults" \ + --verbosity quiet || true + +echo "" +echo "Tests completed!" +echo "" + +# Step 2: Generate enhanced HTML report (timestamped) +REPORT_FILE="test-report-enhanced_${TIMESTAMP}.html" +echo "Step 2: Generating enhanced HTML report..." +cd "$PROJECT_ROOT" +python3 "$PROJECT_ROOT/Scripts/generate_enhanced_html_report.py" \ + "$PROJECT_ROOT/Contentstack.Core.Tests/TestResults/$TRX_FILE" + +# Move timestamped report to project root if generated elsewhere +if [ -f "$PROJECT_ROOT/Contentstack.Core.Tests/$REPORT_FILE" ]; then + mv "$PROJECT_ROOT/Contentstack.Core.Tests/$REPORT_FILE" "$PROJECT_ROOT/$REPORT_FILE" 2>/dev/null || true +fi + +# Find the latest generated report (in case python script created it in cwd) +LATEST_REPORT=$(ls -t "$PROJECT_ROOT"/test-report-enhanced_*.html 2>/dev/null | head -1) + +echo "" +echo "==============================================" +echo " All Done!" +echo "==============================================" +echo "" +if [ -n "$LATEST_REPORT" ]; then + echo "Report: $LATEST_REPORT" + echo "" + echo "To open: open $LATEST_REPORT" +else + echo "Warning: Report file not found. Check output above for errors." +fi +echo ""
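+
+# Usage (from the repo root):
+#   ./Scripts/run-tests-with-report.sh
+# To regenerate a report from an existing .trx without rerunning tests:
+#   python3 Scripts/generate_enhanced_html_report.py <path-to-trx-file>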