-def generate_html_report(outpath, profile_name, template_path, faildata):
- with open("{}/report.html".format(outpath), "w") as of:
- body_begin = """
- <style type="text/css">
- h1, li {{
- font-family:Arial, sans-serif;
- }}
- .tg {{border-collapse:collapse;border-spacing:0;}}
- .tg td{{font-family:Arial, sans-serif;font-size:8px;padding:10px 5px;
- border-style:solid;border-width:1px;overflow:hidden;word-break:normal;
- border-color:black;}}
- .tg th{{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;
- padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;
- word-break:normal;border-color:black;}}
- .tg .tg-rwj1{{font-size:10px;font-family:Arial, Helvetica,
- sans-serif !important;;border-color:inherit;vertical-align:top}}</style>
- <h1>Validation Failures</h1>
- <ul>
- <li><b>Profile Selected: </b> <tt>{profile}</tt></li>
- <li><b>Report Generated At:</b> <tt>{timestamp}</tt></li>
- <li><b>Directory Validated:</b> <tt>{template_dir}</tt></li>
- <li><b>Checksum:</b> <tt>{checksum}</tt></li>
- <li><b>Total Errors:</b> {num_failures}</li>
- </ul>
- """.format(
- profile=profile_name,
- timestamp=make_timestamp(),
- checksum=hash_directory(template_path),
- template_dir=template_path,
- num_failures=len(faildata),
+def make_iso_timestamp():
+ """
+    Creates a timestamp in ISO 8601 format, in UTC. Used for JSON output.
+ """
+ now = datetime.datetime.utcnow()
+    # datetime.replace returns a new object, so capture it to keep the UTC tzinfo
+    now = now.replace(tzinfo=datetime.timezone.utc)
+ return now.isoformat()
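+
+# Hedged example of the shape produced by make_iso_timestamp() (the value
+# itself depends on the clock): "2019-05-06T17:23:45.123456+00:00"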
+
+
+def aggregate_results(outcomes, r_id=None):
+ """
+    Determines the aggregate result for the outcomes provided. Assumes the
+    results have been filtered and collected for analysis.
+
+ :param outcomes: set of outcomes from the TestResults
+ :param r_id: Optional requirement ID if known
+ :return: 'ERROR', 'PASS', 'FAIL', or 'SKIP'
+ (see aggregate_requirement_adherence for more detail)
+ """
+ if not outcomes:
+ return "PASS"
+ elif "ERROR" in outcomes:
+ return "ERROR"
+ elif "FAIL" in outcomes:
+ return "FAIL"
+ elif "PASS" in outcomes:
+ return "PASS"
+ elif {"SKIP"} == outcomes:
+ return "SKIP"
+ else:
+        # pytest.warns() only asserts that code under test emits a warning; to
+        # issue one here, use warnings.warn (assumes "import warnings" at module level).
+        warnings.warn(
+            "Unexpected error aggregating outcomes ({}) for requirement {}".format(
+                outcomes, r_id
+            )
+        )
+ return "ERROR"
+
+
+def aggregate_run_results(collection_failures, test_results):
+ """
+    Determines the overall status of the run based on all failures and results.
+
+ * 'ERROR' - At least one collection failure occurred during the run.
+ * 'FAIL' - Template failed at least one test
+ * 'PASS' - All tests executed properly and no failures were detected
+
+    :param collection_failures: failures occurring during test setup
+    :param test_results: list of all test execution results
+ :return: one of 'ERROR', 'FAIL', or 'PASS'
+ """
+ if collection_failures:
+ return "ERROR"
+ elif any(r.is_failed for r in test_results):
+ return "FAIL"
+ else:
+ return "PASS"
+
+
+def relative_paths(base_dir, paths):
+ return [os.path.relpath(p, base_dir) for p in paths]
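+
+# Hedged example on a POSIX-style layout:
+#     relative_paths("/work/templates", ["/work/templates/base.yaml"])
+#     -> ["base.yaml"]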
+
+
+# noinspection PyTypeChecker
+def generate_json(outpath, template_path, categories):
+ """
+ Creates a JSON summary of the entire test run.
+ """
+ reqs = load_current_requirements()
+ data = {
+ "version": "dublin",
+ "template_directory": os.path.splitdrive(template_path)[1].replace(
+ os.path.sep, "/"
+ ),
+ "timestamp": make_iso_timestamp(),
+ "checksum": hash_directory(template_path),
+ "categories": categories,
+ "outcome": aggregate_run_results(COLLECTION_FAILURES, ALL_RESULTS),
+ "tests": [],
+ "requirements": [],
+ }
+
+ results = data["tests"]
+ for result in COLLECTION_FAILURES:
+ results.append(
+ {
+ "files": [],
+ "test_module": result["module"],
+ "test_case": result["test"],
+ "result": "ERROR",
+ "error": result["error"],
+ "requirements": result["requirements"],
+ }
+ )
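+    # Tests that actually executed are appended next; collection failures above
+    # were already recorded with an "ERROR" result.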
+ for result in ALL_RESULTS:
+ results.append(
+ {
+ "files": relative_paths(template_path, result.files),
+ "test_module": result.test_module,
+ "test_case": result.test_case,
+ "result": result.outcome,
+ "error": result.error_message if result.is_failed else "",
+ "requirements": result.requirements_metadata(reqs),
+ }
+ )
+
+ # Build a mapping of requirement ID to the results
+ r_id_results = defaultdict(lambda: {"errors": set(), "outcomes": set()})
+ for test_result in results:
+ test_reqs = test_result["requirements"]
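+        # Tests without requirement metadata are bucketed under the empty-string
+        # key so they can be reported separately as "Unmapped" below.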
+ r_ids = (
+ [r["id"] if isinstance(r, dict) else r for r in test_reqs]
+ if test_reqs
+ else ("",)
+ )
+ for r_id in r_ids:
+ item = r_id_results[r_id]
+ item["outcomes"].add(test_result["result"])
+ if test_result["error"]:
+ item["errors"].add(test_result["error"])
+
+ requirements = data["requirements"]
+ for r_id, r_data in reqs.items():
+ requirements.append(
+ {
+ "id": r_id,
+ "text": r_data["description"],
+ "keyword": r_data["keyword"],
+ "result": aggregate_results(r_id_results[r_id]["outcomes"]),
+ "errors": list(r_id_results[r_id]["errors"]),
+ }
+ )
+
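+    # Any outcomes collected under the empty-string key come from tests that are
+    # not mapped to a requirement; surface them as a pseudo-requirement entry.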
+ if r_id_results[""]["errors"] or r_id_results[""]["outcomes"]:
+ requirements.append(
+ {
+ "id": "Unmapped",
+ "text": "Tests not mapped to requirements (see tests)",
+ "result": aggregate_results(r_id_results[""]["outcomes"]),
+ "errors": list(r_id_results[""]["errors"]),
+ }