+ :param collection_failures: failures occurring during test setup
+ :param test_results: list of all test execution results
+ :return: one of 'ERROR', 'FAIL', or 'PASS'
+ """
+ if collection_failures:
+ return "ERROR"
+ elif any(r.is_failed for r in test_results):
+ return "FAIL"
+ else:
+ return "PASS"
+
+
def error(failure_or_result):
    """
    Extract the error message from a collection failure or a test result.

    :param failure_or_result: Entry from COLLECTION_FAILURE or a TestResult
    :return: Error message as string
    """
    is_test_result = isinstance(failure_or_result, TestResult)
    return (
        failure_or_result.error_message
        if is_test_result
        else failure_or_result["error"]
    )
+
def req_ids(failure_or_result):
    """
    Extract the requirement IDs from a collection failure or a test result.

    :param failure_or_result: Entry from COLLECTION_FAILURE or a TestResult
    :return: set of Requirement IDs; empty set when no requirements are mapped
    """
    if isinstance(failure_or_result, TestResult):
        ids = failure_or_result.requirement_ids
    else:
        ids = failure_or_result["requirements"]
    return set(ids)
+
def collect_errors(r_id, collection_failures, test_result):
    """
    Build a list of error messages from the collection failures and
    test results.

    When ``r_id`` is given, only errors from failures or tests associated
    with that requirement ID are collected. When ``r_id`` is None, only
    errors from failures and results that are not mapped to any
    requirement are collected.
    """

    def wanted(entry):
        # Same test the caller cares about: match the requirement ID when
        # one was supplied, otherwise match the unmapped entries.
        if r_id:
            return r_id in req_ids(entry)
        return not req_ids(entry)

    collected = []
    for entry in chain(collection_failures, test_result):
        if wanted(entry):
            message = error(entry)
            if message:  # drop empty/None messages
                collected.append(message)
    return collected
+
+
def relative_paths(base_dir, paths):
    """Return each entry of ``paths`` rewritten relative to ``base_dir``."""
    result = []
    for path in paths:
        result.append(os.path.relpath(path, base_dir))
    return result
+
+
def generate_json(outpath, template_path, categories):
    """
    Create a JSON summary of the entire test run and write it to
    ``outpath``/report.json.
    """
    reqs = load_current_requirements()

    # Drop any drive prefix and normalize separators so the directory is
    # stable across platforms.
    normalized_dir = os.path.splitdrive(template_path)[1].replace(os.path.sep, "/")
    data = {
        "version": "dublin",
        "template_directory": normalized_dir,
        "timestamp": make_iso_timestamp(),
        "checksum": hash_directory(template_path),
        "categories": categories,
        "outcome": aggregate_run_results(COLLECTION_FAILURES, ALL_RESULTS),
        "tests": [],
        "requirements": [],
    }

    tests = data["tests"]
    # Collection failures never ran, so they carry no files and are
    # always reported as ERROR.
    for failure in COLLECTION_FAILURES:
        tests.append(
            {
                "files": [],
                "test_module": failure["module"],
                "test_case": failure["test"],
                "result": "ERROR",
                "error": failure["error"],
                "requirements": failure["requirements"],
            }
        )
    for test_result in ALL_RESULTS:
        tests.append(
            {
                "files": relative_paths(template_path, test_result.files),
                "test_module": test_result.test_module,
                "test_case": test_result.test_case,
                "result": test_result.outcome,
                "error": test_result.error_message if test_result.is_failed else "",
                "requirements": test_result.requirements_metadata(reqs),
            }
        )

    requirements = data["requirements"]
    for r_id, r_data in reqs.items():
        adherence = aggregate_requirement_adherence(
            r_id, COLLECTION_FAILURES, ALL_RESULTS
        )
        # Skip requirements that no failure or test touched.
        if adherence:
            requirements.append(
                {
                    "id": r_id,
                    "text": r_data["description"],
                    "keyword": r_data["keyword"],
                    "result": adherence,
                    "errors": collect_errors(r_id, COLLECTION_FAILURES, ALL_RESULTS),
                }
            )

    # If there are tests that aren't mapped to a requirement, then we'll
    # map them to a special entry so the results are coherent.
    unmapped_outcomes = {r.outcome for r in ALL_RESULTS if not r.requirement_ids}
    has_errors = any(not f["requirements"] for f in COLLECTION_FAILURES)
    if unmapped_outcomes or has_errors:
        requirements.append(
            {
                "id": "Unmapped",
                "text": "Tests not mapped to requirements (see tests)",
                "result": aggregate_results(has_errors, unmapped_outcomes),
                "errors": collect_errors(None, COLLECTION_FAILURES, ALL_RESULTS),
            }
        )

    report_path = os.path.join(outpath, "report.json")
    write_json(data, report_path)
+
+
def generate_html_report(outpath, categories, template_path, failures):
    """
    Render an HTML report of the run and write it to ``outpath``/report.html.
    """
    reqs = load_current_requirements()
    resolutions = load_resolutions_file()

    fail_data = [
        {
            "file_links": make_href(failure.files),
            "test_id": failure.test_module,
            "error_message": failure.error_message,
            "raw_output": failure.raw_output,
            # Requirement text is reStructuredText; render just the HTML body.
            "requirements": docutils.core.publish_parts(
                writer_name="html", source=failure.requirement_text(reqs)
            )["body"],
            "resolution_steps": failure.resolution_steps(resolutions),
        }
        for failure in failures
    ]

    # The Jinja2 template ships alongside this module.
    pkg_dir = os.path.split(__file__)[0]
    j2_template_path = os.path.join(pkg_dir, "report.html.jinja2")
    with open(j2_template_path, "r") as template_file:
        report_template = jinja2.Template(template_file.read())

    contents = report_template.render(
        version=version.VERSION,
        num_failures=len(failures) + len(COLLECTION_FAILURES),
        categories=categories,
        template_dir=make_href(template_path),
        checksum=hash_directory(template_path),
        timestamp=make_timestamp(),
        failures=fail_data,
        collection_failures=COLLECTION_FAILURES,
    )
    with open(os.path.join(outpath, "report.html"), "w") as report_file:
        report_file.write(contents)