[VVP-132] Add new JSON output report
diff --git a/ice_validator/tests/conftest.py b/ice_validator/tests/conftest.py
index 95b1289..5184fb6 100644
@@ -2,7 +2,7 @@
 # ============LICENSE_START=======================================================
 # org.onap.vvp/validation-scripts
 # ===================================================================
-# Copyright © 2018 AT&T Intellectual Property. All rights reserved.
+# Copyright © 2019 AT&T Intellectual Property. All rights reserved.
 # ===================================================================
 #
 # Unless otherwise specified, all software contained herein is licensed
 #
 # ============LICENSE_END============================================
 
-import collections
 import csv
 import datetime
 import hashlib
 import io
 import json
 import os
+import re
 import sys
 import time
+from collections import defaultdict
+from itertools import chain
+
 import requests
 import traceback
 import warnings
@@ -52,15 +55,20 @@ import docutils.core
 import jinja2
 import pytest
 from more_itertools import partition
-from six import string_types
 import xlsxwriter
+from six import string_types
 
 __path__ = [os.path.dirname(os.path.abspath(__file__))]
 
-resolution_steps_file = "resolution_steps.json"
-heat_requirements_file = "heat_requirements.json"
+DEFAULT_OUTPUT_DIR = "{}/../output".format(__path__[0])
+
+RESOLUTION_STEPS_FILE = "resolution_steps.json"
+HEAT_REQUIREMENTS_FILE = "heat_requirements.json"
+
+# noinspection PyPep8
+NEEDS_JSON_URL = "https://onap.readthedocs.io/en/latest/_downloads/789ac64d223325488fb3f120f959d985/needs.json"
 
-report_columns = [
+REPORT_COLUMNS = [
     ("Input File", "file"),
     ("Test", "test_file"),
     ("Requirements", "req_description"),
@@ -68,109 +76,285 @@ report_columns = [
     ("Error Message", "message"),
     ("Raw Test Output", "raw_output"),
 ]
-report = collections.OrderedDict(report_columns)
 
-COLLECTION_FAILURES = []
 COLLECTION_FAILURE_WARNING = """WARNING: The following unexpected errors occurred
 while preparing to validate the input files. Some validations may not have been
 executed. Please refer these issues to the VNF Validation Tool team.
 """
 
+COLLECTION_FAILURES = []
 
-def extract_error_msg(rep):
-    try:
-        msg = str(rep.longrepr.reprcrash)
-    except AttributeError:
-        msg = str(rep)
+# Captures the results of every test run
+ALL_RESULTS = []
 
-    if "AssertionError:" in msg:
-        return msg.split("AssertionError:")[1]
-    else:
-        return msg
 
+def get_output_dir(config):
+    output_dir = config.option.output_dir or DEFAULT_OUTPUT_DIR
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir, exist_ok=True)
+    return output_dir
 
-@pytest.hookimpl(tryfirst=True, hookwrapper=True)
-def pytest_runtest_makereport(item, call):
 
-    outcome = yield
-    rep = outcome.get_result()
-
-    output_dir = "{}/../output".format(__path__[0])
-    if rep.outcome == "failed":
-        if not os.path.exists(output_dir):
-            os.mkdir(output_dir)
-        if os.path.exists("{}/failures".format(output_dir)):
-            with open("{}/failures".format(output_dir), "r") as o:
-                jdata = json.loads(o.read())
+def extract_error_msg(rep):
+    """
+    If a custom error message was provided, extract it; otherwise just
+    show the pytest assert message.
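+
+    Illustrative example (hypothetical pytest crash message)::
+
+        AssertionError: Missing required property: flavor
+        assert 'flavor' in {'image': 'ubuntu'}
+
+    would return the custom message found between ``AssertionError:`` and
+    the assert expansion (here, " Missing required property: flavor").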
+    """
+    if rep.outcome != "failed":
+        return ""
+    try:
+        full_msg = str(rep.longrepr.reprcrash.message)
+        match = re.match(
+            "AssertionError:(.*)^assert.*", full_msg, re.MULTILINE | re.DOTALL
+        )
+        if match:  # custom message was provided
+            # Extract everything between AssertionError and the start
+            # of the assert statement expansion in the pytest report
+            msg = match.group(1)
         else:
-            jdata = {}
+            msg = str(rep.longrepr.reprcrash)
+            if "AssertionError:" in msg:
+                msg = msg.split("AssertionError:")[1]
+    except AttributeError:
+        msg = str(rep)
 
-        if hasattr(item.function, "requirement_ids"):
-            requirement_ids = item.function.requirement_ids
-        else:
-            requirement_ids = ""
+    return msg
 
-        if "environment_pair" in item.fixturenames:
-            resolved_pair = "{} environment pair".format(
-                item.funcargs["environment_pair"]["name"]
-            )
-        elif "heat_volume_pair" in item.fixturenames:
-            resolved_pair = "{} volume pair".format(
-                item.funcargs["heat_volume_pair"]["name"]
-            )
-        elif "heat_templates" in item.fixturenames:
-            resolved_pair = item.funcargs["heat_templates"]
-        elif "yaml_files" in item.fixturenames:
-            resolved_pair = item.funcargs["yaml_files"]
-        else:
-            resolved_pair = rep.nodeid.split("[")[1][:-1]
 
-        markers = set(m.name for m in item.iter_markers())
-        base_test = "base" in markers
+class TestResult:
+    """
+    Wraps the test case and result to extract necessary metadata for
+    reporting purposes.
+    """
 
-        msg = extract_error_msg(rep)
-        if base_test:
-            msg = "!!Base Test Failure!! Halting test suite execution...\n{}".format(
-                msg
+    RESULT_MAPPING = {"passed": "PASS", "failed": "FAIL", "skipped": "SKIP"}
+
+    def __init__(self, item, outcome):
+        self.item = item
+        self.result = outcome.get_result()
+        self.files = [os.path.normpath(p) for p in self._get_files()]
+        self.error_message = self._get_error_message()
+
+    @property
+    def requirement_ids(self):
+        """
+        Returns list of requirement IDs mapped to the test case.
+
+        :return: Returns a list of string requirement IDs if the test was
+                 annotated with ``validates``; otherwise returns an empty list
+        """
+        is_mapped = hasattr(self.item.function, "requirement_ids")
+        return self.item.function.requirement_ids if is_mapped else []
+
+    @property
+    def markers(self):
+        """
+        :return: Returns a set of pytest marker names for the test or an empty set
+        """
+        return set(m.name for m in self.item.iter_markers())
+
+    @property
+    def is_base_test(self):
+        """
+        :return: Returns True if the test is annotated with a pytest marker called base
+        """
+        return "base" in self.markers
+
+    @property
+    def is_failed(self):
+        """
+        :return: True if the test failed
+        """
+        return self.outcome == "FAIL"
+
+    @property
+    def outcome(self):
+        """
+        :return: Returns 'PASS', 'FAIL', or 'SKIP'
+        """
+        return self.RESULT_MAPPING[self.result.outcome]
+
+    @property
+    def test_case(self):
+        """
+        :return: Name of the test case method
+        """
+        return self.item.function.__name__
+
+    @property
+    def test_module(self):
+        """
+        :return: Name of the file containing the test case
+        """
+        return self.item.function.__module__.split(".")[-1]
+
+    @property
+    def raw_output(self):
+        """
+        :return: Full output from pytest for the given test case
+        """
+        return str(self.result.longrepr)
+
+    def requirement_text(self, curr_reqs):
+        """
+        Creates a text summary for the requirement IDs mapped to the test case.
+        If no requirements are mapped, then it returns the empty string.
+
+        :param curr_reqs: mapping of requirement IDs to requirement metadata
+                          loaded from the VNFRQTS project's needs.json output
+        :return: ID and text of the requirements mapped to the test case
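+
+        Example of the format produced (hypothetical requirement)::
+
+            R-123456:
+            The VNF MUST do something.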
+        """
+        text = (
+            "\n\n{}: \n{}".format(r_id, curr_reqs[r_id]["description"])
+            for r_id in self.requirement_ids
+        )
+        return "".join(text)
+
+    def requirements_metadata(self, curr_reqs):
+        """
+        Returns a list of dicts containing the following metadata for each
+        requirement mapped:
+
+        - id: Requirement ID
+        - text: Full text of the requirement
+        - keyword: MUST, MUST NOT, MAY, etc.
+
+        :param curr_reqs: mapping of requirement IDs to requirement metadata
+                          loaded from the VNFRQTS project's needs.json output
+        :return: List of requirement metadata
+        """
+        data = []
+        for r_id in self.requirement_ids:
+            if r_id not in curr_reqs:
+                continue
+            data.append(
+                {
+                    "id": r_id,
+                    "text": curr_reqs[r_id]["description"],
+                    "keyword": curr_reqs[r_id]["keyword"],
+                }
             )
+        return data
+
+    def resolution_steps(self, resolutions):
+        """
+        :param resolutions: Contents loaded from resolution_steps.json
+        :return: Header and text for the resolution step associated with this
+                 test case.  Returns empty string if no resolutions are
+                 provided.
+        """
+        text = (
+            "\n{}: \n{}".format(entry["header"], entry["resolution_steps"])
+            for entry in resolutions
+            if self._match(entry)
+        )
+        return "".join(text)
+
+    def _match(self, resolution_entry):
+        """
+        Returns True if the test result maps to the given entry in
+        the resolutions file
+        """
+        return (
+            self.test_case == resolution_entry["function"]
+            and self.test_module == resolution_entry["module"]
+        )
 
-        jdata[len(jdata)] = {
-            "file": resolved_pair,
-            "vnfrqts": requirement_ids,
-            "test": item.function.__name__,
-            "test_file": item.function.__module__.split(".")[-1],
-            "raw_output": str(rep.longrepr),
-            "message": msg,
-        }
+    def _get_files(self):
+        """
+        Extracts the list of files passed into the test case.
+        :return: List of absolute paths to files
+        """
+        if "environment_pair" in self.item.fixturenames:
+            return [
+                "{} environment pair".format(
+                    self.item.funcargs["environment_pair"]["name"]
+                )
+            ]
+        elif "heat_volume_pair" in self.item.fixturenames:
+            return [
+                "{} volume pair".format(self.item.funcargs["heat_volume_pair"]["name"])
+            ]
+        elif "heat_templates" in self.item.fixturenames:
+            return self.item.funcargs["heat_templates"]
+        elif "yaml_files" in self.item.fixturenames:
+            return self.item.funcargs["yaml_files"]
+        else:
+            return [self.result.nodeid.split("[")[1][:-1]]
+
+    def _get_error_message(self):
+        """
+        :return: Error message or empty string if the test did not fail or error
+        """
+        if self.is_failed:
+            return extract_error_msg(self.result)
+        else:
+            return ""
 
-        with open("{}/failures".format(output_dir), "w") as f:
-            json.dump(jdata, f, indent=4)
 
-        if not item.config.option.continue_on_failure and base_test:
-            pytest.exit(
-                "{}\n{}\n{}".format(msg, resolved_pair, item.function.__name__)
-            )
+# noinspection PyUnusedLocal
+@pytest.hookimpl(tryfirst=True, hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+    """
+    Captures the test results for later reporting.  This will also halt testing
+    if a base failure is encountered (can be overridden with --continue-on-failure)
+    """
+    outcome = yield
+    if outcome.get_result().when != "call":
+        return  # only capture results of test cases themselves
+    result = TestResult(item, outcome)
+    ALL_RESULTS.append(result)
+    if (
+        not item.config.option.continue_on_failure
+        and result.is_base_test
+        and result.is_failed
+    ):
+        msg = "!!Base Test Failure!! Halting test suite execution...\n{}".format(
+            result.error_message
+        )
+        pytest.exit(
+            "{}\n{}\n{}".format(msg, result.files, result.test_case)
+        )
 
 
 def make_timestamp():
+    """
+    :return: String timestamp in the format:
+             2019-01-19 10:18:49.865000 Central Standard Time
+    """
     timezone = time.tzname[time.localtime().tm_isdst]
     return "{} {}".format(str(datetime.datetime.now()), timezone)
 
 
+# noinspection PyUnusedLocal
+def pytest_sessionstart(session):
+    ALL_RESULTS.clear()
+    COLLECTION_FAILURES.clear()
+
+
+# noinspection PyUnusedLocal
 def pytest_sessionfinish(session, exitstatus):
+    """
+    If not a self-test run, generate the output reports
+    """
     if not session.config.option.template_dir:
         return
     template_path = os.path.abspath(session.config.option.template_dir[0])
-    profile_name = session.config.option.validation_profile_name
+    profile_name = session.config.option.validation_profile_name or ""
     generate_report(
-        "{}/../output".format(__path__[0]),
+        get_output_dir(session.config),
         template_path,
         profile_name,
         session.config.option.report_format,
     )
 
 
+# noinspection PyUnusedLocal
 def pytest_collection_modifyitems(session, config, items):
+    """
+    Selects tests based on the validation profile requested.  Tests without
+    pytest markers will always be executed.
+    """
     allowed_marks = ["xfail", "base"]
     profile = config.option.validation_profile
 
@@ -179,7 +363,8 @@ def pytest_collection_modifyitems(session, config, items):
         if not profile and markers and set(markers).isdisjoint(allowed_marks):
             item.add_marker(
                 pytest.mark.skip(
-                    reason="No validation profile selected. Skipping tests with marks."
+                    reason="No validation profile selected. "
+                    "Skipping tests with marks."
                 )
             )
         if (
@@ -189,16 +374,22 @@ def pytest_collection_modifyitems(session, config, items):
             and set(markers).isdisjoint(allowed_marks)
         ):
             item.add_marker(
-                pytest.mark.skip(reason="Doesn't match selection validation profile")
+                pytest.mark.skip(reason="Doesn't match selected " "validation profile")
             )
 
     items.sort(
-        key=lambda item: 0 if "base" in set(m.name for m in item.iter_markers()) else 1
+        key=lambda i: 0 if "base" in set(m.name for m in i.iter_markers()) else 1
     )
 
 
-def make_href(path):
-    paths = [path] if isinstance(path, string_types) else path
+def make_href(paths):
+    """
+    Create an anchor tag to link to the file paths provided.
+    :param paths: string or list of file paths
+    :return: String of hrefs - one for each path, each separated by a line
+             break (<br/>).
+    """
+    paths = [paths] if isinstance(paths, string_types) else paths
     links = []
     for p in paths:
         abs_path = os.path.abspath(p)
@@ -211,68 +402,81 @@ def make_href(path):
     return "<br/>".join(links)
 
 
-def generate_report(outpath, template_path, profile_name, output_format):
-    failures = "{}/failures".format(outpath)
-    faildata = None
-    rdata = None
-    hdata = None
-
-    if os.path.exists(failures):
-        with open(failures, "r") as f:
-            faildata = json.loads(f.read())
-    else:
-        faildata = {}
-
-    resolution_steps = "{}/../{}".format(__path__[0], resolution_steps_file)
+def load_resolutions_file():
+    """
+    :return: data loaded from resolution_steps.json, or an empty list if
+             the file does not exist
+    """
+    resolution_steps = "{}/../{}".format(__path__[0], RESOLUTION_STEPS_FILE)
     if os.path.exists(resolution_steps):
         with open(resolution_steps, "r") as f:
-            rdata = json.loads(f.read())
-
-    heat_requirements = "{}/../{}".format(__path__[0], heat_requirements_file)
-    if os.path.exists(heat_requirements):
-        with open(heat_requirements, "r") as f:
-            hdata = json.loads(f.read())
-
-    # point requirements at the most recent version
-    current_version = hdata["current_version"]
-    hdata = hdata["versions"][current_version]["needs"]
-    # mapping requirement IDs from failures to requirement descriptions
-    for k, v in faildata.items():
-        req_text = ""
-        if v["vnfrqts"] != "":
-            for req in v["vnfrqts"]:
-                if req in hdata:
-                    req_text += "\n\n{}: \n{}".format(req, hdata[req]["description"])
-        faildata[k]["req_description"] = req_text
-
-    # mapping resolution steps to module and test name
-    for k, v in faildata.items():
-        # resolution_step = ""
-        faildata[k]["resolution_steps"] = ""
-        for rs in rdata:
-            if v["test_file"] == rs["module"] and v["test"] == rs["function"]:
-                faildata[k]["resolution_steps"] = "\n{}: \n{}".format(
-                    rs["header"], rs["resolution_steps"]
-                )
+            return json.loads(f.read())
+    return []
+
+
+def generate_report(outpath, template_path, profile_name, output_format="html"):
+    """
+    Generates the various output reports.
+
+    :param outpath: destination directory for all reports
+    :param template_path: directory containing the Heat templates validated
+    :param profile_name: Optional validation profile selected
+    :param output_format: One of "html", "excel", "json", or "csv". Default is "html"
+    :raises: ValueError if requested output format is unknown
+    """
+    failures = [r for r in ALL_RESULTS if r.is_failed]
+    generate_failure_file(outpath)
     output_format = output_format.lower().strip() if output_format else "html"
     if output_format == "html":
-        generate_html_report(outpath, profile_name, template_path, faildata)
+        generate_html_report(outpath, profile_name, template_path, failures)
     elif output_format == "excel":
-        generate_excel_report(outpath, profile_name, template_path, faildata)
+        generate_excel_report(outpath, profile_name, template_path, failures)
+    elif output_format == "json":
+        generate_json(outpath, template_path, profile_name)
     elif output_format == "csv":
-        generate_csv_report(outpath, profile_name, template_path, faildata)
+        generate_csv_report(outpath, profile_name, template_path, failures)
     else:
         raise ValueError("Unsupported output format: " + output_format)
 
 
-def generate_csv_report(output_dir, profile_name, template_path, faildata):
+def write_json(data, path):
+    """
+    Pretty print data as JSON to the output path requested
+
+    :param data: Data structure to be converted to JSON
+    :param path: Where to write output
+    """
+    with open(path, "w") as f:
+        json.dump(data, f, indent=2)
+
+
+def generate_failure_file(outpath):
+    """
+    Writes a summary of test failures to a file named failures.
+    This is for backwards compatibility only.  The report.json offers a
+    more comprehensive output.
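+
+    Illustrative shape of a single entry (example values only)::
+
+        "0": {
+            "file": "base.yaml",
+            "vnfrqts": ["R-123456"],
+            "test": "test_something",
+            "test_file": "test_module",
+            "raw_output": "...",
+            "message": "..."
+        }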
+    """
+    failure_path = os.path.join(outpath, "failures")
+    failures = [r for r in ALL_RESULTS if r.is_failed]
+    data = {}
+    for i, fail in enumerate(failures):
+        data[str(i)] = {
+            "file": fail.files[0] if len(fail.files) == 1 else fail.files,
+            "vnfrqts": fail.requirement_ids,
+            "test": fail.test_case,
+            "test_file": fail.test_module,
+            "raw_output": fail.raw_output,
+            "message": fail.error_message,
+        }
+    write_json(data, failure_path)
+
+
+def generate_csv_report(output_dir, profile_name, template_path, failures):
     rows = [["Validation Failures"]]
     headers = [
         ("Profile Selected:", profile_name),
         ("Report Generated At:", make_timestamp()),
         ("Directory Validated:", template_path),
         ("Checksum:", hash_directory(template_path)),
-        ("Total Errors:", len(faildata) + len(COLLECTION_FAILURES)),
+        ("Total Errors:", len(failures) + len(COLLECTION_FAILURES)),
     ]
     rows.append([])
     for header in headers:
@@ -294,18 +498,21 @@ def generate_csv_report(output_dir, profile_name, template_path, faildata):
         rows.append([])
 
     # table header
-    rows.append([col for col, _ in report_columns])
+    rows.append([col for col, _ in REPORT_COLUMNS])
+
+    reqs = load_current_requirements()
+    resolutions = load_resolutions_file()
 
     # table content
-    for data in faildata.values():
+    for failure in failures:
         rows.append(
             [
-                data.get("file", ""),
-                data.get("test_file", ""),
-                data.get("req_description", ""),
-                data.get("resolution_steps", ""),
-                data.get("message", ""),
-                data.get("raw_output", ""),
+                "\n".join(failure.files),
+                failure.test_module,
+                failure.requirement_text(reqs),
+                failure.resolution_steps(resolutions),
+                failure.error_message,
+                failure.raw_output,
             ]
         )
 
@@ -316,7 +523,7 @@ def generate_csv_report(output_dir, profile_name, template_path, faildata):
             writer.writerow(row)
 
 
-def generate_excel_report(output_dir, profile_name, template_path, faildata):
+def generate_excel_report(output_dir, profile_name, template_path, failures):
     output_path = os.path.join(output_dir, "report.xlsx")
     workbook = xlsxwriter.Workbook(output_path)
     bold = workbook.add_format({"bold": True})
@@ -331,7 +538,7 @@ def generate_excel_report(output_dir, profile_name, template_path, faildata):
         ("Report Generated At:", make_timestamp()),
         ("Directory Validated:", template_path),
         ("Checksum:", hash_directory(template_path)),
-        ("Total Errors:", len(faildata) + len(COLLECTION_FAILURES)),
+        ("Total Errors:", len(failures) + len(COLLECTION_FAILURES)),
     ]
     for row, (header, value) in enumerate(headers, start=2):
         worksheet.write(row, 0, header, bold)
@@ -355,39 +562,234 @@ def generate_excel_report(output_dir, profile_name, template_path, faildata):
     # table header
     start_error_table_row = 2 + len(headers) + len(COLLECTION_FAILURES) + 4
     worksheet.write(start_error_table_row, 0, "Validation Failures", bold)
-    for col_num, (col_name, _) in enumerate(report_columns):
+    for col_num, (col_name, _) in enumerate(REPORT_COLUMNS):
         worksheet.write(start_error_table_row + 1, col_num, col_name, bold)
 
+    reqs = load_current_requirements()
+    resolutions = load_resolutions_file()
+
     # table content
-    for row, data in enumerate(faildata.values(), start=start_error_table_row + 2):
-        for col, key in enumerate(report.values()):
-            if key == "file":
-                paths = (
-                    [data[key]] if isinstance(data[key], string_types) else data[key]
-                )
-                contents = "\n".join(paths)
-                worksheet.write(row, col, contents, normal)
-            elif key == "raw_output":
-                worksheet.write_string(row, col, data[key], code)
-            else:
-                worksheet.write(row, col, data[key], normal)
+    for row, failure in enumerate(failures, start=start_error_table_row + 2):
+        worksheet.write(row, 0, "\n".join(failure.files), normal)
+        worksheet.write(row, 1, failure.test_module, normal)
+        worksheet.write(row, 2, failure.requirement_text(reqs), normal)
+        worksheet.write(row, 3, failure.resolution_steps(resolutions), normal)
+        worksheet.write(row, 4, failure.error_message, normal)
+        worksheet.write(row, 5, failure.raw_output, code)
 
     workbook.close()
 
 
-def generate_html_report(outpath, profile_name, template_path, faildata):
-    failures = []
-    for data in faildata.values():
-        failures.append(
+def make_iso_timestamp():
+    """
+    Creates a timestamp in ISO 8601 format in UTC.  Used for JSON output.
+    """
+    now = datetime.datetime.utcnow()
+    now = now.replace(tzinfo=datetime.timezone.utc)
+    return now.isoformat()
+
+
+def aggregate_requirement_adherence(r_id, collection_failures, test_results):
+    """
+    Examines all tests associated with a given requirement and determines
+    the aggregate result (PASS, FAIL, ERROR, or SKIP) for the requirement.
+
+    * ERROR - At least one ERROR occurred
+    * PASS -  At least one PASS and no FAIL or ERRORs.
+    * FAIL -  At least one FAIL occurred (no ERRORs)
+    * SKIP - All tests were SKIP
+
+
+    :param r_id: Requirement ID to examine
+    :param collection_failures: Errors that occurred during test setup.
+    :param test_results: List of TestResult
+    :return: 'PASS', 'FAIL', 'SKIP', or 'ERROR'
+    """
+    errors = any(r_id in f["requirements"] for f in collection_failures)
+    outcomes = set(r.outcome for r in test_results if r_id in r.requirement_ids)
+    return aggregate_results(errors, outcomes, r_id)
+
+
+def aggregate_results(has_errors, outcomes, r_id=None):
+    """
+    Determines the aggregate result for the conditions provided.  Assumes the
+    results have been filtered and collected for analysis.
+
+    :param has_errors: True if collection failures occurred for the tests being
+                       analyzed.
+    :param outcomes: set of outcomes from the TestResults
+    :param r_id: Optional requirement ID if known
+    :return: 'ERROR', 'PASS', 'FAIL', or 'SKIP'
+             (see aggregate_requirement_adherence for more detail)
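+
+    For example (illustrative): with no collection errors, the outcome set
+    {"PASS", "SKIP"} aggregates to "PASS", while {"PASS", "FAIL"}
+    aggregates to "FAIL".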
+    """
+    if has_errors:
+        return "ERROR"
+
+    if not outcomes:
+        return "PASS"
+    elif "FAIL" in outcomes:
+        return "FAIL"
+    elif "PASS" in outcomes:
+        return "PASS"
+    elif {"SKIP"} == outcomes:
+        return "SKIP"
+    else:
+        warnings.warn(
+            "Unexpected error aggregating outcomes ({}) for requirement {}".format(
+                outcomes, r_id
+            )
+        )
+        return "ERROR"
+
+
+def aggregate_run_results(collection_failures, test_results):
+    """
+    Determines overall status of run based on all failures and results.
+
+    * 'ERROR' - At least one collection failure occurred during the run.
+    * 'FAIL' - Template failed at least one test
+    * 'PASS' - All tests executed properly and no failures were detected
+
+    :param collection_failures: failures occurring during test setup
+    :param test_results: list of all test execution results
+    :return: one of 'ERROR', 'FAIL', or 'PASS'
+    """
+    if collection_failures:
+        return "ERROR"
+    elif any(r.is_failed for r in test_results):
+        return "FAIL"
+    else:
+        return "PASS"
+
+
+def error(failure_or_result):
+    """
+    Extracts the error message from a collection failure or test result
+    :param failure_or_result: Entry from COLLECTION_FAILURES or a TestResult
+    :return: Error message as string
+    """
+    if isinstance(failure_or_result, TestResult):
+        return failure_or_result.error_message
+    else:
+        return failure_or_result["error"]
+
+
+def req_ids(failure_or_result):
+    """
+    Extracts the requirement IDs from a collection failure or test result
+    :param failure_or_result: Entry from COLLECTION_FAILURES or a TestResult
+    :return: set of Requirement IDs.  If no requirements mapped, then an empty set
+    """
+    if isinstance(failure_or_result, TestResult):
+        return set(failure_or_result.requirement_ids)
+    else:
+        return set(failure_or_result["requirements"])
+
+
+def collect_errors(r_id, collection_failures, test_result):
+    """
+    Creates a list of error messages from the collection failures and
+    test results.  If r_id is provided, then it collects the error messages
+    where the failure or test is associated with that requirement ID.  If
+    r_id is None, then it collects all errors that occur on failures and
+    results that are not mapped to requirements
+    """
+    def selector(item):
+        if r_id:
+            return r_id in req_ids(item)
+        else:
+            return not req_ids(item)
+
+    errors = (error(x) for x in chain(collection_failures, test_result)
+              if selector(x))
+    return [e for e in errors if e]
+
+
+def generate_json(outpath, template_path, profile_name):
+    """
+    Creates a JSON summary of the entire test run.
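+
+    Illustrative top-level shape of report.json (example values only)::
+
+        {
+            "version": "dublin",
+            "template_directory": "/path/to/templates",
+            "timestamp": "2019-01-19T16:18:49.865000",
+            "checksum": "d41d8cd9...",
+            "profile": "",
+            "outcome": "FAIL",
+            "tests": [...],
+            "requirements": [...]
+        }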
+    """
+    reqs = load_current_requirements()
+    data = {
+        "version": "dublin",
+        "template_directory": template_path,
+        "timestamp": make_iso_timestamp(),
+        "checksum": hash_directory(template_path),
+        "profile": profile_name,
+        "outcome": aggregate_run_results(COLLECTION_FAILURES, ALL_RESULTS),
+        "tests": [],
+        "requirements": [],
+    }
+
+    results = data["tests"]
+    for result in COLLECTION_FAILURES:
+        results.append(
+            {
+                "files": [],
+                "test_module": result["module"],
+                "test_case": result["test"],
+                "result": "ERROR",
+                "error": result["error"],
+                "requirements": result["requirements"],
+            }
+        )
+    for result in ALL_RESULTS:
+        results.append(
+            {
+                "files": result.files,
+                "test_module": result.test_module,
+                "test_case": result.test_case,
+                "result": result.outcome,
+                "error": result.error_message if result.is_failed else "",
+                "requirements": result.requirements_metadata(reqs),
+            }
+        )
+
+    requirements = data["requirements"]
+    for r_id, r_data in reqs.items():
+        result = aggregate_requirement_adherence(r_id, COLLECTION_FAILURES, ALL_RESULTS)
+        if result:
+            requirements.append(
+                {
+                    "id": r_id,
+                    "text": r_data["description"],
+                    "keyword": r_data["keyword"],
+                    "result": result,
+                    "errors": collect_errors(r_id, COLLECTION_FAILURES, ALL_RESULTS)
+                }
+            )
+    # If there are tests that aren't mapped to a requirement, then we'll
+    # map them to a special entry so the results are coherent.
+    unmapped_outcomes = {r.outcome for r in ALL_RESULTS if not r.requirement_ids}
+    has_errors = any(not f["requirements"] for f in COLLECTION_FAILURES)
+    if unmapped_outcomes or has_errors:
+        requirements.append(
+            {
+                "id": "Unmapped",
+                "text": "Tests not mapped to requirements (see tests)",
+                "result": aggregate_results(has_errors, unmapped_outcomes),
+                "errors": collect_errors(None, COLLECTION_FAILURES, ALL_RESULTS)
+            }
+        )
+
+    report_path = os.path.join(outpath, "report.json")
+    write_json(data, report_path)
+
+
+def generate_html_report(outpath, profile_name, template_path, failures):
+    reqs = load_current_requirements()
+    resolutions = load_resolutions_file()
+    fail_data = []
+    for failure in failures:
+        fail_data.append(
             {
-                "file_links": make_href(data["file"]),
-                "test_id": data["test_file"],
-                "error_message": data["message"],
-                "raw_output": data["raw_output"],
+                "file_links": make_href(failure.files),
+                "test_id": failure.test_module,
+                "error_message": failure.error_message,
+                "raw_output": failure.raw_output,
                 "requirements": docutils.core.publish_parts(
-                    writer_name="html", source=data["req_description"]
+                    writer_name="html", source=failure.requirement_text(reqs)
                 )["body"],
-                "resolution_steps": data["resolution_steps"],
+                "resolution_steps": failure.resolution_steps(resolutions),
             }
         )
     pkg_dir = os.path.split(__file__)[0]
@@ -400,7 +802,7 @@ def generate_html_report(outpath, profile_name, template_path, faildata):
             template_dir=make_href(template_path),
             checksum=hash_directory(template_path),
             timestamp=make_timestamp(),
-            failures=failures,
+            failures=fail_data,
             collection_failures=COLLECTION_FAILURES,
         )
     with open(os.path.join(outpath, "report.html"), "w") as f:
@@ -443,7 +845,7 @@ def pytest_addoption(parser):
         "--report-format",
         dest="report_format",
         action="store",
-        help="Format of output report (html, csv, excel)",
+        help="Format of output report (html, csv, excel, json)",
     )
 
     parser.addoption(
@@ -453,6 +855,14 @@ def pytest_addoption(parser):
         help="Continue validation even when structural errors exist in input files",
     )
 
+    parser.addoption(
+        "--output-directory",
+        dest="output_dir",
+        action="store",
+        default=None,
+        help="Alternate "
+    )
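+    # Illustrative usage of the new option (paths are examples only):
+    #   pytest ice_validator/tests --output-directory=/tmp/vvp-output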
+
 
 def pytest_configure(config):
     """
@@ -575,6 +985,7 @@ def pytest_generate_tests(metafunc):
                 "test": metafunc.function.__name__,
                 "fixtures": metafunc.fixturenames,
                 "error": traceback.format_exc(),
+                "requirements": getattr(metafunc.function, "requirement_ids", []),
             }
         )
         raise e
@@ -592,23 +1003,24 @@ def hash_directory(path):
 
 def load_current_requirements():
     """Loads dict of current requirements or empty dict if file doesn't exist"""
-
-    url = 'https://onap.readthedocs.io/en/latest/_downloads/789ac64d223325488fb3f120f959d985/needs.json'
-
     try:
-        r = requests.get(url)
-        if r.headers.get('content-type') == 'application/json':
-            with open('requirements.json', 'wb') as needs:
+        r = requests.get(NEEDS_JSON_URL)
+        if r.headers.get("content-type") == "application/json":
+            with open(HEAT_REQUIREMENTS_FILE, "wb") as needs:
                 needs.write(r.content)
         else:
-            warnings.warn("Unexpected content-type ({}) encountered downloading requirements.json, using last saved copy".format(r.headers.get('content-type')))
+            warnings.warn(
+                (
+                    "Unexpected content-type ({}) encountered downloading "
+                    + "requirements.json, using last saved copy"
+                ).format(r.headers.get("content-type"))
+            )
     except requests.exceptions.RequestException as e:
         warnings.warn("Error downloading latest JSON, using last saved copy.")
         warnings.warn(UserWarning(e))
-    path = "requirements.json"
-    if not os.path.exists(path):
+    if not os.path.exists(HEAT_REQUIREMENTS_FILE):
         return {}
-    with io.open(path, encoding="utf8", mode="r") as f:
+    with io.open(HEAT_REQUIREMENTS_FILE, encoding="utf8", mode="r") as f:
         data = json.load(f)
         version = data["current_version"]
         return data["versions"][version]["needs"]
@@ -628,8 +1040,8 @@ def unicode_writerow(writer, row):
     writer.writerow(row)
 
 
+# noinspection PyUnusedLocal
 def pytest_report_collectionfinish(config, startdir, items):
-
     """Generates a simple traceability report to output/traceability.csv"""
     traceability_path = os.path.join(__path__[0], "../output/traceability.csv")
     output_dir = os.path.split(traceability_path)[0]
@@ -637,10 +1049,10 @@ def pytest_report_collectionfinish(config, startdir, items):
         os.makedirs(output_dir)
     requirements = load_current_requirements()
     unmapped, mapped = partition(
-        lambda item: hasattr(item.function, "requirement_ids"), items
+        lambda i: hasattr(i.function, "requirement_ids"), items
     )
 
-    req_to_test = collections.defaultdict(set)
+    req_to_test = defaultdict(set)
     mapping_errors = set()
     for item in mapped:
         for req_id in item.function.requirement_ids:
@@ -654,8 +1066,8 @@ def pytest_report_collectionfinish(config, startdir, items):
     mapping_error_path = os.path.join(__path__[0], "../output/mapping_errors.csv")
     with compat_open(mapping_error_path) as f:
         writer = csv.writer(f)
-        for error in mapping_errors:
-            unicode_writerow(writer, error)
+        for err in mapping_errors:
+            unicode_writerow(writer, err)
 
     with compat_open(traceability_path) as f:
         out = csv.writer(f)