[VVP] Reports now show test file and test case
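
Failure tables in the HTML, Excel, and CSV reports now identify each
failure by "test_module::test_case" instead of the module name alone.
The JSON summary is generated on every run, with template and file
paths normalized to be relative and platform-neutral, and a new
rst.csv traceability table maps Heat requirements to the test cases
that validate them for the VNFRQTS documentation.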
diff --git a/ice_validator/tests/conftest.py b/ice_validator/tests/conftest.py
index 807ac71..07a66f2 100644
--- a/ice_validator/tests/conftest.py
+++ b/ice_validator/tests/conftest.py
@@ -63,6 +63,12 @@ DEFAULT_OUTPUT_DIR = "{}/../output".format(__path__[0])
 
 RESOLUTION_STEPS_FILE = "resolution_steps.json"
 HEAT_REQUIREMENTS_FILE = os.path.join(__path__[0], "..", "heat_requirements.json")
+TEST_SCRIPT_SITE = (
+    "https://github.com/onap/vvp-validation-scripts/blob/master/ice_validator/tests/"
+)
+VNFRQTS_ID_URL = (
+    "https://docs.onap.org/en/latest/submodules/vnfrqts/requirements.git/docs/"
+)
 
 REPORT_COLUMNS = [
     ("Input File", "file"),
@@ -189,6 +195,13 @@ class TestResult:
         """
         return self.item.function.__module__.split(".")[-1]
 
+    @property
+    def test_id(self):
+        """
+        :return: ID of the test (test_module + test_case)
+        """
+        return "{}::{}".format(self.test_module, self.test_case)
+
     @property
     def raw_output(self):
         """
@@ -208,6 +221,7 @@ class TestResult:
         text = (
             "\n\n{}: \n{}".format(r_id, curr_reqs[r_id]["description"])
             for r_id in self.requirement_ids
+            if r_id in curr_reqs
         )
         return "".join(text)
 
@@ -281,7 +295,8 @@ class TestResult:
         elif "yaml_files" in self.item.fixturenames:
             return self.item.funcargs["yaml_files"]
         else:
-            return [self.result.nodeid.split("[")[1][:-1]]
+            parts = self.result.nodeid.split("[")
+            return [""] if len(parts) == 1 else [parts[1][:-1]]
 
     def _get_error_message(self):
         """
@@ -304,7 +319,6 @@ def pytest_runtest_makereport(item, call):
     if outcome.get_result().when != "call":
         return  # only capture results of test cases themselves
     result = TestResult(item, outcome)
-    ALL_RESULTS.append(result)
     if (
         not item.config.option.continue_on_failure
         and result.is_base_test
@@ -313,8 +327,12 @@ def pytest_runtest_makereport(item, call):
         msg = "!!Base Test Failure!! Halting test suite execution...\n{}".format(
             result.error_message
         )
+        result.error_message = msg
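+        # record the result before pytest.exit() halts the run so it still appears in the report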
+        ALL_RESULTS.append(result)
         pytest.exit("{}\n{}\n{}".format(msg, result.files, result.test_case))
 
+    ALL_RESULTS.append(result)
+
 
 def make_timestamp():
     """
@@ -429,12 +447,13 @@ def generate_report(outpath, template_path, categories, output_format="html"):
     failures = [r for r in ALL_RESULTS if r.is_failed]
     generate_failure_file(outpath)
     output_format = output_format.lower().strip() if output_format else "html"
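+    # the JSON summary is always written; the requested format is generated in addition to it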
+    generate_json(outpath, template_path, categories)
     if output_format == "html":
         generate_html_report(outpath, categories, template_path, failures)
     elif output_format == "excel":
         generate_excel_report(outpath, categories, template_path, failures)
     elif output_format == "json":
-        generate_json(outpath, template_path, categories)
+        return
     elif output_format == "csv":
         generate_csv_report(outpath, categories, template_path, failures)
     else:
@@ -513,7 +532,7 @@ def generate_csv_report(output_dir, categories, template_path, failures):
         rows.append(
             [
                 "\n".join(failure.files),
-                failure.test_module,
+                failure.test_id,
                 failure.requirement_text(reqs),
                 failure.resolution_steps(resolutions),
                 failure.error_message,
@@ -577,7 +596,7 @@ def generate_excel_report(output_dir, categories, template_path, failures):
     # table content
     for row, failure in enumerate(failures, start=start_error_table_row + 2):
         worksheet.write(row, 0, "\n".join(failure.files), normal)
-        worksheet.write(row, 1, failure.test_module, normal)
+        worksheet.write(row, 1, failure.test_id, normal)
         worksheet.write(row, 2, failure.requirement_text(reqs), normal)
         worksheet.write(row, 3, failure.resolution_steps(resolutions), normal)
         worksheet.write(row, 4, failure.error_message, normal)
@@ -711,6 +730,10 @@ def collect_errors(r_id, collection_failures, test_result):
     return [e for e in errors if e]
 
 
+def relative_paths(base_dir, paths):
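+    """Converts paths into paths relative to base_dir so reports are portable."""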
+    return [os.path.relpath(p, base_dir) for p in paths]
+
+
 def generate_json(outpath, template_path, categories):
     """
     Creates a JSON summary of the entire test run.
@@ -718,7 +741,9 @@ def generate_json(outpath, template_path, categories):
     reqs = load_current_requirements()
     data = {
         "version": "dublin",
-        "template_directory": template_path,
+        "template_directory": os.path.splitdrive(template_path)[1].replace(
+            os.path.sep, "/"
+        ),
         "timestamp": make_iso_timestamp(),
         "checksum": hash_directory(template_path),
         "categories": categories,
@@ -742,7 +767,7 @@ def generate_json(outpath, template_path, categories):
     for result in ALL_RESULTS:
         results.append(
             {
-                "files": result.files,
+                "files": relative_paths(template_path, result.files),
                 "test_module": result.test_module,
                 "test_case": result.test_case,
                 "result": result.outcome,
@@ -790,7 +815,7 @@ def generate_html_report(outpath, categories, template_path, failures):
         fail_data.append(
             {
                 "file_links": make_href(failure.files),
-                "test_id": failure.test_module,
+                "test_id": failure.test_id,
                 "error_message": failure.error_message,
                 "raw_output": failure.raw_output,
                 "requirements": docutils.core.publish_parts(
@@ -1024,7 +1049,79 @@ def load_current_requirements():
 
 def select_heat_requirements(reqs):
     """Filters dict requirements to only those requirements pertaining to Heat"""
-    return {k: v for k, v in reqs.items() if "Heat" in v["docname"]}
+    return {k: v for k, v in reqs.items() if "heat" in v["docname"].lower()}
+
+
+def is_testable(reqs):
+    """Annotates each requirement with a boolean "testable" flag.
+
+    A requirement is considered testable if its keyword contains MUST and
+    its validation mode is not "none".
+    """
+    for key, values in reqs.items():
+        keyword = values.get("keyword", "").upper()
+        mode = values.get("validation_mode", "").lower()
+        reqs[key]["testable"] = "MUST" in keyword and "none" not in mode
+    return reqs
+
+
+def build_rst_json(reqs):
+    """Drops requirements that are not testable and adds RST-formatted links
+    to each remaining requirement and its mapped test case (if any)."""
+    for key, values in list(reqs.items()):
+        if not values["testable"]:
+            del reqs[key]
+            continue
+        # RST link to the requirement definition in the VNFRQTS documentation
+        title = (
+            "`"
+            + values["id"]
+            + " <"
+            + VNFRQTS_ID_URL
+            + values["docname"].replace(" ", "%20")
+            + ".html#"
+            + values["id"]
+            + ">`_"
+        )
+        if values.get("test_case"):  # absent when no test is mapped to the requirement
+            # RST link to the test module that validates this requirement
+            mod = values["test_case"].split(".")[-1]
+            rst_value = "`" + mod + " <" + TEST_SCRIPT_SITE + mod + ".py>`_"
+            reqs[key].update({"full_title": title, "test_case": rst_value})
+        else:
+            reqs[key].update(
+                {
+                    "full_title": title,
+                    "test_case": "No test for requirement",
+                    "validated_by": "static",
+                }
+            )
+    return reqs
+
+
+def generate_rst_table(output_dir, data):
+    """Generates rst.csv, a requirement-to-test table formatted for inclusion in RST docs"""
+    rst_path = os.path.join(output_dir, "rst.csv")
+    with open(rst_path, "w", newline="") as f:
+        out = csv.writer(f)
+        out.writerow(("Requirement ID", "Requirement", "Test Module", "Test Name"))
+        for metadata in data.values():
+            out.writerow(
+                (
+                    metadata["full_title"],
+                    metadata["description"],
+                    metadata["test_case"],
+                    metadata["validated_by"],
+                )
+            )
 
 
 # noinspection PyUnusedLocal
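
A sketch of the RST links these helpers emit, assuming a hypothetical requirement
R-XXXXX documented in "Heat Orchestration Templates" and mapped to a hypothetical
test module tests.test_initial_configuration:

    full_title: `R-XXXXX <https://docs.onap.org/en/latest/submodules/vnfrqts/requirements.git/docs/Heat%20Orchestration%20Templates.html#R-XXXXX>`_
    test_case:  `test_initial_configuration <https://github.com/onap/vvp-validation-scripts/blob/master/ice_validator/tests/test_initial_configuration.py>`_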
@@ -1036,6 +1133,7 @@ def pytest_report_collectionfinish(config, startdir, items):
         os.makedirs(output_dir)
     reqs = load_current_requirements()
     requirements = select_heat_requirements(reqs)
+    testable_requirements = is_testable(requirements)
     unmapped, mapped = partition(
         lambda i: hasattr(i.function, "requirement_ids"), items
     )
@@ -1046,6 +1144,13 @@ def pytest_report_collectionfinish(config, startdir, items):
         for req_id in item.function.requirement_ids:
             if req_id not in req_to_test:
                 req_to_test[req_id].add(item)
+                if req_id in requirements:
+                    reqs[req_id].update(
+                        {
+                            "test_case": item.function.__module__,
+                            "validated_by": item.function.__name__,
+                        }
+                    )
             if req_id not in requirements:
                 mapping_errors.add(
                     (req_id, item.function.__module__, item.function.__name__)
@@ -1060,14 +1165,18 @@ def pytest_report_collectionfinish(config, startdir, items):
     with open(traceability_path, "w", newline="") as f:
         out = csv.writer(f)
         out.writerow(
-            ("Requirement ID", "Requirement", "Section",
-             "Keyword", "Validation Mode", "Is Testable",
-             "Test Module", "Test Name"),
+            (
+                "Requirement ID",
+                "Requirement",
+                "Section",
+                "Keyword",
+                "Validation Mode",
+                "Is Testable",
+                "Test Module",
+                "Test Name",
+            )
         )
-        for req_id, metadata in requirements.items():
-            keyword = metadata["keyword"].upper()
-            mode = metadata["validation_mode"].lower()
-            testable = keyword in {"MUST", "MUST NOT"} and mode != "none"
+        for req_id, metadata in testable_requirements.items():
             if req_to_test[req_id]:
                 for item in req_to_test[req_id]:
                     out.writerow(
@@ -1075,35 +1184,42 @@ def pytest_report_collectionfinish(config, startdir, items):
                             req_id,
                             metadata["description"],
                             metadata["section_name"],
-                            keyword,
-                            mode,
-                            "TRUE" if testable else "FALSE",
+                            metadata["keyword"],
+                            metadata["validation_mode"],
+                            metadata["testable"],
                             item.function.__module__,
                             item.function.__name__,
-                        ),
+                        )
                     )
             else:
                 out.writerow(
-                    (req_id,
-                     metadata["description"],
-                     metadata["section_name"],
-                     keyword,
-                     mode,
-                     "TRUE" if testable else "FALSE",
-                     "",   # test module
-                     ""),  # test function
+                    (
+                        req_id,
+                        metadata["description"],
+                        metadata["section_name"],
+                        metadata["keyword"],
+                        metadata["validation_mode"],
+                        metadata["testable"],
+                        "",  # test module
+                        "",  # test function
+                    )
                 )
         # now write out any test methods that weren't mapped to requirements
-        unmapped_tests = {(item.function.__module__, item.function.__name__)
-                          for item in unmapped}
+        unmapped_tests = {
+            (item.function.__module__, item.function.__name__) for item in unmapped
+        }
         for test_module, test_name in unmapped_tests:
             out.writerow(
-                ("",        # req ID
-                 "",        # description
-                 "",        # section name
-                 "",        # keyword
-                 "static",  # validation mode
-                 "TRUE",    # testable
-                 test_module,
-                 test_name)
+                (
+                    "",  # req ID
+                    "",  # description
+                    "",  # section name
+                    "",  # keyword
+                    "static",  # validation mode
+                    "TRUE",  # testable
+                    test_module,
+                    test_name,
+                )
             )
+
+    generate_rst_table(get_output_dir(config), build_rst_json(testable_requirements))