[VVP] Allow any_of and all_of in categories decorator
[vvp/validation-scripts.git] / ice_validator / tests / conftest.py
index 1a8b9c1..3ef7fe1 100644
@@ -43,6 +43,15 @@ import json
 import os
 import re
 import time
+
+from preload.model import create_preloads
+from config import get_generator_plugin_names
+from tests.helpers import get_output_dir
+
+try:
+    from html import escape
+except ImportError:
+    from cgi import escape
 from collections import defaultdict
 
 import traceback
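
Side note on the new fallback import: html.escape is the Python 3 home of this helper, while cgi.escape (removed in Python 3.8) covers older interpreters. The two differ in their default quote handling, which is worth knowing when reading the escaped report output further down; a quick illustration:

    from html import escape            # Python 3
    escape('id="<vm_type>"')           # -> 'id=&quot;&lt;vm_type&gt;&quot;'

    # Python 2 fallback: cgi.escape leaves quotes alone unless quote=True is passed.
    # cgi.escape('id="<vm_type>"')     # -> 'id="&lt;vm_type&gt;"'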
@@ -91,18 +100,6 @@ COLLECTION_FAILURES = []
 ALL_RESULTS = []
 
 
-def get_output_dir(config):
-    """
-    Retrieve the output directory for the reports and create it if necessary
-    :param config: pytest configuration
-    :return: output directory as string
-    """
-    output_dir = config.option.output_dir or DEFAULT_OUTPUT_DIR
-    if not os.path.exists(output_dir):
-        os.makedirs(output_dir, exist_ok=True)
-    return output_dir
-
-
 def extract_error_msg(rep):
     """
     If a custom error message was provided, then extract it otherwise
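
get_output_dir is not deleted outright: the new `from tests.helpers import get_output_dir` at the top of the file suggests the helper simply moved into tests/helpers, presumably with the same shape as the lines removed above (DEFAULT_OUTPUT_DIR below is a placeholder for whatever constant accompanies it):

    import os

    DEFAULT_OUTPUT_DIR = "output"  # placeholder; the real constant moves with the helper

    def get_output_dir(config):
        """
        Retrieve the output directory for the reports and create it if necessary
        :param config: pytest configuration
        :return: output directory as string
        """
        output_dir = config.option.output_dir or DEFAULT_OUTPUT_DIR
        if not os.path.exists(output_dir):
            os.makedirs(output_dir, exist_ok=True)
        return output_dir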
@@ -348,6 +345,12 @@ def pytest_sessionfinish(session, exitstatus):
     )
 
 
+def pytest_terminal_summary(terminalreporter, exitstatus):
+    # Ensures all preload information and warnings appear after
+    # test results
+    create_preloads(terminalreporter.config, exitstatus)
+
+
 # noinspection PyUnusedLocal
 def pytest_collection_modifyitems(session, config, items):
     """
@@ -357,31 +360,27 @@ def pytest_collection_modifyitems(session, config, items):
     config.traceability_items = list(items)  # save all items for traceability
     if not config.option.self_test:
         for item in items:
-            # checking if test belongs to a category
-            if hasattr(item.function, "categories"):
-                if config.option.test_categories:
-                    test_categories = getattr(item.function, "categories")
-                    passed_categories = config.option.test_categories
-                    if not all(
-                        category in passed_categories for category in test_categories
-                    ):
-                        item.add_marker(
-                            pytest.mark.skip(
-                                reason=(
-                                    "Test categories do not match "
-                                    "all the passed categories"
-                                )
-                            )
+            passed_categories = set(config.option.test_categories or [])
+            all_of_categories = getattr(item.function, "all_categories", set())
+            any_of_categories = getattr(item.function, "any_categories", set())
+            if all_of_categories and not all_of_categories.issubset(passed_categories):
+                item.add_marker(
+                    pytest.mark.skip(
+                        reason=(
+                            "Test categories do not match " "all the passed categories"
                         )
-                else:
-                    item.add_marker(
-                        pytest.mark.skip(
-                            reason=(
-                                "Test belongs to a category but "
-                                "no categories were passed"
-                            )
+                    )
+                )
+            if any_of_categories and not passed_categories.intersection(
+                any_of_categories
+            ):
+                item.add_marker(
+                    pytest.mark.skip(
+                        reason=(
+                            "Test categories do not match " "any of the passed categories"
                         )
                     )
+                )
 
     items.sort(
         key=lambda x: (0, x.name)
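
For context, the collection hook above only reads two attributes off the test function: all_categories and any_categories. A minimal sketch of a categories decorator shape that would satisfy it, with the any_of/all_of naming taken from the commit title (the real decorator lives elsewhere in the suite and may differ in detail; the category names below are placeholders):

    def categories(*all_of, any_of=None):
        """Tag a test with required (all_of) and alternative (any_of) categories."""
        def decorator(func):
            func.all_categories = set(all_of)        # every one must be selected
            func.any_categories = set(any_of or [])  # at least one must be selected
            return func
        return decorator

    # Collection keeps this test only when "base" is among the selected
    # categories and at least one of "openstack"/"heat" is selected as well.
    @categories("base", any_of=["openstack", "heat"])
    def test_example(yaml_file):
        pass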
@@ -650,7 +649,7 @@ def aggregate_run_results(collection_failures, test_results):
 
 
 def relative_paths(base_dir, paths):
-    return [os.path.relpath(p, base_dir) for p in paths]
+    return [os.path.relpath(p, base_dir) for p in paths if p != ""]
 
 
 # noinspection PyTypeChecker
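
The new guard on relative_paths is presumably there because os.path.relpath rejects an empty path, so a blank entry in a failure's file list would otherwise abort report generation:

    import os

    os.path.relpath("reports/failures/f1.html", "reports")  # -> 'failures/f1.html'
    os.path.relpath("", "reports")                           # ValueError: no path specified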
@@ -745,8 +744,10 @@ def generate_html_report(outpath, categories, template_path, failures):
             {
                 "file_links": make_href(failure.files, template_path),
                 "test_id": failure.test_id,
-                "error_message": failure.error_message.replace("\n", "<br/><br/>"),
-                "raw_output": failure.raw_output,
+                "error_message": escape(failure.error_message).replace(
+                    "\n", "<br/><br/>"
+                ),
+                "raw_output": escape(failure.raw_output),
                 "requirements": docutils.core.publish_parts(
                     writer_name="html", source=failure.requirement_text(reqs)
                 )["body"],
@@ -824,6 +825,23 @@ def pytest_addoption(parser):
         help="optional category of test to execute",
     )
 
+    parser.addoption(
+        "--env-directory",
+        dest="env_dir",
+        action="store",
+        help="optional directory of .env files for preload generation",
+    )
+
+    parser.addoption(
+        "--preload-format",
+        dest="preload_formats",
+        action="append",
+        help=(
+            "Preload format to create (multiple allowed). If not provided, "
+            "all available formats will be created: {}"
+        ).format(", ".join(get_generator_plugin_names())),
+    )
+
 
 def pytest_configure(config):
     """
@@ -958,7 +976,7 @@ def hash_directory(path):
     :param path: string directory containing files
     :return: string MD5 hash code (hex)
     """
-    md5 = hashlib.md5()
+    md5 = hashlib.md5()  # nosec
     for dir_path, sub_dirs, filenames in os.walk(path):
         for filename in filenames:
             file_path = os.path.join(dir_path, filename)
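
The # nosec marker tells Bandit that MD5 serves only as a change-detection fingerprint here, not as a security control. On Python 3.9+ the same intent can be expressed in code rather than a comment (an alternative, not what this patch does):

    import hashlib

    # Marks the digest as non-cryptographic (Python 3.9+); recent Bandit
    # releases accept this without an inline suppression.
    md5 = hashlib.md5(usedforsecurity=False)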
@@ -1040,12 +1058,11 @@ def generate_rst_table(output_dir, data):
     rst_path = os.path.join(output_dir, "rst.csv")
     with open(rst_path, "w", newline="") as f:
         out = csv.writer(f)
-        out.writerow(("Requirement ID", "Requirement", "Test Module", "Test Name"))
+        out.writerow(("Requirement ID", "Test Module", "Test Name"))
         for req_id, metadata in data.items():
             out.writerow(
                 (
                     metadata["full_title"],
-                    metadata["description"],
                     metadata["test_case"],
                     metadata["validated_by"],
                 )
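
With the description column dropped, the generated rst.csv carries three columns; anything consuming it by header name would read along these lines (a sketch; the file actually lives in the report output directory):

    import csv

    with open("rst.csv", newline="") as f:
        for row in csv.DictReader(f):
            # Columns remaining after this change:
            print(row["Requirement ID"], row["Test Module"], row["Test Name"])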