X-Git-Url: https://gerrit.onap.org/r/gitweb?a=blobdiff_plain;f=ice_validator%2Ftests%2Fconftest.py;h=3ef7fe1474c091fe4170ddccbb9b14cfdb45a28c;hb=b3a5befeb0d5d9e348b1ba7b29f412b15544a0ec;hp=7fe94439ee8899300d822bf5fd4e2e742a077895;hpb=0907dd0dcd870afc12d4cb245d970fefff803898;p=vvp%2Fvalidation-scripts.git

diff --git a/ice_validator/tests/conftest.py b/ice_validator/tests/conftest.py
index 7fe9443..3ef7fe1 100644
--- a/ice_validator/tests/conftest.py
+++ b/ice_validator/tests/conftest.py
@@ -43,6 +43,11 @@ import json
 import os
 import re
 import time
+
+from preload.model import create_preloads
+from config import get_generator_plugin_names
+from tests.helpers import get_output_dir
+
 try:
     from html import escape
 except ImportError:
@@ -95,18 +100,6 @@ COLLECTION_FAILURES = []
 ALL_RESULTS = []
 
 
-def get_output_dir(config):
-    """
-    Retrieve the output directory for the reports and create it if necessary
-    :param config: pytest configuration
-    :return: output directory as string
-    """
-    output_dir = config.option.output_dir or DEFAULT_OUTPUT_DIR
-    if not os.path.exists(output_dir):
-        os.makedirs(output_dir, exist_ok=True)
-    return output_dir
-
-
 def extract_error_msg(rep):
     """
     If a custom error message was provided, then extract it otherwise
@@ -352,6 +345,12 @@ def pytest_sessionfinish(session, exitstatus):
     )
 
 
+def pytest_terminal_summary(terminalreporter, exitstatus):
+    # Ensures all preload information and warnings appear after
+    # test results
+    create_preloads(terminalreporter.config, exitstatus)
+
+
 # noinspection PyUnusedLocal
 def pytest_collection_modifyitems(session, config, items):
     """
@@ -361,31 +360,27 @@ def pytest_collection_modifyitems(session, config, items):
     config.traceability_items = list(items)  # save all items for traceability
     if not config.option.self_test:
         for item in items:
-            # checking if test belongs to a category
-            if hasattr(item.function, "categories"):
-                if config.option.test_categories:
-                    test_categories = getattr(item.function, "categories")
-                    passed_categories = config.option.test_categories
-                    if not all(
-                        category in passed_categories for category in test_categories
-                    ):
-                        item.add_marker(
-                            pytest.mark.skip(
-                                reason=(
-                                    "Test categories do not match "
-                                    "all the passed categories"
-                                )
-                            )
+            passed_categories = set(config.option.test_categories or [])
+            all_of_categories = getattr(item.function, "all_categories", set())
+            any_of_categories = getattr(item.function, "any_categories", set())
+            if all_of_categories and not all_of_categories.issubset(passed_categories):
+                item.add_marker(
+                    pytest.mark.skip(
+                        reason=(
+                            "Test categories do not match " "all the passed categories"
                         )
-                else:
-                    item.add_marker(
-                        pytest.mark.skip(
-                            reason=(
-                                "Test belongs to a category but "
-                                "no categories were passed"
-                            )
+                    )
+                )
+            if any_of_categories and not passed_categories.intersection(
+                any_of_categories
+            ):
+                item.add_marker(
+                    pytest.mark.skip(
+                        reason=(
+                            "Test categories do not match " "any the passed categories"
                        )
                     )
+                )
 
     items.sort(
         key=lambda x: (0, x.name)
@@ -749,8 +744,9 @@ def generate_html_report(outpath, categories, template_path, failures):
         {
             "file_links": make_href(failure.files, template_path),
             "test_id": failure.test_id,
-            "error_message": escape(failure.error_message).replace("\n",
-                                                                   "<br/><br/>"),
+            "error_message": escape(failure.error_message).replace(
+                "\n", "<br/><br/>"
+            ),
             "raw_output": escape(failure.raw_output),
             "requirements": docutils.core.publish_parts(
                 writer_name="html", source=failure.requirement_text(reqs)
@@ -829,6 +825,23 @@ def pytest_addoption(parser):
         help="optional category of test to execute",
     )
 
+    parser.addoption(
+        "--env-directory",
+        dest="env_dir",
+        action="store",
+        help="optional directory of .env files for preload generation",
+    )
+
+    parser.addoption(
+        "--preload-format",
+        dest="preload_formats",
+        action="append",
+        help=(
+            "Preload format to create (multiple allowed). If not provided "
+            "then all available formats will be created: {}"
+        ).format(", ".join(get_generator_plugin_names())),
+    )
+
 
 def pytest_configure(config):
     """