import os
import re
import time
+
+from preload.model import create_preloads
+from config import get_generator_plugin_names
+from tests.helpers import get_output_dir
+
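+# html.escape is Python 3 only; fall back to Python 2's cgi.escape,
+# which does not escape quotes by default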
+try:
+    from html import escape
+except ImportError:
+    from cgi import escape
from collections import defaultdict
import traceback
ALL_RESULTS = []
-def get_output_dir(config):
- """
- Retrieve the output directory for the reports and create it if necessary
- :param config: pytest configuration
- :return: output directory as string
- """
- output_dir = config.option.output_dir or DEFAULT_OUTPUT_DIR
- if not os.path.exists(output_dir):
- os.makedirs(output_dir, exist_ok=True)
- return output_dir
-
-
def extract_error_msg(rep):
"""
If a custom error message was provided, then extract it otherwise
)
+def pytest_terminal_summary(terminalreporter, exitstatus):
+    # Ensures all preload information and warnings appear after
+    # test results
+    try:
+        create_preloads(terminalreporter.config, exitstatus)
+    except Exception:
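+        # a preload generation failure should not break the summary hook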
+        print("Error creating preloads, skipping preload generation")
+        traceback.print_exc()
+
+
# noinspection PyUnusedLocal
def pytest_collection_modifyitems(session, config, items):
"""
    config.traceability_items = list(items)  # save all items for traceability
    if not config.option.self_test:
        for item in items:
-            # checking if test belongs to a category
-            if hasattr(item.function, "categories"):
-                if config.option.test_categories:
-                    test_categories = getattr(item.function, "categories")
-                    passed_categories = config.option.test_categories
-                    if not all(
-                        category in passed_categories for category in test_categories
-                    ):
-                        item.add_marker(
-                            pytest.mark.skip(
-                                reason=(
-                                    "Test categories do not match "
-                                    "all the passed categories"
-                                )
-                            )
-                        )
-                else:
-                    item.add_marker(
-                        pytest.mark.skip(
-                            reason=(
-                                "Test belongs to a category but "
-                                "no categories were passed"
-                            )
-                        )
-                    )
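+            # compare the test's category markers against the categories
+            # requested on the command line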
+            passed_categories = set(config.option.test_categories or [])
+            all_of_categories = getattr(item.function, "all_categories", set())
+            any_of_categories = getattr(item.function, "any_categories", set())
+            if all_of_categories and not all_of_categories.issubset(passed_categories):
+                item.add_marker(
+                    pytest.mark.skip(
+                        reason=(
+                            "Test categories do not match "
+                            "all the passed categories"
+                        )
+                    )
+                )
+            if any_of_categories and not passed_categories.intersection(
+                any_of_categories
+            ):
+                item.add_marker(
+                    pytest.mark.skip(
+                        reason=(
+                            "Test categories do not match "
+                            "any of the passed categories"
+                        )
+                    )
+                )
items.sort(
key=lambda x: (0, x.name)
def relative_paths(base_dir, paths):
-    return [os.path.relpath(p, base_dir) for p in paths]
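+    # drop empty paths; os.path.relpath raises ValueError for an empty string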
+    return [os.path.relpath(p, base_dir) for p in paths if p != ""]
# noinspection PyTypeChecker
        {
            "file_links": make_href(failure.files, template_path),
            "test_id": failure.test_id,
-            "error_message": failure.error_message.replace("\n", "<br/><br/>"),
-            "raw_output": failure.raw_output,
+ "error_message": escape(failure.error_message).replace(
+ "\n", "<br/><br/>"
+ ),
+ "raw_output": escape(failure.raw_output),
"requirements": docutils.core.publish_parts(
writer_name="html", source=failure.requirement_text(reqs)
)["body"],
help="optional category of test to execute",
)
+    parser.addoption(
+        "--env-directory",
+        dest="env_dir",
+        action="store",
+        help="optional directory of .env files for preload generation",
+    )
+
+    parser.addoption(
+        "--preload-format",
+        dest="preload_formats",
+        action="append",
+        help=(
+            "Preload format to create (multiple allowed). If not provided, "
+            "then all available formats will be created: {}"
+        ).format(", ".join(get_generator_plugin_names())),
+    )
+
def pytest_configure(config):
"""
    :param path: string directory containing files
    :return: string MD5 hash code (hex)
    """
-    md5 = hashlib.md5()
+    md5 = hashlib.md5()  # nosec: md5 is a content fingerprint here, not a security control
    for dir_path, sub_dirs, filenames in os.walk(path):
        for filename in filenames:
            file_path = os.path.join(dir_path, filename)
    rst_path = os.path.join(output_dir, "rst.csv")
    with open(rst_path, "w", newline="") as f:
        out = csv.writer(f)
-        out.writerow(("Requirement ID", "Requirement", "Test Module", "Test Name"))
+        out.writerow(("Requirement ID", "Test Module", "Test Name"))
        for req_id, metadata in data.items():
            out.writerow(
                (
                    metadata["full_title"],
-                    metadata["description"],
                    metadata["test_case"],
                    metadata["validated_by"],
                )