From: Lovett, Trevor
Date: Wed, 30 Jan 2019 15:53:22 +0000 (-0600)
Subject: [VVP] Add new columns to traceability report
X-Git-Tag: 5.0.0~78^2
X-Git-Url: https://gerrit.onap.org/r/gitweb?p=vvp%2Fvalidation-scripts.git;a=commitdiff_plain;h=350d9dcefe7f69d3e6a3923d2670fa2d140f2ba1

[VVP] Add new columns to traceability report

Added the following columns:

* Keyword - MUST, MUST NOT, etc.
* Validation Mode - contents of :validation_mode: from vnfrqts
* Is Testable - TRUE if the requirement is testable based on its keyword
  and validation mode, FALSE otherwise

Also fixed the following issues:

* Traceability and mapping error reports were not written to the
  configured output directory when a custom value was provided
* Removed duplicate rows from the traceability report
* Traceability report now shows all Heat requirements

Change-Id: I339b42e9e662f1681b78d1a49bd7e71e87cae21a
Issue-ID: VVP-148
Signed-off-by: Lovett, Trevor
---

diff --git a/ice_validator/tests/conftest.py b/ice_validator/tests/conftest.py
index 1a47e63..807ac71 100644
--- a/ice_validator/tests/conftest.py
+++ b/ice_validator/tests/conftest.py
@@ -42,7 +42,6 @@ import io
 import json
 import os
 import re
-import sys
 import time
 from collections import defaultdict
 from itertools import chain
@@ -86,6 +85,11 @@ ALL_RESULTS = []
 
 
 def get_output_dir(config):
+    """
+    Retrieve the output directory for the reports and create it if necessary
+    :param config: pytest configuration
+    :return: output directory as string
+    """
     output_dir = config.option.output_dir or DEFAULT_OUTPUT_DIR
     if not os.path.exists(output_dir):
         os.makedirs(output_dir, exist_ok=True)
@@ -996,6 +1000,11 @@ def pytest_generate_tests(metafunc):
 
 
 def hash_directory(path):
+    """
+    Create md5 hash using the contents of all files under ``path``
+    :param path: string directory containing files
+    :return: string MD5 hash code (hex)
+    """
     md5 = hashlib.md5()
     for dir_path, sub_dirs, filenames in os.walk(path):
         for filename in filenames:
@@ -1013,45 +1022,20 @@ def load_current_requirements():
         return data["versions"][version]["needs"]
 
 
-def compat_open(path):
-    """Invokes open correctly depending on the Python version"""
-    if sys.version_info.major < 3:
-        return open(path, "wb")
-    else:
-        return open(path, "w", newline="")
-
-
-def unicode_writerow(writer, row):
-    if sys.version_info.major < 3:
-        row = [s.encode("utf8") for s in row]
-    writer.writerow(row)
-
-
-def parse_heat_requirements(reqs):
-    """Takes requirements and returns list of only Heat requirements"""
-    data = json.loads(reqs)
-    for key, values in list(data.items()):
-        if "Heat" in (values["docname"]):
-            if "MUST" not in (values["keyword"]):
-                del data[key]
-            else:
-                if "none" in (values["validation_mode"]):
-                    del data[key]
-        else:
-            del data[key]
-    return data
+def select_heat_requirements(reqs):
+    """Filters dict requirements to only those requirements pertaining to Heat"""
+    return {k: v for k, v in reqs.items() if "Heat" in v["docname"]}
 
 
 # noinspection PyUnusedLocal
 def pytest_report_collectionfinish(config, startdir, items):
     """Generates a simple traceability report to output/traceability.csv"""
-    traceability_path = os.path.join(__path__[0], "../output/traceability.csv")
+    traceability_path = os.path.join(get_output_dir(config), "traceability.csv")
     output_dir = os.path.split(traceability_path)[0]
     if not os.path.exists(output_dir):
         os.makedirs(output_dir)
     reqs = load_current_requirements()
-    reqs = json.dumps(reqs)
-    requirements = parse_heat_requirements(reqs)
+    requirements = select_heat_requirements(reqs)
     unmapped, mapped = partition(
         lambda i: hasattr(i.function, "requirement_ids"), items
     )
@@ -1067,38 +1051,59 @@ def pytest_report_collectionfinish(config, startdir, items):
                     (req_id, item.function.__module__, item.function.__name__)
                 )
 
-    mapping_error_path = os.path.join(__path__[0], "../output/mapping_errors.csv")
-    with compat_open(mapping_error_path) as f:
+    mapping_error_path = os.path.join(get_output_dir(config), "mapping_errors.csv")
+    with open(mapping_error_path, "w", newline="") as f:
         writer = csv.writer(f)
         for err in mapping_errors:
-            unicode_writerow(writer, err)
+            writer.writerow(err)
 
-    with compat_open(traceability_path) as f:
+    with open(traceability_path, "w", newline="") as f:
         out = csv.writer(f)
-        unicode_writerow(
-            out,
-            ("Requirement ID", "Requirement", "Section", "Test Module", "Test Name"),
+        out.writerow(
+            ("Requirement ID", "Requirement", "Section",
+             "Keyword", "Validation Mode", "Is Testable",
+             "Test Module", "Test Name"),
         )
         for req_id, metadata in requirements.items():
+            keyword = metadata["keyword"].upper()
+            mode = metadata["validation_mode"].lower()
+            testable = keyword in {"MUST", "MUST NOT"} and mode != "none"
            if req_to_test[req_id]:
                 for item in req_to_test[req_id]:
-                    unicode_writerow(
-                        out,
+                    out.writerow(
                         (
                             req_id,
                             metadata["description"],
                             metadata["section_name"],
+                            keyword,
+                            mode,
+                            "TRUE" if testable else "FALSE",
                             item.function.__module__,
                             item.function.__name__,
                         ),
                     )
             else:
-                unicode_writerow(
-                    out,
-                    (req_id, metadata["description"], metadata["section_name"], "", ""),
+                out.writerow(
+                    (req_id,
+                     metadata["description"],
+                     metadata["section_name"],
+                     keyword,
+                     mode,
+                     "TRUE" if testable else "FALSE",
+                     "",   # test module
+                     ""),  # test function
                 )
         # now write out any test methods that weren't mapped to requirements
-        for item in unmapped:
-            unicode_writerow(
-                out, ("", "", "", item.function.__module__, item.function.__name__)
+        unmapped_tests = {(item.function.__module__, item.function.__name__)
+                          for item in unmapped}
+        for test_module, test_name in unmapped_tests:
+            out.writerow(
+                ("",        # req ID
+                 "",        # description
+                 "",        # section name
+                 "",        # keyword
+                 "static",  # validation mode
+                 "TRUE",    # testable
+                 test_module,
+                 test_name)
            )
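
Not part of the patch: the new "Is Testable" column comes down to a single rule,
visible in the pytest_report_collectionfinish changes above - a requirement is
considered testable only when its keyword is binding (MUST or MUST NOT) and its
validation_mode is not "none". A minimal, self-contained Python sketch of that
rule follows; the sample requirement entry is purely hypothetical and only shows
the fields the report reads:

    def is_testable(metadata):
        """Return True when a requirement should be covered by a test."""
        # Only binding keywords count; anything other than MUST / MUST NOT is skipped.
        keyword = metadata["keyword"].upper()
        # A validation_mode of "none" marks the requirement as not machine-checkable.
        mode = metadata["validation_mode"].lower()
        return keyword in {"MUST", "MUST NOT"} and mode != "none"

    # Hypothetical requirement metadata, for illustration only
    example = {"keyword": "MUST", "validation_mode": "static"}
    print("TRUE" if is_testable(example) else "FALSE")  # prints TRUE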