diff --git a/ice_validator/tests/conftest.py b/ice_validator/tests/conftest.py
index 61f4cd8..07a66f2 100644
--- a/ice_validator/tests/conftest.py
+++ b/ice_validator/tests/conftest.py
@@ -63,8 +63,12 @@ DEFAULT_OUTPUT_DIR = "{}/../output".format(__path__[0])
 RESOLUTION_STEPS_FILE = "resolution_steps.json"
 HEAT_REQUIREMENTS_FILE = os.path.join(__path__[0], "..", "heat_requirements.json")
-TEST_SCRIPT_SITE = "https://github.com/onap/vvp-validation-scripts/blob/master/ice_validator/tests/"
-VNFRQTS_ID_URL = "https://docs.onap.org/en/latest/submodules/vnfrqts/requirements.git/docs/"
+TEST_SCRIPT_SITE = (
+    "https://github.com/onap/vvp-validation-scripts/blob/master/ice_validator/tests/"
+)
+VNFRQTS_ID_URL = (
+    "https://docs.onap.org/en/latest/submodules/vnfrqts/requirements.git/docs/"
+)
 
 REPORT_COLUMNS = [
     ("Input File", "file"),
@@ -191,6 +195,13 @@ class TestResult:
         """
         return self.item.function.__module__.split(".")[-1]
 
+    @property
+    def test_id(self):
+        """
+        :return: ID of the test (test_module + test_case)
+        """
+        return "{}::{}".format(self.test_module, self.test_case)
+
     @property
     def raw_output(self):
         """
@@ -209,7 +220,8 @@ class TestResult:
         """
         text = (
             "\n\n{}: \n{}".format(r_id, curr_reqs[r_id]["description"])
-            for r_id in self.requirement_ids if r_id in curr_reqs
+            for r_id in self.requirement_ids
+            if r_id in curr_reqs
         )
         return "".join(text)
 
@@ -307,7 +319,6 @@ def pytest_runtest_makereport(item, call):
     if outcome.get_result().when != "call":
         return  # only capture results of test cases themselves
     result = TestResult(item, outcome)
-    ALL_RESULTS.append(result)
     if (
         not item.config.option.continue_on_failure
         and result.is_base_test
@@ -316,8 +327,12 @@ def pytest_runtest_makereport(item, call):
         msg = "!!Base Test Failure!! Halting test suite execution...\n{}".format(
             result.error_message
         )
+        result.error_message = msg
+        ALL_RESULTS.append(result)
         pytest.exit("{}\n{}\n{}".format(msg, result.files, result.test_case))
 
+    ALL_RESULTS.append(result)
+
 
 def make_timestamp():
     """
@@ -517,7 +532,7 @@ def generate_csv_report(output_dir, categories, template_path, failures):
         rows.append(
             [
                 "\n".join(failure.files),
-                failure.test_module,
+                failure.test_id,
                 failure.requirement_text(reqs),
                 failure.resolution_steps(resolutions),
                 failure.error_message,
@@ -581,7 +596,7 @@ def generate_excel_report(output_dir, categories, template_path, failures):
     # table content
     for row, failure in enumerate(failures, start=start_error_table_row + 2):
         worksheet.write(row, 0, "\n".join(failure.files), normal)
-        worksheet.write(row, 1, failure.test_module, normal)
+        worksheet.write(row, 1, failure.test_id, normal)
         worksheet.write(row, 2, failure.requirement_text(reqs), normal)
         worksheet.write(row, 3, failure.resolution_steps(resolutions), normal)
         worksheet.write(row, 4, failure.error_message, normal)
@@ -800,7 +815,7 @@ def generate_html_report(outpath, categories, template_path, failures):
         fail_data.append(
             {
                 "file_links": make_href(failure.files),
-                "test_id": failure.test_module,
+                "test_id": failure.test_id,
                 "error_message": failure.error_message,
                 "raw_output": failure.raw_output,
                 "requirements": docutils.core.publish_parts(
@@ -1037,29 +1052,59 @@ def select_heat_requirements(reqs):
     return {k: v for k, v in reqs.items() if "heat" in v["docname"].lower()}
 
 
+def is_testable(reqs):
+    """Mark each requirement with a boolean "testable" flag based on its keyword and validation mode"""
+    for key, values in reqs.items():
+        if ("MUST" in values.get("keyword", "").upper()) and (
+            "none" not in values.get("validation_mode", "").lower()
+        ):
+            reqs[key]["testable"] = True
+        else:
+            reqs[key]["testable"] = False
+    return reqs
+
+
 def build_rst_json(reqs):
-    """Takes requirements and returns list of only Heat requirements"""
-    data = json.loads(reqs)
-    for key, values in list(data.items()):
-        if "Heat" in (values["docname"]):
-            if "MUST" in (values["keyword"]):
-                if "none" in (values["validation_mode"]):
-                    del data[key]
-                else:
-                    # Creates links in RST format to requirements and test cases
-                    if values["test_case"]:
-                        mod = values["test_case"].split(".")[-1]
-                        val = TEST_SCRIPT_SITE + mod + ".py"
-                        rst_value = ("`" + mod + " <" + val + ">`_")
-                        title = "`" + values["id"] + " <" + VNFRQTS_ID_URL + values["docname"].replace(" ", "%20") + ".html#" + values["id"] + ">`_"
-                        data[key].update({'full_title': title, 'test_case': rst_value})
-                    else:
-                        del data[key]
-            else:
-                del data[key]
-        else:
-            del data[key]
-    return data
+    """Add RST-formatted doc and test-case links to testable Heat requirements; drop the rest"""
+    for key, values in list(reqs.items()):
+        if values["testable"]:
+            # Creates links in RST format to requirements and test cases
+            if values["test_case"]:
+                mod = values["test_case"].split(".")[-1]
+                val = TEST_SCRIPT_SITE + mod + ".py"
+                rst_value = "`" + mod + " <" + val + ">`_"
+                title = (
+                    "`"
+                    + values["id"]
+                    + " <"
+                    + VNFRQTS_ID_URL
+                    + values["docname"].replace(" ", "%20")
+                    + ".html#"
+                    + values["id"]
+                    + ">`_"
+                )
+                reqs[key].update({"full_title": title, "test_case": rst_value})
+            else:
+                title = (
+                    "`"
+                    + values["id"]
+                    + " <"
+                    + VNFRQTS_ID_URL
+                    + values["docname"].replace(" ", "%20")
+                    + ".html#"
+                    + values["id"]
+                    + ">`_"
+                )
+                reqs[key].update(
+                    {
+                        "full_title": title,
+                        "test_case": "No test for requirement",
+                        "validated_by": "static",
+                    }
+                )
+        else:
+            del reqs[key]
+    return reqs
 
 
 def generate_rst_table(output_dir, data):
@@ -1067,9 +1112,7 @@ def generate_rst_table(output_dir, data):
     rst_path = os.path.join(output_dir, "rst.csv")
     with open(rst_path, "w", newline="") as f:
         out = csv.writer(f)
-        out.writerow(
-            ("Requirement ID", "Requirement", "Test Module", "Test Name"),
-        )
+        out.writerow(("Requirement ID", "Requirement", "Test Module", "Test Name"))
         for req_id, metadata in data.items():
             out.writerow(
                 (
@@ -1090,6 +1133,7 @@ def pytest_report_collectionfinish(config, startdir, items):
         os.makedirs(output_dir)
     reqs = load_current_requirements()
     requirements = select_heat_requirements(reqs)
+    testable_requirements = is_testable(requirements)
    unmapped, mapped = partition(
        lambda i: hasattr(i.function, "requirement_ids"), items
    )
@@ -1101,8 +1145,12 @@ def pytest_report_collectionfinish(config, startdir, items):
         if req_id not in req_to_test:
             req_to_test[req_id].add(item)
             if req_id in requirements:
-                reqs[req_id].update({'test_case': item.function.__module__,
-                                     'validated_by': item.function.__name__})
+                reqs[req_id].update(
+                    {
+                        "test_case": item.function.__module__,
+                        "validated_by": item.function.__name__,
+                    }
+                )
         if req_id not in requirements:
             mapping_errors.add(
                 (req_id, item.function.__module__, item.function.__name__)
@@ -1117,14 +1165,18 @@ def pytest_report_collectionfinish(config, startdir, items):
     with open(traceability_path, "w", newline="") as f:
         out = csv.writer(f)
         out.writerow(
-            ("Requirement ID", "Requirement", "Section",
-             "Keyword", "Validation Mode", "Is Testable",
-             "Test Module", "Test Name"),
+            (
+                "Requirement ID",
+                "Requirement",
+                "Section",
+                "Keyword",
+                "Validation Mode",
+                "Is Testable",
+                "Test Module",
+                "Test Name",
+            )
         )
-        for req_id, metadata in requirements.items():
-            keyword = metadata["keyword"].upper()
-            mode = metadata["validation_mode"].lower()
-            testable = keyword in {"MUST", "MUST NOT"} and mode != "none"
+        for req_id, metadata in testable_requirements.items():
             if req_to_test[req_id]:
                 for item in req_to_test[req_id]:
                     out.writerow(
@@ -1132,37 +1184,42 @@ def pytest_report_collectionfinish(config, startdir, items):
                             req_id,
                             metadata["description"],
                             metadata["section_name"],
-                            keyword,
-                            mode,
-                            "TRUE" if testable else "FALSE",
+                            metadata["keyword"],
+                            metadata["validation_mode"],
+                            metadata["testable"],
                             item.function.__module__,
                             item.function.__name__,
-                        ),
+                        )
                     )
             else:
                 out.writerow(
-                    (req_id,
-                     metadata["description"],
-                     metadata["section_name"],
-                     keyword,
-                     mode,
-                     "TRUE" if testable else "FALSE",
-                     "",  # test module
-                     ""),  # test function
+                    (
+                        req_id,
+                        metadata["description"],
+                        metadata["section_name"],
+                        metadata["keyword"],
+                        metadata["validation_mode"],
+                        metadata["testable"],
+                        "",  # test module
+                        "",  # test function
+                    )
                 )
 
         # now write out any test methods that weren't mapped to requirements
-        unmapped_tests = {(item.function.__module__, item.function.__name__) for item in
-                          unmapped}
+        unmapped_tests = {
+            (item.function.__module__, item.function.__name__) for item in unmapped
+        }
         for test_module, test_name in unmapped_tests:
             out.writerow(
-                ("",  # req ID
-                 "",  # description
-                 "",  # section name
-                 "",  # keyword
-                 "static",  # validation mode
-                 "TRUE",  # testable
-                 test_module,
-                 test_name)
+                (
+                    "",  # req ID
+                    "",  # description
+                    "",  # section name
+                    "",  # keyword
+                    "static",  # validation mode
+                    "TRUE",  # testable
+                    test_module,
+                    test_name,
+                )
             )
-    generate_rst_table(get_output_dir(config), build_rst_json(json.dumps(reqs)))
+    generate_rst_table(get_output_dir(config), build_rst_json(testable_requirements))
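
Reviewer note: the "testable" flag that is_testable() attaches drives both the traceability CSV and build_rst_json() above. Below is a minimal standalone sketch of that rule; the helper name flag_testable and all requirement records are invented for illustration, and the condition is a condensed restatement of the patched if/else:

# Condensed restatement of the is_testable() rule from the patch above.
# All requirement IDs and field values here are hypothetical sample data.
def flag_testable(reqs):
    for key, values in reqs.items():
        keyword = values.get("keyword", "").upper()
        mode = values.get("validation_mode", "").lower()
        # Testable: a MUST/MUST NOT requirement whose validation mode is not "none"
        reqs[key]["testable"] = "MUST" in keyword and "none" not in mode
    return reqs

sample = {
    "R-00001": {"keyword": "MUST", "validation_mode": "static"},
    "R-00002": {"keyword": "MUST NOT", "validation_mode": "stand_alone"},
    "R-00003": {"keyword": "MAY", "validation_mode": "static"},
    "R-00004": {"keyword": "MUST", "validation_mode": "none"},
}

for req_id, values in flag_testable(sample).items():
    print(req_id, values["testable"])
# R-00001 True   -- MUST requirement with a concrete validation mode
# R-00002 True   -- "MUST" is a substring of "MUST NOT", so prohibitions qualify
# R-00003 False  -- MAY requirements are not auto-validated
# R-00004 False  -- validation mode "none" marks a requirement as untestable

The substring checks are deliberate: they let "MUST NOT" qualify through "MUST". The trade-off is that any future validation mode containing "none" as a substring would also be treated as untestable.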