X-Git-Url: https://gerrit.onap.org/r/gitweb?a=blobdiff_plain;f=ice_validator%2Ftests%2Fconftest.py;h=61f4cd8c5d49b75c0f82a62bd5daa27ad0a752c0;hb=ad5a66886c550eb6cb7c0d46bc097d382397fc87;hp=a2f432126ff369cdce9bfad5774577ffec62c0db;hpb=ab01f96b1405bc037853847138a121581bb98f05;p=vvp%2Fvalidation-scripts.git

diff --git a/ice_validator/tests/conftest.py b/ice_validator/tests/conftest.py
index a2f4321..61f4cd8 100644
--- a/ice_validator/tests/conftest.py
+++ b/ice_validator/tests/conftest.py
@@ -209,7 +209,7 @@ class TestResult:
         """
         text = (
             "\n\n{}: \n{}".format(r_id, curr_reqs[r_id]["description"])
-            for r_id in self.requirement_ids
+            for r_id in self.requirement_ids if r_id in curr_reqs
         )
         return "".join(text)
 
@@ -283,7 +283,8 @@ class TestResult:
         elif "yaml_files" in self.item.fixturenames:
             return self.item.funcargs["yaml_files"]
         else:
-            return [self.result.nodeid.split("[")[1][:-1]]
+            parts = self.result.nodeid.split("[")
+            return [""] if len(parts) == 1 else [parts[1][:-1]]
 
     def _get_error_message(self):
         """
@@ -431,12 +432,13 @@ def generate_report(outpath, template_path, categories, output_format="html"):
     failures = [r for r in ALL_RESULTS if r.is_failed]
     generate_failure_file(outpath)
     output_format = output_format.lower().strip() if output_format else "html"
+    generate_json(outpath, template_path, categories)
     if output_format == "html":
         generate_html_report(outpath, categories, template_path, failures)
     elif output_format == "excel":
         generate_excel_report(outpath, categories, template_path, failures)
     elif output_format == "json":
-        generate_json(outpath, template_path, categories)
+        return
     elif output_format == "csv":
         generate_csv_report(outpath, categories, template_path, failures)
     else:
@@ -713,6 +715,10 @@ def collect_errors(r_id, collection_failures, test_result):
     return [e for e in errors if e]
 
 
+def relative_paths(base_dir, paths):
+    return [os.path.relpath(p, base_dir) for p in paths]
+
+
 def generate_json(outpath, template_path, categories):
     """
     Creates a JSON summary of the entire test run.
@@ -720,7 +726,9 @@ def generate_json(outpath, template_path, categories):
     reqs = load_current_requirements()
     data = {
         "version": "dublin",
-        "template_directory": template_path,
+        "template_directory": os.path.splitdrive(template_path)[1].replace(
+            os.path.sep, "/"
+        ),
         "timestamp": make_iso_timestamp(),
         "checksum": hash_directory(template_path),
         "categories": categories,
@@ -744,7 +752,7 @@ def generate_json(outpath, template_path, categories):
     for result in ALL_RESULTS:
         results.append(
             {
-                "files": result.files,
+                "files": relative_paths(template_path, result.files),
                 "test_module": result.test_module,
                 "test_case": result.test_case,
                 "result": result.outcome,
@@ -1026,7 +1034,7 @@ def load_current_requirements():
 
 def select_heat_requirements(reqs):
     """Filters dict requirements to only those requirements pertaining to Heat"""
-    return {k: v for k, v in reqs.items() if "Heat" in v["docname"]}
+    return {k: v for k, v in reqs.items() if "heat" in v["docname"].lower()}
 
 
 def build_rst_json(reqs):
@@ -1038,11 +1046,11 @@ def build_rst_json(reqs):
         if "none" in (values["validation_mode"]):
             del data[key]
         else:
-            """Creates links in RST format to requirements and test cases"""
+            # Creates links in RST format to requirements and test cases
            if values["test_case"]:
-                val_list = re.findall(r'(?<=\.).*', values["test_case"])
-                val = TEST_SCRIPT_SITE + val_list[0] + ".py"
-                rst_value = ("`" + val_list[0] + " <" + val + ">`_")
+                mod = values["test_case"].split(".")[-1]
+                val = TEST_SCRIPT_SITE + mod + ".py"
+                rst_value = ("`" + mod + " <" + val + ">`_")
                 title = "`" + values["id"] + " <" + VNFRQTS_ID_URL + values["docname"].replace(" ", "%20") + ".html#" + values["id"] + ">`_"
                 data[key].update({'full_title': title, 'test_case': rst_value})
             else:
@@ -1054,9 +1062,9 @@ def build_rst_json(reqs):
     return data
 
 
-def generate_rst_table(data):
+def generate_rst_table(output_dir, data):
     """Generate a formatted csv to be used in RST"""
-    rst_path = os.path.join(__path__[0], "../output/rst.csv")
+    rst_path = os.path.join(output_dir, "rst.csv")
     with open(rst_path, "w", newline="") as f:
         out = csv.writer(f)
         out.writerow(
@@ -1093,7 +1101,8 @@ def pytest_report_collectionfinish(config, startdir, items):
             if req_id not in req_to_test:
                 req_to_test[req_id].add(item)
                 if req_id in requirements:
-                    reqs[req_id].update({'test_case': item.function.__module__, 'validated_by': item.function.__name__})
+                    reqs[req_id].update({'test_case': item.function.__module__,
+                                         'validated_by': item.function.__name__})
             if req_id not in requirements:
                 mapping_errors.add(
                     (req_id, item.function.__module__, item.function.__name__)
                 )
@@ -1142,8 +1151,8 @@ def pytest_report_collectionfinish(config, startdir, items):
                  ""),  # test function
             )
         # now write out any test methods that weren't mapped to requirements
-        unmapped_tests = {(item.function.__module__, item.function.__name__)
-                          for item in unmapped}
+        unmapped_tests = {(item.function.__module__, item.function.__name__) for item in
+                          unmapped}
         for test_module, test_name in unmapped_tests:
             out.writerow(
                 ("",  # req ID
                  "",  # requirement
                  "",  # prose
                  "static",
                  "",  # test suite
                  "",  # test case
                  test_module,  # test module
                  test_name)
             )
 
-    generate_rst_table(build_rst_json(json.dumps(reqs)))
+    generate_rst_table(get_output_dir(config), build_rst_json(json.dumps(reqs)))
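
Note: the snippet below is not part of the patch; it is a minimal, self-contained sketch of the path handling the patch introduces for the JSON report (the relative_paths helper plus the "template_directory" normalization), using only the Python standard library. The helper name portable_template_dir and the example directory and file names are hypothetical.

    import os


    def relative_paths(base_dir, paths):
        # Same idea as the helper added in the patch: report each file
        # relative to the template directory instead of as an absolute path.
        return [os.path.relpath(p, base_dir) for p in paths]


    def portable_template_dir(template_path):
        # Mirrors the "template_directory" normalization: strip any Windows
        # drive letter and use forward slashes so the value is platform neutral.
        return os.path.splitdrive(template_path)[1].replace(os.path.sep, "/")


    if __name__ == "__main__":
        base = os.path.abspath("templates")  # hypothetical template directory
        files = [
            os.path.join(base, "base_vfw.yaml"),  # hypothetical Heat template
            os.path.join(base, "base_vfw.env"),   # hypothetical environment file
        ]
        print(portable_template_dir(base))  # e.g. "/home/user/templates"
        print(relative_paths(base, files))  # ["base_vfw.yaml", "base_vfw.env"]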