X-Git-Url: https://gerrit.onap.org/r/gitweb?a=blobdiff_plain;f=ice_validator%2Ftests%2Fconftest.py;h=2c88ece93b0ad386fbfabe051a0859871c3bdeaf;hb=refs%2Fchanges%2F52%2F78952%2F1;hp=a2f432126ff369cdce9bfad5774577ffec62c0db;hpb=ab01f96b1405bc037853847138a121581bb98f05;p=vvp%2Fvalidation-scripts.git

diff --git a/ice_validator/tests/conftest.py b/ice_validator/tests/conftest.py
index a2f4321..2c88ece 100644
--- a/ice_validator/tests/conftest.py
+++ b/ice_validator/tests/conftest.py
@@ -283,7 +283,8 @@ class TestResult:
         elif "yaml_files" in self.item.fixturenames:
             return self.item.funcargs["yaml_files"]
         else:
-            return [self.result.nodeid.split("[")[1][:-1]]
+            parts = self.result.nodeid.split("[")
+            return [""] if len(parts) == 1 else [parts[1][:-1]]
 
     def _get_error_message(self):
         """
@@ -713,6 +714,10 @@ def collect_errors(r_id, collection_failures, test_result):
     return [e for e in errors if e]
 
 
+def relative_paths(base_dir, paths):
+    return [os.path.relpath(p, base_dir) for p in paths]
+
+
 def generate_json(outpath, template_path, categories):
     """
     Creates a JSON summary of the entire test run.
@@ -720,7 +725,9 @@
     reqs = load_current_requirements()
     data = {
         "version": "dublin",
-        "template_directory": template_path,
+        "template_directory": os.path.splitdrive(template_path)[1].replace(
+            os.path.sep, "/"
+        ),
         "timestamp": make_iso_timestamp(),
         "checksum": hash_directory(template_path),
         "categories": categories,
@@ -744,7 +751,7 @@
     for result in ALL_RESULTS:
         results.append(
             {
-                "files": result.files,
+                "files": relative_paths(template_path, result.files),
                 "test_module": result.test_module,
                 "test_case": result.test_case,
                 "result": result.outcome,
@@ -1038,11 +1045,11 @@
         if "none" in (values["validation_mode"]):
             del data[key]
         else:
-            """Creates links in RST format to requirements and test cases"""
+            # Creates links in RST format to requirements and test cases
            if values["test_case"]:
-                val_list = re.findall(r'(?<=\.).*', values["test_case"])
-                val = TEST_SCRIPT_SITE + val_list[0] + ".py"
-                rst_value = ("`" + val_list[0] + " <" + val + ">`_")
+                mod = values["test_case"].split(".")[-1]
+                val = TEST_SCRIPT_SITE + mod + ".py"
+                rst_value = ("`" + mod + " <" + val + ">`_")
                 title = "`" + values["id"] + " <" + VNFRQTS_ID_URL + values["docname"].replace(" ", "%20") + ".html#" + values["id"] + ">`_"
                 data[key].update({'full_title': title, 'test_case': rst_value})
             else:
@@ -1093,7 +1100,8 @@ def pytest_report_collectionfinish(config, startdir, items):
             if req_id not in req_to_test:
                 req_to_test[req_id].add(item)
                 if req_id in requirements:
-                    reqs[req_id].update({'test_case': item.function.__module__, 'validated_by': item.function.__name__})
+                    reqs[req_id].update({'test_case': item.function.__module__,
+                                         'validated_by': item.function.__name__})
             if req_id not in requirements:
                 mapping_errors.add(
                     (req_id, item.function.__module__, item.function.__name__)
                 )
@@ -1142,8 +1150,8 @@
                     ""), # test function
                 )
        # now write out any test methods that weren't mapped to requirements
-        unmapped_tests = {(item.function.__module__, item.function.__name__)
-                          for item in unmapped}
+        unmapped_tests = {(item.function.__module__, item.function.__name__) for item in
+                          unmapped}
         for test_module, test_name in unmapped_tests:
             out.writerow(
                 ("", # req ID
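
Reviewer note, not part of the patch above: the report-generation hunks make the JSON summary portable by recording template-relative file paths and a drive-less, forward-slash template_directory instead of absolute OS-specific paths. The minimal standalone sketch below illustrates that intent under stated assumptions; the sample directory and file names are hypothetical, and only the os.path calls the patch itself relies on are exercised.

import os


def relative_paths(base_dir, paths):
    # Same shape as the helper the patch adds: report each file relative to the template dir.
    return [os.path.relpath(p, base_dir) for p in paths]


if __name__ == "__main__":
    # Hypothetical template directory and result files, for illustration only.
    template_path = os.path.join(os.sep, "work", "heat", "vnf1")
    files = [
        os.path.join(template_path, "base.yaml"),
        os.path.join(template_path, "env", "base.env"),
    ]
    print(relative_paths(template_path, files))
    # On POSIX this prints ['base.yaml', 'env/base.env'].
    # The directory itself is recorded without a drive letter and with forward slashes:
    print(os.path.splitdrive(template_path)[1].replace(os.path.sep, "/"))
    # On POSIX this prints /work/heat/vnf1

On Windows the same calls drop the drive letter and normalise the separators, so the paths written into the JSON report stay comparable across platforms.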