Code Review
/
vvp
/
validation-scripts.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
review
|
tree
raw
|
inline
| side by side
[VVP] JSON report shows relative paths for "files"
[vvp/validation-scripts.git]
/
ice_validator
/
tests
/
conftest.py
diff --git a/ice_validator/tests/conftest.py b/ice_validator/tests/conftest.py
index a2f4321..2c88ece 100644 (file)
--- a/ice_validator/tests/conftest.py
+++ b/ice_validator/tests/conftest.py
@@ -283,7 +283,8 @@ class TestResult:
elif "yaml_files" in self.item.fixturenames:
return self.item.funcargs["yaml_files"]
else:
elif "yaml_files" in self.item.fixturenames:
return self.item.funcargs["yaml_files"]
else:
- return [self.result.nodeid.split("[")[1][:-1]]
+ parts = self.result.nodeid.split("[")
+ return [""] if len(parts) == 1 else [parts[1][:-1]]
def _get_error_message(self):
"""
def _get_error_message(self):
"""
@@ -713,6 +714,10 @@ def collect_errors(r_id, collection_failures, test_result):
return [e for e in errors if e]
return [e for e in errors if e]
+def relative_paths(base_dir, paths):
+ return [os.path.relpath(p, base_dir) for p in paths]
+
+
def generate_json(outpath, template_path, categories):
"""
Creates a JSON summary of the entire test run.
def generate_json(outpath, template_path, categories):
"""
Creates a JSON summary of the entire test run.
@@ -720,7 +725,9 @@ def generate_json(outpath, template_path, categories):
reqs = load_current_requirements()
data = {
"version": "dublin",
reqs = load_current_requirements()
data = {
"version": "dublin",
- "template_directory": template_path,
+ "template_directory": os.path.splitdrive(template_path)[1].replace(
+ os.path.sep, "/"
+ ),
"timestamp": make_iso_timestamp(),
"checksum": hash_directory(template_path),
"categories": categories,
"timestamp": make_iso_timestamp(),
"checksum": hash_directory(template_path),
"categories": categories,
@@ -744,7 +751,7 @@ def generate_json(outpath, template_path, categories):
for result in ALL_RESULTS:
results.append(
{
for result in ALL_RESULTS:
results.append(
{
- "files": result.files,
+ "files": relative_paths(template_path, result.files),
"test_module": result.test_module,
"test_case": result.test_case,
"result": result.outcome,
"test_module": result.test_module,
"test_case": result.test_case,
"result": result.outcome,
@@ -1038,11 +1045,11 @@ def build_rst_json(reqs):
if "none" in (values["validation_mode"]):
del data[key]
else:
if "none" in (values["validation_mode"]):
del data[key]
else:
- """Creates links in RST format to requirements and test cases"""
+ # Creates links in RST format to requirements and test cases
if values["test_case"]:
if values["test_case"]:
- val_list = re.findall(r'(?<=\.).*', values["test_case"])
- val = TEST_SCRIPT_SITE + val_list[0] + ".py"
- rst_value = ("`" + val_list[0] + " <" + val + ">`_")
+ mod = values["test_case"].split(".")[-1]
+ val = TEST_SCRIPT_SITE + mod + ".py"
+ rst_value = ("`" + mod + " <" + val + ">`_")
title = "`" + values["id"] + " <" + VNFRQTS_ID_URL + values["docname"].replace(" ", "%20") + ".html#" + values["id"] + ">`_"
data[key].update({'full_title': title, 'test_case': rst_value})
else:
title = "`" + values["id"] + " <" + VNFRQTS_ID_URL + values["docname"].replace(" ", "%20") + ".html#" + values["id"] + ">`_"
data[key].update({'full_title': title, 'test_case': rst_value})
else:
@@ -1093,7 +1100,8 @@ def pytest_report_collectionfinish(config, startdir, items):
if req_id not in req_to_test:
req_to_test[req_id].add(item)
if req_id in requirements:
if req_id not in req_to_test:
req_to_test[req_id].add(item)
if req_id in requirements:
- reqs[req_id].update({'test_case': item.function.__module__, 'validated_by': item.function.__name__})
+ reqs[req_id].update({'test_case': item.function.__module__,
+ 'validated_by': item.function.__name__})
if req_id not in requirements:
mapping_errors.add(
(req_id, item.function.__module__, item.function.__name__)
if req_id not in requirements:
mapping_errors.add(
(req_id, item.function.__module__, item.function.__name__)
@@ -1142,8 +1150,8 @@ def pytest_report_collectionfinish(config, startdir, items):
""), # test function
)
# now write out any test methods that weren't mapped to requirements
""), # test function
)
# now write out any test methods that weren't mapped to requirements
- unmapped_tests = {(item.function.__module__, item.function.__name__)
-     for item in unmapped}
+ unmapped_tests = {(item.function.__module__, item.function.__name__)
+                   for item in unmapped}
for test_module, test_name in unmapped_tests:
out.writerow(
("", # req ID
for test_module, test_name in unmapped_tests:
out.writerow(
("", # req ID