# ============LICENSE_START=======================================================
# org.onap.vvp/validation-scripts
# ===================================================================
# Copyright © 2018 AT&T Intellectual Property. All rights reserved.
# ===================================================================
#
# Unless otherwise specified, all software contained herein is licensed
# under the Apache License, Version 2.0 (the "License");
# you may not use this software except in compliance with the License.
# You may obtain a copy of the License at
#
#             http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Unless otherwise specified, all documentation contained herein is licensed
# under the Creative Commons License, Attribution 4.0 Intl. (the "License");
# you may not use this documentation except in compliance with the License.
# You may obtain a copy of the License at
#
#             https://creativecommons.org/licenses/by/4.0/
#
# Unless required by applicable law or agreed to in writing, documentation
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============LICENSE_END============================================
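"""pytest conftest for the VVP validation scripts.

Collects failure data while tests run (pytest_runtest_makereport), then
renders an HTML, Excel, or CSV failure report and a requirements
traceability matrix at the end of the session.
"""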
import collections
import csv
import datetime
import hashlib
import io
import json
import os
import sys
import time

import docutils.core
import pytest
import requests
import xlsxwriter
from more_itertools import partition
from six import string_types

__path__ = [os.path.dirname(os.path.abspath(__file__))]

resolution_steps_file = "resolution_steps.json"
requirements_file = "requirements.json"

FAILURE_DATA = {}
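# Maps the human-readable column names used in every report format to the
# keys stored for each failure in FAILURE_DATA.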
63 ("Input File", "file"),
64 ("Test", "test_file"),
65 ("Requirements", "req_description"),
66 ("Resolution Steps", "resolution_steps"),
67 ("Error Message", "message"),
68 ("Raw Test Output", "raw_output"),
70 report = collections.OrderedDict(report_columns)
def extract_error_msg(rep):
    try:
        msg = str(rep.longrepr.reprcrash)
    except AttributeError:
        msg = str(rep)

    if "AssertionError:" in msg:
        return msg.split("AssertionError:")[1]

    return msg
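# hookwrapper=True wraps pytest's standard report hook: the code after
# `yield` sees the final TestReport for each test phase and can record
# failures as they happen, dumping the accumulated data to output/failures
# as JSON.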
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()

    output_dir = "{}/../output".format(__path__[0])
    if rep.outcome == "failed":
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        if hasattr(item.function, "requirement_ids"):
            requirement_ids = item.function.requirement_ids
        else:
            requirement_ids = ""

        if "environment_pair" in item.fixturenames:
            resolved_pair = "{} environment pair".format(
                item.funcargs["environment_pair"]["name"]
            )
        elif "heat_volume_pair" in item.fixturenames:
            resolved_pair = "{} volume pair".format(
                item.funcargs["heat_volume_pair"]["name"]
            )
        elif "heat_templates" in item.fixturenames:
            resolved_pair = item.funcargs["heat_templates"]
        elif "yaml_files" in item.fixturenames:
            resolved_pair = item.funcargs["yaml_files"]
        else:
            resolved_pair = rep.nodeid.split("[")[1][:-1]

        FAILURE_DATA[len(FAILURE_DATA)] = {
            "file": resolved_pair,
            "vnfrqts": requirement_ids,
            "test": item.function.__name__,
            "test_file": item.function.__module__.split(".")[-1],
            "raw_output": str(rep.longrepr),
            "message": extract_error_msg(rep),
        }

        with open("{}/failures".format(output_dir), "w") as f:
            json.dump(FAILURE_DATA, f, indent=4)
def make_timestamp():
    timezone = time.tzname[time.localtime().tm_isdst]
    return "{} {}".format(str(datetime.datetime.now()), timezone)
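# Runs once at the end of the session; report generation is skipped when no
# template directory was given (e.g., in --self-test mode).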
def pytest_sessionfinish(session, exitstatus):
    if not session.config.option.template_dir:
        return
    template_path = os.path.abspath(session.config.option.template_dir[0])
    profile_name = session.config.option.validation_profile_name
    generate_report(
        "{}/../output".format(__path__[0]),
        template_path,
        profile_name,
        session.config.option.report_format,
    )
def pytest_runtest_setup(item):
    profile = item.session.config.option.validation_profile
    markers = set(m.name for m in item.iter_markers())
    if not profile and markers and "xfail" not in markers:
        pytest.skip("No validation profile selected. Skipping tests with marks.")
    if profile and markers and profile not in markers and "xfail" not in markers:
        pytest.skip("Doesn't match the selected validation profile")
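# Renders one path, or a list of paths, as file:// hyperlinks for the HTML
# report. For example (hypothetical path):
#   make_href("/tmp/base.yaml")
#   -> "<a href='file:///tmp/base.yaml' target='_blank'>base.yaml</a>"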
def make_href(path):
    paths = [path] if isinstance(path, string_types) else path
    links = []
    for p in paths:
        abs_path = os.path.abspath(p)
        filename = os.path.split(abs_path)[1]
        links.append(
            "<a href='file://{abs_path}' target='_blank'>{filename}</a>".format(
                abs_path=abs_path, filename=filename
            )
        )
    return "<br/>".join(links)
def generate_report(outpath, template_path, profile_name, output_format):
    failures = "{}/failures".format(outpath)
    faildata = {}
    rdata = []
    hdata = {}

    if os.path.exists(failures):
        with open(failures, "r") as f:
            faildata = json.loads(f.read())

    resolution_steps = "{}/../{}".format(__path__[0], resolution_steps_file)
    if os.path.exists(resolution_steps):
        with open(resolution_steps, "r") as f:
            rdata = json.loads(f.read())

    heat_requirements = "{}/../{}".format(__path__[0], requirements_file)
    if os.path.exists(heat_requirements):
        with open(heat_requirements, "r") as f:
            hdata = json.loads(f.read())

        # point requirements at the most recent version
        current_version = hdata["current_version"]
        hdata = hdata["versions"][current_version]["needs"]

    # map requirement IDs from failures to requirement descriptions
    for k, v in faildata.items():
        req_text = ""
        if v["vnfrqts"] != "":
            for req in v["vnfrqts"]:
                if req in hdata:
                    req_text += "\n\n{}: \n{}".format(req, hdata[req]["description"])
        faildata[k]["req_description"] = req_text

    # map resolution steps to module and test name
    for k, v in faildata.items():
        faildata[k]["resolution_steps"] = ""
        for rs in rdata:
            if v["test_file"] == rs["module"] and v["test"] == rs["function"]:
                faildata[k]["resolution_steps"] = "\n{}: \n{}".format(
                    rs["header"], rs["resolution_steps"]
                )

    output_format = output_format.lower().strip() if output_format else "html"
    if output_format == "html":
        generate_html_report(outpath, profile_name, template_path, faildata)
    elif output_format == "excel":
        generate_excel_report(outpath, profile_name, template_path, faildata)
    elif output_format == "csv":
        generate_csv_report(outpath, profile_name, template_path, faildata)
    else:
        raise ValueError("Unsupported output format: " + output_format)
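# All three report generators share the same layout: a title, a block of
# summary headers (profile, timestamp, checksum, error count), then one row
# per failure using the report_columns mapping defined above.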
def generate_csv_report(output_dir, profile_name, template_path, faildata):
    rows = []
    rows.append(["Validation Failures"])
    headers = [
        ("Profile Selected:", profile_name),
        ("Report Generated At:", make_timestamp()),
        ("Directory Validated:", template_path),
        ("Checksum:", hash_directory(template_path)),
        ("Total Errors:", len(faildata)),
    ]
    rows.append([])
    for header in headers:
        rows.append(header)
    rows.append([])

    # table header
    rows.append([col for col, _ in report_columns])

    # table rows
    for data in faildata.values():
        rows.append(
            [
                data.get("file", ""),
                data.get("test_file", ""),
                data.get("req_description", ""),
                data.get("resolution_steps", ""),
                data.get("message", ""),
                data.get("raw_output", ""),
            ]
        )

    output_path = os.path.join(output_dir, "report.csv")
    with open(output_path, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerows(rows)
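# Excel variant of the failure report, written with xlsxwriter. Raw test
# output is rendered in a monospaced, wrapped cell format.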
def generate_excel_report(output_dir, profile_name, template_path, faildata):
    output_path = os.path.join(output_dir, "report.xlsx")
    workbook = xlsxwriter.Workbook(output_path)
    bold = workbook.add_format({"bold": True})
    code = workbook.add_format({"font_name": "Courier", "text_wrap": True})
    normal = workbook.add_format({"text_wrap": True})
    heading = workbook.add_format({"bold": True, "font_size": 18})
    worksheet = workbook.add_worksheet("failures")
    worksheet.write(0, 0, "Validation Failures", heading)

    headers = [
        ("Profile Selected:", profile_name),
        ("Report Generated At:", make_timestamp()),
        ("Directory Validated:", template_path),
        ("Checksum:", hash_directory(template_path)),
        ("Total Errors:", len(faildata)),
    ]
    for row, (header, value) in enumerate(headers, start=2):
        worksheet.write(row, 0, header, bold)
        worksheet.write(row, 1, value)

    worksheet.set_column(0, len(headers) - 1, 40)
    worksheet.set_column(len(headers), len(headers), 80)

    # table header
    start_error_table_row = 2 + len(headers) + 2
    for col_num, (col_name, _) in enumerate(report_columns):
        worksheet.write(start_error_table_row, col_num, col_name, bold)

    # table rows
    for row, data in enumerate(faildata.values(), start=start_error_table_row + 1):
        for col, key in enumerate(report.values()):
            if key == "file":
                paths = (
                    [data[key]] if isinstance(data[key], string_types) else data[key]
                )
                contents = "\n".join(paths)
                worksheet.write(row, col, contents, normal)
            elif key == "raw_output":
                worksheet.write_string(row, col, data[key], code)
            else:
                worksheet.write(row, col, data[key], normal)

    workbook.close()
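# HTML variant of the failure report. Requirement descriptions are
# reStructuredText, so they are converted to HTML via docutils.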
def generate_html_report(outpath, profile_name, template_path, faildata):
    with open("{}/report.html".format(outpath), "w") as of:
        body_begin = """
        <style type="text/css">
        h1, li {{
            font-family:Arial, sans-serif;
        }}
        .tg {{border-collapse:collapse;border-spacing:0;}}
        .tg td{{font-family:Arial, sans-serif;font-size:8px;padding:10px 5px;
        border-style:solid;border-width:1px;overflow:hidden;word-break:normal;
        border-color:black;}}
        .tg th{{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;
        padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;
        word-break:normal;border-color:black;}}
        .tg .tg-rwj1{{font-size:10px;font-family:Arial, Helvetica,
        sans-serif !important;border-color:inherit;vertical-align:top}}</style>
        <h1>Validation Failures</h1>
        <ul>
        <li><b>Profile Selected:</b> <tt>{profile}</tt></li>
        <li><b>Report Generated At:</b> <tt>{timestamp}</tt></li>
        <li><b>Directory Validated:</b> <tt>{template_dir}</tt></li>
        <li><b>Checksum:</b> <tt>{checksum}</tt></li>
        <li><b>Total Errors:</b> {num_failures}</li>
        </ul>
        """.format(
            profile=profile_name,
            timestamp=make_timestamp(),
            checksum=hash_directory(template_path),
            template_dir=template_path,
            num_failures=len(faildata),
        )
        of.write(body_begin)

        if len(faildata) == 0:
            of.write("<p>Success! No validation failures detected.</p>")
            return

        table_begin = '<table class="tg">'
        of.write(table_begin)

        # table header
        of.write("<tr>")
        for k, v in report.items():
            of.write('<th class="tg-rwj1">{}</th>'.format(k))
        of.write("</tr>")

        # table rows
        for k, v in faildata.items():
            of.write("<tr>")
            for rk, rv in report.items():
                if rv == "file":
                    value = make_href(v[rv])
                elif rv == "raw_output":
                    value = "<pre>{}</pre>".format(v[rv])
                elif rv == "req_description":
                    parts = docutils.core.publish_parts(
                        writer_name="html", source=v[rv]
                    )
                    value = parts["body"]
                else:
                    value = v[rv].replace("\n", "<br />")
                of.write(" <td>{}</td>".format(value))
            of.write("</tr>")
        of.write("</table>")
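# Typical invocations (paths are illustrative):
#   pytest --template-directory=/path/to/heat --report-format=html
#   pytest --self-test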
def pytest_addoption(parser):
    """
    Add needed CLI arguments
    """
    parser.addoption(
        "--template-directory",
        dest="template_dir",
        action="append",
        help="Directory which holds the templates for validation",
    )

    parser.addoption(
        "--self-test",
        dest="self_test",
        action="store_true",
        help="Test the unit tests against their fixtured data",
    )

    parser.addoption(
        "--validation-profile",
        dest="validation_profile",
        action="store",
        help="Runs all unmarked tests plus test with a matching marker",
    )

    parser.addoption(
        "--validation-profile-name",
        dest="validation_profile_name",
        action="store",
        help="Friendly name of the validation profile used in reports",
    )

    parser.addoption(
        "--report-format",
        dest="report_format",
        action="store",
        help="Format of output report (html, csv, excel)",
    )
def pytest_configure(config):
    """
    Ensure that we receive either `--self-test` or
    `--template-directory=<directory>` as CLI arguments
    """
    if config.getoption("template_dir") and config.getoption("self_test"):
        raise Exception(
            '"--template-directory" and "--self-test" are mutually exclusive'
        )
    if not (
        config.getoption("template_dir")
        or config.getoption("self_test")
        or config.getoption("help")
    ):
        raise Exception(
            'One of "--template-directory" or "--self-test" must be specified'
        )
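# The parametrizers module supplies one helper per fixture name below; each
# branch imports and applies the matching helper so that tests requesting
# that fixture are expanded into one test per selected file.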
def pytest_generate_tests(metafunc):
    """
    If a unit test requires an argument named 'filename',
    we generate a test for each of the selected filenames: either
    the files contained in `template_dir` or, if `template_dir`
    is not specified on the CLI, the fixtures associated with this
    test name.
    """
    if "filename" in metafunc.fixturenames:
        from .parametrizers import parametrize_filename

        parametrize_filename(metafunc)

    if "filenames" in metafunc.fixturenames:
        from .parametrizers import parametrize_filenames

        parametrize_filenames(metafunc)

    if "template_dir" in metafunc.fixturenames:
        from .parametrizers import parametrize_template_dir

        parametrize_template_dir(metafunc)

    if "environment_pair" in metafunc.fixturenames:
        from .parametrizers import parametrize_environment_pair

        parametrize_environment_pair(metafunc)

    if "heat_volume_pair" in metafunc.fixturenames:
        from .parametrizers import parametrize_heat_volume_pair

        parametrize_heat_volume_pair(metafunc)

    if "yaml_files" in metafunc.fixturenames:
        from .parametrizers import parametrize_yaml_files

        parametrize_yaml_files(metafunc)

    if "env_files" in metafunc.fixturenames:
        from .parametrizers import parametrize_environment_files

        parametrize_environment_files(metafunc)

    if "yaml_file" in metafunc.fixturenames:
        from .parametrizers import parametrize_yaml_file

        parametrize_yaml_file(metafunc)

    if "env_file" in metafunc.fixturenames:
        from .parametrizers import parametrize_environment_file

        parametrize_environment_file(metafunc)

    if "parsed_yaml_file" in metafunc.fixturenames:
        from .parametrizers import parametrize_parsed_yaml_file

        parametrize_parsed_yaml_file(metafunc)

    if "parsed_environment_file" in metafunc.fixturenames:
        from .parametrizers import parametrize_parsed_environment_file

        parametrize_parsed_environment_file(metafunc)

    if "heat_template" in metafunc.fixturenames:
        from .parametrizers import parametrize_heat_template

        parametrize_heat_template(metafunc)

    if "heat_templates" in metafunc.fixturenames:
        from .parametrizers import parametrize_heat_templates

        parametrize_heat_templates(metafunc)

    if "volume_template" in metafunc.fixturenames:
        from .parametrizers import parametrize_volume_template

        parametrize_volume_template(metafunc)

    if "volume_templates" in metafunc.fixturenames:
        from .parametrizers import parametrize_volume_templates

        parametrize_volume_templates(metafunc)

    if "template" in metafunc.fixturenames:
        from .parametrizers import parametrize_template

        parametrize_template(metafunc)

    if "templates" in metafunc.fixturenames:
        from .parametrizers import parametrize_templates

        parametrize_templates(metafunc)
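# MD5 over every file under the template directory; surfaced as "Checksum:"
# in the report headers so a report can be tied to an exact set of inputs.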
def hash_directory(path):
    md5 = hashlib.md5()
    for dir_path, sub_dirs, filenames in os.walk(path):
        for filename in filenames:
            file_path = os.path.join(dir_path, filename)
            with open(file_path, "rb") as f:
                md5.update(f.read())
    return md5.hexdigest()
def load_current_requirements():
    """Loads dict of current requirements or empty dict if file doesn't exist"""
    url = "https://onap.readthedocs.io/en/latest/_downloads/needs.json"
    try:
        r = requests.get(url)
        r.raise_for_status()
        with open("requirements.json", "wb") as needs:
            needs.write(r.content)
    except requests.RequestException:
        pass  # on network failure, fall back to a previously downloaded copy
    path = "requirements.json"
    if not os.path.exists(path):
        return {}
    with io.open(path, encoding="utf8", mode="r") as f:
        data = json.loads(f.read())
    version = data["current_version"]
    return data["versions"][version]["needs"]
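# Python 2's csv module wants binary-mode files and encoded rows, while
# Python 3's wants text mode with newline=""; these two helpers paper over
# the difference.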
def compat_open(path):
    """Invokes open correctly depending on the Python version"""
    if sys.version_info.major < 3:
        return open(path, "wb")
    return open(path, "w", newline="")
def unicode_writerow(writer, row):
    if sys.version_info.major < 3:
        row = [s.encode("utf8") for s in row]
    writer.writerow(row)
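# After collection, build a requirement-to-test mapping and emit two CSVs:
# mapping_errors.csv (tests citing unknown requirement IDs) and
# traceability.csv (the full requirements traceability matrix).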
def pytest_report_collectionfinish(config, startdir, items):
    """Generates a simple traceability report to output/traceability.csv"""
    traceability_path = os.path.join(__path__[0], "../output/traceability.csv")
    output_dir = os.path.split(traceability_path)[0]
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    requirements = load_current_requirements()
    unmapped, mapped = partition(
        lambda item: hasattr(item.function, "requirement_ids"), items
    )

    req_to_test = collections.defaultdict(set)
    mapping_errors = set()
    for item in mapped:
        for req_id in item.function.requirement_ids:
            req_to_test[req_id].add(item)
            if req_id not in requirements:
                mapping_errors.add(
                    (req_id, item.function.__module__, item.function.__name__)
                )

    mapping_error_path = os.path.join(__path__[0], "../output/mapping_errors.csv")
    with compat_open(mapping_error_path) as f:
        writer = csv.writer(f)
        for error in mapping_errors:
            unicode_writerow(writer, error)

    with compat_open(traceability_path) as f:
        out = csv.writer(f)
        unicode_writerow(
            out,
            ("Requirement ID", "Requirement", "Section", "Test Module", "Test Name"),
        )
        for req_id, metadata in requirements.items():
            if req_to_test[req_id]:
                for item in req_to_test[req_id]:
                    unicode_writerow(
                        out,
                        (
                            req_id,
                            metadata["description"],
                            metadata["section_name"],
                            item.function.__module__,
                            item.function.__name__,
                        ),
                    )
            else:
                unicode_writerow(
                    out,
                    (req_id, metadata["description"], metadata["section_name"], "", ""),
                )
        # now write out any test methods that weren't mapped to requirements
        for item in unmapped:
            unicode_writerow(
                out, ("", "", "", item.function.__module__, item.function.__name__)
            )