# ============LICENSE_START=======================================================
# org.onap.vvp/validation-scripts
# ===================================================================
# Copyright © 2019 AT&T Intellectual Property. All rights reserved.
# ===================================================================
#
# Unless otherwise specified, all software contained herein is licensed
# under the Apache License, Version 2.0 (the "License");
# you may not use this software except in compliance with the License.
# You may obtain a copy of the License at
#
#             http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Unless otherwise specified, all documentation contained herein is licensed
# under the Creative Commons License, Attribution 4.0 Intl. (the "License");
# you may not use this documentation except in compliance with the License.
# You may obtain a copy of the License at
#
#             https://creativecommons.org/licenses/by/4.0/
#
# Unless required by applicable law or agreed to in writing, documentation
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============LICENSE_END============================================
import csv
import datetime
import hashlib
import io
import json
import os
import re
import time
import traceback
from collections import defaultdict
from itertools import chain

import docutils.core
import jinja2
import pytest
import xlsxwriter
from more_itertools import partition
from six import string_types

import version
# Directory containing this conftest module; used to resolve sibling resources.
__path__ = [os.path.dirname(os.path.abspath(__file__))]

# Default location for generated reports when --output-directory is not given.
DEFAULT_OUTPUT_DIR = "{}/../output".format(__path__[0])

RESOLUTION_STEPS_FILE = "resolution_steps.json"
HEAT_REQUIREMENTS_FILE = os.path.join(__path__[0], "..", "heat_requirements.json")
TEST_SCRIPT_SITE = "https://github.com/onap/vvp-validation-scripts/blob/master/ice_validator/tests/"
VNFRQTS_ID_URL = "https://docs.onap.org/en/latest/submodules/vnfrqts/requirements.git/docs/"

# (column label, TestResult attribute) pairs rendered in the failure reports.
REPORT_COLUMNS = [
    ("Input File", "file"),
    ("Test", "test_file"),
    ("Requirements", "req_description"),
    ("Resolution Steps", "resolution_steps"),
    ("Error Message", "message"),
    ("Raw Test Output", "raw_output"),
]

COLLECTION_FAILURE_WARNING = """WARNING: The following unexpected errors occurred
while preparing to validate the the input files. Some validations may not have been
executed. Please refer these issue to the VNF Validation Tool team.
"""

# Errors captured during collection/parametrization (see pytest_generate_tests);
# these are reported separately from normal test failures.
COLLECTION_FAILURES = []

# Captures the results of every test run
ALL_RESULTS = []
def get_output_dir(config):
    """
    Retrieve the output directory for the reports and create it if necessary
    :param config: pytest configuration
    :return: output directory as string
    """
    output_dir = config.option.output_dir or DEFAULT_OUTPUT_DIR
    if not os.path.exists(output_dir):
        os.makedirs(output_dir, exist_ok=True)
    return output_dir
def extract_error_msg(rep):
    """
    If a custom error message was provided, then extract it otherwise
    just show the pytest assert message
    :param rep: pytest test report object
    :return: extracted message, or "" when the test did not fail
    """
    if rep.outcome != "failed":
        return ""
    try:
        full_msg = str(rep.longrepr.reprcrash.message)
        match = re.match(
            "AssertionError:(.*)^assert.*", full_msg, re.MULTILINE | re.DOTALL
        )
        if match:  # custom message was provided
            # Extract everything between AssertionError and the start
            # of the assert statement expansion in the pytest report
            msg = match.group(1)
        else:
            msg = str(rep.longrepr.reprcrash)
            if "AssertionError:" in msg:
                msg = msg.split("AssertionError:")[1]
    except AttributeError:
        # reports for collection errors may lack longrepr/reprcrash entirely
        msg = str(rep)
    return msg
class TestResult:
    """
    Wraps the test case and result to extract necessary metadata for
    reporting purposes.
    """

    # Maps pytest outcome strings to the report's result vocabulary.
    RESULT_MAPPING = {"passed": "PASS", "failed": "FAIL", "skipped": "SKIP"}

    def __init__(self, item, outcome):
        self.item = item
        self.result = outcome.get_result()
        self.files = [os.path.normpath(p) for p in self._get_files()]
        self.error_message = self._get_error_message()

    @property
    def requirement_ids(self):
        """
        Returns list of requirement IDs mapped to the test case.

        :return: Returns a list of string requirement IDs the test was
                 annotated with ``validates`` otherwise returns and empty list
        """
        is_mapped = hasattr(self.item.function, "requirement_ids")
        return self.item.function.requirement_ids if is_mapped else []

    @property
    def markers(self):
        """
        :return: Returns a set of pytest marker names for the test or an empty set
        """
        return set(m.name for m in self.item.iter_markers())

    @property
    def is_base_test(self):
        """
        :return: Returns True if the test is annotated with a pytest marker called base
        """
        return "base" in self.markers

    @property
    def is_failed(self):
        """
        :return: True if the test failed
        """
        return self.outcome == "FAIL"

    @property
    def outcome(self):
        """
        :return: Returns 'PASS', 'FAIL', or 'SKIP'
        """
        return self.RESULT_MAPPING[self.result.outcome]

    @property
    def test_case(self):
        """
        :return: Name of the test case method
        """
        return self.item.function.__name__

    @property
    def test_module(self):
        """
        :return: Name of the file containing the test case
        """
        return self.item.function.__module__.split(".")[-1]

    @property
    def raw_output(self):
        """
        :return: Full output from pytest for the given test case
        """
        return str(self.result.longrepr)

    def requirement_text(self, curr_reqs):
        """
        Creates a text summary for the requirement IDs mapped to the test case.
        If no requirements are mapped, then it returns the empty string.

        :param curr_reqs: mapping of requirement IDs to requirement metadata
                          loaded from the VNFRQTS projects needs.json output
        :return: ID and text of the requirements mapped to the test case
        """
        text = (
            "\n\n{}: \n{}".format(r_id, curr_reqs[r_id]["description"])
            for r_id in self.requirement_ids
            if r_id in curr_reqs
        )
        return "".join(text)

    def requirements_metadata(self, curr_reqs):
        """
        Returns a list of dicts containing the following metadata for each
        requirement mapped:

        - id: Requirement ID
        - text: Full text of the requirement
        - keyword: MUST, MUST NOT, MAY, etc.

        :param curr_reqs: mapping of requirement IDs to requirement metadata
                          loaded from the VNFRQTS projects needs.json output
        :return: List of requirement metadata
        """
        data = []
        for r_id in self.requirement_ids:
            if r_id not in curr_reqs:
                # silently skip IDs that are not in the current requirement set
                continue
            data.append(
                {
                    "id": r_id,
                    "text": curr_reqs[r_id]["description"],
                    "keyword": curr_reqs[r_id]["keyword"],
                }
            )
        return data

    def resolution_steps(self, resolutions):
        """
        :param resolutions: Loaded from contents for resolution_steps.json
        :return: Header and text for the resolution step associated with this
                 test case.  Returns empty string if no resolutions are
                 provided for this test case.
        """
        text = (
            "\n{}: \n{}".format(entry["header"], entry["resolution_steps"])
            for entry in resolutions
            if self._match(entry)
        )
        return "".join(text)

    def _match(self, resolution_entry):
        """
        Returns True if the test result maps to the given entry in
        the resolutions file
        """
        return (
            self.test_case == resolution_entry["function"]
            and self.test_module == resolution_entry["module"]
        )

    def _get_files(self):
        """
        Extracts the list of files passed into the test case.
        :return: List of absolute paths to files
        """
        if "environment_pair" in self.item.fixturenames:
            return [
                "{} environment pair".format(
                    self.item.funcargs["environment_pair"]["name"]
                )
            ]
        elif "heat_volume_pair" in self.item.fixturenames:
            return [
                "{} volume pair".format(self.item.funcargs["heat_volume_pair"]["name"])
            ]
        elif "heat_templates" in self.item.fixturenames:
            return self.item.funcargs["heat_templates"]
        elif "yaml_files" in self.item.fixturenames:
            return self.item.funcargs["yaml_files"]
        else:
            # fall back to the parametrized ID embedded in the node ID, if any
            parts = self.result.nodeid.split("[")
            return [""] if len(parts) == 1 else [parts[1][:-1]]

    def _get_error_message(self):
        """
        :return: Error message or empty string if the test did not fail or error
        """
        if self.is_failed:
            return extract_error_msg(self.result)
        else:
            return ""
# noinspection PyUnusedLocal
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """
    Captures the test results for later reporting.  This will also halt testing
    if a base failure is encountered (can be overridden with continue-on-failure)
    """
    outcome = yield
    if outcome.get_result().when != "call":
        return  # only capture results of test cases themselves
    result = TestResult(item, outcome)
    ALL_RESULTS.append(result)
    if (
        not item.config.option.continue_on_failure
        and result.is_base_test
        and result.is_failed
    ):
        msg = "!!Base Test Failure!! Halting test suite execution...\n{}".format(
            result.error_message
        )
        pytest.exit("{}\n{}\n{}".format(msg, result.files, result.test_case))
def make_timestamp():
    """
    :return: String make_iso_timestamp in format:
             2019-01-19 10:18:49.865000 Central Standard Time
    """
    timezone = time.tzname[time.localtime().tm_isdst]
    return "{} {}".format(str(datetime.datetime.now()), timezone)
# noinspection PyUnusedLocal
def pytest_sessionstart(session):
    """
    Reset the module-level result accumulators at the start of a session so a
    run never reports stale data from a previous session in the same process.
    """
    ALL_RESULTS.clear()
    COLLECTION_FAILURES.clear()
# noinspection PyUnusedLocal
def pytest_sessionfinish(session, exitstatus):
    """
    If not a self-test run, generate the output reports
    """
    if not session.config.option.template_dir:
        return  # self-test run; nothing to report on
    if session.config.option.template_source:
        template_source = session.config.option.template_source[0]
    else:
        template_source = os.path.abspath(session.config.option.template_dir[0])
    categories_selected = session.config.option.test_categories or ""
    generate_report(
        get_output_dir(session.config),
        template_source,
        categories_selected,
        session.config.option.report_format,
    )
# noinspection PyUnusedLocal
def pytest_collection_modifyitems(session, config, items):
    """
    Selects tests based on the categories requested.  Tests without
    categories will always be executed.
    """
    config.traceability_items = list(items)  # save all items for traceability
    if not config.option.self_test:
        for item in items:
            # checking if test belongs to a category
            if hasattr(item.function, "categories"):
                if config.option.test_categories:
                    test_categories = getattr(item.function, "categories")
                    passed_categories = config.option.test_categories
                    if not all(
                        category in passed_categories for category in test_categories
                    ):
                        item.add_marker(
                            pytest.mark.skip(
                                reason="Test categories do not match all the passed categories"
                            )
                        )
                else:
                    item.add_marker(
                        pytest.mark.skip(
                            reason="Test belongs to a category but no categories were passed"
                        )
                    )
    # run base tests first so a base failure can halt the suite early
    items.sort(
        key=lambda item: 0 if "base" in set(m.name for m in item.iter_markers()) else 1
    )
def make_href(paths):
    """
    Create an anchor tag to link to the file paths provided.
    :param paths: string or list of file paths
    :return: String of hrefs - one for each path, each seperated by a line
             break (<br/>)
    """
    # NOTE: uses built-in str instead of six.string_types; the file already
    # relies on Python 3-only features (os.makedirs(..., exist_ok=True))
    paths = [paths] if isinstance(paths, str) else paths
    links = []
    for p in paths:
        abs_path = os.path.abspath(p)
        # directories are shown with their full path, files with basename only
        name = abs_path if os.path.isdir(abs_path) else os.path.split(abs_path)[1]
        links.append(
            "<a href='file://{abs_path}' target='_blank'>{name}</a>".format(
                abs_path=abs_path, name=name
            )
        )
    return "<br/>".join(links)
def load_resolutions_file():
    """
    :return: dict of data loaded from resolutions_steps.json, or None when the
             file does not exist next to the test package
    """
    resolution_steps = "{}/../{}".format(__path__[0], RESOLUTION_STEPS_FILE)
    if os.path.exists(resolution_steps):
        with open(resolution_steps, "r") as f:
            return json.loads(f.read())
def generate_report(outpath, template_path, categories, output_format="html"):
    """
    Generates the various output reports.

    :param outpath: destination directory for all reports
    :param template_path: directory containing the Heat templates validated
    :param categories: Optional categories selected
    :param output_format: One of "html", "excel", or "csv". Default is "html"
    :raises: ValueError if requested output format is unknown
    """
    failures = [r for r in ALL_RESULTS if r.is_failed]
    generate_failure_file(outpath)
    output_format = output_format.lower().strip() if output_format else "html"
    if output_format == "html":
        generate_html_report(outpath, categories, template_path, failures)
    elif output_format == "excel":
        generate_excel_report(outpath, categories, template_path, failures)
    elif output_format == "json":
        generate_json(outpath, template_path, categories)
    elif output_format == "csv":
        generate_csv_report(outpath, categories, template_path, failures)
    else:
        raise ValueError("Unsupported output format: " + output_format)
def write_json(data, path):
    """
    Pretty print data as JSON to the output path requested

    :param data: Data structure to be converted to JSON
    :param path: Where to write output
    """
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
def generate_failure_file(outpath):
    """
    Writes a summary of test failures to a file named failures.
    This is for backwards compatibility only.  The report.json offers a
    more comprehensive output.
    :param outpath: destination directory for the ``failures`` file
    """
    failure_path = os.path.join(outpath, "failures")
    failures = [r for r in ALL_RESULTS if r.is_failed]
    data = {}
    for i, fail in enumerate(failures):
        data[str(i)] = {
            "file": fail.files[0] if len(fail.files) == 1 else fail.files,
            "vnfrqts": fail.requirement_ids,
            "test": fail.test_case,
            "test_file": fail.test_module,
            "raw_output": fail.raw_output,
            "message": fail.error_message,
        }
    write_json(data, failure_path)
def generate_csv_report(output_dir, categories, template_path, failures):
    """
    Writes a CSV report (report.csv) summarizing run metadata, any collection
    failures, and one row per test failure.
    """
    rows = [["Validation Failures"]]
    headers = [
        ("Categories Selected:", categories),
        ("Tool Version:", version.VERSION),
        ("Report Generated At:", make_timestamp()),
        ("Directory Validated:", template_path),
        ("Checksum:", hash_directory(template_path)),
        ("Total Errors:", len(failures) + len(COLLECTION_FAILURES)),
    ]
    rows.append([])
    for header in headers:
        rows.append([header[0], header[1]])
    rows.append([])

    if COLLECTION_FAILURES:
        rows.append([COLLECTION_FAILURE_WARNING])
        rows.append(["Validation File", "Test", "Fixtures", "Error"])
        for failure in COLLECTION_FAILURES:
            rows.append(
                [
                    failure["module"],
                    failure["test"],
                    ";".join(failure["fixtures"]),
                    failure["error"],
                ]
            )
        rows.append([])

    # table header
    rows.append([col for col, _ in REPORT_COLUMNS])

    reqs = load_current_requirements()
    resolutions = load_resolutions_file()

    # table content
    for failure in failures:
        rows.append(
            [
                "\n".join(failure.files),
                failure.test_module,
                failure.requirement_text(reqs),
                failure.resolution_steps(resolutions),
                failure.error_message,
                failure.raw_output,
            ]
        )

    output_path = os.path.join(output_dir, "report.csv")
    with open(output_path, "w", newline="") as f:
        writer = csv.writer(f)
        for row in rows:
            writer.writerow(row)
def generate_excel_report(output_dir, categories, template_path, failures):
    """
    Writes an Excel report (report.xlsx) with run metadata, any collection
    failures, and one row per test failure.
    """
    output_path = os.path.join(output_dir, "report.xlsx")
    workbook = xlsxwriter.Workbook(output_path)
    bold = workbook.add_format({"bold": True})
    code = workbook.add_format({"font_name": "Courier", "text_wrap": True})
    normal = workbook.add_format({"text_wrap": True})
    heading = workbook.add_format({"bold": True, "font_size": 18})
    worksheet = workbook.add_worksheet("failures")
    worksheet.write(0, 0, "Validation Failures", heading)

    headers = [
        ("Categories Selected:", ",".join(categories)),
        ("Tool Version:", version.VERSION),
        ("Report Generated At:", make_timestamp()),
        ("Directory Validated:", template_path),
        ("Checksum:", hash_directory(template_path)),
        ("Total Errors:", len(failures) + len(COLLECTION_FAILURES)),
    ]
    for row, (header, value) in enumerate(headers, start=2):
        worksheet.write(row, 0, header, bold)
        worksheet.write(row, 1, value)

    worksheet.set_column(0, len(headers) - 1, 40)
    worksheet.set_column(len(headers), len(headers), 80)

    if COLLECTION_FAILURES:
        collection_failures_start = 2 + len(headers) + 2
        worksheet.write(collection_failures_start, 0, COLLECTION_FAILURE_WARNING, bold)
        collection_failure_headers = ["Validation File", "Test", "Fixtures", "Error"]
        for col_num, col_name in enumerate(collection_failure_headers):
            worksheet.write(collection_failures_start + 1, col_num, col_name, bold)
        for row, data in enumerate(COLLECTION_FAILURES, collection_failures_start + 2):
            worksheet.write(row, 0, data["module"])
            worksheet.write(row, 1, data["test"])
            worksheet.write(row, 2, ",".join(data["fixtures"]))
            worksheet.write(row, 3, data["error"], code)

    # table header (leave a gap after the collection-failure section)
    start_error_table_row = 2 + len(headers) + len(COLLECTION_FAILURES) + 4
    worksheet.write(start_error_table_row, 0, "Validation Failures", bold)
    for col_num, (col_name, _) in enumerate(REPORT_COLUMNS):
        worksheet.write(start_error_table_row + 1, col_num, col_name, bold)

    reqs = load_current_requirements()
    resolutions = load_resolutions_file()

    # table content
    for row, failure in enumerate(failures, start=start_error_table_row + 2):
        worksheet.write(row, 0, "\n".join(failure.files), normal)
        worksheet.write(row, 1, failure.test_module, normal)
        worksheet.write(row, 2, failure.requirement_text(reqs), normal)
        worksheet.write(row, 3, failure.resolution_steps(resolutions), normal)
        worksheet.write(row, 4, failure.error_message, normal)
        worksheet.write(row, 5, failure.raw_output, code)

    # close() is required for xlsxwriter to actually flush the file to disk
    workbook.close()
def make_iso_timestamp():
    """
    Creates a timestamp in ISO 8601 format in UTC format. Used for JSON output.

    :return: timezone-aware ISO 8601 string, e.g. "2019-01-19T16:18:49+00:00"
    """
    # BUG FIX: the original called now.replace(tzinfo=...) and discarded the
    # result (datetime.replace returns a new object), producing a naive
    # timestamp. Request an aware UTC datetime directly instead.
    now = datetime.datetime.now(datetime.timezone.utc)
    return now.isoformat()
def aggregate_requirement_adherence(r_id, collection_failures, test_results):
    """
    Examines all tests associated with a given requirement and determines
    the aggregate result (PASS, FAIL, ERROR, or SKIP) for the requirement.

    * ERROR - At least one ERROR occurred
    * PASS -  At least one PASS and no FAIL or ERRORs.
    * FAIL -  At least one FAIL occurred (no ERRORs)
    * SKIP - All tests were SKIP

    :param r_id: Requirement ID to examing
    :param collection_failures: Errors that occurred during test setup.
    :param test_results: List of TestResult
    :return: 'PASS', 'FAIL', 'SKIP', or 'ERROR'
    """
    errors = any(r_id in f["requirements"] for f in collection_failures)
    outcomes = set(r.outcome for r in test_results if r_id in r.requirement_ids)
    return aggregate_results(errors, outcomes, r_id)
def aggregate_results(has_errors, outcomes, r_id=None):
    """
    Determines the aggregate result for the conditions provided.  Assumes the
    results have been filtered and collected for analysis.

    :param has_errors: True if collection failures occurred for the tests being
                       analyzed.
    :param outcomes: set of outcomes from the TestResults
    :param r_id: Optional requirement ID if known
    :return: 'ERROR', 'PASS', 'FAIL', or 'SKIP'
             (see aggregate_requirement_adherence for more detail)
    """
    if has_errors:
        return "ERROR"

    if not outcomes:
        # no test exercised this requirement; treat as passing
        return "PASS"
    elif "FAIL" in outcomes:
        return "FAIL"
    elif "PASS" in outcomes:
        return "PASS"
    elif {"SKIP"} == outcomes:
        return "SKIP"
    else:
        pytest.warns(
            "Unexpected error aggregating outcomes ({}) for requirement {}".format(
                outcomes, r_id
            )
        )
        return "ERROR"
def aggregate_run_results(collection_failures, test_results):
    """
    Determines overall status of run based on all failures and results.

    * 'ERROR' - At least one collection failure occurred during the run.
    * 'FAIL' - Template failed at least one test
    * 'PASS' - All tests executed properly and no failures were detected

    :param collection_failures: failures occuring during test setup
    :param test_results: list of all test executuion results
    :return: one of 'ERROR', 'FAIL', or 'PASS'
    """
    if collection_failures:
        return "ERROR"
    elif any(r.is_failed for r in test_results):
        return "FAIL"
    else:
        return "PASS"
def error(failure_or_result):
    """
    Extracts the error message from a collection failure or test result
    :param failure_or_result: Entry from COLLECTION_FAILURE or a TestResult
    :return: Error message as string
    """
    if isinstance(failure_or_result, TestResult):
        return failure_or_result.error_message
    else:
        # collection failures are plain dicts (see pytest_generate_tests)
        return failure_or_result["error"]
def req_ids(failure_or_result):
    """
    Extracts the requirement IDs from a collection failure or test result
    :param failure_or_result: Entry from COLLECTION_FAILURE or a TestResult
    :return: set of Requirement IDs. If no requirements mapped, then an empty set
    """
    if isinstance(failure_or_result, TestResult):
        return set(failure_or_result.requirement_ids)
    else:
        # collection failures are plain dicts (see pytest_generate_tests)
        return set(failure_or_result["requirements"])
def collect_errors(r_id, collection_failures, test_result):
    """
    Creates a list of error messages from the collection failures and
    test results.  If r_id is provided, then it collects the error messages
    where the failure or test is associated with that requirement ID.  If
    r_id is None, then it collects all errors that occur on failures and
    results that are not mapped to requirements

    :param r_id: requirement ID to filter by, or None for unmapped entries
    :param collection_failures: list of collection failure dicts
    :param test_result: list of TestResult
    :return: list of non-empty error message strings
    """

    def selector(item):
        if r_id:
            return r_id in req_ids(item)
        else:
            return not req_ids(item)

    errors = (error(x) for x in chain(collection_failures, test_result) if selector(x))
    return [e for e in errors if e]
def relative_paths(base_dir, paths):
    """Return each entry of *paths* rewritten relative to *base_dir*."""
    relative = []
    for candidate in paths:
        relative.append(os.path.relpath(candidate, base_dir))
    return relative
def generate_json(outpath, template_path, categories):
    """
    Creates a JSON summary of the entire test run.

    :param outpath: destination directory for report.json
    :param template_path: directory containing the Heat templates validated
    :param categories: Optional categories selected
    """
    reqs = load_current_requirements()
    data = {
        "version": "dublin",
        # normalize to forward slashes and strip any Windows drive letter
        "template_directory": os.path.splitdrive(template_path)[1].replace(
            os.path.sep, "/"
        ),
        "timestamp": make_iso_timestamp(),
        "checksum": hash_directory(template_path),
        "categories": categories,
        "outcome": aggregate_run_results(COLLECTION_FAILURES, ALL_RESULTS),
        "tests": [],
        "requirements": [],
    }

    results = data["tests"]
    # collection failures are reported as ERROR test entries with no files
    for result in COLLECTION_FAILURES:
        results.append(
            {
                "files": [],
                "test_module": result["module"],
                "test_case": result["test"],
                "result": "ERROR",
                "error": result["error"],
                "requirements": result["requirements"],
            }
        )
    for result in ALL_RESULTS:
        results.append(
            {
                "files": relative_paths(template_path, result.files),
                "test_module": result.test_module,
                "test_case": result.test_case,
                "result": result.outcome,
                "error": result.error_message if result.is_failed else "",
                "requirements": result.requirements_metadata(reqs),
            }
        )

    requirements = data["requirements"]
    for r_id, r_data in reqs.items():
        result = aggregate_requirement_adherence(r_id, COLLECTION_FAILURES, ALL_RESULTS)
        if result:
            requirements.append(
                {
                    "id": r_id,
                    "text": r_data["description"],
                    "keyword": r_data["keyword"],
                    "result": result,
                    "errors": collect_errors(r_id, COLLECTION_FAILURES, ALL_RESULTS),
                }
            )

    # If there are tests that aren't mapped to a requirement, then we'll
    # map them to a special entry so the results are coherent.
    unmapped_outcomes = {r.outcome for r in ALL_RESULTS if not r.requirement_ids}
    has_errors = any(not f["requirements"] for f in COLLECTION_FAILURES)
    if unmapped_outcomes or has_errors:
        requirements.append(
            {
                "id": "Unmapped",
                "text": "Tests not mapped to requirements (see tests)",
                "result": aggregate_results(has_errors, unmapped_outcomes),
                "errors": collect_errors(None, COLLECTION_FAILURES, ALL_RESULTS),
            }
        )

    report_path = os.path.join(outpath, "report.json")
    write_json(data, report_path)
def generate_html_report(outpath, categories, template_path, failures):
    """
    Renders report.html from the bundled Jinja2 template using the collected
    failures, collection failures, and run metadata.
    """
    reqs = load_current_requirements()
    resolutions = load_resolutions_file()
    fail_data = []
    for failure in failures:
        fail_data.append(
            {
                "file_links": make_href(failure.files),
                "test_id": failure.test_module,
                "error_message": failure.error_message,
                "raw_output": failure.raw_output,
                # render the requirement text (reStructuredText) as an HTML body
                "requirements": docutils.core.publish_parts(
                    writer_name="html", source=failure.requirement_text(reqs)
                )["body"],
                "resolution_steps": failure.resolution_steps(resolutions),
            }
        )
    pkg_dir = os.path.split(__file__)[0]
    j2_template_path = os.path.join(pkg_dir, "report.html.jinja2")
    with open(j2_template_path, "r") as f:
        report_template = jinja2.Template(f.read())
        contents = report_template.render(
            version=version.VERSION,
            num_failures=len(failures) + len(COLLECTION_FAILURES),
            categories=categories,
            template_dir=make_href(template_path),
            checksum=hash_directory(template_path),
            timestamp=make_timestamp(),
            failures=fail_data,
            collection_failures=COLLECTION_FAILURES,
        )
    with open(os.path.join(outpath, "report.html"), "w") as f:
        f.write(contents)
def pytest_addoption(parser):
    """
    Add needed CLI arguments
    """
    parser.addoption(
        "--template-directory",
        dest="template_dir",
        action="append",
        help="Directory which holds the templates for validation",
    )

    parser.addoption(
        "--template-source",
        dest="template_source",
        action="append",
        help="Source Directory which holds the templates for validation",
    )

    parser.addoption(
        "--self-test",
        dest="self_test",
        action="store_true",
        help="Test the unit tests against their fixtured data",
    )

    parser.addoption(
        "--report-format",
        dest="report_format",
        action="store",
        help="Format of output report (html, csv, excel, json)",
    )

    parser.addoption(
        "--continue-on-failure",
        dest="continue_on_failure",
        action="store_true",
        help="Continue validation even when structural errors exist in input files",
    )

    parser.addoption(
        "--output-directory",
        dest="output_dir",
        action="store",
        default=None,
        help="Alternate directory for report output.",
    )

    parser.addoption(
        "--category",
        dest="test_categories",
        action="append",
        help="optional category of test to execute",
    )
def pytest_configure(config):
    """
    Ensure that we are receive either `--self-test` or
    `--template-dir=<directory` as CLI arguments
    :raises Exception: when both or neither of the options were supplied
    """
    if config.getoption("template_dir") and config.getoption("self_test"):
        raise Exception('"--template-dir", and "--self-test"' " are mutually exclusive")
    if not (
        config.getoption("template_dir")
        or config.getoption("self_test")
        or config.getoption("help")
    ):
        raise Exception('One of "--template-dir" or' ' "--self-test" must be specified')
def pytest_generate_tests(metafunc):
    """
    If a unit test requires an argument named 'filename'
    we generate a test for the filenames selected. Either
    the files contained in `template_dir` or if `template_dir`
    is not specified on the CLI, the fixtures associated with this
    test name.
    """
    # noinspection PyBroadException
    try:
        if "filename" in metafunc.fixturenames:
            from .parametrizers import parametrize_filename

            parametrize_filename(metafunc)

        if "filenames" in metafunc.fixturenames:
            from .parametrizers import parametrize_filenames

            parametrize_filenames(metafunc)

        if "template_dir" in metafunc.fixturenames:
            from .parametrizers import parametrize_template_dir

            parametrize_template_dir(metafunc)

        if "environment_pair" in metafunc.fixturenames:
            from .parametrizers import parametrize_environment_pair

            parametrize_environment_pair(metafunc)

        if "heat_volume_pair" in metafunc.fixturenames:
            from .parametrizers import parametrize_heat_volume_pair

            parametrize_heat_volume_pair(metafunc)

        if "yaml_files" in metafunc.fixturenames:
            from .parametrizers import parametrize_yaml_files

            parametrize_yaml_files(metafunc)

        if "env_files" in metafunc.fixturenames:
            from .parametrizers import parametrize_environment_files

            parametrize_environment_files(metafunc)

        if "yaml_file" in metafunc.fixturenames:
            from .parametrizers import parametrize_yaml_file

            parametrize_yaml_file(metafunc)

        if "env_file" in metafunc.fixturenames:
            from .parametrizers import parametrize_environment_file

            parametrize_environment_file(metafunc)

        if "parsed_yaml_file" in metafunc.fixturenames:
            from .parametrizers import parametrize_parsed_yaml_file

            parametrize_parsed_yaml_file(metafunc)

        if "parsed_environment_file" in metafunc.fixturenames:
            from .parametrizers import parametrize_parsed_environment_file

            parametrize_parsed_environment_file(metafunc)

        if "heat_template" in metafunc.fixturenames:
            from .parametrizers import parametrize_heat_template

            parametrize_heat_template(metafunc)

        if "heat_templates" in metafunc.fixturenames:
            from .parametrizers import parametrize_heat_templates

            parametrize_heat_templates(metafunc)

        if "volume_template" in metafunc.fixturenames:
            from .parametrizers import parametrize_volume_template

            parametrize_volume_template(metafunc)

        if "volume_templates" in metafunc.fixturenames:
            from .parametrizers import parametrize_volume_templates

            parametrize_volume_templates(metafunc)

        if "template" in metafunc.fixturenames:
            from .parametrizers import parametrize_template

            parametrize_template(metafunc)

        if "templates" in metafunc.fixturenames:
            from .parametrizers import parametrize_templates

            parametrize_templates(metafunc)
    except Exception as e:
        # If an error occurs in the collection phase, then it won't be logged as a
        # normal test failure.  This means that failures could occur, but not
        # be seen on the report resulting in a false positive success message.  These
        # errors will be stored and reported separately on the report
        COLLECTION_FAILURES.append(
            {
                "module": metafunc.module.__name__,
                "test": metafunc.function.__name__,
                "fixtures": metafunc.fixturenames,
                "error": traceback.format_exc(),
                "requirements": getattr(metafunc.function, "requirement_ids", []),
            }
        )
        raise e
def hash_directory(path):
    """
    Create md5 hash using the contents of all files under ``path``
    :param path: string directory containing files
    :return: string MD5 hash code (hex)
    """
    md5 = hashlib.md5()
    for dir_path, sub_dirs, filenames in os.walk(path):
        for filename in filenames:
            file_path = os.path.join(dir_path, filename)
            with open(file_path, "rb") as f:
                md5.update(f.read())
    return md5.hexdigest()
def load_current_requirements():
    """Loads dict of current requirements or empty dict if file doesn't exist"""
    with io.open(HEAT_REQUIREMENTS_FILE, encoding="utf8", mode="r") as f:
        data = json.load(f)
        version = data["current_version"]
        return data["versions"][version]["needs"]
def select_heat_requirements(reqs):
    """Filters dict requirements to only those requirements pertaining to Heat"""
    heat_only = {}
    for req_id, req in reqs.items():
        if "Heat" in req["docname"]:
            heat_only[req_id] = req
    return heat_only
def build_rst_json(reqs):
    """Takes requirements and returns list of only Heat requirements

    Keeps only testable Heat MUST/MUST NOT requirements that have a mapped
    test case, adding RST-formatted links for the requirement and test.
    :param reqs: JSON string of the requirements mapping
    :return: dict of the surviving requirements with RST link fields added
    """
    data = json.loads(reqs)
    for key, values in list(data.items()):
        if "Heat" in (values["docname"]):
            if "MUST" in (values["keyword"]):
                if "none" in (values["validation_mode"]):
                    # not testable; drop from the RST table
                    del data[key]
                else:
                    # Creates links in RST format to requirements and test cases
                    if values["test_case"]:
                        mod = values["test_case"].split(".")[-1]
                        val = TEST_SCRIPT_SITE + mod + ".py"
                        rst_value = ("`" + mod + " <" + val + ">`_")
                        title = "`" + values["id"] + " <" + VNFRQTS_ID_URL + values["docname"].replace(" ", "%20") + ".html#" + values["id"] + ">`_"
                        data[key].update({'full_title': title, 'test_case': rst_value})
                    else:
                        # no test case mapped; drop from the RST table
                        del data[key]
            else:
                del data[key]
        else:
            del data[key]
    return data
def generate_rst_table(data):
    """Generate a formatted csv to be used in RST

    :param data: dict of requirement metadata as produced by build_rst_json
    """
    rst_path = os.path.join(__path__[0], "../output/rst.csv")
    with open(rst_path, "w", newline="") as f:
        out = csv.writer(f)
        out.writerow(
            ("Requirement ID", "Requirement", "Test Module", "Test Name"),
        )
        for req_id, metadata in data.items():
            out.writerow(
                (
                    metadata["full_title"],
                    metadata["description"],
                    metadata["test_case"],
                    metadata["validated_by"],
                )
            )
# noinspection PyUnusedLocal
def pytest_report_collectionfinish(config, startdir, items):
    """Generates a simple traceability report to output/traceability.csv"""
    traceability_path = os.path.join(get_output_dir(config), "traceability.csv")
    output_dir = os.path.split(traceability_path)[0]
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    reqs = load_current_requirements()
    requirements = select_heat_requirements(reqs)
    unmapped, mapped = partition(
        lambda i: hasattr(i.function, "requirement_ids"), items
    )

    req_to_test = defaultdict(set)
    mapping_errors = set()
    for item in mapped:
        for req_id in item.function.requirement_ids:
            if req_id not in req_to_test:
                req_to_test[req_id].add(item)
                if req_id in requirements:
                    # record the first test that validates this requirement
                    reqs[req_id].update({'test_case': item.function.__module__,
                                         'validated_by': item.function.__name__})
            if req_id not in requirements:
                # test refers to a requirement ID that is not a Heat requirement
                mapping_errors.add(
                    (req_id, item.function.__module__, item.function.__name__)
                )

    mapping_error_path = os.path.join(get_output_dir(config), "mapping_errors.csv")
    with open(mapping_error_path, "w", newline="") as f:
        writer = csv.writer(f)
        for err in mapping_errors:
            writer.writerow(err)

    with open(traceability_path, "w", newline="") as f:
        out = csv.writer(f)
        out.writerow(
            ("Requirement ID", "Requirement", "Section",
             "Keyword", "Validation Mode", "Is Testable",
             "Test Module", "Test Name"),
        )
        for req_id, metadata in requirements.items():
            keyword = metadata["keyword"].upper()
            mode = metadata["validation_mode"].lower()
            testable = keyword in {"MUST", "MUST NOT"} and mode != "none"
            if req_to_test[req_id]:
                for item in req_to_test[req_id]:
                    out.writerow(
                        (
                            req_id,
                            metadata["description"],
                            metadata["section_name"],
                            keyword,
                            mode,
                            "TRUE" if testable else "FALSE",
                            item.function.__module__,
                            item.function.__name__,
                        ),
                    )
            else:
                out.writerow(
                    (
                        req_id,
                        metadata["description"],
                        metadata["section_name"],
                        keyword,
                        mode,
                        "TRUE" if testable else "FALSE",
                        "",   # test module
                        ""),  # test function
                )
        # now write out any test methods that weren't mapped to requirements
        unmapped_tests = {(item.function.__module__, item.function.__name__) for item in
                          unmapped}
        for test_module, test_name in unmapped_tests:
            out.writerow(
                ("",            # req id
                 "",            # description
                 "",            # section name
                 "",            # keyword
                 "static",      # validation mode
                 "TRUE",        # testable
                 test_module,
                 test_name)
            )

    generate_rst_table(build_rst_json(json.dumps(reqs)))