[VVP] Performance Enhancements (report generation and test collection)
author    Lovett, Trevor <trevor.lovett@att.com>
          Wed, 5 Jun 2019 15:13:46 +0000 (10:13 -0500)
committer Trevor Lovett <trevor.lovett@att.com>
          Wed, 5 Jun 2019 15:29:28 +0000 (15:29 +0000)
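
Report generation in conftest.py now aggregates requirement results in a
single pass over the collected test results instead of re-scanning
COLLECTION_FAILURES and ALL_RESULTS once per requirement, and nested
template discovery in nested_files.py is memoized with
functools.lru_cache so each template file is parsed at most once per
session. checks.py also swaps subprocess.run's capture_output flag for
explicit stdout/stderr pipes.
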
Issue-ID: VVP-222

Signed-off-by: Lovett, Trevor (tl2972) <tl2972@att.com>
Change-Id: I9dd3506097cb0d6e69bd1434b2d42f6d1965023b

checks.py
ice_validator/tests/conftest.py
ice_validator/tests/test_valid_heat.py
ice_validator/tests/test_volume_templates.py
ice_validator/tests/utils/nested_files.py

index 70bdcd2..6f508eb 100644
--- a/checks.py
+++ b/checks.py
@@ -167,7 +167,9 @@ def check_non_testable_requirements_are_not_mapped():
 
 
 def check_flake8_passes():
-    result = subprocess.run(["flake8", "."], encoding="utf-8", capture_output=True)
+    result = subprocess.run(["flake8", "."], encoding="utf-8",
+                            stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
     msgs = result.stdout.split("\n") if result.returncode != 0 else []
     return ["flake8 errors detected:"] + [f"  {e}" for e in msgs] if msgs else []
 
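
Note: capture_output is only available on subprocess.run from Python 3.7;
passing the pipes explicitly is the Python 3.6-compatible spelling of the
same call. A minimal sketch of the equivalence:

    import subprocess

    # Python 3.7+ only: subprocess.run(["flake8", "."], capture_output=True)
    # 3.6-compatible form used above -- output lands in the same fields:
    result = subprocess.run(
        ["flake8", "."],
        encoding="utf-8",
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    print(result.returncode, result.stdout)
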
index a6f83f1..d07669c 100644
--- a/ice_validator/tests/conftest.py
+++ b/ice_validator/tests/conftest.py
@@ -44,7 +44,6 @@ import os
 import re
 import time
 from collections import defaultdict
-from itertools import chain
 
 import traceback
 
@@ -55,6 +54,7 @@ from more_itertools import partition
 import xlsxwriter
 from six import string_types
 
+# noinspection PyUnresolvedReferences
 import version
 import logging
 
@@ -393,17 +393,19 @@ def pytest_collection_modifyitems(session, config, items):
                     ):
                         item.add_marker(
                             pytest.mark.skip(
-                                reason="Test categories do not match all the passed categories"
+                                reason=("Test categories do not match "
+                                        "all the passed categories")
                             )
                         )
                 else:
                     item.add_marker(
                         pytest.mark.skip(
-                            reason="Test belongs to a category but no categories were passed"
+                            reason=("Test belongs to a category but "
+                                    "no categories were passed")
                         )
                     )
     items.sort(
-        key=lambda item: 0 if "base" in set(m.name for m in item.iter_markers()) else 1
+        key=lambda x: 0 if "base" in set(m.name for m in x.iter_markers()) else 1
     )
 
 
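The sort key's parameter is renamed from item to x so it no longer shadows
the item loop variable above it; since list.sort is stable, tests carrying
the base marker move to the front while collection order is preserved
within each group.
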
@@ -617,44 +619,23 @@ def make_iso_timestamp():
     return now.isoformat()
 
 
-def aggregate_requirement_adherence(r_id, collection_failures, test_results):
-    """
-    Examines all tests associated with a given requirement and determines
-    the aggregate result (PASS, FAIL, ERROR, or SKIP) for the requirement.
-
-    * ERROR - At least one ERROR occurred
-    * PASS -  At least one PASS and no FAIL or ERRORs.
-    * FAIL -  At least one FAIL occurred (no ERRORs)
-    * SKIP - All tests were SKIP
-
-
-    :param r_id: Requirement ID to examing
-    :param collection_failures: Errors that occurred during test setup.
-    :param test_results: List of TestResult
-    :return: 'PASS', 'FAIL', 'SKIP', or 'ERROR'
-    """
-    errors = any(r_id in f["requirements"] for f in collection_failures)
-    outcomes = set(r.outcome for r in test_results if r_id in r.requirement_ids)
-    return aggregate_results(errors, outcomes, r_id)
-
-
-def aggregate_results(has_errors, outcomes, r_id=None):
+def aggregate_results(outcomes, r_id=None):
     """
     Determines the aggregate result for the conditions provided.  Assumes the
     results have been filtered and collected for analysis.
 
-    :param has_errors: True if collection failures occurred for the tests being
-                       analyzed.
     :param outcomes: set of outcomes from the TestResults
     :param r_id: Optional requirement ID if known
     :return: 'ERROR', 'PASS', 'FAIL', or 'SKIP'
-             (see aggregate_requirement_adherence for more detail)
+             * ERROR - at least one ERROR occurred
+             * FAIL  - at least one FAIL occurred (no ERRORs)
+             * PASS  - at least one PASS and no FAIL or ERRORs
+             * SKIP  - all tests were SKIP
     """
-    if has_errors:
-        return "ERROR"
-
     if not outcomes:
         return "PASS"
+    elif "ERROR" in outcomes:
+        return "ERROR"
     elif "FAIL" in outcomes:
         return "FAIL"
     elif "PASS" in outcomes:
@@ -690,53 +668,11 @@ def aggregate_run_results(collection_failures, test_results):
         return "PASS"
 
 
-def error(failure_or_result):
-    """
-    Extracts the error message from a collection failure or test result
-    :param failure_or_result: Entry from COLLECTION_FAILURE or a TestResult
-    :return: Error message as string
-    """
-    if isinstance(failure_or_result, TestResult):
-        return failure_or_result.error_message
-    else:
-        return failure_or_result["error"]
-
-
-def req_ids(failure_or_result):
-    """
-    Extracts the requirement IDs from a collection failure or test result
-    :param failure_or_result: Entry from COLLECTION_FAILURE or a TestResult
-    :return: set of Requirement IDs.  If no requirements mapped, then an empty set
-    """
-    if isinstance(failure_or_result, TestResult):
-        return set(failure_or_result.requirement_ids)
-    else:
-        return set(failure_or_result["requirements"])
-
-
-def collect_errors(r_id, collection_failures, test_result):
-    """
-    Creates a list of error messages from the collection failures and
-    test results.  If r_id is provided, then it collects the error messages
-    where the failure or test is associated with that requirement ID.  If
-    r_id is None, then it collects all errors that occur on failures and
-    results that are not mapped to requirements
-    """
-
-    def selector(item):
-        if r_id:
-            return r_id in req_ids(item)
-        else:
-            return not req_ids(item)
-
-    errors = (error(x) for x in chain(collection_failures, test_result) if selector(x))
-    return [e for e in errors if e]
-
-
 def relative_paths(base_dir, paths):
     return [os.path.relpath(p, base_dir) for p in paths]
 
 
+# noinspection PyTypeChecker
 def generate_json(outpath, template_path, categories):
     """
     Creates a JSON summary of the entire test run.
@@ -779,30 +715,40 @@ def generate_json(outpath, template_path, categories):
             }
         )
 
+    # Build a mapping of requirement ID to the results
+    r_id_results = defaultdict(lambda: {"errors": set(), "outcomes": set()})
+    for test_result in results:
+        test_reqs = test_result["requirements"]
+        r_ids = (
+            [r["id"] if isinstance(r, dict) else r for r in test_reqs]
+            if test_reqs
+            else ("",)
+        )
+        for r_id in r_ids:
+            item = r_id_results[r_id]
+            item["outcomes"].add(test_result["result"])
+            if test_result["error"]:
+                item["errors"].add(test_result["error"])
+
     requirements = data["requirements"]
     for r_id, r_data in reqs.items():
-        result = aggregate_requirement_adherence(r_id, COLLECTION_FAILURES, ALL_RESULTS)
-        if result:
-            requirements.append(
-                {
-                    "id": r_id,
-                    "text": r_data["description"],
-                    "keyword": r_data["keyword"],
-                    "result": result,
-                    "errors": collect_errors(r_id, COLLECTION_FAILURES, ALL_RESULTS),
-                }
-            )
-    # If there are tests that aren't mapped to a requirement, then we'll
-    # map them to a special entry so the results are coherent.
-    unmapped_outcomes = {r.outcome for r in ALL_RESULTS if not r.requirement_ids}
-    has_errors = any(not f["requirements"] for f in COLLECTION_FAILURES)
-    if unmapped_outcomes or has_errors:
+        requirements.append(
+            {
+                "id": r_id,
+                "text": r_data["description"],
+                "keyword": r_data["keyword"],
+                "result": aggregate_results(r_id_results[r_id]["outcomes"]),
+                "errors": list(r_id_results[r_id]["errors"]),
+            }
+        )
+
+    if r_id_results[""]["errors"] or r_id_results[""]["outcomes"]:
         requirements.append(
             {
                 "id": "Unmapped",
                 "text": "Tests not mapped to requirements (see tests)",
-                "result": aggregate_results(has_errors, unmapped_outcomes),
-                "errors": collect_errors(None, COLLECTION_FAILURES, ALL_RESULTS),
+                "result": aggregate_results(r_id_results[""]["outcomes"]),
+                "errors": list(r_id_results[""]["errors"]),
             }
         )
 
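This single pass replaces the old per-requirement calls to
aggregate_requirement_adherence and collect_errors, each of which
re-scanned every result, turning report generation from roughly
O(requirements x results) into O(results + requirements). A minimal
sketch of the defaultdict pattern, with made-up result rows:

    from collections import defaultdict

    r_id_results = defaultdict(lambda: {"errors": set(), "outcomes": set()})
    rows = [("R-123", "FAIL", "bad property"), ("R-123", "PASS", None)]
    for r_id, outcome, error in rows:
        item = r_id_results[r_id]  # entry created on first access
        item["outcomes"].add(outcome)
        if error:
            item["errors"].add(error)

    assert r_id_results["R-123"]["outcomes"] == {"FAIL", "PASS"}
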
index 2387a2c..49ebf41 100644
--- a/ice_validator/tests/test_valid_heat.py
+++ b/ice_validator/tests/test_valid_heat.py
@@ -132,7 +132,7 @@ def test_heat(yaml_file):
 
     files = {}
     dirname = os.path.dirname(yaml_file)
-    for file in set(get_list_of_nested_files(yml, dirname)):
+    for file in set(get_list_of_nested_files(yaml_file, dirname)):
         load_file(file, files)
 
     validator = HOTValidator(yaml_file, files, yml)
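
get_list_of_nested_files now takes the template's path rather than its
parsed contents: a path string is hashable and can serve as an lru_cache
key, while the parsed dict could not. The set() wrapper also copies (and
deduplicates) the cached list before it is used.
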
index f65d6d1..634da5c 100644
--- a/ice_validator/tests/test_volume_templates.py
+++ b/ice_validator/tests/test_volume_templates.py
@@ -52,24 +52,18 @@ def test_volume_templates_contains_cinder_or_resource_group(volume_template):
     in fact volume templates
     """
     acceptable_resources = []
-
-    with open(volume_template) as fh:
-        yml = yaml.load(fh)
-
-    # skip if resources are not defined
-    if "resources" not in yml:
-        pytest.skip("No resources specified in the heat template")
-
     dirname = os.path.dirname(volume_template)
-    list_of_files = get_list_of_nested_files(yml, dirname)
+    # copy the cached result: get_list_of_nested_files is lru_cache'd and
+    # returns a shared list that callers must not mutate
+    list_of_files = list(get_list_of_nested_files(volume_template, dirname))
 
     list_of_files.append(volume_template)
 
     for file in list_of_files:
         with open(file) as fh:
             yml = yaml.load(fh)
-
-        for k, v in yml["resources"].items():
+        resources = yml.get("resources") or {}
+        for k, v in resources.items():
             if not isinstance(v, dict):
                 continue
             if "type" not in v:
index c7a5601..e5f5941 100644
--- a/ice_validator/tests/utils/nested_files.py
+++ b/ice_validator/tests/utils/nested_files.py
 
 """nested files
 """
-
+from functools import lru_cache
 from os import path, listdir
 import re
 from tests import cached_yaml as yaml
 from tests.structures import Heat
 
+from tests.helpers import load_yaml
+
 VERSION = "1.4.0"
 
 """
@@ -108,40 +110,31 @@ def get_dict_of_nested_files(yml, dirpath):
     return nested_files
 
 
-def get_list_of_nested_files(yml, dirpath):
+@lru_cache(maxsize=None)
+def get_list_of_nested_files(yml_path, dirpath):
     """
     return a list of all nested files
     """
 
-    if not hasattr(yml, "items"):
-        return []
-
+    yml = load_yaml(yml_path)
     nested_files = []
+    resources = yml.get("resources") or {}
 
-    for v in yml.values():
+    for v in resources.values():
         if isinstance(v, dict) and "type" in v:
             t = v["type"]
             if t.endswith(".yml") or t.endswith(".yaml"):
                 filepath = path.join(dirpath, t)
                 if path.exists(filepath):
-                    with open(filepath) as fh:
-                        t_yml = yaml.load(fh)
                     nested_files.append(filepath)
-                    nested_files.extend(get_list_of_nested_files(t_yml, dirpath))
+                    nested_files.extend(get_list_of_nested_files(filepath, dirpath))
             elif t == "OS::Heat::ResourceGroup":
                 rdt = v.get("properties", {}).get("resource_def", {}).get("type", None)
                 if rdt and (rdt.endswith(".yml") or rdt.endswith(".yaml")):
                     filepath = path.join(dirpath, rdt)
                     if path.exists(filepath):
-                        with open(filepath) as fh:
-                            rdt_yml = yaml.load(fh)
                         nested_files.append(filepath)
-                        nested_files.extend(get_list_of_nested_files(rdt_yml, dirpath))
-        if isinstance(v, dict):
-            nested_files.extend(get_list_of_nested_files(v, dirpath))
-        elif isinstance(v, list):
-            for d in v:
-                nested_files.extend(get_list_of_nested_files(d, dirpath))
+                        nested_files.extend(get_list_of_nested_files(filepath, dirpath))
     return nested_files
 
 
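lru_cache(maxsize=None) memoizes on the (yml_path, dirpath) pair, so a
nested template referenced by many parents is read and parsed once per
session. The trade-off is that every call returns the same list object,
so callers must copy it (set(), list()) or only read from it. A minimal
sketch of that caveat, using a hypothetical cached helper:

    from functools import lru_cache

    @lru_cache(maxsize=None)
    def nested(path):
        return ["a.yaml"]  # stands in for the real file discovery

    files = nested("base.yaml")
    files.append("volume.yaml")  # mutates the cached object in place!
    assert nested("base.yaml") == ["a.yaml", "volume.yaml"]
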
@@ -271,6 +264,7 @@ def get_nested_files(filenames):
     return nested_files
 
 
+@lru_cache(maxsize=None)
 def file_is_a_nested_template(file):
     directory = path.dirname(file)
     nested_files = []
@@ -278,12 +272,8 @@ def file_is_a_nested_template(file):
         if filename.endswith(".yaml") or filename.endswith(".yml"):
             filename = "{}/{}".format(directory, filename)
             try:
-                with open(filename) as fh:
-                    yml = yaml.load(fh)
-                if "resources" not in yml:
-                    continue
                 nested_files.extend(
-                    get_list_of_nested_files(yml["resources"], path.dirname(filename))
+                    get_list_of_nested_files(filename, path.dirname(filename))
                 )
             except yaml.YAMLError as e:
                 print(e)  # pylint: disable=superfluous-parens