3 # ============LICENSE_START=======================================================
4 # Copyright (C) 2022 Orange, Ltd.
5 # ================================================================================
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
10 # http://www.apache.org/licenses/LICENSE-2.0
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
18 # SPDX-License-Identifier: Apache-2.0
19 # ============LICENSE_END=========================================================
21 Aggregate test results
27 from dataclasses import dataclass
28 from datetime import datetime
29 import matplotlib.pyplot as plt # pylint: disable=import-error
31 from jinja2 import Environment, select_autoescape, PackageLoader # pylint: disable=import-error
# Module-level logger and default configuration for the benchmark reporting.
# NOTE(review): LOG_LEVEL is referenced here but defined on a line not visible
# in this listing — confirm it is assigned before this point.
36 LOGGER = logging.getLogger("onaptests_bench")
37 LOGGER.setLevel(LOG_LEVEL)
# Default input/output locations and tuning values; the launcher is expected
# to override TIMEOUT_RUN / TEST_DURATION / NB_SIMULTANEOUS_TESTS.
39 RESULT_DIR_PATH = "/tmp/mytest"
40 RESULT_LOG_FILE = "xtesting.log"
41 RESULT_LOG_REPORTING_FILE = "reporting.html"
42 FIGURE_NAME = "mygraph.png"
# NOTE(review): "unknwown" is a typo in a runtime string (should be "unknown");
# fixing it changes the placeholder value, so it is only flagged here.
43 USE_CASE_NAME = "unknwown" # could be checked with result parsing
44 TIMEOUT_RUN = 1200 # parameter to be provided by the launcher
45 TEST_DURATION = 120 # parameter to be provided by the launcher
46 NB_SIMULTANEOUS_TESTS = 10 # parameter to be provided by the launcher
47 REPORTING_DIR = "/tmp/"
# NOTE(review): fragment — the @dataclass headers (TestResult and, further
# down, apparently a serie-level result class such as SerieResult) are on
# lines not visible in this listing.
51 """Test results retrieved from xtesting."""
# NOTE(review): the default is a *string* although the field is annotated
# `datetime` (parse_xtesting_results later stores a real datetime via
# strptime) — TODO confirm whether the string default is ever consumed.
54 start_date: datetime = "2000-01-01 00:00:01,123"
# Number of runs aggregated in a serie (field of the serie-level dataclass,
# presumably SerieResult — header not visible here).
66 nb_occurences: int = 0
# Aggregates xtesting run logs found under the result directory, computes
# per-serie statistics and success rates, draws matplotlib figures and
# renders a Jinja2 HTML summary into the reporting directory.
68 class OnaptestBenchReporting:
69 """Build html summary page."""
71 def __init__(self, nb_simultaneous_tests=NB_SIMULTANEOUS_TESTS,
72 duration=TEST_DURATION,
73 res_dir_path=RESULT_DIR_PATH,
74 reporting_dir=REPORTING_DIR) -> None:
75 """Initialization of the report."""
76 self._case_name = USE_CASE_NAME
77 self._nb_simultaneous_tests = nb_simultaneous_tests
78 self._test_duration = duration
79 self._result_dir_path = res_dir_path
80 self._reporting_dir = reporting_dir
82 def parse_xtesting_results(self, file_result):
83 """Retrieve data from a xtesting file."""
84 # we need to retrieve:
# NOTE(review): the comment lines enumerating the retrieved fields
# (original lines 85-88) are missing from this listing.
89 # note Data could be in DB but let's aggregate based on the log to avoid
90 # dependency to the DB
# Expected xtesting log shape:
91 # 2021-01-22 07:01:58,467 - xtesting.ci.run_tests - INFO - Test result:
93 # +------------------------+---------------------+------------------+----------------+
94 # | TEST CASE | PROJECT | DURATION | RESULT |
95 # +------------------------+---------------------+------------------+----------------+
96 # | basic_onboard | integration | 19:53 | PASS |
97 # +------------------------+---------------------+------------------+----------------+
99 # 2021-01-22 07:01:58 - xtesting.ci.run_tests - INFO - Execution exit value: Result.EX_OK
# Fall back to the campaign timeout when no duration can be parsed.
102 duration = TIMEOUT_RUN
104 with open(file_result) as xtesting_result:
105 for cnt, line in enumerate(xtesting_result):
# First line of a run: remember its timestamp as the start date.
108 if "Running test case" in line:
109 start_date = line.split()[0] + " " + line.split()[1]
# NOTE(review): re.search may return None on an unexpected line
# format, which would raise AttributeError on .group(1).
110 self._case_name = (re.search('\'(.*)\'', line)).group(1)
112 # if test ends properly, overwrite start time with end time
113 # for a better display
114 if "Execution exit value" in line:
115 start_date = line.split()[0] + " " + line.split()[1]
117 # Look for the result table
118 if "|" in line and self._case_name in line:
# Column 5 of the table row holds "MM:SS"; converted to seconds.
119 duration_str = line.split()[5]
# NOTE(review): the opening of this expression (original line 120,
# presumably "duration = int(") is missing from this listing.
121 duration_str.split(":")[0])*60 + int(
122 duration_str.split(":")[1])
123 if line.split()[7] == "PASS":
# NOTE(review): the PASS/FAIL status assignment (original lines
# 124-127) is missing from this listing.
128 testresult = TestResult(
# Timestamp format matches the xtesting log prefix shown above.
131 start_date=datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S,%f'),
# NOTE(review): the remaining TestResult kwargs and the return
# statement (original lines 132-135) are missing from this listing.
# NOTE(review): likely decorated @staticmethod on the missing original line
# 135 — the visible signature takes no self.
136 def calculate_stats(durations):
137 """From a duration results, retrieve the min, max, mean & median value."""
139 min_val = min(durations)
140 max_val = max(durations)
# Mean computation.
143 total = sum(durations)
144 length = len(durations)
# NOTE(review): this loop iterates exactly once over the wrapped list, so it
# is a no-op wrapper around the assignment below.
145 for nums in [durations]:
147 mean_val = total / length
# Median computation on a sorted copy.
150 lst = sorted(durations)
# NOTE(review): lst is already sorted; this second sort is redundant
# (statistics.median would replace this whole section).
151 med_val = sorted(lst)
# NOTE(review): lst_len is assigned on a line missing from this listing
# (presumably len(lst)), as are the if/else keywords of the odd/even-length
# branches (original lines 154-157).
153 index = (lst_len - 1) // 2
156 median_val = med_val[index]
158 median_val = (med_val[index] + med_val[index + 1])/2.0
160 return min_val, max_val, mean_val, median_val
# NOTE(review): likely decorated @staticmethod on a missing line — the
# visible signature takes no self.
163 def calculate_success_rate(criterias):
164 """Calculate Serie success rate."""
165 # calculate success rate
# NOTE(review): the score initialisation and the `try:` opener (around
# original line 166) are missing from this listing, as is the accumulation
# body of the loop (original lines 168-169) — it apparently averages the
# criteria values.
167 for criteria in criterias:
170 rate = score/len(criterias)
# Empty input yields a 0 rate instead of crashing.
171 except ZeroDivisionError:
# NOTE(review): the except body and the return statement (original lines
# 172-175) are missing from this listing.
176 def parse_serie_durations(self): # pylint: disable=too-many-locals
177 """Find result series."""
178 # from the res directory find all the subdirectory and build an array of results
# NOTE(review): the accumulator initialisations (series, serie_names,
# serie_durations, serie_criteria — original lines 179-183) are missing
# from this listing.
184 for root, dirs, files in os.walk(self._result_dir_path):
# Sort serie directories numerically; assumes names like "serieNN"
# (digits from position 5) — TODO confirm against the launcher.
186 dirs.sort(key=lambda x: int(x.split("/")[-1][5:]))
# NOTE(review): the try/except wrapping this sort (original lines 185/187)
# is missing — this debug message is presumably the except handler.
188 LOGGER.debug("sort only what is sortable")
190 LOGGER.debug("Root: %s, Dirs: %s, Files: %s", root, dirs, files)
# NOTE(review): the `for name in files:` loop header (original lines
# 191-192) is missing from this listing.
193 if name == RESULT_LOG_FILE:
# Serie name is the parent directory of the log file's directory.
194 serie_name = root.split("/")[-2]
195 # if new serie detected, initialize it
196 if serie_name not in serie_names:
197 serie_names.append(serie_name)
198 serie_durations[serie_name] = []
199 serie_criteria[serie_name] = []
200 serie_raw_results = self.parse_xtesting_results(
201 root + "/" + RESULT_LOG_FILE)
202 serie_durations[serie_name].append(
203 serie_raw_results.duration)
204 serie_criteria[serie_name].append(
205 serie_raw_results.status)
# Second pass: aggregate stats per serie.
206 for serie in serie_names:
207 LOGGER.info("Calculate stats and success rate of serie %s", serie)
208 LOGGER.debug(serie_durations[serie])
209 LOGGER.debug(serie_criteria[serie])
211 min_val, max_val, mean_val, med_val = self.calculate_stats(
212 serie_durations[serie])
213 success_rate = self.calculate_success_rate(
214 serie_criteria[serie])
215 series.append(SerieResult(
# NOTE(review): the other SerieResult kwargs (original lines 216-220) and
# the return statement (original lines 223-225) are missing from this
# listing.
221 success_rate=success_rate,
222 nb_occurences=len(serie_durations[serie])))
226 def create_duration_time_serie(self):
227 """Create Histogram and scattered figure."""
228 # duration,success = f(time)
# NOTE(review): the pass/fail x/y array initialisations (original lines
# 229-232) are missing from this listing.
233 for root, dirs, files in os.walk(self._result_dir_path):
234 LOGGER.debug("Root: %s, Dirs: %s, Files: %s", root, dirs, files)
# NOTE(review): the `for name in files:` loop header (original line 235)
# is missing from this listing.
236 if name == RESULT_LOG_FILE:
237 serie_raw_results = self.parse_xtesting_results(
238 root + "/" + RESULT_LOG_FILE)
239 LOGGER.debug("Date %s", serie_raw_results.start_date)
240 LOGGER.debug("Status %s", serie_raw_results.status)
241 LOGGER.debug("Duration %s", serie_raw_results.duration)
242 # x_array.append(serie_raw_results.start_date)
# A status below 100 is classified as FAIL — presumably status is a
# percentage where 100 means full pass; TODO confirm in TestResult.
243 if serie_raw_results.status < 100:
244 y_array_fail.append(serie_raw_results.duration)
245 x_array_fail.append(serie_raw_results.start_date)
# NOTE(review): the `else:` (original line 246) is missing from this listing.
247 y_array_pass.append(serie_raw_results.duration)
248 x_array_pass.append(serie_raw_results.start_date)
# Scatter plot: duration over time, PASS in blue, FAIL in red.
249 plt.scatter(x_array_pass, y_array_pass, color='blue', label='PASS')
250 plt.scatter(x_array_fail, y_array_fail, color='red', label='FAIL')
252 plt.ylabel("Duration of the test (s)")
254 plt.savefig(self._reporting_dir + FIGURE_NAME)
# Histogram of PASS durations, saved as a separate figure.
# NOTE(review): the legend/xlabel and the figure close/reset between the two
# plots (original lines 251/253/255-257) are missing from this listing.
258 plt.hist(y_array_pass)
259 plt.xlabel("Duration of the test")
260 plt.ylabel("Number of tests")
261 plt.savefig(self._reporting_dir + "histo_" + FIGURE_NAME)
264 def create_success_rate(self, series_bench):
265 """Draw success rate = f(serie ID)"""
266 # Create a visualization of success rate
267 # success_rate = f(time)
268 x_array_success_rate = []
269 y_array_success_rate = []
271 for serie in series_bench:
# NOTE(review): x_array_success_rate is filled but the visible bar call
# uses range(...) as x — the serie ids only survive in the commented-out
# plot below.
272 x_array_success_rate.append(serie.serie_id)
273 y_array_success_rate.append(int(serie.success_rate))
274 LOGGER.info(" Success rate vector: %s", y_array_success_rate)
275 plt.bar(range(len(y_array_success_rate)),
# NOTE(review): the remaining plt.bar kwargs (original lines 277-278), the
# xlabel (line 280) and the figure close (line 283) are missing from this
# listing.
276 y_array_success_rate,
279 # plt.plot(x_array_success_rate, y_array_success_rate, '-o', color='orange')
281 plt.ylabel("Success rate (%)")
282 plt.savefig(self._reporting_dir + "bar_" + FIGURE_NAME)
285 def create_cumulated_success_rate(self, series_bench):
286 """Draw success rate = f(nb executed tests)"""
287 # Create success_rate=f(nb test executed)
288 x_array_cumulated_success_rate = []
289 y_array_cumulated_success_rate = []
# NOTE(review): the nb_test / nb_success_test counter initialisations
# (original lines 290-291) are missing from this listing.
292 for serie in series_bench:
293 # calculate the number of tests
294 nb_test += self._nb_simultaneous_tests
295 # recalculate success rate
# Each serie contributes its rate weighted by the tests it ran.
296 nb_success_test += int(serie.success_rate)*self._nb_simultaneous_tests
297 success_rate = nb_success_test / nb_test
298 x_array_cumulated_success_rate.append(nb_test)
299 y_array_cumulated_success_rate.append(success_rate)
# NOTE(review): the opening of the plot call (original line 300, presumably
# "plt.plot(") and its trailing arguments/close (lines 303/307-308) are
# missing from this listing.
301 x_array_cumulated_success_rate,
302 y_array_cumulated_success_rate,
304 plt.xlabel("Nb of executed tests")
305 plt.ylabel("Success rate (%)")
306 plt.savefig(self._reporting_dir + "rate_" + FIGURE_NAME)
310 def generate_reporting(self):
311 """Generate Serie reporting."""
312 series_bench = self.parse_serie_durations()
313 LOGGER.info(series_bench)
# Jinja2 environment loading templates packaged with onaptests_bench;
# HTML autoescaping enabled.
316 jinja_env = Environment(
317 autoescape=select_autoescape(['html']),
318 loader=PackageLoader('onaptests_bench'))
# NOTE(review): the page_info dict initialisation (original lines 319-320)
# is missing from this listing.
321 page_info['usecase_name'] = self._case_name
322 page_info['nb_series'] = str(len(series_bench))
323 page_info['nb_simu_tests'] = str(self._nb_simultaneous_tests)
324 page_info['test_duration'] = self._test_duration
325 page_info['nb_tests'] = self._nb_simultaneous_tests * len(series_bench)
326 success_rate_vector = []
# NOTE(review): the min/max/mean_durations list initialisations (original
# lines 327-330) are missing from this listing.
331 for serie in series_bench:
332 success_rate_vector.append(int(serie.success_rate))
333 min_durations.append(int(serie.min))
334 max_durations.append(int(serie.max))
335 mean_durations.append(int(serie.mean))
# NOTE(review): calculate_success_rate is reused here apparently as a plain
# averaging helper (also for mean_duration below) — confirm its semantics.
337 page_info['global_success_rate'] = int(self.calculate_success_rate(
338 success_rate_vector))
339 page_info['min_duration'] = min(min_durations)
340 page_info['max_duration'] = max(max_durations)
341 page_info['mean_duration'] = int(
342 self.calculate_success_rate(mean_durations))
# Render the HTML report; NOTE(review): a .stream() keyword argument
# (original line 345, presumably page_info=page_info) is missing from this
# listing.
343 jinja_env.get_template(
344 'onaptests_bench.html.j2').stream(
346 data=series_bench).dump(
347 '{}/onaptests_bench.html'.format(self._reporting_dir))
# Produce the accompanying figures.
349 self.create_duration_time_serie()
350 self.create_success_rate(series_bench)
351 self.create_cumulated_success_rate(series_bench)