Add onaptests_bench into integration project repository
[integration.git] test/onaptests_bench/src/onaptests_bench/reporting.py
#!/usr/bin/env python3

# ============LICENSE_START=======================================================
#  Copyright (C) 2022 Orange, Ltd.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# ============LICENSE_END=========================================================
"""
Aggregate test results
"""
import logging
import os
import re

from dataclasses import dataclass
from datetime import datetime

import matplotlib.pyplot as plt  # pylint: disable=import-error
from jinja2 import Environment, select_autoescape, PackageLoader  # pylint: disable=import-error
32
# Logger
LOG_LEVEL = 'INFO'
logging.basicConfig()
LOGGER = logging.getLogger("onaptests_bench")
LOGGER.setLevel(LOG_LEVEL)

RESULT_DIR_PATH = "/tmp/mytest"
RESULT_LOG_FILE = "xtesting.log"
RESULT_LOG_REPORTING_FILE = "reporting.html"
FIGURE_NAME = "mygraph.png"
USE_CASE_NAME = "unknown"  # could be refined by result parsing
TIMEOUT_RUN = 1200  # parameter to be provided by the launcher
TEST_DURATION = 120  # parameter to be provided by the launcher
NB_SIMULTANEOUS_TESTS = 10  # parameter to be provided by the launcher
REPORTING_DIR = "/tmp/"
48
@dataclass
class TestResult:
    """Test result retrieved from an xtesting log."""
    case_name: str
    status: int = 0  # success percentage: 100 = PASS, 0 = FAIL
    start_date: datetime = datetime(2000, 1, 1, 0, 0, 1)
    duration: int = 0
56
@dataclass
class SerieResult:
    """Statistics of a serie of tests."""
    serie_id: str
    success_rate: int = 0
    min: int = 0
    max: int = 0
    mean: float = 0.0
    median: float = 0.0
    nb_occurences: int = 0
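
# Illustrative example (hypothetical values): a serie of five runs that all
# passed in about two minutes would be summarised as
#   SerieResult(serie_id='serie1', success_rate=100, min=110, max=130,
#               mean=118.0, median=117.0, nb_occurences=5)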
67
class OnaptestBenchReporting:
    """Build html summary page."""

    def __init__(self, nb_simultaneous_tests=NB_SIMULTANEOUS_TESTS,
                 duration=TEST_DURATION,
                 res_dir_path=RESULT_DIR_PATH,
                 reporting_dir=REPORTING_DIR) -> None:
        """Initialization of the report."""
        self._case_name = USE_CASE_NAME
        self._nb_simultaneous_tests = nb_simultaneous_tests
        self._test_duration = duration
        self._result_dir_path = res_dir_path
        self._reporting_dir = reporting_dir
81
    def parse_xtesting_results(self, file_result):
        """Retrieve data from an xtesting log file."""
        # We need to retrieve:
        # (- the name)
        # - the start date
        # - the status
        # - the duration
        # Note: the data could be read from the DB, but we aggregate from the
        # logs to avoid a dependency on the DB.
        # 2021-01-22 07:01:58,467 - xtesting.ci.run_tests - INFO - Test result:
        #
        # +------------------------+---------------------+------------------+----------------+
        # |     TEST CASE          |       PROJECT       |     DURATION     |     RESULT     |
        # +------------------------+---------------------+------------------+----------------+
        # |      basic_onboard     |     integration     |      19:53       |      PASS      |
        # +------------------------+---------------------+------------------+----------------+
        #
        # 2021-01-22 07:01:58 - xtesting.ci.run_tests - INFO - Execution exit value: Result.EX_OK
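        # A run start is matched on a log line of the form (illustrative):
        # 2021-01-22 06:42:05,123 - xtesting.ci.run_tests - INFO - Running test case 'basic_onboard'...
        # For the sample table above, the parsed duration is
        # 19*60 + 53 = 1193 s and the status is 100 (PASS).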
        start_date = ""
        case_name = ""
        duration = TIMEOUT_RUN
        status = 0
        with open(file_result) as xtesting_result:
            for cnt, line in enumerate(xtesting_result):
                LOGGER.debug(cnt)

                if "Running test case" in line:
                    start_date = line.split()[0] + " " + line.split()[1]
                    case_name = re.search(r"'(.*)'", line).group(1)
                    self._case_name = case_name

                # if the test ends properly, overwrite the start time with the
                # end time for a better display
                if "Execution exit value" in line:
                    start_date = line.split()[0] + " " + line.split()[1]

                # Look for the result table
                if "|" in line and self._case_name in line:
                    duration_str = line.split()[5]
                    duration = int(
                        duration_str.split(":")[0])*60 + int(
                            duration_str.split(":")[1])
                    if line.split()[7] == "PASS":
                        status = 100
                    else:
                        status = 0
127
        testresult = TestResult(
            case_name=case_name,
            status=status,
            start_date=datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S,%f'),
            duration=duration)
        return testresult
134
    @staticmethod
    def calculate_stats(durations):
        """From a list of durations, compute the min, max, mean and median."""

        min_val = min(durations)
        max_val = max(durations)

        # Mean
        mean_val = sum(durations) / len(durations)

        # Median: middle element for an odd number of values, average of the
        # two middle elements for an even number
        lst = sorted(durations)
        lst_len = len(lst)
        index = (lst_len - 1) // 2
        if lst_len % 2:
            median_val = lst[index]
        else:
            median_val = (lst[index] + lst[index + 1])/2.0

        return min_val, max_val, mean_val, median_val
161
    @staticmethod
    def calculate_success_rate(criterias):
        """Calculate the success rate of a serie."""
        score = sum(criterias)
        try:
            rate = score/len(criterias)
        except ZeroDivisionError:
            rate = 0
        return rate
174
175
    def parse_serie_durations(self): # pylint: disable=too-many-locals
        """Find result series."""
        # From the result directory, find all subdirectories and build an
        # array of results.
        series = []
        serie_names = []
        serie_durations = {}
        serie_criteria = {}

        for root, dirs, files in os.walk(self._result_dir_path):
            try:
                # sort the run directories numerically on their trailing index
                dirs.sort(key=lambda x: int(x.split("/")[-1][5:]))
            except ValueError:
                LOGGER.debug("sort only what is sortable")

            LOGGER.debug("Root: %s, Dirs: %s, Files: %s", root, dirs, files)

            for name in files:
                if name == RESULT_LOG_FILE:
                    serie_name = root.split("/")[-2]
                    # if a new serie is detected, initialize it
                    if serie_name not in serie_names:
                        serie_names.append(serie_name)
                        serie_durations[serie_name] = []
                        serie_criteria[serie_name] = []
                    serie_raw_results = self.parse_xtesting_results(
                        os.path.join(root, RESULT_LOG_FILE))
                    serie_durations[serie_name].append(
                        serie_raw_results.duration)
                    serie_criteria[serie_name].append(
                        serie_raw_results.status)
        for serie in serie_names:
            LOGGER.info("Calculate stats and success rate of serie %s", serie)
            LOGGER.debug(serie_durations[serie])
            LOGGER.debug(serie_criteria[serie])
            # calculate stats
            min_val, max_val, mean_val, med_val = self.calculate_stats(
                serie_durations[serie])
            success_rate = self.calculate_success_rate(
                serie_criteria[serie])
            series.append(SerieResult(
                serie_id=serie,
                min=min_val,
                max=max_val,
                mean=mean_val,
                median=med_val,
                success_rate=success_rate,
                nb_occurences=len(serie_durations[serie])))

        return series
225
    def create_duration_time_serie(self):
        """Create scatter and histogram figures."""
        # duration,success = f(time)
        x_array_pass = []
        x_array_fail = []
        y_array_pass = []
        y_array_fail = []
        for root, dirs, files in os.walk(self._result_dir_path):
            LOGGER.debug("Root: %s, Dirs: %s, Files: %s", root, dirs, files)
            for name in files:
                if name == RESULT_LOG_FILE:
                    serie_raw_results = self.parse_xtesting_results(
                        os.path.join(root, RESULT_LOG_FILE))
                    LOGGER.debug("Date %s", serie_raw_results.start_date)
                    LOGGER.debug("Status %s", serie_raw_results.status)
                    LOGGER.debug("Duration %s", serie_raw_results.duration)
                    if serie_raw_results.status < 100:
                        y_array_fail.append(serie_raw_results.duration)
                        x_array_fail.append(serie_raw_results.start_date)
                    else:
                        y_array_pass.append(serie_raw_results.duration)
                        x_array_pass.append(serie_raw_results.start_date)
        plt.scatter(x_array_pass, y_array_pass, color='blue', label='PASS')
        plt.scatter(x_array_fail, y_array_fail, color='red', label='FAIL')
        plt.xlabel("time")
        plt.ylabel("Duration of the test (s)")
        plt.legend()
        plt.savefig(self._reporting_dir + FIGURE_NAME)
        plt.close()

        # Create histogram
        plt.hist(y_array_pass)
        plt.xlabel("Duration of the test")
        plt.ylabel("Number of tests")
        plt.savefig(self._reporting_dir + "histo_" + FIGURE_NAME)
        plt.close()
263
    def create_success_rate(self, series_bench):
        """Draw success rate = f(serie ID)"""
        # Create a visualization of the success rate
        # success_rate = f(time)
        x_array_success_rate = []
        y_array_success_rate = []

        for serie in series_bench:
            x_array_success_rate.append(serie.serie_id)
            y_array_success_rate.append(int(serie.success_rate))
        LOGGER.info(" Success rate vector: %s", y_array_success_rate)
        plt.bar(range(len(y_array_success_rate)),
                y_array_success_rate,
                width=0.5,
                color='blue')
        # plt.plot(x_array_success_rate, y_array_success_rate, '-o', color='orange')
        plt.xlabel("Series")
        plt.ylabel("Success rate (%)")
        plt.savefig(self._reporting_dir + "bar_" + FIGURE_NAME)
        plt.close()
284
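    # Worked example (hypothetical values): with 10 simultaneous tests per
    # serie and per-serie success rates of 100 % and 80 %, the cumulated
    # success rate after the second serie is (100*10 + 80*10) / 20 = 90 %.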
    def create_cumulated_success_rate(self, series_bench):
        """Draw success rate = f(nb executed tests)"""
        # Create success_rate=f(nb test executed)
        x_array_cumulated_success_rate = []
        y_array_cumulated_success_rate = []
        nb_test = 0
        nb_success_test = 0
        for serie in series_bench:
            # calculate the number of tests
            nb_test += self._nb_simultaneous_tests
            # recalculate the success rate
            nb_success_test += int(serie.success_rate)*self._nb_simultaneous_tests
            success_rate = nb_success_test / nb_test
            x_array_cumulated_success_rate.append(nb_test)
            y_array_cumulated_success_rate.append(success_rate)
        plt.plot(
            x_array_cumulated_success_rate,
            y_array_cumulated_success_rate,
            '-o', color='blue')
        plt.xlabel("Nb of executed tests")
        plt.ylabel("Success rate (%)")
        plt.savefig(self._reporting_dir + "rate_" + FIGURE_NAME)
        plt.close()
308
309
    def generate_reporting(self):
        """Generate the serie reporting."""
        series_bench = self.parse_serie_durations()
        LOGGER.info(series_bench)

        # create the html page
        jinja_env = Environment(
            autoescape=select_autoescape(['html']),
            loader=PackageLoader('onaptests_bench'))

        page_info = {}
        page_info['usecase_name'] = self._case_name
        page_info['nb_series'] = str(len(series_bench))
        page_info['nb_simu_tests'] = str(self._nb_simultaneous_tests)
        page_info['test_duration'] = self._test_duration
        page_info['nb_tests'] = self._nb_simultaneous_tests * len(series_bench)
        success_rate_vector = []
        min_durations = []
        max_durations = []
        mean_durations = []

        for serie in series_bench:
            success_rate_vector.append(int(serie.success_rate))
            min_durations.append(int(serie.min))
            max_durations.append(int(serie.max))
            mean_durations.append(int(serie.mean))

        page_info['global_success_rate'] = int(self.calculate_success_rate(
            success_rate_vector))
        page_info['min_duration'] = min(min_durations)
        page_info['max_duration'] = max(max_durations)
        # calculate_success_rate is reused here as a plain average to get the
        # mean of the per-serie mean durations
        page_info['mean_duration'] = int(
            self.calculate_success_rate(mean_durations))
        jinja_env.get_template(
            'onaptests_bench.html.j2').stream(
                info=page_info,
                data=series_bench).dump(
                    '{}/onaptests_bench.html'.format(self._reporting_dir))

        self.create_duration_time_serie()
        self.create_success_rate(series_bench)
        self.create_cumulated_success_rate(series_bench)
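

# Minimal usage sketch (illustrative, not wired to the launcher): assuming an
# xtesting result tree is available under RESULT_DIR_PATH, generate the HTML
# report and the figures with the module defaults.
if __name__ == "__main__":
    REPORT = OnaptestBenchReporting(
        nb_simultaneous_tests=NB_SIMULTANEOUS_TESTS,
        duration=TEST_DURATION,
        res_dir_path=RESULT_DIR_PATH,
        reporting_dir=REPORTING_DIR)
    REPORT.generate_reporting()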