PAL: suite visitor 2
[csit.git] / resources / tools / presentation / input_data_parser.py
1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store
17   them in pandas' Series,
18 - provide access to the data,
19 - filter the data using tags.
20 """
21
22 import re
23 import copy
24 import resource
25 import logging
26
27 from collections import OrderedDict
28 from os import remove, walk, listdir
29 from os.path import isfile, isdir, join
30 from datetime import datetime as dt
31 from datetime import timedelta
32 from json import loads
33 from json.decoder import JSONDecodeError
34
35 import hdrh.histogram
36 import hdrh.codec
37 import prettytable
38 import pandas as pd
39
40 from robot.api import ExecutionResult, ResultVisitor
41 from robot import errors
42
43 from resources.libraries.python import jumpavg
44 from input_data_files import download_and_unzip_data_file
45 from pal_errors import PresentationError
46
47
48 # Separator used in file names
49 SEPARATOR = u"__"
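# Illustrative note (not part of the original file): SEPARATOR joins the
# logical parts of downloaded result file names, e.g. a hypothetical
# u"csit-vpp-perf-mrr-daily-master__123__output.xml".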
50
51
52 class ExecutionChecker(ResultVisitor):
53     """Class to traverse through the test suite structure.
54
55     The functionality implemented in this class generates a JSON structure:
56
57     Performance tests:
58
59     {
60         "metadata": {
61             "generated": "Timestamp",
62             "version": "SUT version",
63             "job": "Jenkins job name",
64             "build": "Information about the build"
65         },
66         "suites": {
67             "Suite long name 1": {
68                 "name": Suite name,
69                 "doc": "Suite 1 documentation",
70                 "parent": "Suite 1 parent",
71                 "level": "Level of the suite in the suite hierarchy"
72             }
73             "Suite long name N": {
74                 "name": Suite name,
75                 "doc": "Suite N documentation",
76                 "parent": "Suite 2 parent",
77                 "level": "Level of the suite in the suite hierarchy"
78             }
79         }
80         "tests": {
81             # NDRPDR tests:
82             "ID": {
83                 "name": "Test name",
84                 "parent": "Name of the parent of the test",
85                 "doc": "Test documentation",
86                 "msg": "Test message",
87                 "conf-history": "DUT1 and DUT2 VAT History",
88                 "show-run": "Show Run",
89                 "tags": ["tag 1", "tag 2", "tag n"],
90                 "type": "NDRPDR",
91                 "status": "PASS" | "FAIL",
92                 "throughput": {
93                     "NDR": {
94                         "LOWER": float,
95                         "UPPER": float
96                     },
97                     "PDR": {
98                         "LOWER": float,
99                         "UPPER": float
100                     }
101                 },
102                 "latency": {
103                     "NDR": {
104                         "direction1": {
105                             "min": float,
106                             "avg": float,
107                             "max": float,
108                             "hdrh": str
109                         },
110                         "direction2": {
111                             "min": float,
112                             "avg": float,
113                             "max": float,
114                             "hdrh": str
115                         }
116                     },
117                     "PDR": {
118                         "direction1": {
119                             "min": float,
120                             "avg": float,
121                             "max": float,
122                             "hdrh": str
123                         },
124                         "direction2": {
125                             "min": float,
126                             "avg": float,
127                             "max": float,
128                             "hdrh": str
129                         }
130                     }
131                 }
132             },
133
134             # TCP tests:
135             "ID": {
136                 "name": "Test name",
137                 "parent": "Name of the parent of the test",
138                 "doc": "Test documentation",
139                 "msg": "Test message",
140                 "tags": ["tag 1", "tag 2", "tag n"],
141                 "type": "TCP",
142                 "status": "PASS" | "FAIL",
143                 "result": int
144             },
145
146             # MRR, BMRR tests:
147             "ID": {
148                 "name": "Test name",
149                 "parent": "Name of the parent of the test",
150                 "doc": "Test documentation",
151                 "msg": "Test message",
152                 "tags": ["tag 1", "tag 2", "tag n"],
153                 "type": "MRR" | "BMRR",
154                 "status": "PASS" | "FAIL",
155                 "result": {
156                     "receive-rate": float,
157                     # Average of a list, computed using AvgStdevStats.
158                     # In CSIT-1180, replace with List[float].
159                 }
160             },
161
162             "ID": {
163                 # next test
164             }
165         }
166     }
167
168
169     Functional tests:
170
171     {
172         "metadata": {  # Optional
173             "version": "VPP version",
174             "job": "Jenkins job name",
175             "build": "Information about the build"
176         },
177         "suites": {
178             "Suite name 1": {
179                 "doc": "Suite 1 documentation",
180                 "parent": "Suite 1 parent",
181                 "level": "Level of the suite in the suite hierarchy"
182             },
183             "Suite name N": {
184                 "doc": "Suite N documentation",
185                 "parent": "Suite N parent",
186                 "level": "Level of the suite in the suite hierarchy"
187             }
188         },
189         "tests": {
190             "ID": {
191                 "name": "Test name",
192                 "parent": "Name of the parent of the test",
193                 "doc": "Test documentation",
194                 "msg": "Test message",
195                 "tags": ["tag 1", "tag 2", "tag n"],
196                 "conf-history": "DUT1 and DUT2 PAPI command history",
197                 "show-run": "Show Run",
198                 "status": "PASS" | "FAIL"
199             },
200             "ID": {
201                 # next test
202             }
203         }
204     }
205
206     .. note:: ID is the lowercase full path to the test.
207     """
208
209     REGEX_PLR_RATE = re.compile(
210         r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
211         r'PLRsearch upper bound::?\s(\d+.\d+)'
212     )
213     REGEX_NDRPDR_RATE = re.compile(
214         r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
215         r'NDR_UPPER:\s(\d+.\d+).*\n'
216         r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
217         r'PDR_UPPER:\s(\d+.\d+)'
218     )
219     REGEX_NDRPDR_GBPS = re.compile(
220         r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
221         r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
222         r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
223         r'PDR_UPPER:.*,\s(\d+.\d+)'
224     )
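    # Illustrative only (invented numbers): the two patterns above expect a
    # test message shaped roughly like
    #     NDR_LOWER: 12345678.0 pps, 6.17 Gbps (initial)
    #     ...
    #     NDR_UPPER: 12400000.0 pps, 6.20 Gbps
    #     PDR_LOWER: 12500000.0 pps, 6.25 Gbps (initial)
    #     ...
    #     PDR_UPPER: 12600000.0 pps, 6.30 Gbps
    # REGEX_NDRPDR_RATE captures the pps values; REGEX_NDRPDR_GBPS captures
    # the Gbps values after the comma.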
225     REGEX_PERF_MSG_INFO = re.compile(
226         r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
227         r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
228         r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
229         r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
230         r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
231     )
232     REGEX_CPS_MSG_INFO = re.compile(
233         r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
234         r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
235     )
236     REGEX_PPS_MSG_INFO = re.compile(
237         r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
238         r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
239     )
240     REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
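    # Illustrative only: REGEX_MRR_MSG_INFO captures everything between the
    # last pair of square brackets. For an invented message
    #     u"Maximum Receive Rate trial results in pps: [10500000.0, 10600000.0]"
    # groups.group(1) would be u"10500000.0, 10600000.0", which
    # _get_data_from_mrr_test_msg() converts to u"[10.50, 10.60]" (Mpps).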
241
242     REGEX_VSAP_MSG_INFO = re.compile(
243         r'Transfer Rate: (\d*.\d*).*\n'
244         r'Latency: (\d*.\d*).*\n'
245         r'Completed requests: (\d*).*\n'
246         r'Failed requests: (\d*).*\n'
247         r'Total data transferred: (\d*).*\n'
248         r'Connection [cr]ps rate:\s*(\d*.\d*)'
249     )
250
251     # Needed for CPS and PPS tests
252     REGEX_NDRPDR_LAT_BASE = re.compile(
253         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
254         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
255     )
256     REGEX_NDRPDR_LAT = re.compile(
257         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
258         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
259         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
260         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
261         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
262         r'Latency.*\[\'(.*)\', \'(.*)\'\]'
263     )
264
265     REGEX_VERSION_VPP = re.compile(
266         r"(VPP Version:\s*|VPP version:\s*)(.*)"
267     )
268     REGEX_VERSION_DPDK = re.compile(
269         r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
270     )
271     REGEX_TCP = re.compile(
272         r'Total\s(rps|cps|throughput):\s(\d*).*$'
273     )
274     REGEX_MRR = re.compile(
275         r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
276         r'tx\s(\d*),\srx\s(\d*)'
277     )
278     REGEX_BMRR = re.compile(
279         r'.*trial results.*: \[(.*)\]'
280     )
281     REGEX_RECONF_LOSS = re.compile(
282         r'Packets lost due to reconfig: (\d*)'
283     )
284     REGEX_RECONF_TIME = re.compile(
285         r'Implied time lost: (\d*.[\de-]*)'
286     )
287     REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
288
289     REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
290
291     REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
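    # Illustrative only: these three patterns drive test renaming. For an
    # invented name u"tc01-64b-1c-ethip4-ip4base-mrr" with tag u"2T1C":
    #   REGEX_TC_NUMBER removes the leading u"tc01-",
    #   REGEX_TC_TAG matches the u"2T1C" tag, and
    #   REGEX_TC_NAME_NEW replaces u"-1c-" with u"-2t1c-" (see start_test()).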
292
293     REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
294
295     REGEX_SH_RUN_HOST = re.compile(
296         r'hostname=\"(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\",hook=\"(.*)\"'
297     )
298
299     def __init__(self, metadata, mapping, ignore, process_oper):
300         """Initialisation.
301
302         :param metadata: Key-value pairs to be included in "metadata" part of
303             JSON structure.
304         :param mapping: Mapping of the old names of test cases to the new
305             (actual) ones.
306         :param ignore: List of TCs to be ignored.
307         :param process_oper: If True, operational data (show run, telemetry) is
308             processed.
309         :type metadata: dict
310         :type mapping: dict
311         :type ignore: list
312         :type process_oper: bool
313         """
314
315         # Type of message to parse out from the test messages
316         self._msg_type = None
317
318         # VPP version
319         self._version = None
320
321         # Timestamp
322         self._timestamp = None
323
324         # Testbed. The testbed is identified by TG node IP address.
325         self._testbed = None
326
327         # Mapping of TCs long names
328         self._mapping = mapping
329
330         # Ignore list
331         self._ignore = ignore
332
333         self._process_oper = process_oper
334
335         # Number of PAPI History messages found:
336         # 0 - no message
337         # 1 - PAPI History of DUT1
338         # 2 - PAPI History of DUT2
339         self._conf_history_lookup_nr = 0
340
341         self._sh_run_counter = 0
342         self._telemetry_kw_counter = 0
343         self._telemetry_msg_counter = 0
344
345         # Test ID of the currently processed test - the lowercase full path
346         # to the test.
347         self._test_id = None
348
349         # The main data structure
350         self._data = {
351             u"metadata": OrderedDict(),
352             u"suites": OrderedDict(),
353             u"tests": OrderedDict()
354         }
355
356         # Save the provided metadata
357         for key, val in metadata.items():
358             self._data[u"metadata"][key] = val
359
360         # Dictionary defining the methods used to parse different types of
361         # messages
362         self.parse_msg = {
363             u"vpp-version": self._get_vpp_version,
364             u"dpdk-version": self._get_dpdk_version,
365             u"teardown-papi-history": self._get_papi_history,
366             u"test-show-runtime": self._get_show_run,
367             u"testbed": self._get_testbed,
368             u"test-telemetry": self._get_telemetry
369         }
370
371     @property
372     def data(self):
373         """Getter - Data parsed from the XML file.
374
375         :returns: Data parsed from the XML file.
376         :rtype: dict
377         """
378         return self._data
379
380     def _get_data_from_mrr_test_msg(self, msg):
381         """Get info from message of MRR performance tests.
382
383         :param msg: Message to be processed.
384         :type msg: str
385         :returns: Processed message or original message if a problem occurs.
386         :rtype: str
387         """
388
389         groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
390         if not groups or groups.lastindex != 1:
391             return u"Test Failed."
392
393         try:
394             data = groups.group(1).split(u", ")
395         except (AttributeError, IndexError, ValueError, KeyError):
396             return u"Test Failed."
397
398         out_str = u"["
399         try:
400             for item in data:
401                 out_str += f"{(float(item) / 1e6):.2f}, "
402             return out_str[:-2] + u"]"
403         except (AttributeError, IndexError, ValueError, KeyError):
404             return u"Test Failed."
405
406     def _get_data_from_cps_test_msg(self, msg):
407         """Get info from message of NDRPDR CPS tests.
408
409         :param msg: Message to be processed.
410         :type msg: str
411         :returns: Processed message or "Test Failed." if a problem occurs.
412         :rtype: str
413         """
414
415         groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
416         if not groups or groups.lastindex != 2:
417             return u"Test Failed."
418
419         try:
420             return (
421                 f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
422                 f"2. {(float(groups.group(2)) / 1e6):5.2f}"
423             )
424         except (AttributeError, IndexError, ValueError, KeyError):
425             return u"Test Failed."
426
427     def _get_data_from_pps_test_msg(self, msg):
428         """Get info from message of NDRPDR PPS tests.
429
430         :param msg: Message to be processed.
431         :type msg: str
432         :returns: Processed message or "Test Failed." if a problem occurs.
433         :rtype: str
434         """
435
436         groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
437         if not groups or groups.lastindex != 4:
438             return u"Test Failed."
439
440         try:
441             return (
442                 f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
443                 f"{float(groups.group(2)):5.2f}\n"
444                 f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
445                 f"{float(groups.group(4)):5.2f}"
446             )
447         except (AttributeError, IndexError, ValueError, KeyError):
448             return u"Test Failed."
449
450     def _get_data_from_perf_test_msg(self, msg):
451         """Get info from message of NDRPDR performance tests.
452
453         :param msg: Message to be processed.
454         :type msg: str
455         :returns: Processed message or "Test Failed." if a problem occurs.
456         :rtype: str
457         """
458
459         groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
460         if not groups or groups.lastindex != 10:
461             return u"Test Failed."
462
463         try:
464             data = {
465                 u"ndr_low": float(groups.group(1)),
466                 u"ndr_low_b": float(groups.group(2)),
467                 u"pdr_low": float(groups.group(3)),
468                 u"pdr_low_b": float(groups.group(4)),
469                 u"pdr_lat_90_1": groups.group(5),
470                 u"pdr_lat_90_2": groups.group(6),
471                 u"pdr_lat_50_1": groups.group(7),
472                 u"pdr_lat_50_2": groups.group(8),
473                 u"pdr_lat_10_1": groups.group(9),
474                 u"pdr_lat_10_2": groups.group(10),
475             }
476         except (AttributeError, IndexError, ValueError, KeyError):
477             return u"Test Failed."
478
479         def _process_lat(in_str_1, in_str_2):
480             """Extract P50, P90 and P99 latencies or min, avg, max values from
481             latency string.
482
483             :param in_str_1: Latency string for one direction produced by
484                 Robot Framework.
485             :param in_str_2: Latency string for the second direction
486                 produced by Robot Framework.
487             :type in_str_1: str
488             :type in_str_2: str
489             :returns: Processed latency string or None if a problem occurs.
490             :rtype: tuple
491             """
492             in_list_1 = in_str_1.split('/', 3)
493             in_list_2 = in_str_2.split('/', 3)
494
495             if len(in_list_1) != 4 and len(in_list_2) != 4:
496                 return None
497
498             in_list_1[3] += u"=" * ((4 - len(in_list_1[3]) % 4) % 4)
499             try:
500                 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
501             except hdrh.codec.HdrLengthException:
502                 hdr_lat_1 = None
503
504             in_list_2[3] += u"=" * ((4 - len(in_list_2[3]) % 4) % 4)
505             try:
506                 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
507             except hdrh.codec.HdrLengthException:
508                 hdr_lat_2 = None
509
510             if hdr_lat_1 and hdr_lat_2:
511                 hdr_lat = (
512                     hdr_lat_1.get_value_at_percentile(50.0),
513                     hdr_lat_1.get_value_at_percentile(90.0),
514                     hdr_lat_1.get_value_at_percentile(99.0),
515                     hdr_lat_2.get_value_at_percentile(50.0),
516                     hdr_lat_2.get_value_at_percentile(90.0),
517                     hdr_lat_2.get_value_at_percentile(99.0)
518                 )
519                 if all(hdr_lat):
520                     return hdr_lat
521
522             hdr_lat = (
523                 int(in_list_1[0]), int(in_list_1[1]), int(in_list_1[2]),
524                 int(in_list_2[0]), int(in_list_2[1]), int(in_list_2[2])
525             )
526             for item in hdr_lat:
527                 if item in (-1, 4294967295, 0):
528                     return None
529             return hdr_lat
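        # A hedged sketch (invented values): for inputs u"10/15/20/HISTFAA..."
        # and u"11/16/21/HISTFAA...", _process_lat() returns six hdrh
        # percentile values (P50/P90/P99 per direction) when the base64 hdrh
        # payload decodes (hence the padding to a multiple of four above),
        # otherwise it falls back to the plain min/avg/max tuple
        # (10, 15, 20, 11, 16, 21).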
530
531         try:
532             out_msg = (
533                 f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
534                 f"{data[u'ndr_low_b']:5.2f}"
535                 f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
536                 f"{data[u'pdr_low_b']:5.2f}"
537             )
538             latency = (
539                 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
540                 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
541                 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
542             )
543             if all(latency):
544                 max_len = len(str(max((max(item) for item in latency))))
545                 max_len = 4 if max_len < 4 else max_len
546
547                 for idx, lat in enumerate(latency):
548                     if not idx:
549                         out_msg += u"\n"
550                     out_msg += (
551                         f"\n{idx + 3}. "
552                         f"{lat[0]:{max_len}d} "
553                         f"{lat[1]:{max_len}d} "
554                         f"{lat[2]:{max_len}d}      "
555                         f"{lat[3]:{max_len}d} "
556                         f"{lat[4]:{max_len}d} "
557                         f"{lat[5]:{max_len}d} "
558                     )
559
560             return out_msg
561
562         except (AttributeError, IndexError, ValueError, KeyError):
563             return u"Test Failed."
564
565     def _get_testbed(self, msg):
566         """Called when extraction of testbed IP is required.
567         The testbed is identified by TG node IP address.
568
569         :param msg: Message to process.
570         :type msg: Message
571         :returns: Nothing.
572         """
573
574         if msg.message.count(u"Setup of TG node") or \
575                 msg.message.count(u"Setup of node TG host"):
576             reg_tg_ip = re.compile(
577                 r'.*TG .* (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*')
578             try:
579                 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
580             except (KeyError, ValueError, IndexError, AttributeError):
581                 pass
582             finally:
583                 self._data[u"metadata"][u"testbed"] = self._testbed
584                 self._msg_type = None
585
586     def _get_vpp_version(self, msg):
587         """Called when extraction of VPP version is required.
588
589         :param msg: Message to process.
590         :type msg: Message
591         :returns: Nothing.
592         """
593
594         if msg.message.count(u"VPP version:") or \
595                 msg.message.count(u"VPP Version:"):
596             self._version = str(
597                 re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
598             )
599             self._data[u"metadata"][u"version"] = self._version
600             self._msg_type = None
601             logging.info(self._version)
602
603     def _get_dpdk_version(self, msg):
604         """Called when extraction of DPDK version is required.
605
606         :param msg: Message to process.
607         :type msg: Message
608         :returns: Nothing.
609         """
610
611         if msg.message.count(u"DPDK Version:"):
612             try:
613                 self._version = str(re.search(
614                     self.REGEX_VERSION_DPDK, msg.message).group(2))
615                 self._data[u"metadata"][u"version"] = self._version
616             except IndexError:
617                 pass
618             finally:
619                 self._msg_type = None
620
621     def _get_papi_history(self, msg):
622         """Called when extraction of PAPI command history is required.
623
624         :param msg: Message to process.
625         :type msg: Message
626         :returns: Nothing.
627         """
628         if msg.message.count(u"PAPI command history:"):
629             self._conf_history_lookup_nr += 1
630             if self._conf_history_lookup_nr == 1:
631                 self._data[u"tests"][self._test_id][u"conf-history"] = str()
632             else:
633                 self._msg_type = None
634             text = re.sub(
635                 r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
636                 u"",
637                 msg.message,
638                 count=1
639             ).replace(u'"', u"'")
640             self._data[u"tests"][self._test_id][u"conf-history"] += (
641                 f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
642             )
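        # Illustrative only: a hypothetical message
        #     u'10.0.0.1 PAPI command history: sw_interface_dump(...)'
        # is stored under u"conf-history" as u"**DUT1:** ..." with the leading
        # address/header stripped and double quotes turned into single quotes.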
643
644     def _get_show_run(self, msg):
645         """Called when extraction of VPP operational data (output of CLI command
646         Show Runtime) is required.
647
648         :param msg: Message to process.
649         :type msg: Message
650         :returns: Nothing.
651         """
652
653         if not msg.message.count(u"stats runtime"):
654             return
655
656         # Temporary solution
657         if self._sh_run_counter > 1:
658             return
659
660         if u"show-run" not in self._data[u"tests"][self._test_id].keys():
661             self._data[u"tests"][self._test_id][u"show-run"] = dict()
662
663         groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
664         if not groups:
665             return
666         try:
667             host = groups.group(1)
668         except (AttributeError, IndexError):
669             host = u""
670         try:
671             sock = groups.group(2)
672         except (AttributeError, IndexError):
673             sock = u""
674
675         dut = u"dut{nr}".format(
676             nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
677
678         self._data[u'tests'][self._test_id][u'show-run'][dut] = \
679             copy.copy(
680                 {
681                     u"host": host,
682                     u"socket": sock,
683                     u"runtime": str(msg.message).replace(u' ', u'').
684                                 replace(u'\n', u'').replace(u"'", u'"').
685                                 replace(u'b"', u'"').replace(u'u"', u'"').
686                                 split(u":", 1)[1]
687                 }
688             )
689
690     def _get_telemetry(self, msg):
691         """Called when extraction of VPP telemetry data is required.
692
693         :param msg: Message to process.
694         :type msg: Message
695         :returns: Nothing.
696         """
697
698         if self._telemetry_kw_counter > 1:
699             return
700         if not msg.message.count(u"# TYPE vpp_runtime_calls"):
701             return
702
703         if u"telemetry-show-run" not in \
704                 self._data[u"tests"][self._test_id].keys():
705             self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()
706
707         self._telemetry_msg_counter += 1
708         groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
709         if not groups:
710             return
711         try:
712             host = groups.group(1)
713         except (AttributeError, IndexError):
714             host = u""
715         try:
716             sock = groups.group(2)
717         except (AttributeError, IndexError):
718             sock = u""
719         runtime = {
720             u"source_type": u"node",
721             u"source_id": host,
722             u"msg_type": u"metric",
723             u"log_level": u"INFO",
724             u"timestamp": msg.timestamp,
725             u"msg": u"show_runtime",
726             u"host": host,
727             u"socket": sock,
728             u"data": list()
729         }
730         for line in msg.message.splitlines():
731             if not line.startswith(u"vpp_runtime_"):
732                 continue
733             try:
734                 params, value, timestamp = line.rsplit(u" ", maxsplit=2)
735                 cut = params.index(u"{")
736                 name = params[:cut].split(u"_", maxsplit=2)[-1]
737                 labels = eval(
738                     u"dict" + params[cut:].replace('{', '(').replace('}', ')')
739                 )
740                 labels[u"graph_node"] = labels.pop(u"name")
741                 runtime[u"data"].append(
742                     {
743                         u"name": name,
744                         u"value": value,
745                         u"timestamp": timestamp,
746                         u"labels": labels
747                     }
748                 )
749             except (TypeError, ValueError, IndexError):
750                 continue
751         self._data[u'tests'][self._test_id][u'telemetry-show-run']\
752             [f"dut{self._telemetry_msg_counter}"] = copy.copy(
753                 {
754                     u"host": host,
755                     u"socket": sock,
756                     u"runtime": runtime
757                 }
758             )
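        # Illustrative only: an invented telemetry line such as
        #     vpp_runtime_calls{name="ip4-input",thread_id="1"} 42 1650000000
        # is split into params/value/timestamp above; the label block is
        # rewritten into a dict() call, so name becomes u"calls" and
        # labels[u"graph_node"] becomes u"ip4-input".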
759
760     def _get_ndrpdr_throughput(self, msg):
761         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
762         message.
763
764         :param msg: The test message to be parsed.
765         :type msg: str
766         :returns: Parsed data as a dict and the status (PASS/FAIL).
767         :rtype: tuple(dict, str)
768         """
769
770         throughput = {
771             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
772             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
773         }
774         status = u"FAIL"
775         groups = re.search(self.REGEX_NDRPDR_RATE, msg)
776
777         if groups is not None:
778             try:
779                 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
780                 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
781                 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
782                 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
783                 status = u"PASS"
784             except (IndexError, ValueError):
785                 pass
786
787         return throughput, status
788
789     def _get_ndrpdr_throughput_gbps(self, msg):
790         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
791         test message.
792
793         :param msg: The test message to be parsed.
794         :type msg: str
795         :returns: Parsed data as a dict and the status (PASS/FAIL).
796         :rtype: tuple(dict, str)
797         """
798
799         gbps = {
800             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
801             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
802         }
803         status = u"FAIL"
804         groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
805
806         if groups is not None:
807             try:
808                 gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
809                 gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
810                 gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
811                 gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
812                 status = u"PASS"
813             except (IndexError, ValueError):
814                 pass
815
816         return gbps, status
817
818     def _get_plr_throughput(self, msg):
819         """Get PLRsearch lower bound and PLRsearch upper bound from the test
820         message.
821
822         :param msg: The test message to be parsed.
823         :type msg: str
824         :returns: Parsed data as a dict and the status (PASS/FAIL).
825         :rtype: tuple(dict, str)
826         """
827
828         throughput = {
829             u"LOWER": -1.0,
830             u"UPPER": -1.0
831         }
832         status = u"FAIL"
833         groups = re.search(self.REGEX_PLR_RATE, msg)
834
835         if groups is not None:
836             try:
837                 throughput[u"LOWER"] = float(groups.group(1))
838                 throughput[u"UPPER"] = float(groups.group(2))
839                 status = u"PASS"
840             except (IndexError, ValueError):
841                 pass
842
843         return throughput, status
844
845     def _get_ndrpdr_latency(self, msg):
846         """Get LATENCY from the test message.
847
848         :param msg: The test message to be parsed.
849         :type msg: str
850         :returns: Parsed data as a dict and the status (PASS/FAIL).
851         :rtype: tuple(dict, str)
852         """
853         latency_default = {
854             u"min": -1.0,
855             u"avg": -1.0,
856             u"max": -1.0,
857             u"hdrh": u""
858         }
859         latency = {
860             u"NDR": {
861                 u"direction1": copy.copy(latency_default),
862                 u"direction2": copy.copy(latency_default)
863             },
864             u"PDR": {
865                 u"direction1": copy.copy(latency_default),
866                 u"direction2": copy.copy(latency_default)
867             },
868             u"LAT0": {
869                 u"direction1": copy.copy(latency_default),
870                 u"direction2": copy.copy(latency_default)
871             },
872             u"PDR10": {
873                 u"direction1": copy.copy(latency_default),
874                 u"direction2": copy.copy(latency_default)
875             },
876             u"PDR50": {
877                 u"direction1": copy.copy(latency_default),
878                 u"direction2": copy.copy(latency_default)
879             },
880             u"PDR90": {
881                 u"direction1": copy.copy(latency_default),
882                 u"direction2": copy.copy(latency_default)
883             },
884         }
885
886         groups = re.search(self.REGEX_NDRPDR_LAT, msg)
887         if groups is None:
888             groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
889         if groups is None:
890             return latency, u"FAIL"
891
892         def process_latency(in_str):
893             """Return object with parsed latency values.
894
895             TODO: Define class for the return type.
896
897             :param in_str: Input string, min/avg/max/hdrh format.
898             :type in_str: str
899             :returns: Dict with corresponding keys, except hdrh float values.
900             :rtype: dict
901             :throws IndexError: If in_str does not have enough substrings.
902             :throws ValueError: If a substring does not convert to float.
903             """
904             in_list = in_str.split('/', 3)
905
906             rval = {
907                 u"min": float(in_list[0]),
908                 u"avg": float(in_list[1]),
909                 u"max": float(in_list[2]),
910                 u"hdrh": u""
911             }
912
913             if len(in_list) == 4:
914                 rval[u"hdrh"] = str(in_list[3])
915
916             return rval
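        # Illustrative only: process_latency(u"1/5/10/HISTFAA...") returns
        # {u"min": 1.0, u"avg": 5.0, u"max": 10.0, u"hdrh": u"HISTFAA..."};
        # with only u"1/5/10" the u"hdrh" value stays empty.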
917
918         try:
919             latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
920             latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
921             latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
922             latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
923             if groups.lastindex == 4:
924                 return latency, u"PASS"
925         except (IndexError, ValueError):
926             pass
927
928         try:
929             latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
930             latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
931             latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
932             latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
933             latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
934             latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
935             latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
936             latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
937             if groups.lastindex == 12:
938                 return latency, u"PASS"
939         except (IndexError, ValueError):
940             pass
941
942         return latency, u"FAIL"
943
944     @staticmethod
945     def _get_hoststack_data(msg, tags):
946         """Get data from the hoststack test message.
947
948         :param msg: The test message to be parsed.
949         :param tags: Test tags.
950         :type msg: str
951         :type tags: list
952         :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
953         :rtype: tuple(dict, str)
954         """
955         result = dict()
956         status = u"FAIL"
957
958         msg = msg.replace(u"'", u'"').replace(u" ", u"")
959         if u"LDPRELOAD" in tags:
960             try:
961                 result = loads(msg)
962                 status = u"PASS"
963             except JSONDecodeError:
964                 pass
965         elif u"VPPECHO" in tags:
966             try:
967                 msg_lst = msg.replace(u"}{", u"} {").split(u" ")
968                 result = dict(
969                     client=loads(msg_lst[0]),
970                     server=loads(msg_lst[1])
971                 )
972                 status = u"PASS"
973             except (JSONDecodeError, IndexError):
974                 pass
975
976         return result, status
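        # Illustrative only: for VPPECHO the client and server JSON objects
        # arrive concatenated; an invented message
        # u'{"time":"1.1"}{"time":"1.2"}' is split on u"}{" above and each
        # half is parsed by loads() separately.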
977
978     def _get_vsap_data(self, msg, tags):
979         """Get data from the vsap test message.
980
981         :param msg: The test message to be parsed.
982         :param tags: Test tags.
983         :type msg: str
984         :type tags: list
985         :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
986         :rtype: tuple(dict, str)
987         """
988         result = dict()
989         status = u"FAIL"
990
991         groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
992         if groups is not None:
993             try:
994                 result[u"transfer-rate"] = float(groups.group(1)) * 1e3
995                 result[u"latency"] = float(groups.group(2))
996                 result[u"completed-requests"] = int(groups.group(3))
997                 result[u"failed-requests"] = int(groups.group(4))
998                 result[u"bytes-transferred"] = int(groups.group(5))
999                 if u"TCP_CPS"in tags:
1000                     result[u"cps"] = float(groups.group(6))
1001                 elif u"TCP_RPS" in tags:
1002                     result[u"rps"] = float(groups.group(6))
1003                 else:
1004                     return result, status
1005                 status = u"PASS"
1006             except (IndexError, ValueError):
1007                 pass
1008
1009         return result, status
1010
1011     def visit_suite(self, suite):
1012         """Implements traversing through the suite and its direct children.
1013
1014         :param suite: Suite to process.
1015         :type suite: Suite
1016         :returns: Nothing.
1017         """
1018         if self.start_suite(suite) is not False:
1019             suite.suites.visit(self)
1020             suite.tests.visit(self)
1021             self.end_suite(suite)
1022
1023     def start_suite(self, suite):
1024         """Called when suite starts.
1025
1026         :param suite: Suite to process.
1027         :type suite: Suite
1028         :returns: Nothing.
1029         """
1030
1031         try:
1032             parent_name = suite.parent.name
1033         except AttributeError:
1034             return
1035
1036         self._data[u"suites"][suite.longname.lower().
1037                               replace(u'"', u"'").
1038                               replace(u" ", u"_")] = {
1039                                   u"name": suite.name.lower(),
1040                                   u"doc": suite.doc,
1041                                   u"parent": parent_name,
1042                                   u"level": len(suite.longname.split(u"."))
1043                               }
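        # Illustrative only: a hypothetical suite longname
        # u'Tests.Vpp.Perf.Ip4 "Base"' is stored under the key
        # u"tests.vpp.perf.ip4_'base'" (lowercased, double quotes turned into
        # single quotes, spaces into underscores), with level 4.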
1044
1045         suite.setup.visit(self)
1046         suite.teardown.visit(self)
1047
1048     def end_suite(self, suite):
1049         """Called when suite ends.
1050
1051         :param suite: Suite to process.
1052         :type suite: Suite
1053         :returns: Nothing.
1054         """
1055
1056     def visit_test(self, test):
1057         """Implements traversing through the test.
1058
1059         :param test: Test to process.
1060         :type test: Test
1061         :returns: Nothing.
1062         """
1063         if self.start_test(test) is not False:
1064             test.setup.visit(self)
1065             test.body.visit(self)
1066             test.teardown.visit(self)
1067             self.end_test(test)
1068
1069     def start_test(self, test):
1070         """Called when test starts.
1071
1072         :param test: Test to process.
1073         :type test: Test
1074         :returns: Nothing.
1075         """
1076
1077         self._sh_run_counter = 0
1078         self._telemetry_kw_counter = 0
1079         self._telemetry_msg_counter = 0
1080
1081         longname_orig = test.longname.lower()
1082
1083         # Check the ignore list
1084         if longname_orig in self._ignore:
1085             return
1086
1087         tags = [str(tag) for tag in test.tags]
1088         test_result = dict()
1089
1090         # Change the TC long name and name if defined in the mapping table
1091         longname = self._mapping.get(longname_orig, None)
1092         if longname is not None:
1093             name = longname.split(u'.')[-1]
1094             logging.debug(
1095                 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1096                 f"{name}"
1097             )
1098         else:
1099             longname = longname_orig
1100             name = test.name.lower()
1101
1102         # Remove TC number from the TC long name (backward compatibility):
1103         self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
1104         # Remove TC number from the TC name (not needed):
1105         test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
1106
1107         test_result[u"parent"] = test.parent.name.lower()
1108         test_result[u"tags"] = tags
1109         test_result["doc"] = test.doc
1110         test_result[u"type"] = u""
1111         test_result[u"status"] = test.status
1112         test_result[u"starttime"] = test.starttime
1113         test_result[u"endtime"] = test.endtime
1114
1115         if test.status == u"PASS":
1116             if u"NDRPDR" in tags:
1117                 if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
1118                     test_result[u"msg"] = self._get_data_from_pps_test_msg(
1119                         test.message)
1120                 elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1121                     test_result[u"msg"] = self._get_data_from_cps_test_msg(
1122                         test.message)
1123                 else:
1124                     test_result[u"msg"] = self._get_data_from_perf_test_msg(
1125                         test.message)
1126             elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1127                 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1128                     test.message)
1129             else:
1130                 test_result[u"msg"] = test.message
1131         else:
1132             test_result[u"msg"] = test.message
1133
1134         if u"PERFTEST" in tags and u"TREX" not in tags:
1135             # Replace info about cores (e.g. -1c-) with the info about threads
1136             # and cores (e.g. -1t1c-) in the long test case names and in the
1137             # test case names if necessary.
1138             tag_count = 0
1139             tag_tc = str()
1140             for tag in test_result[u"tags"]:
1141                 groups = re.search(self.REGEX_TC_TAG, tag)
1142                 if groups:
1143                     tag_count += 1
1144                     tag_tc = tag
1145
1146             if tag_count == 1:
1147                 self._test_id = re.sub(
1148                     self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1149                     self._test_id, count=1
1150                 )
1151                 test_result[u"name"] = re.sub(
1152                     self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1153                     test_result["name"], count=1
1154                 )
1155             else:
1156                 test_result[u"status"] = u"FAIL"
1157                 self._data[u"tests"][self._test_id] = test_result
1158                 logging.debug(
1159                     f"The test {self._test_id} has no or more than one "
1160                     f"multi-threading tags.\n"
1161                     f"Tags: {test_result[u'tags']}"
1162                 )
1163                 return
1164
1165         if u"DEVICETEST" in tags:
1166             test_result[u"type"] = u"DEVICETEST"
1167         elif u"NDRPDR" in tags:
1168             if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1169                 test_result[u"type"] = u"CPS"
1170             else:
1171                 test_result[u"type"] = u"NDRPDR"
1172             if test.status == u"PASS":
1173                 test_result[u"throughput"], test_result[u"status"] = \
1174                     self._get_ndrpdr_throughput(test.message)
1175                 test_result[u"gbps"], test_result[u"status"] = \
1176                     self._get_ndrpdr_throughput_gbps(test.message)
1177                 test_result[u"latency"], test_result[u"status"] = \
1178                     self._get_ndrpdr_latency(test.message)
1179         elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1180             if u"MRR" in tags:
1181                 test_result[u"type"] = u"MRR"
1182             else:
1183                 test_result[u"type"] = u"BMRR"
1184             if test.status == u"PASS":
1185                 test_result[u"result"] = dict()
1186                 groups = re.search(self.REGEX_BMRR, test.message)
1187                 if groups is not None:
1188                     items_str = groups.group(1)
1189                     items_float = [
1190                         float(item.strip().replace(u"'", u""))
1191                         for item in items_str.split(",")
1192                     ]
1193                     # Use whole list in CSIT-1180.
1194                     stats = jumpavg.AvgStdevStats.for_runs(items_float)
1195                     test_result[u"result"][u"samples"] = items_float
1196                     test_result[u"result"][u"receive-rate"] = stats.avg
1197                     test_result[u"result"][u"receive-stdev"] = stats.stdev
1198                 else:
1199                     groups = re.search(self.REGEX_MRR, test.message)
1200                     test_result[u"result"][u"receive-rate"] = \
1201                         float(groups.group(3)) / float(groups.group(1))
1202         elif u"SOAK" in tags:
1203             test_result[u"type"] = u"SOAK"
1204             if test.status == u"PASS":
1205                 test_result[u"throughput"], test_result[u"status"] = \
1206                     self._get_plr_throughput(test.message)
1207         elif u"LDP_NGINX" in tags:
1208             test_result[u"type"] = u"LDP_NGINX"
1209             test_result[u"result"], test_result[u"status"] = \
1210                 self._get_vsap_data(test.message, tags)
1211         elif u"HOSTSTACK" in tags:
1212             test_result[u"type"] = u"HOSTSTACK"
1213             if test.status == u"PASS":
1214                 test_result[u"result"], test_result[u"status"] = \
1215                     self._get_hoststack_data(test.message, tags)
1216         elif u"RECONF" in tags:
1217             test_result[u"type"] = u"RECONF"
1218             if test.status == u"PASS":
1219                 test_result[u"result"] = None
1220                 try:
1221                     grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1222                     grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1223                     test_result[u"result"] = {
1224                         u"loss": int(grps_loss.group(1)),
1225                         u"time": float(grps_time.group(1))
1226                     }
1227                 except (AttributeError, IndexError, ValueError, TypeError):
1228                     test_result[u"status"] = u"FAIL"
1229         else:
1230             test_result[u"status"] = u"FAIL"
1231
1232         self._data[u"tests"][self._test_id] = test_result
1233
1234     def end_test(self, test):
1235         """Called when test ends.
1236
1237         :param test: Test to process.
1238         :type test: Test
1239         :returns: Nothing.
1240         """
1241
1242     def visit_keyword(self, keyword):
1243         """Implements traversing through the keyword and its child keywords.
1244
1245         :param keyword: Keyword to process.
1246         :type keyword: Keyword
1247         :returns: Nothing.
1248         """
1249         if self.start_keyword(keyword) is not False:
1250             self.end_keyword(keyword)
1251
1252     def start_keyword(self, keyword):
1253         """Called when keyword starts. Default implementation does nothing.
1254
1255         :param keyword: Keyword to process.
1256         :type keyword: Keyword
1257         :returns: Nothing.
1258         """
1259         try:
1260             if keyword.type in ("setup", "SETUP"):
1261                 self.visit_setup_kw(keyword)
1262             elif keyword.type in ("teardown", "TEARDOWN"):
1263                 self.visit_teardown_kw(keyword)
1264             else:
1265                 self.visit_test_kw(keyword)
1266         except AttributeError:
1267             pass
1268
1269     def end_keyword(self, keyword):
1270         """Called when keyword ends. Default implementation does nothing.
1271
1272         :param keyword: Keyword to process.
1273         :type keyword: Keyword
1274         :returns: Nothing.
1275         """
1276
1277     def visit_test_kw(self, test_kw):
1278         """Implements traversing through the test keyword and its child
1279         keywords.
1280
1281         :param test_kw: Keyword to process.
1282         :type test_kw: Keyword
1283         :returns: Nothing.
1284         """
1285         for keyword in test_kw.body:
1286             if self.start_test_kw(keyword) is not False:
1287                 self.visit_test_kw(keyword)
1288                 self.end_test_kw(keyword)
1289
1290     def start_test_kw(self, test_kw):
1291         """Called when test keyword starts. Default implementation does
1292         nothing.
1293
1294         :param test_kw: Keyword to process.
1295         :type test_kw: Keyword
1296         :returns: Nothing.
1297         """
1298         if not self._process_oper:
1299             return
1300
1301         if test_kw.name.count(u"Run Telemetry On All Duts"):
1302             self._msg_type = u"test-telemetry"
1303             self._telemetry_kw_counter += 1
1304         elif test_kw.name.count(u"Show Runtime On All Duts"):
1305             self._msg_type = u"test-show-runtime"
1306             self._sh_run_counter += 1
1307         else:
1308             return
1309         test_kw.messages.visit(self)
1310
1311     def end_test_kw(self, test_kw):
1312         """Called when keyword ends. Default implementation does nothing.
1313
1314         :param test_kw: Keyword to process.
1315         :type test_kw: Keyword
1316         :returns: Nothing.
1317         """
1318
1319     def visit_setup_kw(self, setup_kw):
1320         """Implements traversing through the teardown keyword and its child
1321         keywords.
1322
1323         :param setup_kw: Keyword to process.
1324         :type setup_kw: Keyword
1325         :returns: Nothing.
1326         """
1327         for keyword in setup_kw.setup:
1328             if self.start_setup_kw(keyword) is not False:
1329                 self.visit_setup_kw(keyword)
1330                 self.end_setup_kw(keyword)
1331         for keyword in setup_kw.body:
1332             if self.start_setup_kw(keyword) is not False:
1333                 self.visit_setup_kw(keyword)
1334                 self.end_setup_kw(keyword)
1335
1336     def start_setup_kw(self, setup_kw):
1337         """Called when teardown keyword starts. Default implementation does
1338         nothing.
1339
1340         :param setup_kw: Keyword to process.
1341         :type setup_kw: Keyword
1342         :returns: Nothing.
1343         """
1344         if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1345                 and not self._version:
1346             self._msg_type = u"vpp-version"
1347         elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
1348                 not self._version:
1349             self._msg_type = u"dpdk-version"
1350         elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1351             self._msg_type = u"testbed"
1352         else:
1353             return
1354         setup_kw.messages.visit(self)
1355
1356     def end_setup_kw(self, setup_kw):
1357         """Called when keyword ends. Default implementation does nothing.
1358
1359         :param setup_kw: Keyword to process.
1360         :type setup_kw: Keyword
1361         :returns: Nothing.
1362         """
1363
1364     def visit_teardown_kw(self, teardown_kw):
1365         """Implements traversing through the teardown keyword and its child
1366         keywords.
1367
1368         :param teardown_kw: Keyword to process.
1369         :type teardown_kw: Keyword
1370         :returns: Nothing.
1371         """
1372         for keyword in teardown_kw.body:
1373             if self.start_teardown_kw(keyword) is not False:
1374                 self.visit_teardown_kw(keyword)
1375                 self.end_teardown_kw(keyword)
1376
1377     def start_teardown_kw(self, teardown_kw):
1378         """Called when teardown keyword starts
1379
1380         :param teardown_kw: Keyword to process.
1381         :type teardown_kw: Keyword
1382         :returns: Nothing.
1383         """
1384         if teardown_kw.name.count(u"Show Papi History On All Duts"):
1385             self._conf_history_lookup_nr = 0
1386             self._msg_type = u"teardown-papi-history"
1387             teardown_kw.messages.visit(self)
1388
1389     def end_teardown_kw(self, teardown_kw):
1390         """Called when keyword ends. Default implementation does nothing.
1391
1392         :param teardown_kw: Keyword to process.
1393         :type teardown_kw: Keyword
1394         :returns: Nothing.
1395         """
1396
1397     def visit_message(self, msg):
1398         """Implements visiting the message.
1399
1400         :param msg: Message to process.
1401         :type msg: Message
1402         :returns: Nothing.
1403         """
1404         if self.start_message(msg) is not False:
1405             self.end_message(msg)
1406
1407     def start_message(self, msg):
1408         """Called when message starts. Get required information from messages:
1409         - VPP version.
1410
1411         :param msg: Message to process.
1412         :type msg: Message
1413         :returns: Nothing.
1414         """
1415         if self._msg_type:
1416             self.parse_msg[self._msg_type](msg)
1417
1418     def end_message(self, msg):
1419         """Called when message ends. Default implementation does nothing.
1420
1421         :param msg: Message to process.
1422         :type msg: Message
1423         :returns: Nothing.
1424         """
1425
1426
1427 class InputData:
1428     """Input data
1429
1430     The data is extracted from output.xml files generated by Jenkins jobs and
1431     stored in pandas' DataFrames.
1432
1433     The data structure:
1434     - job name
1435       - build number
1436         - metadata
1437           (as described in ExecutionChecker documentation)
1438         - suites
1439           (as described in ExecutionChecker documentation)
1440         - tests
1441           (as described in ExecutionChecker documentation)
1442     """
1443
1444     def __init__(self, spec, for_output):
1445         """Initialization.
1446
1447         :param spec: Specification.
1448         :param for_output: Output to be generated from downloaded data.
1449         :type spec: Specification
1450         :type for_output: str
1451         """
1452
1453         # Specification:
1454         self._cfg = spec
1455
1456         self._for_output = for_output
1457
1458         # Data store:
1459         self._input_data = pd.Series(dtype="float64")
1460
1461     @property
1462     def data(self):
1463         """Getter - Input data.
1464
1465         :returns: Input data
1466         :rtype: pandas.Series
1467         """
1468         return self._input_data
1469
1470     def metadata(self, job, build):
1471         """Getter - metadata
1472
1473         :param job: Job whose metadata we want.
1474         :param build: Build whose metadata we want.
1475         :type job: str
1476         :type build: str
1477         :returns: Metadata
1478         :rtype: pandas.Series
1479         """
1480         return self.data[job][build][u"metadata"]
1481
1482     def suites(self, job, build):
1483         """Getter - suites
1484
1485         :param job: Job whose suites we want.
1486         :param build: Build whose suites we want.
1487         :type job: str
1488         :type build: str
1489         :returns: Suites.
1490         :rtype: pandas.Series
1491         """
1492         return self.data[job][str(build)][u"suites"]
1493
1494     def tests(self, job, build):
1495         """Getter - tests.
1496
1497         :param job: Job whose tests we want.
1498         :param build: Build whose tests we want.
1499         :type job: str
1500         :type build: str
1501         :returns: Tests.
1502         :rtype: pandas.Series
1503         """
1504         return self.data[job][str(build)][u"tests"]
1505
1506     def _parse_tests(self, job, build):
1507         """Process data from a robot output.xml file and return JSON-structured
1508         data.
1509
1510         :param job: Name of the job whose build output data will be processed.
1511         :param build: The build whose output data will be processed.
1512         :type job: str
1513         :type build: dict
1514         :returns: JSON data structure.
1515         :rtype: dict
1516         """
1517
1518         metadata = {
1519             u"job": job,
1520             u"build": build
1521         }
1522
1523         with open(build[u"file-name"], u'r') as data_file:
1524             try:
1525                 result = ExecutionResult(data_file)
1526             except errors.DataError as err:
1527                 logging.error(
1528                     f"Error occurred while parsing output.xml: {repr(err)}"
1529                 )
1530                 return None
1531
1532         process_oper = False
1533         if u"-vpp-perf-report-coverage-" in job:
1534             process_oper = True
1535         # elif u"-vpp-perf-report-iterative-" in job:
1536         #     # Exceptions for TBs where we do not have coverage data:
1537         #     for item in (u"-2n-icx", ):
1538         #         if item in job:
1539         #             process_oper = True
1540         #             break
1541         checker = ExecutionChecker(
1542             metadata, self._cfg.mapping, self._cfg.ignore, process_oper
1543         )
1544         result.visit(checker)
1545
1546         checker.data[u"metadata"][u"tests_total"] = \
1547             result.statistics.total.total
1548         checker.data[u"metadata"][u"tests_passed"] = \
1549             result.statistics.total.passed
1550         checker.data[u"metadata"][u"tests_failed"] = \
1551             result.statistics.total.failed
1552         checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
1553         checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]
1554
1555         return checker.data
1556
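    # A standalone sketch of the robot.api calls used above (illustrative;
    # the empty mapping/ignore arguments are assumptions):
    #
    #     from robot.api import ExecutionResult
    #     result = ExecutionResult(u"output.xml")
    #     checker = ExecutionChecker(
    #         {u"job": u"local", u"build": {u"build": 1}}, dict(), list(), False
    #     )
    #     result.visit(checker)
    #     print(checker.data[u"metadata"])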
1557     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1558         """Download and parse the input data file.
1559
1560         :param job: Name of the Jenkins job which generated the processed input
1561             file.
1562         :param build: Information about the Jenkins build which generated the
1563             processed input file.
1564         :param repeat: Retry the download the specified number of times if it
1565             is not successful.
1566         :param pid: PID of the process executing this method.
1567         :type job: str
1568         :type build: dict
1569         :type repeat: int
1570         :type pid: int
1571         """
1572
1573         logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1574
1575         state = u"failed"
1576         success = False
1577         data = None
1578         do_repeat = repeat
1579         while do_repeat:
1580             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1581             if success:
1582                 break
1583             do_repeat -= 1
1584         if not success:
1585             logging.error(
1586                 f"Failed to download the input data file from the job "
1587                 f"{job}, build {build[u'build']}, or the file is damaged. "
1588                 u"Skipped."
1589             )
1590         if success:
1591             logging.info(f"  Processing data from build {build[u'build']}")
1592             data = self._parse_tests(job, build)
1593             if data is None:
1594                 logging.error(
1595                     f"Input data file from the job {job}, build "
1596                     f"{build[u'build']} is damaged. Skipped."
1597                 )
1598             else:
1599                 state = u"processed"
1600
1601             try:
1602                 remove(build[u"file-name"])
1603             except OSError as err:
1604                 logging.error(
1605                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1606                 )
1607
1608         # If the time-period is defined in the specification file, remove all
1609         # files which are outside the time period.
1610         is_last = False
1611         timeperiod = self._cfg.environment.get(u"time-period", None)
1612         if timeperiod and data:
1613             now = dt.utcnow()
1614             timeperiod = timedelta(int(timeperiod))
1615             metadata = data.get(u"metadata", None)
1616             if metadata:
1617                 generated = metadata.get(u"generated", None)
1618                 if generated:
1619                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1620                     if (now - generated) > timeperiod:
1621                         # Remove the data and the file:
1622                         state = u"removed"
1623                         data = None
1624                         is_last = True
1625                         logging.info(
1626                             f"  The build {job}/{build[u'build']} is "
1627                             f"outdated, will be removed."
1628                         )
1629         return {
1630             u"data": data,
1631             u"state": state,
1632             u"job": job,
1633             u"build": build,
1634             u"last": is_last
1635         }
1636
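    # Example of the time-period pruning above (illustrative): with
    #
    #     environment:
    #       time-period: 90
    #
    # in the specification, timedelta(int(timeperiod)) equals 90 days, so a
    # build whose metadata reports generated = "20220101 12:00" is marked
    # "removed" once more than 90 days have passed since that timestamp.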
1637     def download_and_parse_data(self, repeat=1):
1638         """Download the input data files, parse input data from input files and
1639         store in pandas' Series.
1640
1641         :param repeat: Retry the download the specified number of times if it
1642             is not successful.
1643         :type repeat: int
1644         """
1645
1646         logging.info(u"Downloading and parsing input files ...")
1647
1648         for job, builds in self._cfg.input.items():
1649             for build in builds:
1650
1651                 result = self._download_and_parse_build(job, build, repeat)
1652                 if result[u"last"]:
1653                     break
1654                 build_nr = result[u"build"][u"build"]
1655
1656                 if result[u"data"]:
1657                     data = result[u"data"]
1658                     build_data = pd.Series({
1659                         u"metadata": pd.Series(
1660                             list(data[u"metadata"].values()),
1661                             index=list(data[u"metadata"].keys())
1662                         ),
1663                         u"suites": pd.Series(
1664                             list(data[u"suites"].values()),
1665                             index=list(data[u"suites"].keys())
1666                         ),
1667                         u"tests": pd.Series(
1668                             list(data[u"tests"].values()),
1669                             index=list(data[u"tests"].keys())
1670                         )
1671                     })
1672
1673                     if self._input_data.get(job, None) is None:
1674                         self._input_data[job] = pd.Series(dtype="float64")
1675                     self._input_data[job][str(build_nr)] = build_data
1676                     self._cfg.set_input_file_name(
1677                         job, build_nr, result[u"build"][u"file-name"]
1678                     )
1679                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1680
1681                 mem_alloc = \
1682                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1683                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1684
1685         logging.info(u"Done.")
1686
1687         msg = u"Successful downloads from the sources:\n"
1688         for source in self._cfg.environment[u"data-sources"]:
1689             if source[u"successful-downloads"]:
1690                 msg += (
1691                     f"{source[u'url']}/{source[u'path']}/"
1692                     f"{source[u'file-name']}: "
1693                     f"{source[u'successful-downloads']}\n"
1694                 )
1695         logging.info(msg)
1696
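    # The parsed data form a three-level pandas.Series; a typical read access
    # looks like this (illustrative job and build values):
    #
    #     input_data = InputData(spec, u"report")
    #     input_data.download_and_parse_data(repeat=2)
    #     tests = input_data.tests(u"csit-vpp-perf-report-iterative", u"55")
    #     for test_id, test in tests.items():
    #         print(test_id, test.get(u"status"))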
1697     def process_local_file(self, local_file, job=u"local", build_nr=1,
1698                            replace=True):
1699         """Process a local XML file given as a command-line parameter.
1700
1701         :param local_file: The file to process.
1702         :param job: Job name.
1703         :param build_nr: Build number.
1704         :param replace: If True, the information about jobs and builds is
1705             replaced by the new one; otherwise, the new jobs and builds are
1706             added.
1707         :type local_file: str
1708         :type job: str
1709         :type build_nr: int
1710         :type replace: bool
1711         :raises PresentationError: If an error occurs.
1712         """
1713         if not isfile(local_file):
1714             raise PresentationError(f"The file {local_file} does not exist.")
1715
1716         try:
1717             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1718         except (IndexError, ValueError):
1719             pass
1720
1721         build = {
1722             u"build": build_nr,
1723             u"status": u"failed",
1724             u"file-name": local_file
1725         }
1726         if replace:
1727             self._cfg.input = dict()
1728         self._cfg.add_build(job, build)
1729
1730         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1731         data = self._parse_tests(job, build)
1732         if data is None:
1733             raise PresentationError(
1734                 f"Error occurred while parsing the file {local_file}"
1735             )
1736
1737         build_data = pd.Series({
1738             u"metadata": pd.Series(
1739                 list(data[u"metadata"].values()),
1740                 index=list(data[u"metadata"].keys())
1741             ),
1742             u"suites": pd.Series(
1743                 list(data[u"suites"].values()),
1744                 index=list(data[u"suites"].keys())
1745             ),
1746             u"tests": pd.Series(
1747                 list(data[u"tests"].values()),
1748                 index=list(data[u"tests"].keys())
1749             )
1750         })
1751
1752         if self._input_data.get(job, None) is None:
1753             self._input_data[job] = pd.Series(dtype="float64")
1754         self._input_data[job][str(build_nr)] = build_data
1755
1756         self._cfg.set_input_state(job, build_nr, u"processed")
1757
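    # Illustrative usage (the paths are examples): a file whose name is a
    # number yields that build number, any other name keeps the default:
    #
    #     input_data.process_local_file(u"/tmp/12.xml")      # build_nr == 12
    #     input_data.process_local_file(u"/tmp/output.xml")  # build_nr == 1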
1758     def process_local_directory(self, local_dir, replace=True):
1759         """Process a local directory with XML file(s). The directory is
1760         processed as a 'job' and the XML files in it as its builds.
1761         If the given directory contains only sub-directories, these
1762         sub-directories are processed as jobs and the corresponding XML files
1763         as builds of their jobs.
1764
1765         :param local_dir: Local directory to process.
1766         :param replace: If True, the information about jobs and builds is
1767             replaced by the new one; otherwise, the new jobs and builds are
1768             added.
1769         :type local_dir: str
1770         :type replace: bool
1771         """
1772         if not isdir(local_dir):
1773             raise PresentationError(
1774                 f"The directory {local_dir} does not exist."
1775             )
1776
1777         # Check if the given directory includes only files, or only directories
1778         _, dirnames, filenames = next(walk(local_dir))
1779
1780         if filenames and not dirnames:
1781             filenames.sort()
1782             # local_builds:
1783             # key: dir (job) name, value: list of file names (builds)
1784             local_builds = {
1785                 local_dir: [join(local_dir, name) for name in filenames]
1786             }
1787
1788         elif dirnames and not filenames:
1789             dirnames.sort()
1790             # local_builds:
1791             # key: dir (job) name, value: list of file names (builds)
1792             local_builds = dict()
1793             for dirname in dirnames:
1794                 builds = [
1795                     join(local_dir, dirname, name)
1796                     for name in listdir(join(local_dir, dirname))
1797                     if isfile(join(local_dir, dirname, name))
1798                 ]
1799                 if builds:
1800                     local_builds[dirname] = sorted(builds)
1801
1802         elif not filenames and not dirnames:
1803             raise PresentationError(f"The directory {local_dir} is empty.")
1804         else:
1805             raise PresentationError(
1806                 f"The directory {local_dir} can include only files or only "
1807                 f"directories, not both.\nThe directory {local_dir} includes "
1808                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1809             )
1810
1811         if replace:
1812             self._cfg.input = dict()
1813
1814         for job, files in local_builds.items():
1815             for idx, local_file in enumerate(files):
1816                 self.process_local_file(local_file, job, idx + 1, replace=False)
1817
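    # The two accepted directory layouts (illustrative):
    #
    #     flat/           nested/
    #         1.xml           job_a/
    #         2.xml               1.xml
    #                         job_b/
    #                             1.xml
    #
    # "flat" becomes a single job with builds 1 and 2; "nested" becomes jobs
    # job_a and job_b, each with its own builds numbered from 1.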
1818     @staticmethod
1819     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1820         """Return the index of the character in the string which ends the tag.
1821
1822         :param tag_filter: The string in which the end of the tag is searched.
1823         :param start: The index where the searching is started.
1824         :param closer: The character which closes the tag.
1825         :type tag_filter: str
1826         :type start: int
1827         :type closer: str
1828         :returns: The index of the tag closer, or None if not found.
1829         :rtype: int or None
1830         """
1831         try:
1832             idx_opener = tag_filter.index(closer, start)
1833             return tag_filter.index(closer, idx_opener + 1)
1834         except ValueError:
1835             return None
1836
1837     @staticmethod
1838     def _condition(tag_filter):
1839         """Create a conditional statement from the given tag filter.
1840
1841         :param tag_filter: Filter based on tags from the element specification.
1842         :type tag_filter: str
1843         :returns: Conditional statement which can be evaluated.
1844         :rtype: str
1845         """
1846         index = 0
1847         while True:
1848             index = InputData._end_of_tag(tag_filter, index)
1849             if index is None:
1850                 return tag_filter
1851             index += 1
1852             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1853
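    # Example of the expansion performed by _end_of_tag and _condition
    # (illustrative):
    #
    #     InputData._condition(u"'NDRPDR' and ('4T2C' or '8T4C')")
    #     # -> u"'NDRPDR' in tags and ('4T2C' in tags or '8T4C' in tags)"
    #
    # filter_data() later eval()-uates this string with the test's tags bound
    # to the name "tags".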
1854     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1855                     continue_on_error=False):
1856         """Filter required data from the given jobs and builds.
1857
1858         The output data structure is:
1859         - job 1
1860           - build 1
1861             - test (or suite) 1 ID:
1862               - param 1
1863               - param 2
1864               ...
1865               - param n
1866             ...
1867             - test (or suite) n ID:
1868             ...
1869           ...
1870           - build n
1871         ...
1872         - job n
1873
1874         :param element: Element which will use the filtered data.
1875         :param params: Parameters which will be included in the output. If None,
1876             all parameters are included.
1877         :param data: If not None, this data is used instead of data specified
1878             in the element.
1879         :param data_set: The set of data to be filtered: tests, suites,
1880             metadata.
1881         :param continue_on_error: Continue if there is an error while reading
1882             the data. The item will be empty then.
1883         :type element: pandas.Series
1884         :type params: list
1885         :type data: dict
1886         :type data_set: str
1887         :type continue_on_error: bool
1888         :returns: Filtered data.
1889         :rtype: pandas.Series
1890         """
1891
1892         try:
1893             if data_set == u"suites":
1894                 cond = u"True"
1895             elif element[u"filter"] in (u"all", u"template"):
1896                 cond = u"True"
1897             else:
1898                 cond = InputData._condition(element[u"filter"])
1899             logging.debug(f"   Filter: {cond}")
1900         except KeyError:
1901             logging.error(u"  No filter defined.")
1902             return None
1903
1904         if params is None:
1905             params = element.get(u"parameters", None)
1906             if params:
1907                 params.extend((u"type", u"status"))
1908
1909         data_to_filter = data if data else element[u"data"]
1910         data = pd.Series(dtype="float64")
1911         try:
1912             for job, builds in data_to_filter.items():
1913                 data[job] = pd.Series(dtype="float64")
1914                 for build in builds:
1915                     data[job][str(build)] = pd.Series(dtype="float64")
1916                     try:
1917                         data_dict = dict(
1918                             self.data[job][str(build)][data_set].items())
1919                     except KeyError:
1920                         if continue_on_error:
1921                             continue
1922                         return None
1923
1924                     for test_id, test_data in data_dict.items():
1925                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1926                             data[job][str(build)][test_id] = \
1927                                 pd.Series(dtype="float64")
1928                             if params is None:
1929                                 for param, val in test_data.items():
1930                                     data[job][str(build)][test_id][param] = val
1931                             else:
1932                                 for param in params:
1933                                     try:
1934                                         data[job][str(build)][test_id][param] =\
1935                                             test_data[param]
1936                                     except KeyError:
1937                                         data[job][str(build)][test_id][param] =\
1938                                             u"No Data"
1939             return data
1940
1941         except (KeyError, IndexError, ValueError) as err:
1942             logging.error(
1943                 f"Missing mandatory parameter in the element specification: "
1944                 f"{repr(err)}"
1945             )
1946             return None
1947         except AttributeError as err:
1948             logging.error(repr(err))
1949             return None
1950         except SyntaxError as err:
1951             logging.error(
1952                 f"The filter {cond} is not correct. Check if all tags are "
1953                 f"enclosed by apostrophes.\n{repr(err)}"
1954             )
1955             return None
1956
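    # A minimal element specification accepted by filter_data (illustrative;
    # the job name, build number, and tag values are examples):
    #
    #     element = {
    #         u"filter": u"'NDRPDR' and '64B'",
    #         u"parameters": [u"name", u"throughput"],
    #         u"data": {u"csit-vpp-perf-report-iterative": [55, ]}
    #     }
    #     filtered = input_data.filter_data(element)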
1957     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1958                              continue_on_error=False):
1959         """Filter required data from the given jobs and builds.
1960
1961         The output data structure is:
1962         - job 1
1963           - build 1
1964             - test (or suite) 1 ID:
1965               - param 1
1966               - param 2
1967               ...
1968               - param n
1969             ...
1970             - test (or suite) n ID:
1971             ...
1972           ...
1973           - build n
1974         ...
1975         - job n
1976
1977         :param element: Element which will use the filtered data.
1978         :param params: Parameters which will be included in the output. If None,
1979             all parameters are included.
1980         :param data_set: The set of data to be filtered: tests, suites,
1981             metadata.
1982         :param continue_on_error: Continue if there is an error while reading
1983             the data. The item will be empty then.
1984         :type element: pandas.Series
1985         :type params: list
1986         :type data_set: str
1987         :type continue_on_error: bool
1988         :returns: Filtered data.
1989         :rtype: pandas.Series
1990         """
1991
1992         include = element.get(u"include", None)
1993         if not include:
1994             logging.warning(u"No tests to include, skipping the element.")
1995             return None
1996
1997         if params is None:
1998             params = element.get(u"parameters", None)
1999             if params and u"type" not in params:
2000                 params.append(u"type")
2001
2002         cores = element.get(u"core", None)
2003         if cores:
2004             tests = list()
2005             for core in cores:
2006                 for test in include:
2007                     tests.append(test.format(core=core))
2008         else:
2009             tests = include
2010
2011         data = pd.Series(dtype="float64")
2012         try:
2013             for job, builds in element[u"data"].items():
2014                 data[job] = pd.Series(dtype="float64")
2015                 for build in builds:
2016                     data[job][str(build)] = pd.Series(dtype="float64")
2017                     for test in tests:
2018                         try:
2019                             reg_ex = re.compile(str(test).lower())
2020                             for test_id in self.data[job][
2021                                     str(build)][data_set].keys():
2022                                 if re.match(reg_ex, str(test_id).lower()):
2023                                     test_data = self.data[job][
2024                                         str(build)][data_set][test_id]
2025                                     data[job][str(build)][test_id] = \
2026                                         pd.Series(dtype="float64")
2027                                     if params is None:
2028                                         for param, val in test_data.items():
2029                                             data[job][str(build)][test_id]\
2030                                                 [param] = val
2031                                     else:
2032                                         for param in params:
2033                                             try:
2034                                                 data[job][str(build)][
2035                                                     test_id][param] = \
2036                                                     test_data[param]
2037                                             except KeyError:
2038                                                 data[job][str(build)][
2039                                                     test_id][param] = u"No Data"
2040                         except KeyError as err:
2041                             if continue_on_error:
2042                                 logging.debug(repr(err))
2043                                 continue
2044                             logging.error(repr(err))
2045                             return None
2046             return data
2047
2048         except (KeyError, IndexError, ValueError) as err:
2049             logging.error(
2050                 f"Missing mandatory parameter in the element "
2051                 f"specification: {repr(err)}"
2052             )
2053             return None
2054         except AttributeError as err:
2055             logging.error(repr(err))
2056             return None
2057
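    # Example of the include/core expansion above (illustrative):
    #
    #     element = {
    #         u"include": [u".*-{core}-ethip4-ip4base-.*", ],
    #         u"core": [u"1c", u"2c"],
    #         u"data": ...,  # jobs/builds, as in filter_data
    #     }
    #
    # Two regular expressions are compiled, one per core value, and matched
    # against the lower-cased test IDs.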
2058     @staticmethod
2059     def merge_data(data):
2060         """Merge data from multiple jobs and builds into one data structure.
2061
2062         The output data structure is:
2063
2064         - test (suite) 1 ID:
2065           - param 1
2066           - param 2
2067           ...
2068           - param n
2069         ...
2070         - test (suite) n ID:
2071         ...
2072
2073         :param data: Data to merge.
2074         :type data: pandas.Series
2075         :returns: Merged data.
2076         :rtype: pandas.Series
2077         """
2078
2079         logging.info(u"    Merging data ...")
2080
2081         merged_data = pd.Series(dtype="float64")
2082         for builds in data.values:
2083             for item in builds.values:
2084                 for item_id, item_data in item.items():
2085                     merged_data[item_id] = item_data
2086         return merged_data
2087
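    # Note: merge_data() drops the job/build levels; when the same item ID is
    # present in several builds, the build iterated last overwrites the
    # earlier ones (illustrative):
    #
    #     merged = InputData.merge_data(filtered)
    #     # merged is indexed directly by test/suite ID.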
2088     def print_all_oper_data(self):
2089         """Print all operational data to the console.
2090         """
2091
2092         for job in self._input_data.values:
2093             for build in job.values:
2094                 for test_id, test_data in build[u"tests"].items():
2095                     print(f"{test_id}")
2096                     if test_data.get(u"show-run", None) is None:
2097                         continue
2098                     for dut_name, data in test_data[u"show-run"].items():
2099                         if data.get(u"runtime", None) is None:
2100                             continue
2101                         runtime = loads(data[u"runtime"])
2102                         try:
2103                             threads_nr = len(runtime[0][u"clocks"])
2104                         except (IndexError, KeyError):
2105                             continue
2106                         threads = OrderedDict(
2107                             {idx: list() for idx in range(threads_nr)})
2108                         for item in runtime:
2109                             for idx in range(threads_nr):
2110                                 if item[u"vectors"][idx] > 0:
2111                                     clocks = item[u"clocks"][idx] / \
2112                                              item[u"vectors"][idx]
2113                                 elif item[u"calls"][idx] > 0:
2114                                     clocks = item[u"clocks"][idx] / \
2115                                              item[u"calls"][idx]
2116                                 elif item[u"suspends"][idx] > 0:
2117                                     clocks = item[u"clocks"][idx] / \
2118                                              item[u"suspends"][idx]
2119                                 else:
2120                                     clocks = 0.0
2121
2122                                 if item[u"calls"][idx] > 0:
2123                                     vectors_call = item[u"vectors"][idx] / \
2124                                                    item[u"calls"][idx]
2125                                 else:
2126                                     vectors_call = 0.0
2127
2128                                 if int(item[u"calls"][idx]) + int(
2129                                         item[u"vectors"][idx]) + \
2130                                         int(item[u"suspends"][idx]):
2131                                     threads[idx].append([
2132                                         item[u"name"],
2133                                         item[u"calls"][idx],
2134                                         item[u"vectors"][idx],
2135                                         item[u"suspends"][idx],
2136                                         clocks,
2137                                         vectors_call
2138                                     ])
2139
2140                         print(f"Host IP: {data.get(u'host', '')}, "
2141                               f"Socket: {data.get(u'socket', '')}")
2142                         for thread_nr, thread in threads.items():
2143                             txt_table = prettytable.PrettyTable(
2144                                 (
2145                                     u"Name",
2146                                     u"Nr of Calls",
2147                                     u"Nr of Vectors",
2148                                     u"Suspends",
2149                                     u"Cycles per Packet",
2150                                     u"Average Vector Size"
2151                                 )
2152                             )
2153                             avg = 0.0
2154                             for row in thread:
2155                                 txt_table.add_row(row)
2156                                 avg += row[-1]
2157                             if len(thread) == 0:
2158                                 avg = u""
2159                             else:
2160                                 avg = f", Average Vector Size per Node: " \
2161                                       f"{(avg / len(thread)):.2f}"
2162                             th_name = u"main" if thread_nr == 0 \
2163                                 else f"worker_{thread_nr}"
2164                             print(f"{dut_name}, {th_name}{avg}")
2165                             txt_table.float_format = u".2"
2166                             txt_table.align = u"r"
2167                             txt_table.align[u"Name"] = u"l"
2168                             print(f"{txt_table.get_string()}\n")
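    # Worked example for the per-node statistics above (illustrative): a node
    # with clocks=2.0e6, vectors=8000 and calls=100 is printed with
    # 2.0e6 / 8000 = 250.00 cycles per packet and an average vector size of
    # 8000 / 100 = 80.00.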