Pal: Process oper data only from coverage builds
[csit.git] / resources / tools / presentation / input_data_parser.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """
21
22 import re
23 import copy
24 import resource
25 import logging
26
27 from collections import OrderedDict
28 from os import remove, walk, listdir
29 from os.path import isfile, isdir, join
30 from datetime import datetime as dt
31 from datetime import timedelta
32 from json import loads
33 from json.decoder import JSONDecodeError
34
35 import hdrh.histogram
36 import hdrh.codec
37 import prettytable
38 import pandas as pd
39
40 from robot.api import ExecutionResult, ResultVisitor
41 from robot import errors
42
43 from resources.libraries.python import jumpavg
44 from input_data_files import download_and_unzip_data_file
45 from pal_errors import PresentationError
46
47
48 # Separator used in file names
49 SEPARATOR = u"__"
50
51
52 class ExecutionChecker(ResultVisitor):
53     """Class to traverse through the test suite structure.
54
55     The functionality implemented in this class generates a json structure:
56
57     Performance tests:
58
59     {
60         "metadata": {
61             "generated": "Timestamp",
62             "version": "SUT version",
63             "job": "Jenkins job name",
64             "build": "Information about the build"
65         },
66         "suites": {
67             "Suite long name 1": {
68                 "name": Suite name,
69                 "doc": "Suite 1 documentation",
70                 "parent": "Suite 1 parent",
71                 "level": "Level of the suite in the suite hierarchy"
72             }
73             "Suite long name N": {
74                 "name": Suite name,
75                 "doc": "Suite N documentation",
76                 "parent": "Suite 2 parent",
77                 "level": "Level of the suite in the suite hierarchy"
78             }
79         }
80         "tests": {
81             # NDRPDR tests:
82             "ID": {
83                 "name": "Test name",
84                 "parent": "Name of the parent of the test",
85                 "doc": "Test documentation",
86                 "msg": "Test message",
87                 "conf-history": "DUT1 and DUT2 VAT History",
88                 "show-run": "Show Run",
89                 "tags": ["tag 1", "tag 2", "tag n"],
90                 "type": "NDRPDR",
91                 "status": "PASS" | "FAIL",
92                 "throughput": {
93                     "NDR": {
94                         "LOWER": float,
95                         "UPPER": float
96                     },
97                     "PDR": {
98                         "LOWER": float,
99                         "UPPER": float
100                     }
101                 },
102                 "latency": {
103                     "NDR": {
104                         "direction1": {
105                             "min": float,
106                             "avg": float,
107                             "max": float,
108                             "hdrh": str
109                         },
110                         "direction2": {
111                             "min": float,
112                             "avg": float,
113                             "max": float,
114                             "hdrh": str
115                         }
116                     },
117                     "PDR": {
118                         "direction1": {
119                             "min": float,
120                             "avg": float,
121                             "max": float,
122                             "hdrh": str
123                         },
124                         "direction2": {
125                             "min": float,
126                             "avg": float,
127                             "max": float,
128                             "hdrh": str
129                         }
130                     }
131                 }
132             }
133
134             # TCP tests:
135             "ID": {
136                 "name": "Test name",
137                 "parent": "Name of the parent of the test",
138                 "doc": "Test documentation",
139                 "msg": "Test message",
140                 "tags": ["tag 1", "tag 2", "tag n"],
141                 "type": "TCP",
142                 "status": "PASS" | "FAIL",
143                 "result": int
144             }
145
146             # MRR, BMRR tests:
147             "ID": {
148                 "name": "Test name",
149                 "parent": "Name of the parent of the test",
150                 "doc": "Test documentation",
151                 "msg": "Test message",
152                 "tags": ["tag 1", "tag 2", "tag n"],
153                 "type": "MRR" | "BMRR",
154                 "status": "PASS" | "FAIL",
155                 "result": {
156                     "receive-rate": float,
157                     # Average of a list, computed using AvgStdevStats.
158                     # In CSIT-1180, replace with List[float].
159                 }
160             }
161
162             "ID" {
163                 # next test
164             }
165         }
166     }
167
168
169     Functional tests:
170
171     {
172         "metadata": {  # Optional
173             "version": "VPP version",
174             "job": "Jenkins job name",
175             "build": "Information about the build"
176         },
177         "suites": {
178             "Suite name 1": {
179                 "doc": "Suite 1 documentation",
180                 "parent": "Suite 1 parent",
181                 "level": "Level of the suite in the suite hierarchy"
182             }
183             "Suite name N": {
184                 "doc": "Suite N documentation",
185                 "parent": "Suite 2 parent",
186                 "level": "Level of the suite in the suite hierarchy"
187             }
188         }
189         "tests": {
190             "ID": {
191                 "name": "Test name",
192                 "parent": "Name of the parent of the test",
193                 "doc": "Test documentation"
194                 "msg": "Test message"
195                 "tags": ["tag 1", "tag 2", "tag n"],
196                 "conf-history": "DUT1 and DUT2 VAT History"
197                 "show-run": "Show Run"
198                 "status": "PASS" | "FAIL"
199             },
200             "ID" {
201                 # next test
202             }
203         }
204     }
205
206     .. note:: ID is the lowercase full path to the test.
207     """
208
209     REGEX_PLR_RATE = re.compile(
210         r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
211         r'PLRsearch upper bound::?\s(\d+.\d+)'
212     )
213     REGEX_NDRPDR_RATE = re.compile(
214         r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
215         r'NDR_UPPER:\s(\d+.\d+).*\n'
216         r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
217         r'PDR_UPPER:\s(\d+.\d+)'
218     )
219     REGEX_NDRPDR_GBPS = re.compile(
220         r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
221         r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
222         r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
223         r'PDR_UPPER:.*,\s(\d+.\d+)'
224     )
225     REGEX_PERF_MSG_INFO = re.compile(
226         r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
227         r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
228         r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
229         r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
230         r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
231     )
232     REGEX_CPS_MSG_INFO = re.compile(
233         r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
234         r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
235     )
236     REGEX_PPS_MSG_INFO = re.compile(
237         r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
238         r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
239     )
240     REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
241
242     REGEX_VSAP_MSG_INFO = re.compile(
243         r'Transfer Rate: (\d*.\d*).*\n'
244         r'Latency: (\d*.\d*).*\n'
245         r'Completed requests: (\d*).*\n'
246         r'Failed requests: (\d*).*\n'
247         r'Total data transferred: (\d*).*\n'
248         r'Connection [cr]ps rate:\s*(\d*.\d*)'
249     )
250
251     # Needed for CPS and PPS tests
252     REGEX_NDRPDR_LAT_BASE = re.compile(
253         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
254         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
255     )
256     REGEX_NDRPDR_LAT = re.compile(
257         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
258         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
259         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
260         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
261         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
262         r'Latency.*\[\'(.*)\', \'(.*)\'\]'
263     )
264
265     REGEX_VERSION_VPP = re.compile(
266         r"(return STDOUT Version:\s*|"
267         r"VPP Version:\s*|VPP version:\s*)(.*)"
268     )
269     REGEX_VERSION_DPDK = re.compile(
270         r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
271     )
272     REGEX_TCP = re.compile(
273         r'Total\s(rps|cps|throughput):\s(\d*).*$'
274     )
275     REGEX_MRR = re.compile(
276         r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
277         r'tx\s(\d*),\srx\s(\d*)'
278     )
279     REGEX_BMRR = re.compile(
280         r'.*trial results.*: \[(.*)\]'
281     )
282     REGEX_RECONF_LOSS = re.compile(
283         r'Packets lost due to reconfig: (\d*)'
284     )
285     REGEX_RECONF_TIME = re.compile(
286         r'Implied time lost: (\d*.[\de-]*)'
287     )
288     REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
289
290     REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
291
292     REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
293
294     REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
295
296     REGEX_SH_RUN_HOST = re.compile(
297         r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
298     )
299
300     def __init__(self, metadata, mapping, ignore, process_oper):
301         """Initialisation.
302
303         :param metadata: Key-value pairs to be included in "metadata" part of
304             JSON structure.
305         :param mapping: Mapping of the old names of test cases to the new
306             (actual) one.
307         :param ignore: List of TCs to be ignored.
308         :param process_oper: If True, operational data (show run, telemetry) is
309             processed.
310         :type metadata: dict
311         :type mapping: dict
312         :type ignore: list
313         :type process_oper: bool
314         """
315
316         # Type of message to parse out from the test messages
317         self._msg_type = None
318
319         # VPP version
320         self._version = None
321
322         # Timestamp
323         self._timestamp = None
324
325         # Testbed. The testbed is identified by TG node IP address.
326         self._testbed = None
327
328         # Mapping of TCs long names
329         self._mapping = mapping
330
331         # Ignore list
332         self._ignore = ignore
333
334         self._process_oper = process_oper
335
336         # Number of PAPI History messages found:
337         # 0 - no message
338         # 1 - PAPI History of DUT1
339         # 2 - PAPI History of DUT2
340         self._conf_history_lookup_nr = 0
341
342         self._sh_run_counter = 0
343         self._telemetry_kw_counter = 0
344         self._telemetry_msg_counter = 0
345
346         # Test ID of currently processed test- the lowercase full path to the
347         # test
348         self._test_id = None
349
350         # The main data structure
351         self._data = {
352             u"metadata": OrderedDict(),
353             u"suites": OrderedDict(),
354             u"tests": OrderedDict()
355         }
356
357         # Save the provided metadata
358         for key, val in metadata.items():
359             self._data[u"metadata"][key] = val
360
361         # Dictionary defining the methods used to parse different types of
362         # messages
363         self.parse_msg = {
364             u"vpp-version": self._get_vpp_version,
365             u"dpdk-version": self._get_dpdk_version,
366             u"teardown-papi-history": self._get_papi_history,
367             u"test-show-runtime": self._get_show_run,
368             u"testbed": self._get_testbed,
369             u"test-telemetry": self._get_telemetry
370         }
371
372     @property
373     def data(self):
374         """Getter - Data parsed from the XML file.
375
376         :returns: Data parsed from the XML file.
377         :rtype: dict
378         """
379         return self._data
380
381     def _get_data_from_mrr_test_msg(self, msg):
382         """Get info from message of MRR performance tests.
383
384         :param msg: Message to be processed.
385         :type msg: str
386         :returns: Processed message or original message if a problem occurs.
387         :rtype: str
388         """
389
390         groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
391         if not groups or groups.lastindex != 1:
392             return u"Test Failed."
393
394         try:
395             data = groups.group(1).split(u", ")
396         except (AttributeError, IndexError, ValueError, KeyError):
397             return u"Test Failed."
398
399         out_str = u"["
400         try:
401             for item in data:
402                 out_str += f"{(float(item) / 1e6):.2f}, "
403             return out_str[:-2] + u"]"
404         except (AttributeError, IndexError, ValueError, KeyError):
405             return u"Test Failed."
406
407     def _get_data_from_cps_test_msg(self, msg):
408         """Get info from message of NDRPDR CPS tests.
409
410         :param msg: Message to be processed.
411         :type msg: str
412         :returns: Processed message or "Test Failed." if a problem occurs.
413         :rtype: str
414         """
415
416         groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
417         if not groups or groups.lastindex != 2:
418             return u"Test Failed."
419
420         try:
421             return (
422                 f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
423                 f"2. {(float(groups.group(2)) / 1e6):5.2f}"
424             )
425         except (AttributeError, IndexError, ValueError, KeyError):
426             return u"Test Failed."
427
428     def _get_data_from_pps_test_msg(self, msg):
429         """Get info from message of NDRPDR PPS tests.
430
431         :param msg: Message to be processed.
432         :type msg: str
433         :returns: Processed message or "Test Failed." if a problem occurs.
434         :rtype: str
435         """
436
437         groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
438         if not groups or groups.lastindex != 4:
439             return u"Test Failed."
440
441         try:
442             return (
443                 f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
444                 f"{float(groups.group(2)):5.2f}\n"
445                 f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
446                 f"{float(groups.group(4)):5.2f}"
447             )
448         except (AttributeError, IndexError, ValueError, KeyError):
449             return u"Test Failed."
450
451     def _get_data_from_perf_test_msg(self, msg):
452         """Get info from message of NDRPDR performance tests.
453
454         :param msg: Message to be processed.
455         :type msg: str
456         :returns: Processed message or "Test Failed." if a problem occurs.
457         :rtype: str
458         """
459
460         groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
461         if not groups or groups.lastindex != 10:
462             return u"Test Failed."
463
464         try:
465             data = {
466                 u"ndr_low": float(groups.group(1)),
467                 u"ndr_low_b": float(groups.group(2)),
468                 u"pdr_low": float(groups.group(3)),
469                 u"pdr_low_b": float(groups.group(4)),
470                 u"pdr_lat_90_1": groups.group(5),
471                 u"pdr_lat_90_2": groups.group(6),
472                 u"pdr_lat_50_1": groups.group(7),
473                 u"pdr_lat_50_2": groups.group(8),
474                 u"pdr_lat_10_1": groups.group(9),
475                 u"pdr_lat_10_2": groups.group(10),
476             }
477         except (AttributeError, IndexError, ValueError, KeyError):
478             return u"Test Failed."
479
480         def _process_lat(in_str_1, in_str_2):
481             """Extract P50, P90 and P99 latencies or min, avg, max values from
482             latency string.
483
484             :param in_str_1: Latency string for one direction produced by robot
485                 framework.
486             :param in_str_2: Latency string for second direction produced by
487                 robot framework.
488             :type in_str_1: str
489             :type in_str_2: str
490             :returns: Processed latency string or None if a problem occurs.
491             :rtype: tuple
492             """
493             in_list_1 = in_str_1.split('/', 3)
494             in_list_2 = in_str_2.split('/', 3)
495
496             if len(in_list_1) != 4 and len(in_list_2) != 4:
497                 return None
498
499             in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
500             try:
501                 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
502             except hdrh.codec.HdrLengthException:
503                 hdr_lat_1 = None
504
505             in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
506             try:
507                 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
508             except hdrh.codec.HdrLengthException:
509                 hdr_lat_2 = None
510
511             if hdr_lat_1 and hdr_lat_2:
512                 hdr_lat = (
513                     hdr_lat_1.get_value_at_percentile(50.0),
514                     hdr_lat_1.get_value_at_percentile(90.0),
515                     hdr_lat_1.get_value_at_percentile(99.0),
516                     hdr_lat_2.get_value_at_percentile(50.0),
517                     hdr_lat_2.get_value_at_percentile(90.0),
518                     hdr_lat_2.get_value_at_percentile(99.0)
519                 )
520                 if all(hdr_lat):
521                     return hdr_lat
522
523             hdr_lat = (
524                 int(in_list_1[0]), int(in_list_1[1]), int(in_list_1[2]),
525                 int(in_list_2[0]), int(in_list_2[1]), int(in_list_2[2])
526             )
527             for item in hdr_lat:
528                 if item in (-1, 4294967295, 0):
529                     return None
530             return hdr_lat
531
532         try:
533             out_msg = (
534                 f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
535                 f"{data[u'ndr_low_b']:5.2f}"
536                 f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
537                 f"{data[u'pdr_low_b']:5.2f}"
538             )
539             latency = (
540                 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
541                 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
542                 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
543             )
544             if all(latency):
545                 max_len = len(str(max((max(item) for item in latency))))
546                 max_len = 4 if max_len < 4 else max_len
547
548                 for idx, lat in enumerate(latency):
549                     if not idx:
550                         out_msg += u"\n"
551                     out_msg += (
552                         f"\n{idx + 3}. "
553                         f"{lat[0]:{max_len}d} "
554                         f"{lat[1]:{max_len}d} "
555                         f"{lat[2]:{max_len}d}      "
556                         f"{lat[3]:{max_len}d} "
557                         f"{lat[4]:{max_len}d} "
558                         f"{lat[5]:{max_len}d} "
559                     )
560
561             return out_msg
562
563         except (AttributeError, IndexError, ValueError, KeyError):
564             return u"Test Failed."
565
566     def _get_testbed(self, msg):
567         """Called when extraction of testbed IP is required.
568         The testbed is identified by TG node IP address.
569
570         :param msg: Message to process.
571         :type msg: Message
572         :returns: Nothing.
573         """
574
575         if msg.message.count(u"Setup of TG node") or \
576                 msg.message.count(u"Setup of node TG host"):
577             reg_tg_ip = re.compile(
578                 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
579             try:
580                 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
581             except (KeyError, ValueError, IndexError, AttributeError):
582                 pass
583             finally:
584                 self._data[u"metadata"][u"testbed"] = self._testbed
585                 self._msg_type = None
586
587     def _get_vpp_version(self, msg):
588         """Called when extraction of VPP version is required.
589
590         :param msg: Message to process.
591         :type msg: Message
592         :returns: Nothing.
593         """
594
595         if msg.message.count(u"return STDOUT Version:") or \
596                 msg.message.count(u"VPP Version:") or \
597                 msg.message.count(u"VPP version:"):
598             self._version = str(
599                 re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
600             )
601             self._data[u"metadata"][u"version"] = self._version
602             self._msg_type = None
603
604     def _get_dpdk_version(self, msg):
605         """Called when extraction of DPDK version is required.
606
607         :param msg: Message to process.
608         :type msg: Message
609         :returns: Nothing.
610         """
611
612         if msg.message.count(u"DPDK Version:"):
613             try:
614                 self._version = str(re.search(
615                     self.REGEX_VERSION_DPDK, msg.message).group(2))
616                 self._data[u"metadata"][u"version"] = self._version
617             except IndexError:
618                 pass
619             finally:
620                 self._msg_type = None
621
622     def _get_papi_history(self, msg):
623         """Called when extraction of PAPI command history is required.
624
625         :param msg: Message to process.
626         :type msg: Message
627         :returns: Nothing.
628         """
629         if msg.message.count(u"PAPI command history:"):
630             self._conf_history_lookup_nr += 1
631             if self._conf_history_lookup_nr == 1:
632                 self._data[u"tests"][self._test_id][u"conf-history"] = str()
633             else:
634                 self._msg_type = None
635             text = re.sub(
636                 r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
637                 u"",
638                 msg.message,
639                 count=1
640             ).replace(u'"', u"'")
641             self._data[u"tests"][self._test_id][u"conf-history"] += (
642                 f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
643             )
644
645     def _get_show_run(self, msg):
646         """Called when extraction of VPP operational data (output of CLI command
647         Show Runtime) is required.
648
649         :param msg: Message to process.
650         :type msg: Message
651         :returns: Nothing.
652         """
653
654         if not msg.message.count(u"stats runtime"):
655             return
656
657         # Temporary solution
658         if self._sh_run_counter > 1:
659             return
660
661         if u"show-run" not in self._data[u"tests"][self._test_id].keys():
662             self._data[u"tests"][self._test_id][u"show-run"] = dict()
663
664         groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
665         if not groups:
666             return
667         try:
668             host = groups.group(1)
669         except (AttributeError, IndexError):
670             host = u""
671         try:
672             sock = groups.group(2)
673         except (AttributeError, IndexError):
674             sock = u""
675
676         dut = u"dut{nr}".format(
677             nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
678
679         self._data[u'tests'][self._test_id][u'show-run'][dut] = \
680             copy.copy(
681                 {
682                     u"host": host,
683                     u"socket": sock,
684                     u"runtime": str(msg.message).replace(u' ', u'').
685                                 replace(u'\n', u'').replace(u"'", u'"').
686                                 replace(u'b"', u'"').replace(u'u"', u'"').
687                                 split(u":", 1)[1]
688                 }
689             )
690
691     def _get_telemetry(self, msg):
692         """Called when extraction of VPP telemetry data is required.
693
694         :param msg: Message to process.
695         :type msg: Message
696         :returns: Nothing.
697         """
698
699         if self._telemetry_kw_counter > 1:
700             return
701         if not msg.message.count(u"# TYPE vpp_runtime_calls"):
702             return
703
704         if u"telemetry-show-run" not in \
705                 self._data[u"tests"][self._test_id].keys():
706             self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()
707
708         self._telemetry_msg_counter += 1
709         groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
710         if not groups:
711             return
712         try:
713             host = groups.group(1)
714         except (AttributeError, IndexError):
715             host = u""
716         try:
717             sock = groups.group(2)
718         except (AttributeError, IndexError):
719             sock = u""
720         runtime = {
721             u"source_type": u"node",
722             u"source_id": host,
723             u"msg_type": u"metric",
724             u"log_level": u"INFO",
725             u"timestamp": msg.timestamp,
726             u"msg": u"show_runtime",
727             u"host": host,
728             u"socket": sock,
729             u"data": list()
730         }
731         for line in msg.message.splitlines():
732             if not line.startswith(u"vpp_runtime_"):
733                 continue
734             try:
735                 params, value, timestamp = line.rsplit(u" ", maxsplit=2)
736                 cut = params.index(u"{")
737                 name = params[:cut].split(u"_", maxsplit=2)[-1]
738                 labels = eval(
739                     u"dict" + params[cut:].replace('{', '(').replace('}', ')')
740                 )
741                 labels[u"graph_node"] = labels.pop(u"name")
742                 runtime[u"data"].append(
743                     {
744                         u"name": name,
745                         u"value": value,
746                         u"timestamp": timestamp,
747                         u"labels": labels
748                     }
749                 )
750             except (TypeError, ValueError, IndexError):
751                 continue
752         self._data[u'tests'][self._test_id][u'telemetry-show-run']\
753             [f"dut{self._telemetry_msg_counter}"] = copy.copy(
754                 {
755                     u"host": host,
756                     u"socket": sock,
757                     u"runtime": runtime
758                 }
759             )
760
761     def _get_ndrpdr_throughput(self, msg):
762         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
763         message.
764
765         :param msg: The test message to be parsed.
766         :type msg: str
767         :returns: Parsed data as a dict and the status (PASS/FAIL).
768         :rtype: tuple(dict, str)
769         """
770
771         throughput = {
772             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
773             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
774         }
775         status = u"FAIL"
776         groups = re.search(self.REGEX_NDRPDR_RATE, msg)
777
778         if groups is not None:
779             try:
780                 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
781                 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
782                 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
783                 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
784                 status = u"PASS"
785             except (IndexError, ValueError):
786                 pass
787
788         return throughput, status
789
790     def _get_ndrpdr_throughput_gbps(self, msg):
791         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
792         test message.
793
794         :param msg: The test message to be parsed.
795         :type msg: str
796         :returns: Parsed data as a dict and the status (PASS/FAIL).
797         :rtype: tuple(dict, str)
798         """
799
800         gbps = {
801             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
802             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
803         }
804         status = u"FAIL"
805         groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
806
807         if groups is not None:
808             try:
809                 gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
810                 gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
811                 gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
812                 gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
813                 status = u"PASS"
814             except (IndexError, ValueError):
815                 pass
816
817         return gbps, status
818
819     def _get_plr_throughput(self, msg):
820         """Get PLRsearch lower bound and PLRsearch upper bound from the test
821         message.
822
823         :param msg: The test message to be parsed.
824         :type msg: str
825         :returns: Parsed data as a dict and the status (PASS/FAIL).
826         :rtype: tuple(dict, str)
827         """
828
829         throughput = {
830             u"LOWER": -1.0,
831             u"UPPER": -1.0
832         }
833         status = u"FAIL"
834         groups = re.search(self.REGEX_PLR_RATE, msg)
835
836         if groups is not None:
837             try:
838                 throughput[u"LOWER"] = float(groups.group(1))
839                 throughput[u"UPPER"] = float(groups.group(2))
840                 status = u"PASS"
841             except (IndexError, ValueError):
842                 pass
843
844         return throughput, status
845
846     def _get_ndrpdr_latency(self, msg):
847         """Get LATENCY from the test message.
848
849         :param msg: The test message to be parsed.
850         :type msg: str
851         :returns: Parsed data as a dict and the status (PASS/FAIL).
852         :rtype: tuple(dict, str)
853         """
854         latency_default = {
855             u"min": -1.0,
856             u"avg": -1.0,
857             u"max": -1.0,
858             u"hdrh": u""
859         }
860         latency = {
861             u"NDR": {
862                 u"direction1": copy.copy(latency_default),
863                 u"direction2": copy.copy(latency_default)
864             },
865             u"PDR": {
866                 u"direction1": copy.copy(latency_default),
867                 u"direction2": copy.copy(latency_default)
868             },
869             u"LAT0": {
870                 u"direction1": copy.copy(latency_default),
871                 u"direction2": copy.copy(latency_default)
872             },
873             u"PDR10": {
874                 u"direction1": copy.copy(latency_default),
875                 u"direction2": copy.copy(latency_default)
876             },
877             u"PDR50": {
878                 u"direction1": copy.copy(latency_default),
879                 u"direction2": copy.copy(latency_default)
880             },
881             u"PDR90": {
882                 u"direction1": copy.copy(latency_default),
883                 u"direction2": copy.copy(latency_default)
884             },
885         }
886
887         groups = re.search(self.REGEX_NDRPDR_LAT, msg)
888         if groups is None:
889             groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
890         if groups is None:
891             return latency, u"FAIL"
892
893         def process_latency(in_str):
894             """Return object with parsed latency values.
895
896             TODO: Define class for the return type.
897
898             :param in_str: Input string, min/avg/max/hdrh format.
899             :type in_str: str
900             :returns: Dict with corresponding keys, except hdrh float values.
901             :rtype dict:
902             :throws IndexError: If in_str does not have enough substrings.
903             :throws ValueError: If a substring does not convert to float.
904             """
905             in_list = in_str.split('/', 3)
906
907             rval = {
908                 u"min": float(in_list[0]),
909                 u"avg": float(in_list[1]),
910                 u"max": float(in_list[2]),
911                 u"hdrh": u""
912             }
913
914             if len(in_list) == 4:
915                 rval[u"hdrh"] = str(in_list[3])
916
917             return rval
918
919         try:
920             latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
921             latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
922             latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
923             latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
924             if groups.lastindex == 4:
925                 return latency, u"PASS"
926         except (IndexError, ValueError):
927             pass
928
929         try:
930             latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
931             latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
932             latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
933             latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
934             latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
935             latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
936             latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
937             latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
938             if groups.lastindex == 12:
939                 return latency, u"PASS"
940         except (IndexError, ValueError):
941             pass
942
943         return latency, u"FAIL"
944
945     @staticmethod
946     def _get_hoststack_data(msg, tags):
947         """Get data from the hoststack test message.
948
949         :param msg: The test message to be parsed.
950         :param tags: Test tags.
951         :type msg: str
952         :type tags: list
953         :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
954         :rtype: tuple(dict, str)
955         """
956         result = dict()
957         status = u"FAIL"
958
959         msg = msg.replace(u"'", u'"').replace(u" ", u"")
960         if u"LDPRELOAD" in tags:
961             try:
962                 result = loads(msg)
963                 status = u"PASS"
964             except JSONDecodeError:
965                 pass
966         elif u"VPPECHO" in tags:
967             try:
968                 msg_lst = msg.replace(u"}{", u"} {").split(u" ")
969                 result = dict(
970                     client=loads(msg_lst[0]),
971                     server=loads(msg_lst[1])
972                 )
973                 status = u"PASS"
974             except (JSONDecodeError, IndexError):
975                 pass
976
977         return result, status
978
979     def _get_vsap_data(self, msg, tags):
980         """Get data from the vsap test message.
981
982         :param msg: The test message to be parsed.
983         :param tags: Test tags.
984         :type msg: str
985         :type tags: list
986         :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
987         :rtype: tuple(dict, str)
988         """
989         result = dict()
990         status = u"FAIL"
991
992         groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
993         if groups is not None:
994             try:
995                 result[u"transfer-rate"] = float(groups.group(1)) * 1e3
996                 result[u"latency"] = float(groups.group(2))
997                 result[u"completed-requests"] = int(groups.group(3))
998                 result[u"failed-requests"] = int(groups.group(4))
999                 result[u"bytes-transferred"] = int(groups.group(5))
1000                 if u"TCP_CPS"in tags:
1001                     result[u"cps"] = float(groups.group(6))
1002                 elif u"TCP_RPS" in tags:
1003                     result[u"rps"] = float(groups.group(6))
1004                 else:
1005                     return result, status
1006                 status = u"PASS"
1007             except (IndexError, ValueError):
1008                 pass
1009
1010         return result, status
1011
1012     def visit_suite(self, suite):
1013         """Implements traversing through the suite and its direct children.
1014
1015         :param suite: Suite to process.
1016         :type suite: Suite
1017         :returns: Nothing.
1018         """
1019         if self.start_suite(suite) is not False:
1020             suite.suites.visit(self)
1021             suite.tests.visit(self)
1022             self.end_suite(suite)
1023
1024     def start_suite(self, suite):
1025         """Called when suite starts.
1026
1027         :param suite: Suite to process.
1028         :type suite: Suite
1029         :returns: Nothing.
1030         """
1031
1032         try:
1033             parent_name = suite.parent.name
1034         except AttributeError:
1035             return
1036
1037         self._data[u"suites"][suite.longname.lower().
1038                               replace(u'"', u"'").
1039                               replace(u" ", u"_")] = {
1040                                   u"name": suite.name.lower(),
1041                                   u"doc": suite.doc,
1042                                   u"parent": parent_name,
1043                                   u"level": len(suite.longname.split(u"."))
1044                               }
1045
1046         suite.keywords.visit(self)
1047
1048     def end_suite(self, suite):
1049         """Called when suite ends.
1050
1051         :param suite: Suite to process.
1052         :type suite: Suite
1053         :returns: Nothing.
1054         """
1055
1056     def visit_test(self, test):
1057         """Implements traversing through the test.
1058
1059         :param test: Test to process.
1060         :type test: Test
1061         :returns: Nothing.
1062         """
1063         if self.start_test(test) is not False:
1064             test.keywords.visit(self)
1065             self.end_test(test)
1066
1067     def start_test(self, test):
1068         """Called when test starts.
1069
1070         :param test: Test to process.
1071         :type test: Test
1072         :returns: Nothing.
1073         """
1074
1075         self._sh_run_counter = 0
1076         self._telemetry_kw_counter = 0
1077         self._telemetry_msg_counter = 0
1078
1079         longname_orig = test.longname.lower()
1080
1081         # Check the ignore list
1082         if longname_orig in self._ignore:
1083             return
1084
1085         tags = [str(tag) for tag in test.tags]
1086         test_result = dict()
1087
1088         # Change the TC long name and name if defined in the mapping table
1089         longname = self._mapping.get(longname_orig, None)
1090         if longname is not None:
1091             name = longname.split(u'.')[-1]
1092             logging.debug(
1093                 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1094                 f"{name}"
1095             )
1096         else:
1097             longname = longname_orig
1098             name = test.name.lower()
1099
1100         # Remove TC number from the TC long name (backward compatibility):
1101         self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
1102         # Remove TC number from the TC name (not needed):
1103         test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
1104
1105         test_result[u"parent"] = test.parent.name.lower()
1106         test_result[u"tags"] = tags
1107         test_result["doc"] = test.doc
1108         test_result[u"type"] = u""
1109         test_result[u"status"] = test.status
1110         test_result[u"starttime"] = test.starttime
1111         test_result[u"endtime"] = test.endtime
1112
1113         if test.status == u"PASS":
1114             if u"NDRPDR" in tags:
1115                 if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
1116                     test_result[u"msg"] = self._get_data_from_pps_test_msg(
1117                         test.message)
1118                 elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1119                     test_result[u"msg"] = self._get_data_from_cps_test_msg(
1120                         test.message)
1121                 else:
1122                     test_result[u"msg"] = self._get_data_from_perf_test_msg(
1123                         test.message)
1124             elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1125                 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1126                     test.message)
1127             else:
1128                 test_result[u"msg"] = test.message
1129         else:
1130             test_result[u"msg"] = test.message
1131
1132         if u"PERFTEST" in tags and u"TREX" not in tags:
1133             # Replace info about cores (e.g. -1c-) with the info about threads
1134             # and cores (e.g. -1t1c-) in the long test case names and in the
1135             # test case names if necessary.
1136             tag_count = 0
1137             tag_tc = str()
1138             for tag in test_result[u"tags"]:
1139                 groups = re.search(self.REGEX_TC_TAG, tag)
1140                 if groups:
1141                     tag_count += 1
1142                     tag_tc = tag
1143
1144             if tag_count == 1:
1145                 self._test_id = re.sub(
1146                     self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1147                     self._test_id, count=1
1148                 )
1149                 test_result[u"name"] = re.sub(
1150                     self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1151                     test_result["name"], count=1
1152                 )
1153             else:
1154                 test_result[u"status"] = u"FAIL"
1155                 self._data[u"tests"][self._test_id] = test_result
1156                 logging.debug(
1157                     f"The test {self._test_id} has no or more than one "
1158                     f"multi-threading tags.\n"
1159                     f"Tags: {test_result[u'tags']}"
1160                 )
1161                 return
1162
1163         if u"DEVICETEST" in tags:
1164             test_result[u"type"] = u"DEVICETEST"
1165         elif u"NDRPDR" in tags:
1166             if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1167                 test_result[u"type"] = u"CPS"
1168             else:
1169                 test_result[u"type"] = u"NDRPDR"
1170             if test.status == u"PASS":
1171                 test_result[u"throughput"], test_result[u"status"] = \
1172                     self._get_ndrpdr_throughput(test.message)
1173                 test_result[u"gbps"], test_result[u"status"] = \
1174                     self._get_ndrpdr_throughput_gbps(test.message)
1175                 test_result[u"latency"], test_result[u"status"] = \
1176                     self._get_ndrpdr_latency(test.message)
1177         elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1178             if u"MRR" in tags:
1179                 test_result[u"type"] = u"MRR"
1180             else:
1181                 test_result[u"type"] = u"BMRR"
1182             if test.status == u"PASS":
1183                 test_result[u"result"] = dict()
1184                 groups = re.search(self.REGEX_BMRR, test.message)
1185                 if groups is not None:
1186                     items_str = groups.group(1)
1187                     items_float = [
1188                         float(item.strip().replace(u"'", u""))
1189                         for item in items_str.split(",")
1190                     ]
1191                     # Use whole list in CSIT-1180.
1192                     stats = jumpavg.AvgStdevStats.for_runs(items_float)
1193                     test_result[u"result"][u"samples"] = items_float
1194                     test_result[u"result"][u"receive-rate"] = stats.avg
1195                     test_result[u"result"][u"receive-stdev"] = stats.stdev
1196                 else:
1197                     groups = re.search(self.REGEX_MRR, test.message)
1198                     test_result[u"result"][u"receive-rate"] = \
1199                         float(groups.group(3)) / float(groups.group(1))
1200         elif u"SOAK" in tags:
1201             test_result[u"type"] = u"SOAK"
1202             if test.status == u"PASS":
1203                 test_result[u"throughput"], test_result[u"status"] = \
1204                     self._get_plr_throughput(test.message)
1205         elif u"HOSTSTACK" in tags:
1206             test_result[u"type"] = u"HOSTSTACK"
1207             if test.status == u"PASS":
1208                 test_result[u"result"], test_result[u"status"] = \
1209                     self._get_hoststack_data(test.message, tags)
1210         elif u"LDP_NGINX" in tags:
1211             test_result[u"type"] = u"LDP_NGINX"
1212             test_result[u"result"], test_result[u"status"] = \
1213                 self._get_vsap_data(test.message, tags)
1214         # elif u"TCP" in tags:  # This might be not used
1215         #     test_result[u"type"] = u"TCP"
1216         #     if test.status == u"PASS":
1217         #         groups = re.search(self.REGEX_TCP, test.message)
1218         #         test_result[u"result"] = int(groups.group(2))
1219         elif u"RECONF" in tags:
1220             test_result[u"type"] = u"RECONF"
1221             if test.status == u"PASS":
1222                 test_result[u"result"] = None
1223                 try:
1224                     grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1225                     grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1226                     test_result[u"result"] = {
1227                         u"loss": int(grps_loss.group(1)),
1228                         u"time": float(grps_time.group(1))
1229                     }
1230                 except (AttributeError, IndexError, ValueError, TypeError):
1231                     test_result[u"status"] = u"FAIL"
1232         else:
1233             test_result[u"status"] = u"FAIL"
1234
1235         self._data[u"tests"][self._test_id] = test_result
1236
1237     def end_test(self, test):
1238         """Called when test ends.
1239
1240         :param test: Test to process.
1241         :type test: Test
1242         :returns: Nothing.
1243         """
1244
1245     def visit_keyword(self, keyword):
1246         """Implements traversing through the keyword and its child keywords.
1247
1248         :param keyword: Keyword to process.
1249         :type keyword: Keyword
1250         :returns: Nothing.
1251         """
1252         if self.start_keyword(keyword) is not False:
1253             self.end_keyword(keyword)
1254
1255     def start_keyword(self, keyword):
1256         """Called when keyword starts. Default implementation does nothing.
1257
1258         :param keyword: Keyword to process.
1259         :type keyword: Keyword
1260         :returns: Nothing.
1261         """
1262         try:
1263             if keyword.type == u"setup":
1264                 self.visit_setup_kw(keyword)
1265             elif keyword.type == u"teardown":
1266                 self.visit_teardown_kw(keyword)
1267             else:
1268                 self.visit_test_kw(keyword)
1269         except AttributeError:
1270             pass
1271
1272     def end_keyword(self, keyword):
1273         """Called when keyword ends. Default implementation does nothing.
1274
1275         :param keyword: Keyword to process.
1276         :type keyword: Keyword
1277         :returns: Nothing.
1278         """
1279
1280     def visit_test_kw(self, test_kw):
1281         """Implements traversing through the test keyword and its child
1282         keywords.
1283
1284         :param test_kw: Keyword to process.
1285         :type test_kw: Keyword
1286         :returns: Nothing.
1287         """
1288         for keyword in test_kw.keywords:
1289             if self.start_test_kw(keyword) is not False:
1290                 self.visit_test_kw(keyword)
1291                 self.end_test_kw(keyword)
1292
1293     def start_test_kw(self, test_kw):
1294         """Called when test keyword starts. Default implementation does
1295         nothing.
1296
1297         :param test_kw: Keyword to process.
1298         :type test_kw: Keyword
1299         :returns: Nothing.
1300         """
1301         if not self._process_oper:
1302             return
1303
1304         if test_kw.name.count(u"Run Telemetry On All Duts"):
1305             self._msg_type = u"test-telemetry"
1306             self._telemetry_kw_counter += 1
1307         elif test_kw.name.count(u"Show Runtime On All Duts"):
1308             self._msg_type = u"test-show-runtime"
1309             self._sh_run_counter += 1
1310         else:
1311             return
1312         test_kw.messages.visit(self)
1313
1314     def end_test_kw(self, test_kw):
1315         """Called when keyword ends. Default implementation does nothing.
1316
1317         :param test_kw: Keyword to process.
1318         :type test_kw: Keyword
1319         :returns: Nothing.
1320         """
1321
1322     def visit_setup_kw(self, setup_kw):
1323         """Implements traversing through the teardown keyword and its child
1324         keywords.
1325
1326         :param setup_kw: Keyword to process.
1327         :type setup_kw: Keyword
1328         :returns: Nothing.
1329         """
1330         for keyword in setup_kw.keywords:
1331             if self.start_setup_kw(keyword) is not False:
1332                 self.visit_setup_kw(keyword)
1333                 self.end_setup_kw(keyword)
1334
1335     def start_setup_kw(self, setup_kw):
1336         """Called when teardown keyword starts. Default implementation does
1337         nothing.
1338
1339         :param setup_kw: Keyword to process.
1340         :type setup_kw: Keyword
1341         :returns: Nothing.
1342         """
1343         if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1344                 and not self._version:
1345             self._msg_type = u"vpp-version"
1346         elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
1347                 not self._version:
1348             self._msg_type = u"dpdk-version"
1349         elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1350             self._msg_type = u"testbed"
1351         else:
1352             return
1353         setup_kw.messages.visit(self)
1354
1355     def end_setup_kw(self, setup_kw):
1356         """Called when keyword ends. Default implementation does nothing.
1357
1358         :param setup_kw: Keyword to process.
1359         :type setup_kw: Keyword
1360         :returns: Nothing.
1361         """
1362
1363     def visit_teardown_kw(self, teardown_kw):
1364         """Implements traversing through the teardown keyword and its child
1365         keywords.
1366
1367         :param teardown_kw: Keyword to process.
1368         :type teardown_kw: Keyword
1369         :returns: Nothing.
1370         """
1371         for keyword in teardown_kw.keywords:
1372             if self.start_teardown_kw(keyword) is not False:
1373                 self.visit_teardown_kw(keyword)
1374                 self.end_teardown_kw(keyword)
1375
1376     def start_teardown_kw(self, teardown_kw):
1377         """Called when teardown keyword starts
1378
1379         :param teardown_kw: Keyword to process.
1380         :type teardown_kw: Keyword
1381         :returns: Nothing.
1382         """
1383         if teardown_kw.name.count(u"Show Papi History On All Duts"):
1384             self._conf_history_lookup_nr = 0
1385             self._msg_type = u"teardown-papi-history"
1386             teardown_kw.messages.visit(self)
1387
1388     def end_teardown_kw(self, teardown_kw):
1389         """Called when keyword ends. Default implementation does nothing.
1390
1391         :param teardown_kw: Keyword to process.
1392         :type teardown_kw: Keyword
1393         :returns: Nothing.
1394         """
1395
1396     def visit_message(self, msg):
1397         """Implements visiting the message.
1398
1399         :param msg: Message to process.
1400         :type msg: Message
1401         :returns: Nothing.
1402         """
1403         if self.start_message(msg) is not False:
1404             self.end_message(msg)
1405
1406     def start_message(self, msg):
1407         """Called when message starts. Get required information from messages:
1408         - VPP version.
1409
1410         :param msg: Message to process.
1411         :type msg: Message
1412         :returns: Nothing.
1413         """
1414         if self._msg_type:
1415             self.parse_msg[self._msg_type](msg)
1416
1417     def end_message(self, msg):
1418         """Called when message ends. Default implementation does nothing.
1419
1420         :param msg: Message to process.
1421         :type msg: Message
1422         :returns: Nothing.
1423         """
1424
1425
1426 class InputData:
1427     """Input data
1428
1429     The data is extracted from output.xml files generated by Jenkins jobs and
1430     stored in pandas' DataFrames.
1431
1432     The data structure:
1433     - job name
1434       - build number
1435         - metadata
1436           (as described in ExecutionChecker documentation)
1437         - suites
1438           (as described in ExecutionChecker documentation)
1439         - tests
1440           (as described in ExecutionChecker documentation)
1441     """
1442
1443     def __init__(self, spec, for_output):
1444         """Initialization.
1445
1446         :param spec: Specification.
1447         :param for_output: Output to be generated from downloaded data.
1448         :type spec: Specification
1449         :type for_output: str
1450         """
1451
1452         # Specification:
1453         self._cfg = spec
1454
1455         self._for_output = for_output
1456
1457         # Data store:
1458         self._input_data = pd.Series()
1459
1460     @property
1461     def data(self):
1462         """Getter - Input data.
1463
1464         :returns: Input data
1465         :rtype: pandas.Series
1466         """
1467         return self._input_data
1468
1469     def metadata(self, job, build):
1470         """Getter - metadata
1471
1472         :param job: Job which metadata we want.
1473         :param build: Build which metadata we want.
1474         :type job: str
1475         :type build: str
1476         :returns: Metadata
1477         :rtype: pandas.Series
1478         """
1479         return self.data[job][build][u"metadata"]
1480
1481     def suites(self, job, build):
1482         """Getter - suites
1483
1484         :param job: Job which suites we want.
1485         :param build: Build which suites we want.
1486         :type job: str
1487         :type build: str
1488         :returns: Suites.
1489         :rtype: pandas.Series
1490         """
1491         return self.data[job][str(build)][u"suites"]
1492
1493     def tests(self, job, build):
1494         """Getter - tests
1495
1496         :param job: Job which tests we want.
1497         :param build: Build which tests we want.
1498         :type job: str
1499         :type build: str
1500         :returns: Tests.
1501         :rtype: pandas.Series
1502         """
1503         return self.data[job][build][u"tests"]
1504
1505     def _parse_tests(self, job, build):
1506         """Process data from robot output.xml file and return JSON structured
1507         data.
1508
1509         :param job: The name of job which build output data will be processed.
1510         :param build: The build which output data will be processed.
1511         :type job: str
1512         :type build: dict
1513         :returns: JSON data structure.
1514         :rtype: dict
1515         """
1516
1517         metadata = {
1518             u"job": job,
1519             u"build": build
1520         }
1521
1522         with open(build[u"file-name"], u'r') as data_file:
1523             try:
1524                 result = ExecutionResult(data_file)
1525             except errors.DataError as err:
1526                 logging.error(
1527                     f"Error occurred while parsing output.xml: {repr(err)}"
1528                 )
1529                 return None
1530
1531         process_oper = False
1532         if u"-vpp-perf-report-coverage-" in job:
1533             process_oper = True
1534         elif u"-vpp-perf-report-iterative-" in job:
1535             # Exceptions for TBs where we do not have coverage data:
1536             for item in (u"-2n-icx", u"-3n-icx", u"-2n-aws", u"-3n-aws"):
1537                 if item in job:
1538                     process_oper = True
1539         checker = ExecutionChecker(
1540             metadata, self._cfg.mapping, self._cfg.ignore, process_oper
1541         )
1542         result.visit(checker)
1543
1544         checker.data[u"metadata"][u"tests_total"] = \
1545             result.statistics.total.all.total
1546         checker.data[u"metadata"][u"tests_passed"] = \
1547             result.statistics.total.all.passed
1548         checker.data[u"metadata"][u"tests_failed"] = \
1549             result.statistics.total.all.failed
1550         checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
1551         checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]
1552
1553         return checker.data
1554
1555     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1556         """Download and parse the input data file.
1557
1558         :param pid: PID of the process executing this method.
1559         :param job: Name of the Jenkins job which generated the processed input
1560             file.
1561         :param build: Information about the Jenkins build which generated the
1562             processed input file.
1563         :param repeat: Repeat the download specified number of times if not
1564             successful.
1565         :type pid: int
1566         :type job: str
1567         :type build: dict
1568         :type repeat: int
1569         """
1570
1571         logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1572
1573         state = u"failed"
1574         success = False
1575         data = None
1576         do_repeat = repeat
1577         while do_repeat:
1578             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1579             if success:
1580                 break
1581             do_repeat -= 1
1582         if not success:
1583             logging.error(
1584                 f"It is not possible to download the input data file from the "
1585                 f"job {job}, build {build[u'build']}, or it is damaged. "
1586                 f"Skipped."
1587             )
1588         if success:
1589             logging.info(f"  Processing data from build {build[u'build']}")
1590             data = self._parse_tests(job, build)
1591             if data is None:
1592                 logging.error(
1593                     f"Input data file from the job {job}, build "
1594                     f"{build[u'build']} is damaged. Skipped."
1595                 )
1596             else:
1597                 state = u"processed"
1598
1599             try:
1600                 remove(build[u"file-name"])
1601             except OSError as err:
1602                 logging.error(
1603                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1604                 )
1605
1606         # If the time-period is defined in the specification file, remove all
1607         # files which are outside the time period.
1608         is_last = False
1609         timeperiod = self._cfg.environment.get(u"time-period", None)
1610         if timeperiod and data:
1611             now = dt.utcnow()
1612             timeperiod = timedelta(int(timeperiod))
1613             metadata = data.get(u"metadata", None)
1614             if metadata:
1615                 generated = metadata.get(u"generated", None)
1616                 if generated:
1617                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1618                     if (now - generated) > timeperiod:
1619                         # Remove the data and the file:
1620                         state = u"removed"
1621                         data = None
1622                         is_last = True
1623                         logging.info(
1624                             f"  The build {job}/{build[u'build']} is "
1625                             f"outdated, will be removed."
1626                         )
1627         return {
1628             u"data": data,
1629             u"state": state,
1630             u"job": job,
1631             u"build": build,
1632             u"last": is_last
1633         }
1634
1635     def download_and_parse_data(self, repeat=1):
1636         """Download the input data files, parse input data from input files and
1637         store in pandas' Series.
1638
1639         :param repeat: Repeat the download specified number of times if not
1640             successful.
1641         :type repeat: int
1642         """
1643
1644         logging.info(u"Downloading and parsing input files ...")
1645
1646         for job, builds in self._cfg.input.items():
1647             for build in builds:
1648
1649                 result = self._download_and_parse_build(job, build, repeat)
1650                 if result[u"last"]:
1651                     break
1652                 build_nr = result[u"build"][u"build"]
1653
1654                 if result[u"data"]:
1655                     data = result[u"data"]
1656                     build_data = pd.Series({
1657                         u"metadata": pd.Series(
1658                             list(data[u"metadata"].values()),
1659                             index=list(data[u"metadata"].keys())
1660                         ),
1661                         u"suites": pd.Series(
1662                             list(data[u"suites"].values()),
1663                             index=list(data[u"suites"].keys())
1664                         ),
1665                         u"tests": pd.Series(
1666                             list(data[u"tests"].values()),
1667                             index=list(data[u"tests"].keys())
1668                         )
1669                     })
1670
1671                     if self._input_data.get(job, None) is None:
1672                         self._input_data[job] = pd.Series()
1673                     self._input_data[job][str(build_nr)] = build_data
1674                     self._cfg.set_input_file_name(
1675                         job, build_nr, result[u"build"][u"file-name"]
1676                     )
1677                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1678
1679                 mem_alloc = \
1680                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1681                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1682
1683         logging.info(u"Done.")
1684
1685         msg = f"Successful downloads from the sources:\n"
1686         for source in self._cfg.environment[u"data-sources"]:
1687             if source[u"successful-downloads"]:
1688                 msg += (
1689                     f"{source[u'url']}/{source[u'path']}/"
1690                     f"{source[u'file-name']}: "
1691                     f"{source[u'successful-downloads']}\n"
1692                 )
1693         logging.info(msg)
1694
1695     def process_local_file(self, local_file, job=u"local", build_nr=1,
1696                            replace=True):
1697         """Process local XML file given as a command-line parameter.
1698
1699         :param local_file: The file to process.
1700         :param job: Job name.
1701         :param build_nr: Build number.
1702         :param replace: If True, the information about jobs and builds is
1703             replaced by the new one, otherwise the new jobs and builds are
1704             added.
1705         :type local_file: str
1706         :type job: str
1707         :type build_nr: int
1708         :type replace: bool
1709         :raises: PresentationError if an error occurs.
1710         """
1711         if not isfile(local_file):
1712             raise PresentationError(f"The file {local_file} does not exist.")
1713
1714         try:
1715             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1716         except (IndexError, ValueError):
1717             pass
1718
1719         build = {
1720             u"build": build_nr,
1721             u"status": u"failed",
1722             u"file-name": local_file
1723         }
1724         if replace:
1725             self._cfg.input = dict()
1726         self._cfg.add_build(job, build)
1727
1728         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1729         data = self._parse_tests(job, build)
1730         if data is None:
1731             raise PresentationError(
1732                 f"Error occurred while parsing the file {local_file}"
1733             )
1734
1735         build_data = pd.Series({
1736             u"metadata": pd.Series(
1737                 list(data[u"metadata"].values()),
1738                 index=list(data[u"metadata"].keys())
1739             ),
1740             u"suites": pd.Series(
1741                 list(data[u"suites"].values()),
1742                 index=list(data[u"suites"].keys())
1743             ),
1744             u"tests": pd.Series(
1745                 list(data[u"tests"].values()),
1746                 index=list(data[u"tests"].keys())
1747             )
1748         })
1749
1750         if self._input_data.get(job, None) is None:
1751             self._input_data[job] = pd.Series()
1752         self._input_data[job][str(build_nr)] = build_data
1753
1754         self._cfg.set_input_state(job, build_nr, u"processed")
1755
1756     def process_local_directory(self, local_dir, replace=True):
1757         """Process local directory with XML file(s). The directory is processed
1758         as a 'job' and the XML files in it as builds.
1759         If the given directory contains only sub-directories, these
1760         sub-directories processed as jobs and corresponding XML files as builds
1761         of their job.
1762
1763         :param local_dir: Local directory to process.
1764         :param replace: If True, the information about jobs and builds is
1765             replaced by the new one, otherwise the new jobs and builds are
1766             added.
1767         :type local_dir: str
1768         :type replace: bool
1769         """
1770         if not isdir(local_dir):
1771             raise PresentationError(
1772                 f"The directory {local_dir} does not exist."
1773             )
1774
1775         # Check if the given directory includes only files, or only directories
1776         _, dirnames, filenames = next(walk(local_dir))
1777
1778         if filenames and not dirnames:
1779             filenames.sort()
1780             # local_builds:
1781             # key: dir (job) name, value: list of file names (builds)
1782             local_builds = {
1783                 local_dir: [join(local_dir, name) for name in filenames]
1784             }
1785
1786         elif dirnames and not filenames:
1787             dirnames.sort()
1788             # local_builds:
1789             # key: dir (job) name, value: list of file names (builds)
1790             local_builds = dict()
1791             for dirname in dirnames:
1792                 builds = [
1793                     join(local_dir, dirname, name)
1794                     for name in listdir(join(local_dir, dirname))
1795                     if isfile(join(local_dir, dirname, name))
1796                 ]
1797                 if builds:
1798                     local_builds[dirname] = sorted(builds)
1799
1800         elif not filenames and not dirnames:
1801             raise PresentationError(f"The directory {local_dir} is empty.")
1802         else:
1803             raise PresentationError(
1804                 f"The directory {local_dir} can include only files or only "
1805                 f"directories, not both.\nThe directory {local_dir} includes "
1806                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1807             )
1808
1809         if replace:
1810             self._cfg.input = dict()
1811
1812         for job, files in local_builds.items():
1813             for idx, local_file in enumerate(files):
1814                 self.process_local_file(local_file, job, idx + 1, replace=False)
1815
1816     @staticmethod
1817     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1818         """Return the index of character in the string which is the end of tag.
1819
1820         :param tag_filter: The string where the end of tag is being searched.
1821         :param start: The index where the searching is stated.
1822         :param closer: The character which is the tag closer.
1823         :type tag_filter: str
1824         :type start: int
1825         :type closer: str
1826         :returns: The index of the tag closer.
1827         :rtype: int
1828         """
1829         try:
1830             idx_opener = tag_filter.index(closer, start)
1831             return tag_filter.index(closer, idx_opener + 1)
1832         except ValueError:
1833             return None
1834
1835     @staticmethod
1836     def _condition(tag_filter):
1837         """Create a conditional statement from the given tag filter.
1838
1839         :param tag_filter: Filter based on tags from the element specification.
1840         :type tag_filter: str
1841         :returns: Conditional statement which can be evaluated.
1842         :rtype: str
1843         """
1844         index = 0
1845         while True:
1846             index = InputData._end_of_tag(tag_filter, index)
1847             if index is None:
1848                 return tag_filter
1849             index += 1
1850             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1851
1852     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1853                     continue_on_error=False):
1854         """Filter required data from the given jobs and builds.
1855
1856         The output data structure is:
1857         - job 1
1858           - build 1
1859             - test (or suite) 1 ID:
1860               - param 1
1861               - param 2
1862               ...
1863               - param n
1864             ...
1865             - test (or suite) n ID:
1866             ...
1867           ...
1868           - build n
1869         ...
1870         - job n
1871
1872         :param element: Element which will use the filtered data.
1873         :param params: Parameters which will be included in the output. If None,
1874             all parameters are included.
1875         :param data: If not None, this data is used instead of data specified
1876             in the element.
1877         :param data_set: The set of data to be filtered: tests, suites,
1878             metadata.
1879         :param continue_on_error: Continue if there is error while reading the
1880             data. The Item will be empty then
1881         :type element: pandas.Series
1882         :type params: list
1883         :type data: dict
1884         :type data_set: str
1885         :type continue_on_error: bool
1886         :returns: Filtered data.
1887         :rtype pandas.Series
1888         """
1889
1890         try:
1891             if data_set == "suites":
1892                 cond = u"True"
1893             elif element[u"filter"] in (u"all", u"template"):
1894                 cond = u"True"
1895             else:
1896                 cond = InputData._condition(element[u"filter"])
1897             logging.debug(f"   Filter: {cond}")
1898         except KeyError:
1899             logging.error(u"  No filter defined.")
1900             return None
1901
1902         if params is None:
1903             params = element.get(u"parameters", None)
1904             if params:
1905                 params.extend((u"type", u"status"))
1906
1907         data_to_filter = data if data else element[u"data"]
1908         data = pd.Series()
1909         try:
1910             for job, builds in data_to_filter.items():
1911                 data[job] = pd.Series()
1912                 for build in builds:
1913                     data[job][str(build)] = pd.Series()
1914                     try:
1915                         data_dict = dict(
1916                             self.data[job][str(build)][data_set].items())
1917                     except KeyError:
1918                         if continue_on_error:
1919                             continue
1920                         return None
1921
1922                     for test_id, test_data in data_dict.items():
1923                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1924                             data[job][str(build)][test_id] = pd.Series()
1925                             if params is None:
1926                                 for param, val in test_data.items():
1927                                     data[job][str(build)][test_id][param] = val
1928                             else:
1929                                 for param in params:
1930                                     try:
1931                                         data[job][str(build)][test_id][param] =\
1932                                             test_data[param]
1933                                     except KeyError:
1934                                         data[job][str(build)][test_id][param] =\
1935                                             u"No Data"
1936             return data
1937
1938         except (KeyError, IndexError, ValueError) as err:
1939             logging.error(
1940                 f"Missing mandatory parameter in the element specification: "
1941                 f"{repr(err)}"
1942             )
1943             return None
1944         except AttributeError as err:
1945             logging.error(repr(err))
1946             return None
1947         except SyntaxError as err:
1948             logging.error(
1949                 f"The filter {cond} is not correct. Check if all tags are "
1950                 f"enclosed by apostrophes.\n{repr(err)}"
1951             )
1952             return None
1953
1954     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1955                              continue_on_error=False):
1956         """Filter required data from the given jobs and builds.
1957
1958         The output data structure is:
1959         - job 1
1960           - build 1
1961             - test (or suite) 1 ID:
1962               - param 1
1963               - param 2
1964               ...
1965               - param n
1966             ...
1967             - test (or suite) n ID:
1968             ...
1969           ...
1970           - build n
1971         ...
1972         - job n
1973
1974         :param element: Element which will use the filtered data.
1975         :param params: Parameters which will be included in the output. If None,
1976         all parameters are included.
1977         :param data_set: The set of data to be filtered: tests, suites,
1978         metadata.
1979         :param continue_on_error: Continue if there is error while reading the
1980         data. The Item will be empty then
1981         :type element: pandas.Series
1982         :type params: list
1983         :type data_set: str
1984         :type continue_on_error: bool
1985         :returns: Filtered data.
1986         :rtype pandas.Series
1987         """
1988
1989         include = element.get(u"include", None)
1990         if not include:
1991             logging.warning(u"No tests to include, skipping the element.")
1992             return None
1993
1994         if params is None:
1995             params = element.get(u"parameters", None)
1996             if params and u"type" not in params:
1997                 params.append(u"type")
1998
1999         cores = element.get(u"core", None)
2000         if cores:
2001             tests = list()
2002             for core in cores:
2003                 for test in include:
2004                     tests.append(test.format(core=core))
2005         else:
2006             tests = include
2007
2008         data = pd.Series()
2009         try:
2010             for job, builds in element[u"data"].items():
2011                 data[job] = pd.Series()
2012                 for build in builds:
2013                     data[job][str(build)] = pd.Series()
2014                     for test in tests:
2015                         try:
2016                             reg_ex = re.compile(str(test).lower())
2017                             for test_id in self.data[job][
2018                                     str(build)][data_set].keys():
2019                                 if re.match(reg_ex, str(test_id).lower()):
2020                                     test_data = self.data[job][
2021                                         str(build)][data_set][test_id]
2022                                     data[job][str(build)][test_id] = pd.Series()
2023                                     if params is None:
2024                                         for param, val in test_data.items():
2025                                             data[job][str(build)][test_id]\
2026                                                 [param] = val
2027                                     else:
2028                                         for param in params:
2029                                             try:
2030                                                 data[job][str(build)][
2031                                                     test_id][param] = \
2032                                                     test_data[param]
2033                                             except KeyError:
2034                                                 data[job][str(build)][
2035                                                     test_id][param] = u"No Data"
2036                         except KeyError as err:
2037                             if continue_on_error:
2038                                 logging.debug(repr(err))
2039                                 continue
2040                             logging.error(repr(err))
2041                             return None
2042             return data
2043
2044         except (KeyError, IndexError, ValueError) as err:
2045             logging.error(
2046                 f"Missing mandatory parameter in the element "
2047                 f"specification: {repr(err)}"
2048             )
2049             return None
2050         except AttributeError as err:
2051             logging.error(repr(err))
2052             return None
2053
2054     @staticmethod
2055     def merge_data(data):
2056         """Merge data from more jobs and builds to a simple data structure.
2057
2058         The output data structure is:
2059
2060         - test (suite) 1 ID:
2061           - param 1
2062           - param 2
2063           ...
2064           - param n
2065         ...
2066         - test (suite) n ID:
2067         ...
2068
2069         :param data: Data to merge.
2070         :type data: pandas.Series
2071         :returns: Merged data.
2072         :rtype: pandas.Series
2073         """
2074
2075         logging.info(u"    Merging data ...")
2076
2077         merged_data = pd.Series()
2078         for builds in data.values:
2079             for item in builds.values:
2080                 for item_id, item_data in item.items():
2081                     merged_data[item_id] = item_data
2082         return merged_data
2083
2084     def print_all_oper_data(self):
2085         """Print all operational data to console.
2086         """
2087
2088         for job in self._input_data.values:
2089             for build in job.values:
2090                 for test_id, test_data in build[u"tests"].items():
2091                     print(f"{test_id}")
2092                     if test_data.get(u"show-run", None) is None:
2093                         continue
2094                     for dut_name, data in test_data[u"show-run"].items():
2095                         if data.get(u"runtime", None) is None:
2096                             continue
2097                         runtime = loads(data[u"runtime"])
2098                         try:
2099                             threads_nr = len(runtime[0][u"clocks"])
2100                         except (IndexError, KeyError):
2101                             continue
2102                         threads = OrderedDict(
2103                             {idx: list() for idx in range(threads_nr)})
2104                         for item in runtime:
2105                             for idx in range(threads_nr):
2106                                 if item[u"vectors"][idx] > 0:
2107                                     clocks = item[u"clocks"][idx] / \
2108                                              item[u"vectors"][idx]
2109                                 elif item[u"calls"][idx] > 0:
2110                                     clocks = item[u"clocks"][idx] / \
2111                                              item[u"calls"][idx]
2112                                 elif item[u"suspends"][idx] > 0:
2113                                     clocks = item[u"clocks"][idx] / \
2114                                              item[u"suspends"][idx]
2115                                 else:
2116                                     clocks = 0.0
2117
2118                                 if item[u"calls"][idx] > 0:
2119                                     vectors_call = item[u"vectors"][idx] / \
2120                                                    item[u"calls"][idx]
2121                                 else:
2122                                     vectors_call = 0.0
2123
2124                                 if int(item[u"calls"][idx]) + int(
2125                                         item[u"vectors"][idx]) + \
2126                                         int(item[u"suspends"][idx]):
2127                                     threads[idx].append([
2128                                         item[u"name"],
2129                                         item[u"calls"][idx],
2130                                         item[u"vectors"][idx],
2131                                         item[u"suspends"][idx],
2132                                         clocks,
2133                                         vectors_call
2134                                     ])
2135
2136                         print(f"Host IP: {data.get(u'host', '')}, "
2137                               f"Socket: {data.get(u'socket', '')}")
2138                         for thread_nr, thread in threads.items():
2139                             txt_table = prettytable.PrettyTable(
2140                                 (
2141                                     u"Name",
2142                                     u"Nr of Vectors",
2143                                     u"Nr of Packets",
2144                                     u"Suspends",
2145                                     u"Cycles per Packet",
2146                                     u"Average Vector Size"
2147                                 )
2148                             )
2149                             avg = 0.0
2150                             for row in thread:
2151                                 txt_table.add_row(row)
2152                                 avg += row[-1]
2153                             if len(thread) == 0:
2154                                 avg = u""
2155                             else:
2156                                 avg = f", Average Vector Size per Node: " \
2157                                       f"{(avg / len(thread)):.2f}"
2158                             th_name = u"main" if thread_nr == 0 \
2159                                 else f"worker_{thread_nr}"
2160                             print(f"{dut_name}, {th_name}{avg}")
2161                             txt_table.float_format = u".2"
2162                             txt_table.align = u"r"
2163                             txt_table.align[u"Name"] = u"l"
2164                             print(f"{txt_table.get_string()}\n")