# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """
21
import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove, walk, listdir
from os.path import isfile, isdir, join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from json.decoder import JSONDecodeError

import hdrh.histogram
import hdrh.codec
import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file
from pal_errors import PresentationError


# Separator used in file names
SEPARATOR = u"__"
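
# For illustration (hypothetical name, not taken from this file): a file for
# job "csit-vpp-perf-mrr" build "42" could be stored as
# f"csit-vpp-perf-mrr{SEPARATOR}42.xml", so the separator keeps the job and
# build parts of a flat file name unambiguous.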


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a JSON structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    }
                }
            },

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            },

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            },

            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """

    REGEX_PLR_RATE = re.compile(
        r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
        r'PLRsearch upper bound::?\s(\d+.\d+)'
    )
    REGEX_NDRPDR_RATE = re.compile(
        r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:\s(\d+.\d+).*\n'
        r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:\s(\d+.\d+)'
    )
    REGEX_NDRPDR_GBPS = re.compile(
        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:.*,\s(\d+.\d+)'
    )
    REGEX_PERF_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    )
    REGEX_CPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
    )
    REGEX_PPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
    )
    REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')

    REGEX_VSAP_MSG_INFO = re.compile(
        r'Transfer Rate: (\d*.\d*).*\n'
        r'Latency: (\d*.\d*).*\n'
        r'Completed requests: (\d*).*\n'
        r'Failed requests: (\d*).*\n'
        r'Total data transferred: (\d*).*\n'
        r'Connection [cr]ps rate:\s*(\d*.\d*)'
    )

    # Needed for CPS and PPS tests
    REGEX_NDRPDR_LAT_BASE = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
    )
    REGEX_NDRPDR_LAT = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
    )

    REGEX_VERSION_VPP = re.compile(
        r"(return STDOUT Version:\s*|"
        r"VPP Version:\s*|VPP version:\s*)(.*)"
    )
    REGEX_VERSION_DPDK = re.compile(
        r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
    )
    REGEX_TCP = re.compile(
        r'Total\s(rps|cps|throughput):\s(\d*).*$'
    )
    REGEX_MRR = re.compile(
        r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
        r'tx\s(\d*),\srx\s(\d*)'
    )
    REGEX_BMRR = re.compile(
        r'.*trial results.*: \[(.*)\]'
    )
    REGEX_RECONF_LOSS = re.compile(
        r'Packets lost due to reconfig: (\d*)'
    )
    REGEX_RECONF_TIME = re.compile(
        r'Implied time lost: (\d*.[\de-]*)'
    )
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')

    REGEX_SH_RUN_HOST = re.compile(
        r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
    )
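
    # Illustrative fragment (values invented) of a test message that
    # REGEX_NDRPDR_RATE is written against:
    #
    #   NDR_LOWER: 12345678.0 pps ...
    #   <one arbitrary line>
    #   NDR_UPPER: 12345679.0 pps ...
    #   PDR_LOWER: 23456788.0 pps ...
    #   <one arbitrary line>
    #   PDR_UPPER: 23456789.0 pps ...
    #
    # i.e. four named bounds, each followed by a float, with one extra line
    # between the LOWER and UPPER entries of each search result.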

    def __init__(self, metadata, mapping, ignore, process_oper):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :param process_oper: If True, operational data (show run, telemetry) is
            processed.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        :type process_oper: bool
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        self._process_oper = process_oper

        # Number of PAPI History messages found:
        # 0 - no message
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._conf_history_lookup_nr = 0

        self._sh_run_counter = 0
        self._telemetry_kw_counter = 0
        self._telemetry_msg_counter = 0

        # Test ID of currently processed test - the lowercase full path to
        # the test
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed,
            u"test-telemetry": self._get_telemetry
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_data_from_mrr_test_msg(self, msg):
        """Get info from message of MRR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
        if not groups or groups.lastindex != 1:
            return u"Test Failed."

        try:
            data = groups.group(1).split(u", ")
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        out_str = u"["
        try:
            for item in data:
                out_str += f"{(float(item) / 1e6):.2f}, "
            return out_str[:-2] + u"]"
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."
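
    # For illustration (hypothetical message): given "...trial results...:
    # [10000000.0, 10200000.0]", REGEX_MRR_MSG_INFO captures the bracketed
    # list and the method above renders it scaled to millions of packets per
    # second, i.e. "[10.00, 10.20]".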

    def _get_data_from_cps_test_msg(self, msg):
        """Get info from message of NDRPDR CPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 2:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
                f"2. {(float(groups.group(2)) / 1e6):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_pps_test_msg(self, msg):
        """Get info from message of NDRPDR PPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 4:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
                f"{float(groups.group(2)):5.2f}\n"
                f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
                f"{float(groups.group(4)):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_perf_test_msg(self, msg):
        """Get info from message of NDRPDR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
        if not groups or groups.lastindex != 10:
            return u"Test Failed."

        try:
            data = {
                u"ndr_low": float(groups.group(1)),
                u"ndr_low_b": float(groups.group(2)),
                u"pdr_low": float(groups.group(3)),
                u"pdr_low_b": float(groups.group(4)),
                u"pdr_lat_90_1": groups.group(5),
                u"pdr_lat_90_2": groups.group(6),
                u"pdr_lat_50_1": groups.group(7),
                u"pdr_lat_50_2": groups.group(8),
                u"pdr_lat_10_1": groups.group(9),
                u"pdr_lat_10_2": groups.group(10),
            }
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        def _process_lat(in_str_1, in_str_2):
            """Extract P50, P90 and P99 latencies or min, avg, max values from
            latency string.

            :param in_str_1: Latency string for one direction produced by robot
                framework.
            :param in_str_2: Latency string for second direction produced by
                robot framework.
            :type in_str_1: str
            :type in_str_2: str
            :returns: Processed latency values as a tuple, or None if a
                problem occurs.
            :rtype: tuple
            """
            in_list_1 = in_str_1.split('/', 3)
            in_list_2 = in_str_2.split('/', 3)

            if len(in_list_1) != 4 or len(in_list_2) != 4:
                return None

            # Pad the base64-encoded hdrh string to a multiple of four
            # characters before decoding.
            in_list_1[3] += u"=" * ((4 - len(in_list_1[3]) % 4) % 4)
            try:
                hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
            except hdrh.codec.HdrLengthException:
                hdr_lat_1 = None

            in_list_2[3] += u"=" * ((4 - len(in_list_2[3]) % 4) % 4)
            try:
                hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
            except hdrh.codec.HdrLengthException:
                hdr_lat_2 = None

            if hdr_lat_1 and hdr_lat_2:
                hdr_lat = (
                    hdr_lat_1.get_value_at_percentile(50.0),
                    hdr_lat_1.get_value_at_percentile(90.0),
                    hdr_lat_1.get_value_at_percentile(99.0),
                    hdr_lat_2.get_value_at_percentile(50.0),
                    hdr_lat_2.get_value_at_percentile(90.0),
                    hdr_lat_2.get_value_at_percentile(99.0)
                )
                if all(hdr_lat):
                    return hdr_lat

            hdr_lat = (
                int(in_list_1[0]), int(in_list_1[1]), int(in_list_1[2]),
                int(in_list_2[0]), int(in_list_2[1]), int(in_list_2[2])
            )
            for item in hdr_lat:
                if item in (-1, 4294967295, 0):
                    return None
            return hdr_lat

        try:
            out_msg = (
                f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
                f"{data[u'ndr_low_b']:5.2f}"
                f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
                f"{data[u'pdr_low_b']:5.2f}"
            )
            latency = (
                _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
                _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
                _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
            )
            if all(latency):
                max_len = len(str(max((max(item) for item in latency))))
                max_len = 4 if max_len < 4 else max_len

                for idx, lat in enumerate(latency):
                    if not idx:
                        out_msg += u"\n"
                    out_msg += (
                        f"\n{idx + 3}. "
                        f"{lat[0]:{max_len}d} "
                        f"{lat[1]:{max_len}d} "
                        f"{lat[2]:{max_len}d}      "
                        f"{lat[3]:{max_len}d} "
                        f"{lat[4]:{max_len}d} "
                        f"{lat[5]:{max_len}d} "
                    )

            return out_msg

        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."
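
    # A minimal sketch of the hdrh handling in _process_lat() above (the
    # payload here is assumed, not a real histogram): the fourth
    # "/"-separated field of a latency string is a base64-encoded HDR
    # histogram which, once padded to a multiple of four characters, can be
    # decoded and queried for percentiles:
    #
    #     hist = hdrh.histogram.HdrHistogram.decode(hdrh_str)  # may raise
    #     p50 = hist.get_value_at_percentile(50.0)
    #
    # When decoding fails, or any percentile comes back zero, the code falls
    # back to the plain min/avg/max integers from the first three fields.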

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"return STDOUT Version:") or \
                msg.message.count(u"VPP Version:") or \
                msg.message.count(u"VPP version:"):
            self._version = str(
                re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
            )
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(
                r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
                u"",
                msg.message,
                count=1
            ).replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if not msg.message.count(u"stats runtime"):
            return

        # Temporary solution
        if self._sh_run_counter > 1:
            return

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = dict()

        groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""

        dut = u"dut{nr}".format(
            nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)

        self._data[u'tests'][self._test_id][u'show-run'][dut] = \
            copy.copy(
                {
                    u"host": host,
                    u"socket": sock,
                    u"runtime": str(msg.message).replace(u' ', u'').
                                replace(u'\n', u'').replace(u"'", u'"').
                                replace(u'b"', u'"').replace(u'u"', u'"').
                                split(u":", 1)[1]
                }
            )
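
    # Illustrative match (hypothetical message text): a message containing
    # "(10.30.51.50 - /run/vpp/api.sock)" yields host "10.30.51.50" and
    # socket "/run/vpp/api.sock" via REGEX_TC_PAPI_CLI; each matched DUT is
    # stored under the next free "dut<n>" key of "show-run".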

    def _get_telemetry(self, msg):
        """Called when extraction of VPP telemetry data is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._telemetry_kw_counter > 1:
            return
        if not msg.message.count(u"# TYPE vpp_runtime_calls"):
            return

        if u"telemetry-show-run" not in \
                self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()

        self._telemetry_msg_counter += 1
        groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""
        runtime = {
            u"source_type": u"node",
            u"source_id": host,
            u"msg_type": u"metric",
            u"log_level": u"INFO",
            u"timestamp": msg.timestamp,
            u"msg": u"show_runtime",
            u"host": host,
            u"socket": sock,
            u"data": list()
        }
        for line in msg.message.splitlines():
            if not line.startswith(u"vpp_runtime_"):
                continue
            try:
                params, value, timestamp = line.rsplit(u" ", maxsplit=2)
                cut = params.index(u"{")
                name = params[:cut].split(u"_", maxsplit=2)[-1]
                labels = eval(
                    u"dict" + params[cut:].replace('{', '(').replace('}', ')')
                )
                labels[u"graph_node"] = labels.pop(u"name")
                runtime[u"data"].append(
                    {
                        u"name": name,
                        u"value": value,
                        u"timestamp": timestamp,
                        u"labels": labels
                    }
                )
            except (TypeError, ValueError, IndexError):
                continue
        self._data[u'tests'][self._test_id][u'telemetry-show-run']\
            [f"dut{self._telemetry_msg_counter}"] = copy.copy(
                {
                    u"host": host,
                    u"socket": sock,
                    u"runtime": runtime
                }
            )
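
    # Sketch of the telemetry line format consumed above (values invented):
    #
    #   vpp_runtime_calls{name="ip4-lookup",state="active",thread_id="0"} 557 1662024000
    #
    # rsplit() separates the value (557) and the timestamp, the "{...}" part
    # is evaluated into a labels dict, and the "name" label is re-keyed to
    # "graph_node" before the sample is appended to runtime["data"].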

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_throughput_gbps(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
        test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        gbps = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_GBPS, msg)

        if groups is not None:
            try:
                gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
                gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
                gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
                gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return gbps, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0,
            u"avg": -1.0,
            u"max": -1.0,
            u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"LAT0": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR10": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR50": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR90": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
        }

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)
        if groups is None:
            groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
        if groups is None:
            return latency, u"FAIL"

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with corresponding keys; the values are floats,
                except hdrh which is a string.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
            """
            in_list = in_str.split('/', 3)

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

        try:
            latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
            latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
            latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
            latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
            if groups.lastindex == 4:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        try:
            latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
            latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
            latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
            latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
            latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
            latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
            latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
            latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
            if groups.lastindex == 12:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        return latency, u"FAIL"
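
    # For illustration (made-up numbers): process_latency("1/2/9/<base64>")
    # returns {"min": 1.0, "avg": 2.0, "max": 9.0, "hdrh": "<base64>"}, while
    # a plain "1/2/9" string yields the same floats with an empty "hdrh".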

    @staticmethod
    def _get_hoststack_data(msg, tags):
        """Get data from the hoststack test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        result = dict()
        status = u"FAIL"

        msg = msg.replace(u"'", u'"').replace(u" ", u"")
        if u"LDPRELOAD" in tags:
            try:
                result = loads(msg)
                status = u"PASS"
            except JSONDecodeError:
                pass
        elif u"VPPECHO" in tags:
            try:
                msg_lst = msg.replace(u"}{", u"} {").split(u" ")
                result = dict(
                    client=loads(msg_lst[0]),
                    server=loads(msg_lst[1])
                )
                status = u"PASS"
            except (JSONDecodeError, IndexError):
                pass

        return result, status
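
    # Message shapes handled above (illustrative): an LDPRELOAD test emits a
    # single JSON object, while a VPPECHO test emits two concatenated JSON
    # objects ("{...}{...}") which are split and stored under the "client"
    # and "server" keys respectively.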

    def _get_vsap_data(self, msg, tags):
        """Get data from the vsap test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        result = dict()
        status = u"FAIL"

        groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
        if groups is not None:
            try:
                result[u"transfer-rate"] = float(groups.group(1)) * 1e3
                result[u"latency"] = float(groups.group(2))
                result[u"completed-requests"] = int(groups.group(3))
                result[u"failed-requests"] = int(groups.group(4))
                result[u"bytes-transferred"] = int(groups.group(5))
                if u"TCP_CPS" in tags:
                    result[u"cps"] = float(groups.group(6))
                elif u"TCP_RPS" in tags:
                    result[u"rps"] = float(groups.group(6))
                else:
                    return result, status
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return result, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        self._data[u"suites"][suite.longname.lower().
                              replace(u'"', u"'").
                              replace(u" ", u"_")] = {
                                  u"name": suite.name.lower(),
                                  u"doc": suite.doc,
                                  u"parent": parent_name,
                                  u"level": len(suite.longname.split(u"."))
                              }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        self._sh_run_counter = 0
        self._telemetry_kw_counter = 0
        self._telemetry_msg_counter = 0

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            logging.debug(
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
                f"{name}"
            )
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, u"", name)

        test_result[u"parent"] = test.parent.name.lower()
        test_result[u"tags"] = tags
        test_result[u"doc"] = test.doc
        test_result[u"type"] = u""
        test_result[u"status"] = test.status
        test_result[u"starttime"] = test.starttime
        test_result[u"endtime"] = test.endtime

        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
                    test_result[u"msg"] = self._get_data_from_pps_test_msg(
                        test.message)
                elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                    test_result[u"msg"] = self._get_data_from_cps_test_msg(
                        test.message)
                else:
                    test_result[u"msg"] = self._get_data_from_perf_test_msg(
                        test.message)
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                test_result[u"msg"] = self._get_data_from_mrr_test_msg(
                    test.message)
            else:
                test_result[u"msg"] = test.message
        else:
            test_result[u"msg"] = test.message

        if u"PERFTEST" in tags and u"TREX" not in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            tag_count = 0
            tag_tc = str()
            for tag in test_result[u"tags"]:
                groups = re.search(self.REGEX_TC_TAG, tag)
                if groups:
                    tag_count += 1
                    tag_tc = tag

            if tag_count == 1:
                self._test_id = re.sub(
                    self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                    self._test_id, count=1
                )
                test_result[u"name"] = re.sub(
                    self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                    test_result[u"name"], count=1
                )
            else:
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                logging.debug(
                    f"The test {self._test_id} has none or more than one "
                    f"multi-threading tag.\n"
                    f"Tags: {test_result[u'tags']}"
                )
                return

        if u"DEVICETEST" in tags:
            test_result[u"type"] = u"DEVICETEST"
        elif u"NDRPDR" in tags:
            if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                test_result[u"type"] = u"CPS"
            else:
                test_result[u"type"] = u"NDRPDR"
            if test.status == u"PASS":
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"gbps"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput_gbps(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
        elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
            if u"MRR" in tags:
                test_result[u"type"] = u"MRR"
            else:
                test_result[u"type"] = u"BMRR"
            if test.status == u"PASS":
                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [
                        float(item.strip().replace(u"'", u""))
                        for item in items_str.split(",")
                    ]
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"samples"] = items_float
                    test_result[u"result"][u"receive-rate"] = stats.avg
                    test_result[u"result"][u"receive-stdev"] = stats.stdev
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
        elif u"SOAK" in tags:
            test_result[u"type"] = u"SOAK"
            if test.status == u"PASS":
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
        elif u"LDP_NGINX" in tags:
            test_result[u"type"] = u"LDP_NGINX"
            test_result[u"result"], test_result[u"status"] = \
                self._get_vsap_data(test.message, tags)
        elif u"HOSTSTACK" in tags:
            test_result[u"type"] = u"HOSTSTACK"
            if test.status == u"PASS":
                test_result[u"result"], test_result[u"status"] = \
                    self._get_hoststack_data(test.message, tags)
        # elif u"TCP" in tags:  # This might be not used
        #     test_result[u"type"] = u"TCP"
        #     if test.status == u"PASS":
        #         groups = re.search(self.REGEX_TCP, test.message)
        #         test_result[u"result"] = int(groups.group(2))
        elif u"RECONF" in tags:
            test_result[u"type"] = u"RECONF"
            if test.status == u"PASS":
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
        else:
            test_result[u"status"] = u"FAIL"

        self._data[u"tests"][self._test_id] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == u"setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == u"teardown":
                self.visit_teardown_kw(keyword)
            else:
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if not self._process_oper:
            return

        if test_kw.name.count(u"Run Telemetry On All Duts"):
            self._msg_type = u"test-telemetry"
            self._telemetry_kw_counter += 1
        elif test_kw.name.count(u"Show Runtime On All Duts"):
            self._msg_type = u"test-show-runtime"
            self._sh_run_counter += 1
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
                not self._version:
            self._msg_type = u"dpdk-version"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        if teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Get required information from messages:
        - VPP version.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

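# A minimal usage sketch (illustrative only; InputData._parse_tests() below
# performs the same steps on downloaded files):
#
#     from robot.api import ExecutionResult
#     result = ExecutionResult("output.xml")  # hypothetical local file
#     checker = ExecutionChecker(
#         metadata={u"job": u"job-name", u"build": u"1"},
#         mapping=dict(), ignore=list(), process_oper=False
#     )
#     result.visit(checker)
#     parsed = checker.data  # dict with u"metadata", u"suites", u"tests"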

class InputData:
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' Series.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """
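
    # Access sketch (hypothetical job/build identifiers): once builds are
    # parsed, the store behaves like a nested mapping, e.g.
    # self.tests(u"csit-vpp-perf-report", u"42") returns the u"tests" dict
    # produced by ExecutionChecker for that build.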
1442
1443     def __init__(self, spec, for_output):
1444         """Initialization.
1445
1446         :param spec: Specification.
1447         :param for_output: Output to be generated from downloaded data.
1448         :type spec: Specification
1449         :type for_output: str
1450         """
1451
1452         # Specification:
1453         self._cfg = spec
1454
1455         self._for_output = for_output
1456
1457         # Data store:
1458         self._input_data = pd.Series()
1459
1460     @property
1461     def data(self):
1462         """Getter - Input data.
1463
1464         :returns: Input data
1465         :rtype: pandas.Series
1466         """
1467         return self._input_data
1468
1469     def metadata(self, job, build):
1470         """Getter - metadata
1471
1472         :param job: Job which metadata we want.
1473         :param build: Build which metadata we want.
1474         :type job: str
1475         :type build: str
1476         :returns: Metadata
1477         :rtype: pandas.Series
1478         """
1479         return self.data[job][build][u"metadata"]
1480
1481     def suites(self, job, build):
1482         """Getter - suites
1483
1484         :param job: Job whose suites we want.
1485         :param build: Build whose suites we want.
1486         :type job: str
1487         :type build: str
1488         :returns: Suites.
1489         :rtype: pandas.Series
1490         """
1491         return self.data[job][str(build)][u"suites"]
1492
1493     def tests(self, job, build):
1494         """Getter - tests
1495
1496         :param job: Job whose tests we want.
1497         :param build: Build whose tests we want.
1498         :type job: str
1499         :type build: str
1500         :returns: Tests.
1501         :rtype: pandas.Series
1502         """
1503         return self.data[job][build][u"tests"]
1504
1505     def _parse_tests(self, job, build):
1506         """Process data from robot output.xml file and return JSON structured
1507         data.
1508
1509         :param job: Name of the job whose build output data is processed.
1510         :param build: The build whose output data is processed.
1511         :type job: str
1512         :type build: dict
1513         :returns: JSON data structure.
1514         :rtype: dict
1515         """
1516
1517         metadata = {
1518             u"job": job,
1519             u"build": build
1520         }
1521
1522         with open(build[u"file-name"], u'r') as data_file:
1523             try:
1524                 result = ExecutionResult(data_file)
1525             except errors.DataError as err:
1526                 logging.error(
1527                     f"Error occurred while parsing output.xml: {repr(err)}"
1528                 )
1529                 return None
1530
1531         process_oper = False
1532         if u"-vpp-perf-report-coverage-" in job:
1533             process_oper = True
1534         # elif u"-vpp-perf-report-iterative-" in job:
1535         #     # Exceptions for TBs where we do not have coverage data:
1536         #     for item in (u"-2n-icx", ):
1537         #         if item in job:
1538         #             process_oper = True
1539         #             break
1540         checker = ExecutionChecker(
1541             metadata, self._cfg.mapping, self._cfg.ignore, process_oper
1542         )
1543         result.visit(checker)
1544
1545         checker.data[u"metadata"][u"tests_total"] = \
1546             result.statistics.total.all.total
1547         checker.data[u"metadata"][u"tests_passed"] = \
1548             result.statistics.total.all.passed
1549         checker.data[u"metadata"][u"tests_failed"] = \
1550             result.statistics.total.all.failed
1551         checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
1552         checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]
1553
1554         return checker.data
1555
1556     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1557         """Download and parse the input data file.
1558
1559         :param job: Name of the Jenkins job which generated the processed input
1560             file.
1561         :param build: Information about the Jenkins build which generated the
1562             processed input file.
1563         :param repeat: Repeat the download specified number of times if not
1564             successful.
1565         :param pid: PID of the process executing this method.
1566         :type job: str
1567         :type build: dict
1568         :type repeat: int
1569         :type pid: int
1570         """
1571
1572         logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1573
1574         state = u"failed"
1575         success = False
1576         data = None
1577         do_repeat = repeat
1578         while do_repeat:
1579             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1580             if success:
1581                 break
1582             do_repeat -= 1
1583         if not success:
1584             logging.error(
1585                 f"It is not possible to download the input data file from the "
1586                 f"job {job}, build {build[u'build']}, or it is damaged. "
1587                 f"Skipped."
1588             )
1589         if success:
1590             logging.info(f"  Processing data from build {build[u'build']}")
1591             data = self._parse_tests(job, build)
1592             if data is None:
1593                 logging.error(
1594                     f"Input data file from the job {job}, build "
1595                     f"{build[u'build']} is damaged. Skipped."
1596                 )
1597             else:
1598                 state = u"processed"
1599
1600             try:
1601                 remove(build[u"file-name"])
1602             except OSError as err:
1603                 logging.error(
1604                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1605                 )
1606
1607         # If the time-period is defined in the specification file, remove all
1608         # files which are outside the time period.
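        # Worked example (illustrative): with u"time-period": 90 in the
        # environment spec, a build whose u"generated" stamp is
        # u"20220101 12:00" is removed once dt.utcnow() is more than
        # timedelta(90) days past that timestamp.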
1609         is_last = False
1610         timeperiod = self._cfg.environment.get(u"time-period", None)
1611         if timeperiod and data:
1612             now = dt.utcnow()
1613             timeperiod = timedelta(int(timeperiod))
1614             metadata = data.get(u"metadata", None)
1615             if metadata:
1616                 generated = metadata.get(u"generated", None)
1617                 if generated:
1618                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1619                     if (now - generated) > timeperiod:
1620                         # Remove the data and the file:
1621                         state = u"removed"
1622                         data = None
1623                         is_last = True
1624                         logging.info(
1625                             f"  The build {job}/{build[u'build']} is "
1626                             f"outdated, will be removed."
1627                         )
1628         return {
1629             u"data": data,
1630             u"state": state,
1631             u"job": job,
1632             u"build": build,
1633             u"last": is_last
1634         }
1635
1636     def download_and_parse_data(self, repeat=1):
1637         """Download the input data files, parse input data from input files and
1638         store in pandas' Series.
1639
1640         :param repeat: Repeat the download specified number of times if not
1641             successful.
1642         :type repeat: int
1643         """
1644
1645         logging.info(u"Downloading and parsing input files ...")
1646
1647         for job, builds in self._cfg.input.items():
1648             for build in builds:
1649
1650                 result = self._download_and_parse_build(job, build, repeat)
1651                 if result[u"last"]:
1652                     break
1653                 build_nr = result[u"build"][u"build"]
1654
1655                 if result[u"data"]:
1656                     data = result[u"data"]
1657                     build_data = pd.Series({
1658                         u"metadata": pd.Series(
1659                             list(data[u"metadata"].values()),
1660                             index=list(data[u"metadata"].keys())
1661                         ),
1662                         u"suites": pd.Series(
1663                             list(data[u"suites"].values()),
1664                             index=list(data[u"suites"].keys())
1665                         ),
1666                         u"tests": pd.Series(
1667                             list(data[u"tests"].values()),
1668                             index=list(data[u"tests"].keys())
1669                         )
1670                     })
1671
1672                     if self._input_data.get(job, None) is None:
1673                         self._input_data[job] = pd.Series()
1674                     self._input_data[job][str(build_nr)] = build_data
1675                     self._cfg.set_input_file_name(
1676                         job, build_nr, result[u"build"][u"file-name"]
1677                     )
1678                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1679
1680                 mem_alloc = \
1681                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1682                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1683
1684         logging.info(u"Done.")
1685
1686         msg = f"Successful downloads from the sources:\n"
1687         for source in self._cfg.environment[u"data-sources"]:
1688             if source[u"successful-downloads"]:
1689                 msg += (
1690                     f"{source[u'url']}/{source[u'path']}/"
1691                     f"{source[u'file-name']}: "
1692                     f"{source[u'successful-downloads']}\n"
1693                 )
1694         logging.info(msg)
1695
1696     def process_local_file(self, local_file, job=u"local", build_nr=1,
1697                            replace=True):
1698         """Process local XML file given as a command-line parameter.
1699
1700         :param local_file: The file to process.
1701         :param job: Job name.
1702         :param build_nr: Build number, used if none is parsed from the file name.
1703         :param replace: If True, the information about jobs and builds is
1704             replaced by the new one, otherwise the new jobs and builds are
1705             added.
1706         :type local_file: str
1707         :type job: str
1708         :type build_nr: int
1709         :type replace: bool
1710         :raises: PresentationError if an error occurs.
1711         """
1712         if not isfile(local_file):
1713             raise PresentationError(f"The file {local_file} does not exist.")
1714
1715         try:
1716             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1717         except (IndexError, ValueError):
1718             pass
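        # E.g. a file named u"/path/to/5.xml" yields build_nr == 5; a name
        # without a leading integer (u"output.xml") keeps the supplied
        # build_nr.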
1719
1720         build = {
1721             u"build": build_nr,
1722             u"status": u"failed",
1723             u"file-name": local_file
1724         }
1725         if replace:
1726             self._cfg.input = dict()
1727         self._cfg.add_build(job, build)
1728
1729         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1730         data = self._parse_tests(job, build)
1731         if data is None:
1732             raise PresentationError(
1733                 f"Error occurred while parsing the file {local_file}"
1734             )
1735
1736         build_data = pd.Series({
1737             u"metadata": pd.Series(
1738                 list(data[u"metadata"].values()),
1739                 index=list(data[u"metadata"].keys())
1740             ),
1741             u"suites": pd.Series(
1742                 list(data[u"suites"].values()),
1743                 index=list(data[u"suites"].keys())
1744             ),
1745             u"tests": pd.Series(
1746                 list(data[u"tests"].values()),
1747                 index=list(data[u"tests"].keys())
1748             )
1749         })
1750
1751         if self._input_data.get(job, None) is None:
1752             self._input_data[job] = pd.Series()
1753         self._input_data[job][str(build_nr)] = build_data
1754
1755         self._cfg.set_input_state(job, build_nr, u"processed")
1756
1757     def process_local_directory(self, local_dir, replace=True):
1758         """Process local directory with XML file(s). The directory is processed
1759         as a 'job' and the XML files in it as builds.
1760         If the given directory contains only sub-directories, these
1761         sub-directories are processed as jobs and the corresponding XML files
1762         as builds of their respective jobs.
1763
1764         :param local_dir: Local directory to process.
1765         :param replace: If True, the information about jobs and builds is
1766             replaced by the new one, otherwise the new jobs and builds are
1767             added.
1768         :type local_dir: str
1769         :type replace: bool
1770         """
1771         if not isdir(local_dir):
1772             raise PresentationError(
1773                 f"The directory {local_dir} does not exist."
1774             )
1775
1776         # Check if the given directory includes only files, or only directories
1777         _, dirnames, filenames = next(walk(local_dir))
1778
1779         if filenames and not dirnames:
1780             filenames.sort()
1781             # local_builds:
1782             # key: dir (job) name, value: list of file names (builds)
1783             local_builds = {
1784                 local_dir: [join(local_dir, name) for name in filenames]
1785             }
1786
1787         elif dirnames and not filenames:
1788             dirnames.sort()
1789             # local_builds:
1790             # key: dir (job) name, value: list of file names (builds)
1791             local_builds = dict()
1792             for dirname in dirnames:
1793                 builds = [
1794                     join(local_dir, dirname, name)
1795                     for name in listdir(join(local_dir, dirname))
1796                     if isfile(join(local_dir, dirname, name))
1797                 ]
1798                 if builds:
1799                     local_builds[dirname] = sorted(builds)
1800
1801         elif not filenames and not dirnames:
1802             raise PresentationError(f"The directory {local_dir} is empty.")
1803         else:
1804             raise PresentationError(
1805                 f"The directory {local_dir} can include only files or only "
1806                 f"directories, not both.\nThe directory {local_dir} includes "
1807                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1808             )
1809
1810         if replace:
1811             self._cfg.input = dict()
1812
1813         for job, files in local_builds.items():
1814             for idx, local_file in enumerate(files):
1815                 self.process_local_file(local_file, job, idx + 1, replace=False)
1816
1817     @staticmethod
1818     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1819         """Return the index of character in the string which is the end of tag.
1820
1821         :param tag_filter: The string where the end of tag is being searched.
1822         :param start: The index where the search starts.
1823         :param closer: The character which is the tag closer.
1824         :type tag_filter: str
1825         :type start: int
1826         :type closer: str
1827         :returns: The index of the tag closer.
1828         :rtype: int
1829         """
1830         try:
1831             idx_opener = tag_filter.index(closer, start)
1832             return tag_filter.index(closer, idx_opener + 1)
1833         except ValueError:
1834             return None
1835
1836     @staticmethod
1837     def _condition(tag_filter):
1838         """Create a conditional statement from the given tag filter.
1839
1840         :param tag_filter: Filter based on tags from the element specification.
1841         :type tag_filter: str
1842         :returns: Conditional statement which can be evaluated.
1843         :rtype: str
1844         """
1845         index = 0
1846         while True:
1847             index = InputData._end_of_tag(tag_filter, index)
1848             if index is None:
1849                 return tag_filter
1850             index += 1
1851             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1852
1853     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1854                     continue_on_error=False):
1855         """Filter required data from the given jobs and builds.
1856
1857         The output data structure is:
1858         - job 1
1859           - build 1
1860             - test (or suite) 1 ID:
1861               - param 1
1862               - param 2
1863               ...
1864               - param n
1865             ...
1866             - test (or suite) n ID:
1867             ...
1868           ...
1869           - build n
1870         ...
1871         - job n
1872
1873         :param element: Element which will use the filtered data.
1874         :param params: Parameters which will be included in the output. If None,
1875             all parameters are included.
1876         :param data: If not None, this data is used instead of data specified
1877             in the element.
1878         :param data_set: The set of data to be filtered: tests, suites,
1879             metadata.
1880         :param continue_on_error: Continue if there is an error while reading
1881             the data. The item will be empty then.
1882         :type element: pandas.Series
1883         :type params: list
1884         :type data: dict
1885         :type data_set: str
1886         :type continue_on_error: bool
1887         :returns: Filtered data.
1888         :rtype: pandas.Series
1889         """
1890
1891         try:
1892             if data_set == "suites":
1893                 cond = u"True"
1894             elif element[u"filter"] in (u"all", u"template"):
1895                 cond = u"True"
1896             else:
1897                 cond = InputData._condition(element[u"filter"])
1898             logging.debug(f"   Filter: {cond}")
1899         except KeyError:
1900             logging.error(u"  No filter defined.")
1901             return None
1902
1903         if params is None:
1904             params = element.get(u"parameters", None)
1905             if params:
1906                 params.extend((u"type", u"status"))
1907
1908         data_to_filter = data if data else element[u"data"]
1909         data = pd.Series()
1910         try:
1911             for job, builds in data_to_filter.items():
1912                 data[job] = pd.Series()
1913                 for build in builds:
1914                     data[job][str(build)] = pd.Series()
1915                     try:
1916                         data_dict = dict(
1917                             self.data[job][str(build)][data_set].items())
1918                     except KeyError:
1919                         if continue_on_error:
1920                             continue
1921                         return None
1922
1923                     for test_id, test_data in data_dict.items():
1924                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1925                             data[job][str(build)][test_id] = pd.Series()
1926                             if params is None:
1927                                 for param, val in test_data.items():
1928                                     data[job][str(build)][test_id][param] = val
1929                             else:
1930                                 for param in params:
1931                                     try:
1932                                         data[job][str(build)][test_id][param] =\
1933                                             test_data[param]
1934                                     except KeyError:
1935                                         data[job][str(build)][test_id][param] =\
1936                                             u"No Data"
1937             return data
1938
1939         except (KeyError, IndexError, ValueError) as err:
1940             logging.error(
1941                 f"Missing mandatory parameter in the element specification: "
1942                 f"{repr(err)}"
1943             )
1944             return None
1945         except AttributeError as err:
1946             logging.error(repr(err))
1947             return None
1948         except SyntaxError as err:
1949             logging.error(
1950                 f"The filter {cond} is not correct. Check if all tags are "
1951                 f"enclosed by apostrophes.\n{repr(err)}"
1952             )
1953             return None
1954
1955     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1956                              continue_on_error=False):
1957         """Filter required data from the given jobs and builds.
1958
1959         The output data structure is:
1960         - job 1
1961           - build 1
1962             - test (or suite) 1 ID:
1963               - param 1
1964               - param 2
1965               ...
1966               - param n
1967             ...
1968             - test (or suite) n ID:
1969             ...
1970           ...
1971           - build n
1972         ...
1973         - job n
1974
1975         :param element: Element which will use the filtered data.
1976         :param params: Parameters which will be included in the output. If None,
1977             all parameters are included.
1978         :param data_set: The set of data to be filtered: tests, suites,
1979             metadata.
1980         :param continue_on_error: Continue if there is an error while reading
1981             the data. The item will be empty then.
1982         :type element: pandas.Series
1983         :type params: list
1984         :type data_set: str
1985         :type continue_on_error: bool
1986         :returns: Filtered data.
1987         :rtype: pandas.Series
1988         """
1989
1990         include = element.get(u"include", None)
1991         if not include:
1992             logging.warning(u"No tests to include, skipping the element.")
1993             return None
1994
1995         if params is None:
1996             params = element.get(u"parameters", None)
1997             if params and u"type" not in params:
1998                 params.append(u"type")
1999
2000         cores = element.get(u"core", None)
2001         if cores:
2002             tests = list()
2003             for core in cores:
2004                 for test in include:
2005                     tests.append(test.format(core=core))
2006         else:
2007             tests = include
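        # E.g. (illustrative) include [u".*-{core}-ethip4-ip4base-ndrpdr"]
        # with u"core": [u"1c", u"2c"] expands into two regular expressions,
        # one per core value.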
2008
2009         data = pd.Series()
2010         try:
2011             for job, builds in element[u"data"].items():
2012                 data[job] = pd.Series()
2013                 for build in builds:
2014                     data[job][str(build)] = pd.Series()
2015                     for test in tests:
2016                         try:
2017                             reg_ex = re.compile(str(test).lower())
2018                             for test_id in self.data[job][
2019                                     str(build)][data_set].keys():
2020                                 if re.match(reg_ex, str(test_id).lower()):
2021                                     test_data = self.data[job][
2022                                         str(build)][data_set][test_id]
2023                                     data[job][str(build)][test_id] = pd.Series()
2024                                     if params is None:
2025                                         for param, val in test_data.items():
2026                                             data[job][str(build)][test_id]\
2027                                                 [param] = val
2028                                     else:
2029                                         for param in params:
2030                                             try:
2031                                                 data[job][str(build)][
2032                                                     test_id][param] = \
2033                                                     test_data[param]
2034                                             except KeyError:
2035                                                 data[job][str(build)][
2036                                                     test_id][param] = u"No Data"
2037                         except KeyError as err:
2038                             if continue_on_error:
2039                                 logging.debug(repr(err))
2040                                 continue
2041                             logging.error(repr(err))
2042                             return None
2043             return data
2044
2045         except (KeyError, IndexError, ValueError) as err:
2046             logging.error(
2047                 f"Missing mandatory parameter in the element "
2048                 f"specification: {repr(err)}"
2049             )
2050             return None
2051         except AttributeError as err:
2052             logging.error(repr(err))
2053             return None
2054
2055     @staticmethod
2056     def merge_data(data):
2057         """Merge data from more jobs and builds to a simple data structure.
2058
2059         The output data structure is:
2060
2061         - test (suite) 1 ID:
2062           - param 1
2063           - param 2
2064           ...
2065           - param n
2066         ...
2067         - test (suite) n ID:
2068         ...
2069
2070         :param data: Data to merge.
2071         :type data: pandas.Series
2072         :returns: Merged data.
2073         :rtype: pandas.Series
2074         """
2075
2076         logging.info(u"    Merging data ...")
2077
2078         merged_data = pd.Series()
2079         for builds in data.values:
2080             for item in builds.values:
2081                 for item_id, item_data in item.items():
2082                     merged_data[item_id] = item_data
2083         return merged_data
2084
2085     def print_all_oper_data(self):
2086         """Print all operational data to console.
2087         """
2088
2089         for job in self._input_data.values:
2090             for build in job.values:
2091                 for test_id, test_data in build[u"tests"].items():
2092                     print(f"{test_id}")
2093                     if test_data.get(u"show-run", None) is None:
2094                         continue
2095                     for dut_name, data in test_data[u"show-run"].items():
2096                         if data.get(u"runtime", None) is None:
2097                             continue
2098                         runtime = loads(data[u"runtime"])
2099                         try:
2100                             threads_nr = len(runtime[0][u"clocks"])
2101                         except (IndexError, KeyError):
2102                             continue
2103                         threads = OrderedDict(
2104                             {idx: list() for idx in range(threads_nr)})
2105                         for item in runtime:
2106                             for idx in range(threads_nr):
2107                                 if item[u"vectors"][idx] > 0:
2108                                     clocks = item[u"clocks"][idx] / \
2109                                              item[u"vectors"][idx]
2110                                 elif item[u"calls"][idx] > 0:
2111                                     clocks = item[u"clocks"][idx] / \
2112                                              item[u"calls"][idx]
2113                                 elif item[u"suspends"][idx] > 0:
2114                                     clocks = item[u"clocks"][idx] / \
2115                                              item[u"suspends"][idx]
2116                                 else:
2117                                     clocks = 0.0
2118
2119                                 if item[u"calls"][idx] > 0:
2120                                     vectors_call = item[u"vectors"][idx] / \
2121                                                    item[u"calls"][idx]
2122                                 else:
2123                                     vectors_call = 0.0
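                                # Worked example: item clocks 1000, vectors 50
                                # and calls 10 give clocks = 1000 / 50 = 20.0
                                # and vectors_call = 50 / 10 = 5.0.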
2124
2125                                 if int(item[u"calls"][idx]) + int(
2126                                         item[u"vectors"][idx]) + \
2127                                         int(item[u"suspends"][idx]):
2128                                     threads[idx].append([
2129                                         item[u"name"],
2130                                         item[u"calls"][idx],
2131                                         item[u"vectors"][idx],
2132                                         item[u"suspends"][idx],
2133                                         clocks,
2134                                         vectors_call
2135                                     ])
2136
2137                         print(f"Host IP: {data.get(u'host', '')}, "
2138                               f"Socket: {data.get(u'socket', '')}")
2139                         for thread_nr, thread in threads.items():
2140                             txt_table = prettytable.PrettyTable(
2141                                 (
2142                                     u"Name",
2143                                     u"Nr of Vectors",
2144                                     u"Nr of Packets",
2145                                     u"Suspends",
2146                                     u"Cycles per Packet",
2147                                     u"Average Vector Size"
2148                                 )
2149                             )
2150                             avg = 0.0
2151                             for row in thread:
2152                                 txt_table.add_row(row)
2153                                 avg += row[-1]
2154                             if len(thread) == 0:
2155                                 avg = u""
2156                             else:
2157                                 avg = f", Average Vector Size per Node: " \
2158                                       f"{(avg / len(thread)):.2f}"
2159                             th_name = u"main" if thread_nr == 0 \
2160                                 else f"worker_{thread_nr}"
2161                             print(f"{dut_name}, {th_name}{avg}")
2162                             txt_table.float_format = u".2"
2163                             txt_table.align = u"r"
2164                             txt_table.align[u"Name"] = u"l"
2165                             print(f"{txt_table.get_string()}\n")