# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """

import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove, walk, listdir
from os.path import isfile, isdir, join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from json.decoder import JSONDecodeError

import hdrh.histogram
import hdrh.codec
import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file
from pal_errors import PresentationError


# Separator used in file names
SEPARATOR = u"__"


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    }
                }
            },

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            },

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            },

            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
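
    Example of usage (illustrative; assumes a Robot Framework output.xml file
    is available locally)::

        result = ExecutionResult(u"output.xml")
        checker = ExecutionChecker(
            metadata={u"job": u"job-name", u"build": u"1"},
            mapping=dict(), ignore=list(), process_oper=False
        )
        result.visit(checker)
        print(checker.data[u"metadata"])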
    """

    REGEX_PLR_RATE = re.compile(
        r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
        r'PLRsearch upper bound::?\s(\d+.\d+)'
    )
    REGEX_NDRPDR_RATE = re.compile(
        r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:\s(\d+.\d+).*\n'
        r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:\s(\d+.\d+)'
    )
    REGEX_NDRPDR_GBPS = re.compile(
        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:.*,\s(\d+.\d+)'
    )
    REGEX_PERF_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    )
    REGEX_CPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
    )
    REGEX_PPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
    )
    REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')

    REGEX_VSAP_MSG_INFO = re.compile(
        r'Transfer Rate: (\d*.\d*).*\n'
        r'Latency: (\d*.\d*).*\n'
        r'Completed requests: (\d*).*\n'
        r'Failed requests: (\d*).*\n'
        r'Total data transferred: (\d*).*\n'
        r'Connection [cr]ps rate:\s*(\d*.\d*)'
    )

    # Needed for CPS and PPS tests
    REGEX_NDRPDR_LAT_BASE = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
    )
    REGEX_NDRPDR_LAT = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
    )

    REGEX_VERSION_VPP = re.compile(
        r"(VPP Version:\s*|VPP version:\s*)(.*)"
    )
    REGEX_VERSION_DPDK = re.compile(
        r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
    )
    REGEX_TCP = re.compile(
        r'Total\s(rps|cps|throughput):\s(\d*).*$'
    )
    REGEX_MRR = re.compile(
        r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
        r'tx\s(\d*),\srx\s(\d*)'
    )
    REGEX_BMRR = re.compile(
        r'.*trial results.*: \[(.*)\]'
    )
    REGEX_RECONF_LOSS = re.compile(
        r'Packets lost due to reconfig: (\d*)'
    )
    REGEX_RECONF_TIME = re.compile(
        r'Implied time lost: (\d*.[\de-]*)'
    )
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')

    REGEX_SH_RUN_HOST = re.compile(
        r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
    )

    def __init__(self, metadata, mapping, ignore, process_oper):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) one.
        :param ignore: List of TCs to be ignored.
        :param process_oper: If True, operational data (show run, telemetry) is
            processed.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        :type process_oper: bool
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        self._process_oper = process_oper

        # Number of PAPI History messages found:
        # 0 - no message
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._conf_history_lookup_nr = 0

        self._sh_run_counter = 0
        self._telemetry_kw_counter = 0
        self._telemetry_msg_counter = 0

        # Test ID of the currently processed test: the lowercase full path to
        # the test.
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed,
            u"test-telemetry": self._get_telemetry
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_data_from_mrr_test_msg(self, msg):
        """Get info from message of MRR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or original message if a problem occurs.
        :rtype: str
        """

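        # Illustrative (not verbatim) MRR message tail matched by
        # REGEX_MRR_MSG_INFO: a bracketed, comma-separated list of per-trial
        # receive rates in pps, e.g. "... [10000000.0, 10200000.0, 9900000.0]".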
        groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
        if not groups or groups.lastindex != 1:
            return u"Test Failed."

        try:
            data = groups.group(1).split(u", ")
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        out_str = u"["
        try:
            for item in data:
                out_str += f"{(float(item) / 1e6):.2f}, "
            return out_str[:-2] + u"]"
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_cps_test_msg(self, msg):
        """Get info from message of NDRPDR CPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 2:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
                f"2. {(float(groups.group(2)) / 1e6):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_pps_test_msg(self, msg):
        """Get info from message of NDRPDR PPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 4:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
                f"{float(groups.group(2)):5.2f}\n"
                f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
                f"{float(groups.group(4)):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_perf_test_msg(self, msg):
        """Get info from message of NDRPDR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
        if not groups or groups.lastindex != 10:
            return u"Test Failed."

        try:
            data = {
                u"ndr_low": float(groups.group(1)),
                u"ndr_low_b": float(groups.group(2)),
                u"pdr_low": float(groups.group(3)),
                u"pdr_low_b": float(groups.group(4)),
                u"pdr_lat_90_1": groups.group(5),
                u"pdr_lat_90_2": groups.group(6),
                u"pdr_lat_50_1": groups.group(7),
                u"pdr_lat_50_2": groups.group(8),
                u"pdr_lat_10_1": groups.group(9),
                u"pdr_lat_10_2": groups.group(10),
            }
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        def _process_lat(in_str_1, in_str_2):
            """Extract P50, P90 and P99 latencies or min, avg, max values from
            latency string.

            :param in_str_1: Latency string for one direction produced by robot
                framework.
            :param in_str_2: Latency string for second direction produced by
                robot framework.
            :type in_str_1: str
            :type in_str_2: str
            :returns: Processed latency string or None if a problem occurs.
            :rtype: tuple
            """
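            # Illustrative (assumed) input for one direction:
            # "1/2/3/<base64 hdrh data>", i.e. min/avg/max latency plus an
            # optional base64-encoded hdrh histogram.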
            in_list_1 = in_str_1.split('/', 3)
            in_list_2 = in_str_2.split('/', 3)

            if len(in_list_1) != 4 and len(in_list_2) != 4:
                return None

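            # Re-add the base64 '=' padding so the hdrh blob can be decoded;
            # if decoding fails, the plain min/avg/max values are used instead.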
            in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
            try:
                hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
            except hdrh.codec.HdrLengthException:
                hdr_lat_1 = None

            in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
            try:
                hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
            except hdrh.codec.HdrLengthException:
                hdr_lat_2 = None

            if hdr_lat_1 and hdr_lat_2:
                hdr_lat = (
                    hdr_lat_1.get_value_at_percentile(50.0),
                    hdr_lat_1.get_value_at_percentile(90.0),
                    hdr_lat_1.get_value_at_percentile(99.0),
                    hdr_lat_2.get_value_at_percentile(50.0),
                    hdr_lat_2.get_value_at_percentile(90.0),
                    hdr_lat_2.get_value_at_percentile(99.0)
                )
                if all(hdr_lat):
                    return hdr_lat

            hdr_lat = (
                int(in_list_1[0]), int(in_list_1[1]), int(in_list_1[2]),
                int(in_list_2[0]), int(in_list_2[1]), int(in_list_2[2])
            )
            for item in hdr_lat:
                if item in (-1, 4294967295, 0):
                    return None
            return hdr_lat

        try:
            out_msg = (
                f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
                f"{data[u'ndr_low_b']:5.2f}"
                f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
                f"{data[u'pdr_low_b']:5.2f}"
            )
            latency = (
                _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
                _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
                _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
            )
            if all(latency):
                max_len = len(str(max((max(item) for item in latency))))
                max_len = 4 if max_len < 4 else max_len

                for idx, lat in enumerate(latency):
                    if not idx:
                        out_msg += u"\n"
                    out_msg += (
                        f"\n{idx + 3}. "
                        f"{lat[0]:{max_len}d} "
                        f"{lat[1]:{max_len}d} "
                        f"{lat[2]:{max_len}d}      "
                        f"{lat[3]:{max_len}d} "
                        f"{lat[4]:{max_len}d} "
                        f"{lat[5]:{max_len}d} "
                    )

            return out_msg

        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"VPP version:") or \
                msg.message.count(u"VPP Version:"):
            self._version = str(
                re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
            )
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None
            logging.info(self._version)

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(
                r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
                u"",
                msg.message,
                count=1
            ).replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if not msg.message.count(u"stats runtime"):
            return

        # Temporary solution
        if self._sh_run_counter > 1:
            return

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = dict()

        groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""

        dut = u"dut{nr}".format(
            nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)

        self._data[u'tests'][self._test_id][u'show-run'][dut] = \
            copy.copy(
                {
                    u"host": host,
                    u"socket": sock,
                    u"runtime": str(msg.message).replace(u' ', u'').
                                replace(u'\n', u'').replace(u"'", u'"').
                                replace(u'b"', u'"').replace(u'u"', u'"').
                                split(u":", 1)[1]
                }
            )

    def _get_telemetry(self, msg):
        """Called when extraction of VPP telemetry data is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._telemetry_kw_counter > 1:
            return
        if not msg.message.count(u"# TYPE vpp_runtime_calls"):
            return

        if u"telemetry-show-run" not in \
                self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()

        self._telemetry_msg_counter += 1
        groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""
        runtime = {
            u"source_type": u"node",
            u"source_id": host,
            u"msg_type": u"metric",
            u"log_level": u"INFO",
            u"timestamp": msg.timestamp,
            u"msg": u"show_runtime",
            u"host": host,
            u"socket": sock,
            u"data": list()
        }
        for line in msg.message.splitlines():
            if not line.startswith(u"vpp_runtime_"):
                continue
            try:
                params, value, timestamp = line.rsplit(u" ", maxsplit=2)
                cut = params.index(u"{")
                name = params[:cut].split(u"_", maxsplit=2)[-1]
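                # The Prometheus-style label set, e.g.
                # {name="ip4-input",state="active"} (illustrative), is turned
                # into a dict by rewriting the braces into a dict(...) call
                # and evaluating it.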
                labels = eval(
                    u"dict" + params[cut:].replace('{', '(').replace('}', ')')
                )
                labels[u"graph_node"] = labels.pop(u"name")
                runtime[u"data"].append(
                    {
                        u"name": name,
                        u"value": value,
                        u"timestamp": timestamp,
                        u"labels": labels
                    }
                )
            except (TypeError, ValueError, IndexError):
                continue
        self._data[u'tests'][self._test_id][u'telemetry-show-run']\
            [f"dut{self._telemetry_msg_counter}"] = copy.copy(
                {
                    u"host": host,
                    u"socket": sock,
                    u"runtime": runtime
                }
            )

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

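        # Illustrative (not verbatim) message fragment matched by
        # REGEX_NDRPDR_RATE, rates in pps:
        #     NDR_LOWER: 12345678.9 ...
        #     ...
        #     NDR_UPPER: 12345678.9 ...
        #     PDR_LOWER: 12345678.9 ...
        #     ...
        #     PDR_UPPER: 12345678.9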
        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_throughput_gbps(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
        test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        gbps = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_GBPS, msg)

        if groups is not None:
            try:
                gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
                gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
                gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
                gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return gbps, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0,
            u"avg": -1.0,
            u"max": -1.0,
            u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"LAT0": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR10": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR50": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR90": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
        }

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)
        if groups is None:
            groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
        if groups is None:
            return latency, u"FAIL"

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with corresponding keys, except hdrh float values.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
            """
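            # Illustrative call (hypothetical values):
            #   process_latency(u"1/2/3/encoded") returns
            #   {u"min": 1.0, u"avg": 2.0, u"max": 3.0, u"hdrh": u"encoded"}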
            in_list = in_str.split('/', 3)

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

        try:
            latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
            latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
            latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
            latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
            if groups.lastindex == 4:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        try:
            latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
            latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
            latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
            latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
            latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
            latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
            latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
            latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
            if groups.lastindex == 12:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        return latency, u"FAIL"

    @staticmethod
    def _get_hoststack_data(msg, tags):
        """Get data from the hoststack test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        result = dict()
        status = u"FAIL"

        msg = msg.replace(u"'", u'"').replace(u" ", u"")
        if u"LDPRELOAD" in tags:
            try:
                result = loads(msg)
                status = u"PASS"
            except JSONDecodeError:
                pass
        elif u"VPPECHO" in tags:
            try:
                msg_lst = msg.replace(u"}{", u"} {").split(u" ")
                result = dict(
                    client=loads(msg_lst[0]),
                    server=loads(msg_lst[1])
                )
                status = u"PASS"
            except (JSONDecodeError, IndexError):
                pass

        return result, status

    def _get_vsap_data(self, msg, tags):
        """Get data from the vsap test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        result = dict()
        status = u"FAIL"

        groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
        if groups is not None:
            try:
                result[u"transfer-rate"] = float(groups.group(1)) * 1e3
                result[u"latency"] = float(groups.group(2))
                result[u"completed-requests"] = int(groups.group(3))
                result[u"failed-requests"] = int(groups.group(4))
                result[u"bytes-transferred"] = int(groups.group(5))
                if u"TCP_CPS" in tags:
                    result[u"cps"] = float(groups.group(6))
                elif u"TCP_RPS" in tags:
                    result[u"rps"] = float(groups.group(6))
                else:
                    return result, status
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return result, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        self._data[u"suites"][suite.longname.lower().
                              replace(u'"', u"'").
                              replace(u" ", u"_")] = {
                                  u"name": suite.name.lower(),
                                  u"doc": suite.doc,
                                  u"parent": parent_name,
                                  u"level": len(suite.longname.split(u"."))
                              }

        suite.setup.visit(self)
        suite.body.visit(self)
        suite.teardown.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.setup.visit(self)
            test.body.visit(self)
            test.teardown.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        self._sh_run_counter = 0
        self._telemetry_kw_counter = 0
        self._telemetry_msg_counter = 0

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            logging.debug(
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
                f"{name}"
            )
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result[u"parent"] = test.parent.name.lower()
        test_result[u"tags"] = tags
        test_result["doc"] = test.doc
        test_result[u"type"] = u""
        test_result[u"status"] = test.status
        test_result[u"starttime"] = test.starttime
        test_result[u"endtime"] = test.endtime

        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
                    test_result[u"msg"] = self._get_data_from_pps_test_msg(
                        test.message)
                elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                    test_result[u"msg"] = self._get_data_from_cps_test_msg(
                        test.message)
                else:
                    test_result[u"msg"] = self._get_data_from_perf_test_msg(
                        test.message)
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                test_result[u"msg"] = self._get_data_from_mrr_test_msg(
                    test.message)
            else:
                test_result[u"msg"] = test.message
        else:
            test_result[u"msg"] = test.message

        if u"PERFTEST" in tags and u"TREX" not in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            tag_count = 0
            tag_tc = str()
            for tag in test_result[u"tags"]:
                groups = re.search(self.REGEX_TC_TAG, tag)
                if groups:
                    tag_count += 1
                    tag_tc = tag

            if tag_count == 1:
                self._test_id = re.sub(
                    self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                    self._test_id, count=1
                )
                test_result[u"name"] = re.sub(
                    self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                    test_result["name"], count=1
                )
            else:
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                logging.debug(
                    f"The test {self._test_id} has none or more than one "
                    f"multi-threading tag.\n"
                    f"Tags: {test_result[u'tags']}"
                )
                return

        if u"DEVICETEST" in tags:
            test_result[u"type"] = u"DEVICETEST"
        elif u"NDRPDR" in tags:
            if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                test_result[u"type"] = u"CPS"
            else:
                test_result[u"type"] = u"NDRPDR"
            if test.status == u"PASS":
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"gbps"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput_gbps(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
        elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
            if u"MRR" in tags:
                test_result[u"type"] = u"MRR"
            else:
                test_result[u"type"] = u"BMRR"
            if test.status == u"PASS":
                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [
                        float(item.strip().replace(u"'", u""))
                        for item in items_str.split(",")
                    ]
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"samples"] = items_float
                    test_result[u"result"][u"receive-rate"] = stats.avg
                    test_result[u"result"][u"receive-stdev"] = stats.stdev
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
        elif u"SOAK" in tags:
            test_result[u"type"] = u"SOAK"
            if test.status == u"PASS":
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
        elif u"LDP_NGINX" in tags:
            test_result[u"type"] = u"LDP_NGINX"
            test_result[u"result"], test_result[u"status"] = \
                self._get_vsap_data(test.message, tags)
        elif u"HOSTSTACK" in tags:
            test_result[u"type"] = u"HOSTSTACK"
            if test.status == u"PASS":
                test_result[u"result"], test_result[u"status"] = \
                    self._get_hoststack_data(test.message, tags)
        elif u"RECONF" in tags:
            test_result[u"type"] = u"RECONF"
            if test.status == u"PASS":
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
        else:
            test_result[u"status"] = u"FAIL"

        self._data[u"tests"][self._test_id] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type in ("setup", "SETUP"):
                self.visit_setup_kw(keyword)
            elif keyword.type in ("teardown", "TEARDOWN"):
                self.visit_teardown_kw(keyword)
            else:
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.body:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if not self._process_oper:
            return

        if test_kw.name.count(u"Run Telemetry On All Duts"):
            self._msg_type = u"test-telemetry"
            self._telemetry_kw_counter += 1
        elif test_kw.name.count(u"Show Runtime On All Duts"):
            self._msg_type = u"test-show-runtime"
            self._sh_run_counter += 1
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.setup:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)
        for keyword in setup_kw.body:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
                not self._version:
            self._msg_type = u"dpdk-version"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.body:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        if teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Get required information from messages:
        - VPP version.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """


class InputData:
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
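
    Example of accessing the parsed data (illustrative job and build names;
    assumes the data store has already been filled)::

        tests = input_data.tests(u"csit-vpp-perf-report", u"1")
        metadata = input_data.metadata(u"csit-vpp-perf-report", u"1")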
    """
1444
1445     def __init__(self, spec, for_output):
1446         """Initialization.
1447
1448         :param spec: Specification.
1449         :param for_output: Output to be generated from downloaded data.
1450         :type spec: Specification
1451         :type for_output: str
1452         """
1453
1454         # Specification:
1455         self._cfg = spec
1456
1457         self._for_output = for_output
1458
1459         # Data store:
1460         self._input_data = pd.Series(dtype="float64")
1461
1462     @property
1463     def data(self):
1464         """Getter - Input data.
1465
1466         :returns: Input data
1467         :rtype: pandas.Series
1468         """
1469         return self._input_data
1470
1471     def metadata(self, job, build):
1472         """Getter - metadata
1473
1474         :param job: Job which metadata we want.
1475         :param build: Build which metadata we want.
1476         :type job: str
1477         :type build: str
1478         :returns: Metadata
1479         :rtype: pandas.Series
1480         """
1481         return self.data[job][build][u"metadata"]
1482
1483     def suites(self, job, build):
1484         """Getter - suites
1485
1486         :param job: Job whose suites we want.
1487         :param build: Build whose suites we want.
1488         :type job: str
1489         :type build: str
1490         :returns: Suites.
1491         :rtype: pandas.Series
1492         """
1493         return self.data[job][str(build)][u"suites"]
1494
1495     def tests(self, job, build):
1496         """Getter - tests
1497
1498         :param job: Job whose tests we want.
1499         :param build: Build whose tests we want.
1500         :type job: str
1501         :type build: str
1502         :returns: Tests.
1503         :rtype: pandas.Series
1504         """
1505         return self.data[job][str(build)][u"tests"]
1506
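    # Illustrative use of the getters above (the job name, build number and
    # printed key are hypothetical; a sketch, not output of a real run):
    #
    #     in_data = InputData(spec, u"report")
    #     in_data.download_and_parse_data(repeat=2)
    #     print(in_data.metadata(u"csit-vpp-perf-mrr-daily-master", u"1"))
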
1507     def _parse_tests(self, job, build):
1508         """Process data from a robot output.xml file and return
1509         JSON-structured data.
1510
1511         :param job: The name of the job whose build output data is processed.
1512         :param build: The build whose output data is processed.
1513         :type job: str
1514         :type build: dict
1515         :returns: JSON data structure.
1516         :rtype: dict
1517         """
1518
1519         metadata = {
1520             u"job": job,
1521             u"build": build
1522         }
1523
1524         with open(build[u"file-name"], u'r') as data_file:
1525             try:
1526                 result = ExecutionResult(data_file)
1527             except errors.DataError as err:
1528                 logging.error(
1529                     f"Error occurred while parsing output.xml: {repr(err)}"
1530                 )
1531                 return None
1532
1533         process_oper = False
1534         if u"-vpp-perf-report-coverage-" in job:
1535             process_oper = True
1536         # elif u"-vpp-perf-report-iterative-" in job:
1537         #     # Exceptions for TBs where we do not have coverage data:
1538         #     for item in (u"-2n-icx", ):
1539         #         if item in job:
1540         #             process_oper = True
1541         #             break
1542         checker = ExecutionChecker(
1543             metadata, self._cfg.mapping, self._cfg.ignore, process_oper
1544         )
1545         result.visit(checker)
1546
1547         checker.data[u"metadata"][u"tests_total"] = \
1548             result.statistics.total.total
1549         checker.data[u"metadata"][u"tests_passed"] = \
1550             result.statistics.total.passed
1551         checker.data[u"metadata"][u"tests_failed"] = \
1552             result.statistics.total.failed
1553         checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
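        # Robot's endtime looks like u"20220101 12:34:56.789"; its first 14
        # characters form u"%Y%m%d %H:%M", the format parsed later by the
        # time-period check in _download_and_parse_build().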
1554         checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]
1555
1556         return checker.data
1557
1558     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1559         """Download and parse the input data file.
1560
1561         :param job: Name of the Jenkins job which generated the processed
1562             input file.
1563         :param build: Information about the Jenkins build which generated
1564             the processed input file.
1565         :param repeat: Repeat the download the specified number of times if
1566             not successful.
1567         :param pid: PID of the process executing this method.
1568         :type job: str
1569         :type build: dict
1570         :type repeat: int
1571         :type pid: int
1572         """
1573
1574         logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1575
1576         state = u"failed"
1577         success = False
1578         data = None
1579         do_repeat = repeat
1580         while do_repeat:
1581             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1582             if success:
1583                 break
1584             do_repeat -= 1
1585         if not success:
1586             logging.error(
1587                 f"The input data file from the job {job}, build "
1588                 f"{build[u'build']} could not be downloaded or is damaged. "
1589                 f"Skipped."
1590             )
1591         if success:
1592             logging.info(f"  Processing data from build {build[u'build']}")
1593             data = self._parse_tests(job, build)
1594             if data is None:
1595                 logging.error(
1596                     f"Input data file from the job {job}, build "
1597                     f"{build[u'build']} is damaged. Skipped."
1598                 )
1599             else:
1600                 state = u"processed"
1601
1602             try:
1603                 remove(build[u"file-name"])
1604             except OSError as err:
1605                 logging.error(
1606                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1607                 )
1608
1609         # If the time-period is defined in the specification file, remove all
1610         # files which are outside the time period.
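        # E.g. a (hypothetical) u"time-period": 90 in the environment section
        # of the specification drops builds generated more than 90 days ago.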
1611         is_last = False
1612         timeperiod = self._cfg.environment.get(u"time-period", None)
1613         if timeperiod and data:
1614             now = dt.utcnow()
1615             timeperiod = timedelta(int(timeperiod))
1616             metadata = data.get(u"metadata", None)
1617             if metadata:
1618                 generated = metadata.get(u"generated", None)
1619                 if generated:
1620                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1621                     if (now - generated) > timeperiod:
1622                         # Remove the data and the file:
1623                         state = u"removed"
1624                         data = None
1625                         is_last = True
1626                         logging.info(
1627                             f"  The build {job}/{build[u'build']} is "
1628                             f"outdated, will be removed."
1629                         )
1630         return {
1631             u"data": data,
1632             u"state": state,
1633             u"job": job,
1634             u"build": build,
1635             u"last": is_last
1636         }
1637
1638     def download_and_parse_data(self, repeat=1):
1639         """Download the input data files, parse the input data and store it
1640         in pandas' Series.
1641
1642         :param repeat: Repeat the download the specified number of times if
1643             not successful.
1644         :type repeat: int
1645         """
1646
1647         logging.info(u"Downloading and parsing input files ...")
1648
1649         for job, builds in self._cfg.input.items():
1650             for build in builds:
1651
1652                 result = self._download_and_parse_build(job, build, repeat)
1653                 if result[u"last"]:
1654                     break
1655                 build_nr = result[u"build"][u"build"]
1656
1657                 if result[u"data"]:
1658                     data = result[u"data"]
1659                     build_data = pd.Series({
1660                         u"metadata": pd.Series(
1661                             list(data[u"metadata"].values()),
1662                             index=list(data[u"metadata"].keys())
1663                         ),
1664                         u"suites": pd.Series(
1665                             list(data[u"suites"].values()),
1666                             index=list(data[u"suites"].keys())
1667                         ),
1668                         u"tests": pd.Series(
1669                             list(data[u"tests"].values()),
1670                             index=list(data[u"tests"].keys())
1671                         )
1672                     })
1673
1674                     if self._input_data.get(job, None) is None:
1675                         self._input_data[job] = pd.Series(dtype="float64")
1676                     self._input_data[job][str(build_nr)] = build_data
1677                     self._cfg.set_input_file_name(
1678                         job, build_nr, result[u"build"][u"file-name"]
1679                     )
1680                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1681
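                # On Linux, ru_maxrss is reported in kilobytes, so dividing
                # by 1000 gives an approximate value in megabytes.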
1682                 mem_alloc = \
1683                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1684                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1685
1686         logging.info(u"Done.")
1687
1688         msg = u"Successful downloads from the sources:\n"
1689         for source in self._cfg.environment[u"data-sources"]:
1690             if source[u"successful-downloads"]:
1691                 msg += (
1692                     f"{source[u'url']}/{source[u'path']}/"
1693                     f"{source[u'file-name']}: "
1694                     f"{source[u'successful-downloads']}\n"
1695                 )
1696         logging.info(msg)
1697
1698     def process_local_file(self, local_file, job=u"local", build_nr=1,
1699                            replace=True):
1700         """Process a local XML file given as a command-line parameter.
1701
1702         :param local_file: The file to process.
1703         :param job: Job name.
1704         :param build_nr: Build number.
1705         :param replace: If True, the information about jobs and builds is
1706             replaced by the new one, otherwise the new jobs and builds are
1707             added.
1708         :type local_file: str
1709         :type job: str
1710         :type build_nr: int
1711         :type replace: bool
1712         :raises: PresentationError if an error occurs.
1713         """
1714         if not isfile(local_file):
1715             raise PresentationError(f"The file {local_file} does not exist.")
1716
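        # Derive the build number from the file name; e.g. a (hypothetical)
        # path u".../output/123.xml" gives build_nr 123. If the name is not
        # numeric, the given build_nr is kept.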
1717         try:
1718             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1719         except (IndexError, ValueError):
1720             pass
1721
1722         build = {
1723             u"build": build_nr,
1724             u"status": u"failed",
1725             u"file-name": local_file
1726         }
1727         if replace:
1728             self._cfg.input = dict()
1729         self._cfg.add_build(job, build)
1730
1731         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1732         data = self._parse_tests(job, build)
1733         if data is None:
1734             raise PresentationError(
1735                 f"Error occurred while parsing the file {local_file}"
1736             )
1737
1738         build_data = pd.Series({
1739             u"metadata": pd.Series(
1740                 list(data[u"metadata"].values()),
1741                 index=list(data[u"metadata"].keys())
1742             ),
1743             u"suites": pd.Series(
1744                 list(data[u"suites"].values()),
1745                 index=list(data[u"suites"].keys())
1746             ),
1747             u"tests": pd.Series(
1748                 list(data[u"tests"].values()),
1749                 index=list(data[u"tests"].keys())
1750             )
1751         })
1752
1753         if self._input_data.get(job, None) is None:
1754             self._input_data[job] = pd.Series(dtype="float64")
1755         self._input_data[job][str(build_nr)] = build_data
1756
1757         self._cfg.set_input_state(job, build_nr, u"processed")
1758
1759     def process_local_directory(self, local_dir, replace=True):
1760         """Process a local directory with XML file(s). The directory is
1761         processed as a 'job' and the XML files in it as builds.
1762         If the given directory contains only sub-directories, these
1763         sub-directories are processed as jobs and the corresponding XML files
1764         as builds of their jobs.
1765
1766         :param local_dir: Local directory to process.
1767         :param replace: If True, the information about jobs and builds is
1768             replaced by the new one, otherwise the new jobs and builds are
1769             added.
1770         :type local_dir: str
1771         :type replace: bool
1772         """
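        # Accepted layouts (all names are hypothetical):
        #   1) local_dir/1.xml, local_dir/2.xml, ...
        #      -> one job named after local_dir, its files as builds;
        #   2) local_dir/job_a/1.xml, local_dir/job_b/1.xml, ...
        #      -> each sub-directory as a job, its files as builds.
        # Mixing files and sub-directories raises PresentationError.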
1773         if not isdir(local_dir):
1774             raise PresentationError(
1775                 f"The directory {local_dir} does not exist."
1776             )
1777
1778         # Check if the given directory includes only files, or only directories
1779         _, dirnames, filenames = next(walk(local_dir))
1780
1781         if filenames and not dirnames:
1782             filenames.sort()
1783             # local_builds:
1784             # key: dir (job) name, value: list of file names (builds)
1785             local_builds = {
1786                 local_dir: [join(local_dir, name) for name in filenames]
1787             }
1788
1789         elif dirnames and not filenames:
1790             dirnames.sort()
1791             # local_builds:
1792             # key: dir (job) name, value: list of file names (builds)
1793             local_builds = dict()
1794             for dirname in dirnames:
1795                 builds = [
1796                     join(local_dir, dirname, name)
1797                     for name in listdir(join(local_dir, dirname))
1798                     if isfile(join(local_dir, dirname, name))
1799                 ]
1800                 if builds:
1801                     local_builds[dirname] = sorted(builds)
1802
1803         elif not filenames and not dirnames:
1804             raise PresentationError(f"The directory {local_dir} is empty.")
1805         else:
1806             raise PresentationError(
1807                 f"The directory {local_dir} can include only files or only "
1808                 f"directories, not both.\nThe directory {local_dir} includes "
1809                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1810             )
1811
1812         if replace:
1813             self._cfg.input = dict()
1814
1815         for job, files in local_builds.items():
1816             for idx, local_file in enumerate(files):
1817                 self.process_local_file(local_file, job, idx + 1, replace=False)
1818
1819     @staticmethod
1820     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1821         """Return the index of the character which closes the tag.
1822
1823         :param tag_filter: The string where the end of the tag is searched.
1824         :param start: The index where the searching is started.
1825         :param closer: The character which closes the tag.
1826         :type tag_filter: str
1827         :type start: int
1828         :type closer: str
1829         :returns: The index of the tag closer, or None if not found.
1830         :rtype: int
1831         """
1832         try:
1833             idx_opener = tag_filter.index(closer, start)
1834             return tag_filter.index(closer, idx_opener + 1)
1835         except ValueError:
1836             return None
1837
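    # Example (a sketch): for tag_filter = u"'NDRPDR' and '64B'",
    # _end_of_tag(tag_filter, 0) returns 7, the index of the apostrophe
    # closing u"'NDRPDR'"; repeated calls walk the filter one tag at a time.
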
1838     @staticmethod
1839     def _condition(tag_filter):
1840         """Create a conditional statement from the given tag filter.
1841
1842         :param tag_filter: Filter based on tags from the element specification.
1843         :type tag_filter: str
1844         :returns: Conditional statement which can be evaluated.
1845         :rtype: str
1846         """
1847         index = 0
1848         while True:
1849             index = InputData._end_of_tag(tag_filter, index)
1850             if index is None:
1851                 return tag_filter
1852             index += 1
1853             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1854
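    # Example (a sketch): _condition(u"'NDRPDR' and '1T1C'") returns
    # u"'NDRPDR' in tags and '1T1C' in tags", which filter_data() below
    # evaluates with eval() against each test's tags.
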
1855     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1856                     continue_on_error=False):
1857         """Filter required data from the given jobs and builds.
1858
1859         The output data structure is:
1860         - job 1
1861           - build 1
1862             - test (or suite) 1 ID:
1863               - param 1
1864               - param 2
1865               ...
1866               - param n
1867             ...
1868             - test (or suite) n ID:
1869             ...
1870           ...
1871           - build n
1872         ...
1873         - job n
1874
1875         :param element: Element which will use the filtered data.
1876         :param params: Parameters which will be included in the output. If None,
1877             all parameters are included.
1878         :param data: If not None, this data is used instead of data specified
1879             in the element.
1880         :param data_set: The set of data to be filtered: tests, suites,
1881             metadata.
1882         :param continue_on_error: Continue if there is an error while reading
1883             the data. The item will be empty then.
1884         :type element: pandas.Series
1885         :type params: list
1886         :type data: dict
1887         :type data_set: str
1888         :type continue_on_error: bool
1889         :returns: Filtered data.
1890         :rtype: pandas.Series
1891         """
1892
1893         try:
1894             if data_set == u"suites":
1895                 cond = u"True"
1896             elif element[u"filter"] in (u"all", u"template"):
1897                 cond = u"True"
1898             else:
1899                 cond = InputData._condition(element[u"filter"])
1900             logging.debug(f"   Filter: {cond}")
1901         except KeyError:
1902             logging.error(u"  No filter defined.")
1903             return None
1904
1905         if params is None:
1906             params = element.get(u"parameters", None)
1907             if params:
1908                 params.extend((u"type", u"status"))
1909
1910         data_to_filter = data if data else element[u"data"]
1911         data = pd.Series(dtype="float64")
1912         try:
1913             for job, builds in data_to_filter.items():
1914                 data[job] = pd.Series(dtype="float64")
1915                 for build in builds:
1916                     data[job][str(build)] = pd.Series(dtype="float64")
1917                     try:
1918                         data_dict = dict(
1919                             self.data[job][str(build)][data_set].items())
1920                     except KeyError:
1921                         if continue_on_error:
1922                             continue
1923                         return None
1924
1925                     for test_id, test_data in data_dict.items():
1926                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1927                             data[job][str(build)][test_id] = \
1928                                 pd.Series(dtype="float64")
1929                             if params is None:
1930                                 for param, val in test_data.items():
1931                                     data[job][str(build)][test_id][param] = val
1932                             else:
1933                                 for param in params:
1934                                     try:
1935                                         data[job][str(build)][test_id][param] =\
1936                                             test_data[param]
1937                                     except KeyError:
1938                                         data[job][str(build)][test_id][param] =\
1939                                             u"No Data"
1940             return data
1941
1942         except (KeyError, IndexError, ValueError) as err:
1943             logging.error(
1944                 f"Missing mandatory parameter in the element specification: "
1945                 f"{repr(err)}"
1946             )
1947             return None
1948         except AttributeError as err:
1949             logging.error(repr(err))
1950             return None
1951         except SyntaxError as err:
1952             logging.error(
1953                 f"The filter {cond} is not correct. Check if all tags are "
1954                 f"enclosed by apostrophes.\n{repr(err)}"
1955             )
1956             return None
1957
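    # Illustrative call of filter_data() (the element content is hypothetical;
    # a sketch, not a real specification):
    #
    #     element = {
    #         u"filter": u"'NDRPDR' and '64B'",
    #         u"parameters": [u"name", u"throughput"],
    #         u"data": {u"csit-vpp-perf-mrr-daily-master": [1, 2]}
    #     }
    #     filtered = in_data.filter_data(element)
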
1958     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1959                              continue_on_error=False):
1960         """Filter required data from the given jobs and builds.
1961
1962         The output data structure is:
1963         - job 1
1964           - build 1
1965             - test (or suite) 1 ID:
1966               - param 1
1967               - param 2
1968               ...
1969               - param n
1970             ...
1971             - test (or suite) n ID:
1972             ...
1973           ...
1974           - build n
1975         ...
1976         - job n
1977
1978         :param element: Element which will use the filtered data.
1979         :param params: Parameters which will be included in the output. If
1980             None, all parameters are included.
1981         :param data_set: The set of data to be filtered: tests, suites,
1982             metadata.
1983         :param continue_on_error: Continue if there is an error while reading
1984             the data. The item will be empty then.
1985         :type element: pandas.Series
1986         :type params: list
1987         :type data_set: str
1988         :type continue_on_error: bool
1989         :returns: Filtered data.
1990         :rtype: pandas.Series
1991         """
1992
1993         include = element.get(u"include", None)
1994         if not include:
1995             logging.warning(u"No tests to include, skipping the element.")
1996             return None
1997
1998         if params is None:
1999             params = element.get(u"parameters", None)
2000             if params and u"type" not in params:
2001                 params.append(u"type")
2002
2003         cores = element.get(u"core", None)
2004         if cores:
2005             tests = list()
2006             for core in cores:
2007                 for test in include:
2008                     tests.append(test.format(core=core))
2009         else:
2010             tests = include
2011
2012         data = pd.Series(dtype="float64")
2013         try:
2014             for job, builds in element[u"data"].items():
2015                 data[job] = pd.Series(dtype="float64")
2016                 for build in builds:
2017                     data[job][str(build)] = pd.Series(dtype="float64")
2018                     for test in tests:
2019                         try:
2020                             reg_ex = re.compile(str(test).lower())
2021                             for test_id in self.data[job][
2022                                     str(build)][data_set].keys():
2023                                 if re.match(reg_ex, str(test_id).lower()):
2024                                     test_data = self.data[job][
2025                                         str(build)][data_set][test_id]
2026                                     data[job][str(build)][test_id] = \
2027                                         pd.Series(dtype="float64")
2028                                     if params is None:
2029                                         for param, val in test_data.items():
2030                                             data[job][str(build)][test_id]\
2031                                                 [param] = val
2032                                     else:
2033                                         for param in params:
2034                                             try:
2035                                                 data[job][str(build)][
2036                                                     test_id][param] = \
2037                                                     test_data[param]
2038                                             except KeyError:
2039                                                 data[job][str(build)][
2040                                                     test_id][param] = u"No Data"
2041                         except KeyError as err:
2042                             if continue_on_error:
2043                                 logging.debug(repr(err))
2044                                 continue
2045                             logging.error(repr(err))
2046                             return None
2047             return data
2048
2049         except (KeyError, IndexError, ValueError) as err:
2050             logging.error(
2051                 f"Missing mandatory parameter in the element "
2052                 f"specification: {repr(err)}"
2053             )
2054             return None
2055         except AttributeError as err:
2056             logging.error(repr(err))
2057             return None
2058
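    # Illustrative element for filter_tests_by_name() (content is
    # hypothetical): with
    #     u"include": [u".*-{core}-ethip4-ip4base-ndrpdr"],
    #     u"core": [u"1c", u"2c"]
    # the patterns expand to u".*-1c-ethip4-ip4base-ndrpdr" and
    # u".*-2c-ethip4-ip4base-ndrpdr" and are matched as regular expressions
    # against the test IDs.
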
2059     @staticmethod
2060     def merge_data(data):
2061         """Merge data from multiple jobs and builds into a simple data structure.
2062
2063         The output data structure is:
2064
2065         - test (suite) 1 ID:
2066           - param 1
2067           - param 2
2068           ...
2069           - param n
2070         ...
2071         - test (suite) n ID:
2072         ...
2073
2074         :param data: Data to merge.
2075         :type data: pandas.Series
2076         :returns: Merged data.
2077         :rtype: pandas.Series
2078         """
2079
2080         logging.info(u"    Merging data ...")
2081
2082         merged_data = pd.Series(dtype="float64")
2083         for builds in data.values:
2084             for item in builds.values:
2085                 for item_id, item_data in item.items():
2086                     merged_data[item_id] = item_data
2087         return merged_data
2088
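    # Example (a sketch): merging
    #     {u"job": {u"1": {u"test-a": {...}}, u"2": {u"test-a": {...}}}}
    # yields {u"test-a": {...}}; data from later builds overwrites data from
    # earlier ones for the same test ID.
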
2089     def print_all_oper_data(self):
2090         """Print all operational data to the console.
2091         """
2092
2093         for job in self._input_data.values:
2094             for build in job.values:
2095                 for test_id, test_data in build[u"tests"].items():
2096                     print(f"{test_id}")
2097                     if test_data.get(u"show-run", None) is None:
2098                         continue
2099                     for dut_name, data in test_data[u"show-run"].items():
2100                         if data.get(u"runtime", None) is None:
2101                             continue
2102                         runtime = loads(data[u"runtime"])
2103                         try:
2104                             threads_nr = len(runtime[0][u"clocks"])
2105                         except (IndexError, KeyError):
2106                             continue
2107                         threads = OrderedDict(
2108                             {idx: list() for idx in range(threads_nr)})
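                        # In VPP's "show runtime" counters each call processes
                        # one vector (batch) of packets, so clocks divided by
                        # vectors gives cycles per packet and vectors divided
                        # by calls gives the average vector size.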
2109                         for item in runtime:
2110                             for idx in range(threads_nr):
2111                                 if item[u"vectors"][idx] > 0:
2112                                     clocks = item[u"clocks"][idx] / \
2113                                              item[u"vectors"][idx]
2114                                 elif item[u"calls"][idx] > 0:
2115                                     clocks = item[u"clocks"][idx] / \
2116                                              item[u"calls"][idx]
2117                                 elif item[u"suspends"][idx] > 0:
2118                                     clocks = item[u"clocks"][idx] / \
2119                                              item[u"suspends"][idx]
2120                                 else:
2121                                     clocks = 0.0
2122
2123                                 if item[u"calls"][idx] > 0:
2124                                     vectors_call = item[u"vectors"][idx] / \
2125                                                    item[u"calls"][idx]
2126                                 else:
2127                                     vectors_call = 0.0
2128
2129                                 if int(item[u"calls"][idx]) + int(
2130                                         item[u"vectors"][idx]) + \
2131                                         int(item[u"suspends"][idx]):
2132                                     threads[idx].append([
2133                                         item[u"name"],
2134                                         item[u"calls"][idx],
2135                                         item[u"vectors"][idx],
2136                                         item[u"suspends"][idx],
2137                                         clocks,
2138                                         vectors_call
2139                                     ])
2140
2141                         print(f"Host IP: {data.get(u'host', '')}, "
2142                               f"Socket: {data.get(u'socket', '')}")
2143                         for thread_nr, thread in threads.items():
2144                             txt_table = prettytable.PrettyTable(
2145                                 (
2146                                     u"Name",
2147                                     u"Nr of Vectors",
2148                                     u"Nr of Packets",
2149                                     u"Suspends",
2150                                     u"Cycles per Packet",
2151                                     u"Average Vector Size"
2152                                 )
2153                             )
2154                             avg = 0.0
2155                             for row in thread:
2156                                 txt_table.add_row(row)
2157                                 avg += row[-1]
2158                             if len(thread) == 0:
2159                                 avg = u""
2160                             else:
2161                                 avg = f", Average Vector Size per Node: " \
2162                                       f"{(avg / len(thread)):.2f}"
2163                             th_name = u"main" if thread_nr == 0 \
2164                                 else f"worker_{thread_nr}"
2165                             print(f"{dut_name}, {th_name}{avg}")
2166                             txt_table.float_format = u".2"
2167                             txt_table.align = u"r"
2168                             txt_table.align[u"Name"] = u"l"
2169                             print(f"{txt_table.get_string()}\n")