# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Data pre-processing

- extract data from output.xml files generated by Jenkins jobs and store it
  in pandas' Series,
- filter the data using tags,
- provide access to the data.
"""

import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove, walk, listdir
from os.path import isfile, isdir, join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from json.decoder import JSONDecodeError

import hdrh.histogram
import hdrh.codec
import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file
from pal_errors import PresentationError


# Separator used in file names
SEPARATOR = u"__"


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a JSON structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    }
                }
            }

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            }

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            }

            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """

    REGEX_PLR_RATE = re.compile(
        r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
        r'PLRsearch upper bound::?\s(\d+.\d+)'
    )
    REGEX_NDRPDR_RATE = re.compile(
        r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:\s(\d+.\d+).*\n'
        r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:\s(\d+.\d+)'
    )
    REGEX_NDRPDR_GBPS = re.compile(
        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:.*,\s(\d+.\d+)'
    )
    REGEX_PERF_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    )
    REGEX_CPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
    )
    REGEX_PPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
    )
    REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')

    REGEX_VSAP_MSG_INFO = re.compile(
        r'Transfer Rate: (\d*.\d*).*\n'
        r'Latency: (\d*.\d*).*\n'
        r'Completed requests: (\d*).*\n'
        r'Failed requests: (\d*).*\n'
        r'Total data transferred: (\d*).*\n'
        r'Connection [cr]ps rate:\s*(\d*.\d*)'
    )

    # Needed for CPS and PPS tests
    REGEX_NDRPDR_LAT_BASE = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
    )
    REGEX_NDRPDR_LAT = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
    )

    REGEX_VERSION_VPP = re.compile(
        r"(return STDOUT Version:\s*|"
        r"VPP Version:\s*|VPP version:\s*)(.*)"
    )
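    # For illustration, REGEX_VERSION_VPP matches log lines such as the
    # hypothetical "VPP version: 22.02-release", with group(2) capturing the
    # version string itself ("22.02-release").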
    REGEX_VERSION_DPDK = re.compile(
        r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
    )
    REGEX_TCP = re.compile(
        r'Total\s(rps|cps|throughput):\s(\d*).*$'
    )
    REGEX_MRR = re.compile(
        r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
        r'tx\s(\d*),\srx\s(\d*)'
    )
    REGEX_BMRR = re.compile(
        r'.*trial results.*: \[(.*)\]'
    )
    REGEX_RECONF_LOSS = re.compile(
        r'Packets lost due to reconfig: (\d*)'
    )
    REGEX_RECONF_TIME = re.compile(
        r'Implied time lost: (\d*.[\de-]*)'
    )
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')

    REGEX_SH_RUN_HOST = re.compile(
        r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
    )
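    # For illustration, REGEX_SH_RUN_HOST matches fragments such as the
    # hypothetical 'hostname="10.0.0.1",hook="vpp1"', capturing the host IP
    # address and the socket/hook name.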

    def __init__(self, metadata, mapping, ignore, process_oper):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :param process_oper: If True, operational data (show run, telemetry) is
            processed.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        :type process_oper: bool
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        self._process_oper = process_oper

        # Number of PAPI History messages found:
        # 0 - no message
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._conf_history_lookup_nr = 0

        self._sh_run_counter = 0
        self._telemetry_kw_counter = 0
        self._telemetry_msg_counter = 0

        # Test ID of the currently processed test - the lowercase full path
        # to the test
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed,
            u"test-telemetry": self._get_telemetry
        }
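        # self._msg_type is set by the start_*_kw() visitors based on keyword
        # names; start_message() then dispatches each message through this
        # table.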

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_data_from_mrr_test_msg(self, msg):
        """Get info from message of MRR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
        if not groups or groups.lastindex != 1:
            return u"Test Failed."

        try:
            data = groups.group(1).split(u", ")
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        out_str = u"["
        try:
            for item in data:
                out_str += f"{(float(item) / 1e6):.2f}, "
            return out_str[:-2] + u"]"
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."
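    # For illustration, a hypothetical MRR message trailer such as
    # "[10000000.0, 10500000.0]" is reformatted by the method above to
    # "[10.00, 10.50]" (rates scaled to millions per second).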

    def _get_data_from_cps_test_msg(self, msg):
        """Get info from message of NDRPDR CPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 2:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
                f"2. {(float(groups.group(2)) / 1e6):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_pps_test_msg(self, msg):
        """Get info from message of NDRPDR PPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 4:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
                f"{float(groups.group(2)):5.2f}\n"
                f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
                f"{float(groups.group(4)):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_perf_test_msg(self, msg):
        """Get info from message of NDRPDR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
        if not groups or groups.lastindex != 10:
            return u"Test Failed."

        try:
            data = {
                u"ndr_low": float(groups.group(1)),
                u"ndr_low_b": float(groups.group(2)),
                u"pdr_low": float(groups.group(3)),
                u"pdr_low_b": float(groups.group(4)),
                u"pdr_lat_90_1": groups.group(5),
                u"pdr_lat_90_2": groups.group(6),
                u"pdr_lat_50_1": groups.group(7),
                u"pdr_lat_50_2": groups.group(8),
                u"pdr_lat_10_1": groups.group(9),
                u"pdr_lat_10_2": groups.group(10),
            }
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        def _process_lat(in_str_1, in_str_2):
            """Extract P50, P90 and P99 latencies or min, avg, max values from
            latency strings.

            :param in_str_1: Latency string for one direction produced by
                Robot Framework.
            :param in_str_2: Latency string for the second direction produced
                by Robot Framework.
            :type in_str_1: str
            :type in_str_2: str
            :returns: Processed latency values or None if a problem occurs.
            :rtype: tuple
            """
            in_list_1 = in_str_1.split('/', 3)
            in_list_2 = in_str_2.split('/', 3)

            if len(in_list_1) != 4 and len(in_list_2) != 4:
                return None

            in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
            try:
                hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
            except hdrh.codec.HdrLengthException:
                hdr_lat_1 = None

            in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
            try:
                hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
            except hdrh.codec.HdrLengthException:
                hdr_lat_2 = None

            if hdr_lat_1 and hdr_lat_2:
                hdr_lat = (
                    hdr_lat_1.get_value_at_percentile(50.0),
                    hdr_lat_1.get_value_at_percentile(90.0),
                    hdr_lat_1.get_value_at_percentile(99.0),
                    hdr_lat_2.get_value_at_percentile(50.0),
                    hdr_lat_2.get_value_at_percentile(90.0),
                    hdr_lat_2.get_value_at_percentile(99.0)
                )
                if all(hdr_lat):
                    return hdr_lat

            hdr_lat = (
                int(in_list_1[0]), int(in_list_1[1]), int(in_list_1[2]),
                int(in_list_2[0]), int(in_list_2[1]), int(in_list_2[2])
            )
            for item in hdr_lat:
                if item in (-1, 4294967295, 0):
                    return None
            return hdr_lat
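        # Note: the latency strings have the form "min/avg/max/hdrh", where
        # hdrh is an optional base64-encoded HdrHistogram (hypothetical
        # example: "1/5/10/HISTFAAA..."); its padding is restored in
        # _process_lat() before decoding.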

        try:
            out_msg = (
                f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
                f"{data[u'ndr_low_b']:5.2f}"
                f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
                f"{data[u'pdr_low_b']:5.2f}"
            )
            latency = (
                _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
                _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
                _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
            )
            if all(latency):
                max_len = len(str(max((max(item) for item in latency))))
                max_len = 4 if max_len < 4 else max_len

                for idx, lat in enumerate(latency):
                    if not idx:
                        out_msg += u"\n"
                    out_msg += (
                        f"\n{idx + 3}. "
                        f"{lat[0]:{max_len}d} "
                        f"{lat[1]:{max_len}d} "
                        f"{lat[2]:{max_len}d}      "
                        f"{lat[3]:{max_len}d} "
                        f"{lat[4]:{max_len}d} "
                        f"{lat[5]:{max_len}d} "
                    )

            return out_msg

        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"return STDOUT Version:") or \
                msg.message.count(u"VPP Version:") or \
                msg.message.count(u"VPP version:"):
            self._version = str(
                re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
            )
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None
            logging.info(self._version)

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(
                r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
                u"",
                msg.message,
                count=1
            ).replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI
        command Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if not msg.message.count(u"stats runtime"):
            return

        # Temporary solution
        if self._sh_run_counter > 1:
            return

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = dict()

        groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""

        dut = u"dut{nr}".format(
            nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)

        self._data[u'tests'][self._test_id][u'show-run'][dut] = \
            copy.copy(
                {
                    u"host": host,
                    u"socket": sock,
                    u"runtime": str(msg.message).replace(u' ', u'').
                                replace(u'\n', u'').replace(u"'", u'"').
                                replace(u'b"', u'"').replace(u'u"', u'"').
                                split(u":", 1)[1]
                }
            )

    def _get_telemetry(self, msg):
        """Called when extraction of VPP telemetry data is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._telemetry_kw_counter > 1:
            return
        if not msg.message.count(u"# TYPE vpp_runtime_calls"):
            return

        if u"telemetry-show-run" not in \
                self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()

        self._telemetry_msg_counter += 1
        groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""
        runtime = {
            u"source_type": u"node",
            u"source_id": host,
            u"msg_type": u"metric",
            u"log_level": u"INFO",
            u"timestamp": msg.timestamp,
            u"msg": u"show_runtime",
            u"host": host,
            u"socket": sock,
            u"data": list()
        }
        for line in msg.message.splitlines():
            if not line.startswith(u"vpp_runtime_"):
                continue
            try:
                params, value, timestamp = line.rsplit(u" ", maxsplit=2)
                cut = params.index(u"{")
                name = params[:cut].split(u"_", maxsplit=2)[-1]
                # Evaluate the metric's {key="value",...} label block as
                # keyword arguments to dict().
                labels = eval(
                    u"dict" + params[cut:].replace('{', '(').replace('}', ')')
                )
                labels[u"graph_node"] = labels.pop(u"name")
                runtime[u"data"].append(
                    {
                        u"name": name,
                        u"value": value,
                        u"timestamp": timestamp,
                        u"labels": labels
                    }
                )
            except (TypeError, ValueError, IndexError):
                continue
        self._data[u'tests'][self._test_id][u'telemetry-show-run']\
            [f"dut{self._telemetry_msg_counter}"] = copy.copy(
                {
                    u"host": host,
                    u"socket": sock,
                    u"runtime": runtime
                }
            )

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status
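    # For illustration, REGEX_NDRPDR_RATE used above expects a message shaped
    # roughly like this hypothetical fragment (one extra line between the
    # LOWER and UPPER bounds):
    #   NDR_LOWER: 12345678.5 ...
    #   ...
    #   NDR_UPPER: 12345680.5 ...
    #   PDR_LOWER: 23456789.5 ...
    #   ...
    #   PDR_UPPER: 23456791.5 ...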

    def _get_ndrpdr_throughput_gbps(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
        test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        gbps = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_GBPS, msg)

        if groups is not None:
            try:
                gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
                gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
                gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
                gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return gbps, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0,
            u"avg": -1.0,
            u"max": -1.0,
            u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"LAT0": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR10": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR50": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR90": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
        }

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)
        if groups is None:
            groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
        if groups is None:
            return latency, u"FAIL"

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with corresponding keys; the values are floats,
                except hdrh which is a string.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
            """
            in_list = in_str.split('/', 3)

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

        try:
            latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
            latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
            latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
            latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
            if groups.lastindex == 4:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        try:
            latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
            latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
            latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
            latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
            latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
            latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
            latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
            latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
            if groups.lastindex == 12:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        return latency, u"FAIL"
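    # The two latency regexes yield either 4 groups (REGEX_NDRPDR_LAT_BASE:
    # NDR and PDR only) or 12 groups (REGEX_NDRPDR_LAT: plus the PDR90/50/10
    # and LAT0 measurements), hence the two accepted lastindex values above.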

    @staticmethod
    def _get_hoststack_data(msg, tags):
        """Get data from the hoststack test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        result = dict()
        status = u"FAIL"

        msg = msg.replace(u"'", u'"').replace(u" ", u"")
        if u"LDPRELOAD" in tags:
            try:
                result = loads(msg)
                status = u"PASS"
            except JSONDecodeError:
                pass
        elif u"VPPECHO" in tags:
            try:
                msg_lst = msg.replace(u"}{", u"} {").split(u" ")
                result = dict(
                    client=loads(msg_lst[0]),
                    server=loads(msg_lst[1])
                )
                status = u"PASS"
            except (JSONDecodeError, IndexError):
                pass

        return result, status
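    # For illustration, a hypothetical VPPECHO message consists of two
    # concatenated JSON-like objects, "{'time': ...}{'time': ...}"; after
    # quote normalisation they are split and parsed into the "client" and
    # "server" keys above.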

    def _get_vsap_data(self, msg, tags):
        """Get data from the vsap test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        result = dict()
        status = u"FAIL"

        groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
        if groups is not None:
            try:
                result[u"transfer-rate"] = float(groups.group(1)) * 1e3
                result[u"latency"] = float(groups.group(2))
                result[u"completed-requests"] = int(groups.group(3))
                result[u"failed-requests"] = int(groups.group(4))
                result[u"bytes-transferred"] = int(groups.group(5))
                if u"TCP_CPS" in tags:
                    result[u"cps"] = float(groups.group(6))
                elif u"TCP_RPS" in tags:
                    result[u"rps"] = float(groups.group(6))
                else:
                    return result, status
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return result, status
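    # For illustration, REGEX_VSAP_MSG_INFO expects a report of the
    # hypothetical form:
    #   Transfer Rate: 1234.5 ...
    #   Latency: 0.9 ...
    #   Completed requests: 100000 ...
    #   Failed requests: 0 ...
    #   Total data transferred: 123456789 ...
    #   Connection cps rate: 2000.0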
1012
1013     def visit_suite(self, suite):
1014         """Implements traversing through the suite and its direct children.
1015
1016         :param suite: Suite to process.
1017         :type suite: Suite
1018         :returns: Nothing.
1019         """
1020         if self.start_suite(suite) is not False:
1021             suite.suites.visit(self)
1022             suite.tests.visit(self)
1023             self.end_suite(suite)
1024
1025     def start_suite(self, suite):
1026         """Called when suite starts.
1027
1028         :param suite: Suite to process.
1029         :type suite: Suite
1030         :returns: Nothing.
1031         """
1032
1033         try:
1034             parent_name = suite.parent.name
1035         except AttributeError:
1036             return
1037
1038         self._data[u"suites"][suite.longname.lower().
1039                               replace(u'"', u"'").
1040                               replace(u" ", u"_")] = {
1041                                   u"name": suite.name.lower(),
1042                                   u"doc": suite.doc,
1043                                   u"parent": parent_name,
1044                                   u"level": len(suite.longname.split(u"."))
1045                               }
1046
1047         suite.setup.visit(self)
1048
1049     def end_suite(self, suite):
1050         """Called when suite ends.
1051
1052         :param suite: Suite to process.
1053         :type suite: Suite
1054         :returns: Nothing.
1055         """
1056
1057     def visit_test(self, test):
1058         """Implements traversing through the test.
1059
1060         :param test: Test to process.
1061         :type test: Test
1062         :returns: Nothing.
1063         """
1064         if self.start_test(test) is not False:
1065             test.body.visit(self)
1066             self.end_test(test)
1067
1068     def start_test(self, test):
1069         """Called when test starts.
1070
1071         :param test: Test to process.
1072         :type test: Test
1073         :returns: Nothing.
1074         """
1075
1076         self._sh_run_counter = 0
1077         self._telemetry_kw_counter = 0
1078         self._telemetry_msg_counter = 0
1079
1080         longname_orig = test.longname.lower()
1081
1082         # Check the ignore list
1083         if longname_orig in self._ignore:
1084             return
1085
1086         tags = [str(tag) for tag in test.tags]
1087         test_result = dict()
1088
1089         # Change the TC long name and name if defined in the mapping table
1090         longname = self._mapping.get(longname_orig, None)
1091         if longname is not None:
1092             name = longname.split(u'.')[-1]
1093             logging.debug(
1094                 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1095                 f"{name}"
1096             )
1097         else:
1098             longname = longname_orig
1099             name = test.name.lower()
1100
1101         # Remove TC number from the TC long name (backward compatibility):
1102         self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
1103         # Remove TC number from the TC name (not needed):
1104         test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
1105
1106         test_result[u"parent"] = test.parent.name.lower()
1107         test_result[u"tags"] = tags
1108         test_result["doc"] = test.doc
1109         test_result[u"type"] = u""
1110         test_result[u"status"] = test.status
1111         test_result[u"starttime"] = test.starttime
1112         test_result[u"endtime"] = test.endtime
1113
1114         if test.status == u"PASS":
1115             if u"NDRPDR" in tags:
1116                 if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
1117                     test_result[u"msg"] = self._get_data_from_pps_test_msg(
1118                         test.message)
1119                 elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1120                     test_result[u"msg"] = self._get_data_from_cps_test_msg(
1121                         test.message)
1122                 else:
1123                     test_result[u"msg"] = self._get_data_from_perf_test_msg(
1124                         test.message)
1125             elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1126                 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1127                     test.message)
1128             else:
1129                 test_result[u"msg"] = test.message
1130         else:
1131             test_result[u"msg"] = test.message
1132
1133         if u"PERFTEST" in tags and u"TREX" not in tags:
1134             # Replace info about cores (e.g. -1c-) with the info about threads
1135             # and cores (e.g. -1t1c-) in the long test case names and in the
1136             # test case names if necessary.
1137             tag_count = 0
1138             tag_tc = str()
1139             for tag in test_result[u"tags"]:
1140                 groups = re.search(self.REGEX_TC_TAG, tag)
1141                 if groups:
1142                     tag_count += 1
1143                     tag_tc = tag
1144
1145             if tag_count == 1:
1146                 self._test_id = re.sub(
1147                     self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1148                     self._test_id, count=1
1149                 )
1150                 test_result[u"name"] = re.sub(
1151                     self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1152                     test_result["name"], count=1
1153                 )
1154             else:
1155                 test_result[u"status"] = u"FAIL"
1156                 self._data[u"tests"][self._test_id] = test_result
1157                 logging.debug(
1158                     f"The test {self._test_id} has no or more than one "
1159                     f"multi-threading tags.\n"
1160                     f"Tags: {test_result[u'tags']}"
1161                 )
1162                 return
1163
1164         if u"DEVICETEST" in tags:
1165             test_result[u"type"] = u"DEVICETEST"
1166         elif u"NDRPDR" in tags:
1167             if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1168                 test_result[u"type"] = u"CPS"
1169             else:
1170                 test_result[u"type"] = u"NDRPDR"
1171             if test.status == u"PASS":
1172                 test_result[u"throughput"], test_result[u"status"] = \
1173                     self._get_ndrpdr_throughput(test.message)
1174                 test_result[u"gbps"], test_result[u"status"] = \
1175                     self._get_ndrpdr_throughput_gbps(test.message)
1176                 test_result[u"latency"], test_result[u"status"] = \
1177                     self._get_ndrpdr_latency(test.message)
1178         elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1179             if u"MRR" in tags:
1180                 test_result[u"type"] = u"MRR"
1181             else:
1182                 test_result[u"type"] = u"BMRR"
1183             if test.status == u"PASS":
1184                 test_result[u"result"] = dict()
1185                 groups = re.search(self.REGEX_BMRR, test.message)
1186                 if groups is not None:
1187                     items_str = groups.group(1)
1188                     items_float = [
1189                         float(item.strip().replace(u"'", u""))
1190                         for item in items_str.split(",")
1191                     ]
1192                     # Use whole list in CSIT-1180.
1193                     stats = jumpavg.AvgStdevStats.for_runs(items_float)
1194                     test_result[u"result"][u"samples"] = items_float
1195                     test_result[u"result"][u"receive-rate"] = stats.avg
1196                     test_result[u"result"][u"receive-stdev"] = stats.stdev
1197                 else:
1198                     groups = re.search(self.REGEX_MRR, test.message)
1199                     test_result[u"result"][u"receive-rate"] = \
1200                         float(groups.group(3)) / float(groups.group(1))
1201         elif u"SOAK" in tags:
1202             test_result[u"type"] = u"SOAK"
1203             if test.status == u"PASS":
1204                 test_result[u"throughput"], test_result[u"status"] = \
1205                     self._get_plr_throughput(test.message)
1206         elif u"LDP_NGINX" in tags:
1207             test_result[u"type"] = u"LDP_NGINX"
1208             test_result[u"result"], test_result[u"status"] = \
1209                 self._get_vsap_data(test.message, tags)
1210         elif u"HOSTSTACK" in tags:
1211             test_result[u"type"] = u"HOSTSTACK"
1212             if test.status == u"PASS":
1213                 test_result[u"result"], test_result[u"status"] = \
1214                     self._get_hoststack_data(test.message, tags)
1215         elif u"RECONF" in tags:
1216             test_result[u"type"] = u"RECONF"
1217             if test.status == u"PASS":
1218                 test_result[u"result"] = None
1219                 try:
1220                     grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1221                     grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1222                     test_result[u"result"] = {
1223                         u"loss": int(grps_loss.group(1)),
1224                         u"time": float(grps_time.group(1))
1225                     }
1226                 except (AttributeError, IndexError, ValueError, TypeError):
1227                     test_result[u"status"] = u"FAIL"
1228         else:
1229             test_result[u"status"] = u"FAIL"
1230
1231         self._data[u"tests"][self._test_id] = test_result
1232
1233     def end_test(self, test):
1234         """Called when test ends.
1235
1236         :param test: Test to process.
1237         :type test: Test
1238         :returns: Nothing.
1239         """
1240
1241     def visit_keyword(self, keyword):
1242         """Implements traversing through the keyword and its child keywords.
1243
1244         :param keyword: Keyword to process.
1245         :type keyword: Keyword
1246         :returns: Nothing.
1247         """
1248         if self.start_keyword(keyword) is not False:
1249             self.end_keyword(keyword)
1250
1251     def start_keyword(self, keyword):
1252         """Called when keyword starts. Default implementation does nothing.
1253
1254         :param keyword: Keyword to process.
1255         :type keyword: Keyword
1256         :returns: Nothing.
1257         """
1258         try:
1259             if keyword.type == u"setup":
1260                 self.visit_setup_kw(keyword)
1261             elif keyword.type == u"teardown":
1262                 self.visit_teardown_kw(keyword)
1263             else:
1264                 self.visit_test_kw(keyword)
1265         except AttributeError:
1266             pass
1267
1268     def end_keyword(self, keyword):
1269         """Called when keyword ends. Default implementation does nothing.
1270
1271         :param keyword: Keyword to process.
1272         :type keyword: Keyword
1273         :returns: Nothing.
1274         """
1275
1276     def visit_test_kw(self, test_kw):
1277         """Implements traversing through the test keyword and its child
1278         keywords.
1279
1280         :param test_kw: Keyword to process.
1281         :type test_kw: Keyword
1282         :returns: Nothing.
1283         """
1284         for keyword in test_kw.body:
1285             if self.start_test_kw(keyword) is not False:
1286                 self.visit_test_kw(keyword)
1287                 self.end_test_kw(keyword)
1288
1289     def start_test_kw(self, test_kw):
1290         """Called when test keyword starts. Default implementation does
1291         nothing.
1292
1293         :param test_kw: Keyword to process.
1294         :type test_kw: Keyword
1295         :returns: Nothing.
1296         """
1297         if not self._process_oper:
1298             return
1299
1300         if test_kw.name.count(u"Run Telemetry On All Duts"):
1301             self._msg_type = u"test-telemetry"
1302             self._telemetry_kw_counter += 1
1303         elif test_kw.name.count(u"Show Runtime On All Duts"):
1304             self._msg_type = u"test-show-runtime"
1305             self._sh_run_counter += 1
1306         else:
1307             return
1308         test_kw.messages.visit(self)
1309
1310     def end_test_kw(self, test_kw):
1311         """Called when keyword ends. Default implementation does nothing.
1312
1313         :param test_kw: Keyword to process.
1314         :type test_kw: Keyword
1315         :returns: Nothing.
1316         """
1317
1318     def visit_setup_kw(self, setup_kw):
1319         """Implements traversing through the teardown keyword and its child
1320         keywords.
1321
1322         :param setup_kw: Keyword to process.
1323         :type setup_kw: Keyword
1324         :returns: Nothing.
1325         """
1326         for keyword in setup_kw.body:
1327             if self.start_setup_kw(keyword) is not False:
1328                 self.visit_setup_kw(keyword)
1329                 self.end_setup_kw(keyword)
1330
1331     def start_setup_kw(self, setup_kw):
1332         """Called when teardown keyword starts. Default implementation does
1333         nothing.
1334
1335         :param setup_kw: Keyword to process.
1336         :type setup_kw: Keyword
1337         :returns: Nothing.
1338         """
1339         if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1340                 and not self._version:
1341             self._msg_type = u"vpp-version"
1342         elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
1343                 not self._version:
1344             self._msg_type = u"dpdk-version"
1345         elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1346             self._msg_type = u"testbed"
1347         else:
1348             return
1349         setup_kw.messages.visit(self)
1350
1351     def end_setup_kw(self, setup_kw):
1352         """Called when keyword ends. Default implementation does nothing.
1353
1354         :param setup_kw: Keyword to process.
1355         :type setup_kw: Keyword
1356         :returns: Nothing.
1357         """
1358
1359     def visit_teardown_kw(self, teardown_kw):
1360         """Implements traversing through the teardown keyword and its child
1361         keywords.
1362
1363         :param teardown_kw: Keyword to process.
1364         :type teardown_kw: Keyword
1365         :returns: Nothing.
1366         """
1367         for keyword in teardown_kw.body:
1368             if self.start_teardown_kw(keyword) is not False:
1369                 self.visit_teardown_kw(keyword)
1370                 self.end_teardown_kw(keyword)
1371
1372     def start_teardown_kw(self, teardown_kw):
1373         """Called when teardown keyword starts
1374
1375         :param teardown_kw: Keyword to process.
1376         :type teardown_kw: Keyword
1377         :returns: Nothing.
1378         """
1379         if teardown_kw.name.count(u"Show Papi History On All Duts"):
1380             self._conf_history_lookup_nr = 0
1381             self._msg_type = u"teardown-papi-history"
1382             teardown_kw.messages.visit(self)
1383
1384     def end_teardown_kw(self, teardown_kw):
1385         """Called when keyword ends. Default implementation does nothing.
1386
1387         :param teardown_kw: Keyword to process.
1388         :type teardown_kw: Keyword
1389         :returns: Nothing.
1390         """
1391
1392     def visit_message(self, msg):
1393         """Implements visiting the message.
1394
1395         :param msg: Message to process.
1396         :type msg: Message
1397         :returns: Nothing.
1398         """
1399         if self.start_message(msg) is not False:
1400             self.end_message(msg)
1401
1402     def start_message(self, msg):
1403         """Called when message starts. Get required information from messages:
1404         - VPP version.
1405
1406         :param msg: Message to process.
1407         :type msg: Message
1408         :returns: Nothing.
1409         """
1410         if self._msg_type:
1411             self.parse_msg[self._msg_type](msg)
1412
1413     def end_message(self, msg):
1414         """Called when message ends. Default implementation does nothing.
1415
1416         :param msg: Message to process.
1417         :type msg: Message
1418         :returns: Nothing.
1419         """
1420
1421
1422 class InputData:
1423     """Input data
1424
1425     The data is extracted from output.xml files generated by Jenkins jobs and
1426     stored in pandas' DataFrames.
1427
1428     The data structure:
1429     - job name
1430       - build number
1431         - metadata
1432           (as described in ExecutionChecker documentation)
1433         - suites
1434           (as described in ExecutionChecker documentation)
1435         - tests
1436           (as described in ExecutionChecker documentation)
1437     """
1438
1439     def __init__(self, spec, for_output):
1440         """Initialization.
1441
1442         :param spec: Specification.
1443         :param for_output: Output to be generated from downloaded data.
1444         :type spec: Specification
1445         :type for_output: str
1446         """
1447
1448         # Specification:
1449         self._cfg = spec
1450
1451         self._for_output = for_output
1452
1453         # Data store:
1454         self._input_data = pd.Series(dtype="float64")
1455
1456     @property
1457     def data(self):
1458         """Getter - Input data.
1459
1460         :returns: Input data
1461         :rtype: pandas.Series
1462         """
1463         return self._input_data
1464
1465     def metadata(self, job, build):
1466         """Getter - metadata
1467
1468         :param job: Job which metadata we want.
1469         :param build: Build which metadata we want.
1470         :type job: str
1471         :type build: str
1472         :returns: Metadata
1473         :rtype: pandas.Series
1474         """
1475         return self.data[job][build][u"metadata"]
1476
1477     def suites(self, job, build):
1478         """Getter - suites
1479
1480         :param job: Job which suites we want.
1481         :param build: Build which suites we want.
1482         :type job: str
1483         :type build: str
1484         :returns: Suites.
1485         :rtype: pandas.Series
1486         """
1487         return self.data[job][str(build)][u"suites"]
1488
1489     def tests(self, job, build):
1490         """Getter - tests.
1491
1492         :param job: Job whose tests we want.
1493         :param build: Build whose tests we want.
1494         :type job: str
1495         :type build: str
1496         :returns: Tests.
1497         :rtype: pandas.Series
1498         """
1499         return self.data[job][str(build)][u"tests"]
1500
1501     def _parse_tests(self, job, build):
1502         """Process data from a robot output.xml file and return JSON-structured
1503         data.
1504
1505         :param job: Name of the job whose build output data is processed.
1506         :param build: The build whose output data is processed.
1507         :type job: str
1508         :type build: dict
1509         :returns: JSON data structure, or None if parsing fails.
1510         :rtype: dict
1511         """
1512
1513         metadata = {
1514             u"job": job,
1515             u"build": build
1516         }
1517
1518         with open(build[u"file-name"], u'r') as data_file:
1519             try:
1520                 result = ExecutionResult(data_file)
1521             except errors.DataError as err:
1522                 logging.error(
1523                     f"Error occurred while parsing output.xml: {repr(err)}"
1524                 )
1525                 return None
1526
1527         process_oper = False
1528         if u"-vpp-perf-report-coverage-" in job:
1529             process_oper = True
1530         # elif u"-vpp-perf-report-iterative-" in job:
1531         #     # Exceptions for TBs where we do not have coverage data:
1532         #     for item in (u"-2n-icx", ):
1533         #         if item in job:
1534         #             process_oper = True
1535         #             break
1536         checker = ExecutionChecker(
1537             metadata, self._cfg.mapping, self._cfg.ignore, process_oper
1538         )
1539         result.visit(checker)
1540
1541         checker.data[u"metadata"][u"tests_total"] = \
1542             result.statistics.total.total
1543         checker.data[u"metadata"][u"tests_passed"] = \
1544             result.statistics.total.passed
1545         checker.data[u"metadata"][u"tests_failed"] = \
1546             result.statistics.total.failed
1547         checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
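         # Robot's suite.endtime looks like u"20220101 12:34:56.789"; its
         # first 14 characters (u"20220101 12:34") match the u"%Y%m%d %H:%M"
         # format parsed later in _download_and_parse_build().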
1548         checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]
1549
1550         return checker.data
1551
1552     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1553         """Download and parse the input data file.
1554
1555         :param job: Name of the Jenkins job which generated the processed input
1556             file.
1557         :param build: Information about the Jenkins build which generated the
1558             processed input file.
1559         :param repeat: Repeat the download the specified number of times if it
1560             is not successful.
1561         :param pid: PID of the process executing this method.
1562         :type job: str
1563         :type build: dict
1564         :type repeat: int
1565         :type pid: int
1566         """
1567
1568         logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1569
1570         state = u"failed"
1571         success = False
1572         data = None
1573         do_repeat = repeat
1574         while do_repeat:
1575             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1576             if success:
1577                 break
1578             do_repeat -= 1
1579         if not success:
1580             logging.error(
1581                 f"The input data file from the job {job}, build "
1582                 f"{build[u'build']} could not be downloaded or is damaged. "
1583                 f"Skipped."
1584             )
1585         if success:
1586             logging.info(f"  Processing data from build {build[u'build']}")
1587             data = self._parse_tests(job, build)
1588             if data is None:
1589                 logging.error(
1590                     f"Input data file from the job {job}, build "
1591                     f"{build[u'build']} is damaged. Skipped."
1592                 )
1593             else:
1594                 state = u"processed"
1595
1596             try:
1597                 remove(build[u"file-name"])
1598             except OSError as err:
1599                 logging.error(
1600                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1601                 )
1602
1603         # If the time-period is defined in the specification file, remove all
1604         # files which are outside the time period.
1605         is_last = False
1606         timeperiod = self._cfg.environment.get(u"time-period", None)
1607         if timeperiod and data:
1608             now = dt.utcnow()
1609             timeperiod = timedelta(int(timeperiod))
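             # timedelta() interprets its first argument as days, so the
             # configured time-period is a number of days.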
1610             metadata = data.get(u"metadata", None)
1611             if metadata:
1612                 generated = metadata.get(u"generated", None)
1613                 if generated:
1614                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1615                     if (now - generated) > timeperiod:
1616                         # Remove the data and the file:
1617                         state = u"removed"
1618                         data = None
1619                         is_last = True
1620                         logging.info(
1621                             f"  The build {job}/{build[u'build']} is "
1622                             f"outdated and will be removed."
1623                         )
1624         return {
1625             u"data": data,
1626             u"state": state,
1627             u"job": job,
1628             u"build": build,
1629             u"last": is_last
1630         }
1631
1632     def download_and_parse_data(self, repeat=1):
1633         """Download the input data files, parse input data from input files and
1634         store in pandas' Series.
1635
1636         :param repeat: Repeat the download the specified number of times if it
1637             is not successful.
1638         :type repeat: int
1639         """
1640
1641         logging.info(u"Downloading and parsing input files ...")
1642
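         # self._cfg.input is expected to map job names to lists of build
         # dicts, e.g. {u"job-name": [{u"build": 21, ...}, ...]}; the job name
         # and build number here are illustrative.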
1643         for job, builds in self._cfg.input.items():
1644             for build in builds:
1645
1646                 result = self._download_and_parse_build(job, build, repeat)
1647                 if result[u"last"]:
1648                     break
1649                 build_nr = result[u"build"][u"build"]
1650
1651                 if result[u"data"]:
1652                     data = result[u"data"]
1653                     build_data = pd.Series({
1654                         u"metadata": pd.Series(
1655                             list(data[u"metadata"].values()),
1656                             index=list(data[u"metadata"].keys())
1657                         ),
1658                         u"suites": pd.Series(
1659                             list(data[u"suites"].values()),
1660                             index=list(data[u"suites"].keys())
1661                         ),
1662                         u"tests": pd.Series(
1663                             list(data[u"tests"].values()),
1664                             index=list(data[u"tests"].keys())
1665                         )
1666                     })
1667
1668                     if self._input_data.get(job, None) is None:
1669                         self._input_data[job] = pd.Series(dtype="float64")
1670                     self._input_data[job][str(build_nr)] = build_data
1671                     self._cfg.set_input_file_name(
1672                         job, build_nr, result[u"build"][u"file-name"]
1673                     )
1674                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1675
1676                 mem_alloc = \
1677                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1678                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1679
1680         logging.info(u"Done.")
1681
1682         msg = u"Successful downloads from the sources:\n"
1683         for source in self._cfg.environment[u"data-sources"]:
1684             if source[u"successful-downloads"]:
1685                 msg += (
1686                     f"{source[u'url']}/{source[u'path']}/"
1687                     f"{source[u'file-name']}: "
1688                     f"{source[u'successful-downloads']}\n"
1689                 )
1690         logging.info(msg)
1691
1692     def process_local_file(self, local_file, job=u"local", build_nr=1,
1693                            replace=True):
1694         """Process a local XML file given as a command-line parameter.
1695
1696         :param local_file: The file to process.
1697         :param job: Job name.
1698         :param build_nr: Build number.
1699         :param replace: If True, the information about jobs and builds is
1700             replaced by the new one, otherwise the new jobs and builds are
1701             added.
1702         :type local_file: str
1703         :type job: str
1704         :type build_nr: int
1705         :type replace: bool
1706         :raises: PresentationError if an error occurs.
1707         """
1708         if not isfile(local_file):
1709             raise PresentationError(f"The file {local_file} does not exist.")
1710
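         # If the file name itself is a number (e.g. .../21.xml), use it as
         # the build number; otherwise keep the given build_nr.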
1711         try:
1712             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1713         except (IndexError, ValueError):
1714             pass
1715
1716         build = {
1717             u"build": build_nr,
1718             u"status": u"failed",
1719             u"file-name": local_file
1720         }
1721         if replace:
1722             self._cfg.input = dict()
1723         self._cfg.add_build(job, build)
1724
1725         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1726         data = self._parse_tests(job, build)
1727         if data is None:
1728             raise PresentationError(
1729                 f"Error occurred while parsing the file {local_file}"
1730             )
1731
1732         build_data = pd.Series({
1733             u"metadata": pd.Series(
1734                 list(data[u"metadata"].values()),
1735                 index=list(data[u"metadata"].keys())
1736             ),
1737             u"suites": pd.Series(
1738                 list(data[u"suites"].values()),
1739                 index=list(data[u"suites"].keys())
1740             ),
1741             u"tests": pd.Series(
1742                 list(data[u"tests"].values()),
1743                 index=list(data[u"tests"].keys())
1744             )
1745         })
1746
1747         if self._input_data.get(job, None) is None:
1748             self._input_data[job] = pd.Series(dtype="float64")
1749         self._input_data[job][str(build_nr)] = build_data
1750
1751         self._cfg.set_input_state(job, build_nr, u"processed")
1752
1753     def process_local_directory(self, local_dir, replace=True):
1754         """Process a local directory with XML file(s). The directory is
1755         processed as a 'job' and the XML files in it as builds.
1756         If the given directory contains only sub-directories, these
1757         sub-directories are processed as jobs and the corresponding XML files
1758         as builds of their jobs.
1759
1760         :param local_dir: Local directory to process.
1761         :param replace: If True, the information about jobs and builds is
1762             replaced by the new one, otherwise the new jobs and builds are
1763             added.
1764         :type local_dir: str
1765         :type replace: bool
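         :raises: PresentationError if the directory does not exist, is empty,
             or contains both files and sub-directories.

         For illustration, both layouts below are accepted (the file and
         directory names are hypothetical):

             local_dir/1.xml, local_dir/2.xml  ->  one job, two builds
             local_dir/job_a/1.xml, local_dir/job_b/1.xml  ->  two jobs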
1766         """
1767         if not isdir(local_dir):
1768             raise PresentationError(
1769                 f"The directory {local_dir} does not exist."
1770             )
1771
1772         # Check if the given directory includes only files, or only directories
1773         _, dirnames, filenames = next(walk(local_dir))
1774
1775         if filenames and not dirnames:
1776             filenames.sort()
1777             # local_builds:
1778             # key: dir (job) name, value: list of file names (builds)
1779             local_builds = {
1780                 local_dir: [join(local_dir, name) for name in filenames]
1781             }
1782
1783         elif dirnames and not filenames:
1784             dirnames.sort()
1785             # local_builds:
1786             # key: dir (job) name, value: list of file names (builds)
1787             local_builds = dict()
1788             for dirname in dirnames:
1789                 builds = [
1790                     join(local_dir, dirname, name)
1791                     for name in listdir(join(local_dir, dirname))
1792                     if isfile(join(local_dir, dirname, name))
1793                 ]
1794                 if builds:
1795                     local_builds[dirname] = sorted(builds)
1796
1797         elif not filenames and not dirnames:
1798             raise PresentationError(f"The directory {local_dir} is empty.")
1799         else:
1800             raise PresentationError(
1801                 f"The directory {local_dir} can include only files or only "
1802                 f"directories, not both.\nThe directory {local_dir} includes "
1803                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1804             )
1805
1806         if replace:
1807             self._cfg.input = dict()
1808
1809         for job, files in local_builds.items():
1810             for idx, local_file in enumerate(files):
1811                 self.process_local_file(local_file, job, idx + 1, replace=False)
1812
1813     @staticmethod
1814     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1815         """Return the index of the character which ends the tag.
1816
1817         :param tag_filter: The string where the end of the tag is searched.
1818         :param start: The index where the search is started.
1819         :param closer: The character which closes the tag.
1820         :type tag_filter: str
1821         :type start: int
1822         :type closer: str
1823         :returns: The index of the tag closer, or None if not found.
1824         :rtype: int
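
         For illustration (doctest-style, default closer):

             >>> InputData._end_of_tag(u"'64B' and '1T1C'")
             4
             >>> InputData._end_of_tag(u"no tags here") is None
             True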
1825         """
1826         try:
1827             idx_opener = tag_filter.index(closer, start)
1828             return tag_filter.index(closer, idx_opener + 1)
1829         except ValueError:
1830             return None
1831
1832     @staticmethod
1833     def _condition(tag_filter):
1834         """Create a conditional statement from the given tag filter.
1835
1836         :param tag_filter: Filter based on tags from the element specification.
1837         :type tag_filter: str
1838         :returns: Conditional statement which can be evaluated.
1839         :rtype: str
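
         For illustration, a tag filter like the ones used in element
         specifications:

             >>> InputData._condition(u"'1T1C' and '64B'")
             "'1T1C' in tags and '64B' in tags"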
1840         """
1841         index = 0
1842         while True:
1843             index = InputData._end_of_tag(tag_filter, index)
1844             if index is None:
1845                 return tag_filter
1846             index += 1
1847             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1848
1849     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1850                     continue_on_error=False):
1851         """Filter required data from the given jobs and builds.
1852
1853         The output data structure is:
1854         - job 1
1855           - build 1
1856             - test (or suite) 1 ID:
1857               - param 1
1858               - param 2
1859               ...
1860               - param n
1861             ...
1862             - test (or suite) n ID:
1863             ...
1864           ...
1865           - build n
1866         ...
1867         - job n
1868
1869         :param element: Element which will use the filtered data.
1870         :param params: Parameters which will be included in the output. If None,
1871             all parameters are included.
1872         :param data: If not None, this data is used instead of data specified
1873             in the element.
1874         :param data_set: The set of data to be filtered: tests, suites,
1875             metadata.
1876         :param continue_on_error: Continue if there is an error while reading
1877             the data. The item will be empty then.
1878         :type element: pandas.Series
1879         :type params: list
1880         :type data: dict
1881         :type data_set: str
1882         :type continue_on_error: bool
1883         :returns: Filtered data.
1884         :rtype: pandas.Series
1885         """
1886
1887         try:
1888             if data_set == "suites":
1889                 cond = u"True"
1890             elif element[u"filter"] in (u"all", u"template"):
1891                 cond = u"True"
1892             else:
1893                 cond = InputData._condition(element[u"filter"])
1894             logging.debug(f"   Filter: {cond}")
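             # cond is evaluated against each test's tags below, e.g. the
             # (illustrative) filter u"'NDRPDR' and '1T1C'" becomes
             # u"'NDRPDR' in tags and '1T1C' in tags".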
1895         except KeyError:
1896             logging.error(u"  No filter defined.")
1897             return None
1898
1899         if params is None:
1900             params = element.get(u"parameters", None)
1901             if params:
1902                 params.extend((u"type", u"status"))
1903
1904         data_to_filter = data if data else element[u"data"]
1905         data = pd.Series(dtype="float64")
1906         try:
1907             for job, builds in data_to_filter.items():
1908                 data[job] = pd.Series(dtype="float64")
1909                 for build in builds:
1910                     data[job][str(build)] = pd.Series(dtype="float64")
1911                     try:
1912                         data_dict = dict(
1913                             self.data[job][str(build)][data_set].items())
1914                     except KeyError:
1915                         if continue_on_error:
1916                             continue
1917                         return None
1918
1919                     for test_id, test_data in data_dict.items():
1920                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1921                             data[job][str(build)][test_id] = \
1922                                 pd.Series(dtype="float64")
1923                             if params is None:
1924                                 for param, val in test_data.items():
1925                                     data[job][str(build)][test_id][param] = val
1926                             else:
1927                                 for param in params:
1928                                     try:
1929                                         data[job][str(build)][test_id][param] =\
1930                                             test_data[param]
1931                                     except KeyError:
1932                                         data[job][str(build)][test_id][param] =\
1933                                             u"No Data"
1934             return data
1935
1936         except (KeyError, IndexError, ValueError) as err:
1937             logging.error(
1938                 f"Missing mandatory parameter in the element specification: "
1939                 f"{repr(err)}"
1940             )
1941             return None
1942         except AttributeError as err:
1943             logging.error(repr(err))
1944             return None
1945         except SyntaxError as err:
1946             logging.error(
1947                 f"The filter {cond} is not correct. Check if all tags are "
1948                 f"enclosed by apostrophes.\n{repr(err)}"
1949             )
1950             return None
1951
1952     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1953                              continue_on_error=False):
1954         """Filter required data from the given jobs and builds.
1955
1956         The output data structure is:
1957         - job 1
1958           - build 1
1959             - test (or suite) 1 ID:
1960               - param 1
1961               - param 2
1962               ...
1963               - param n
1964             ...
1965             - test (or suite) n ID:
1966             ...
1967           ...
1968           - build n
1969         ...
1970         - job n
1971
1972         :param element: Element which will use the filtered data.
1973         :param params: Parameters which will be included in the output. If None,
1974             all parameters are included.
1975         :param data_set: The set of data to be filtered: tests, suites,
1976             metadata.
1977         :param continue_on_error: Continue if there is an error while reading
1978             the data. The item will be empty then.
1979         :type element: pandas.Series
1980         :type params: list
1981         :type data_set: str
1982         :type continue_on_error: bool
1983         :returns: Filtered data.
1984         :rtype: pandas.Series
1985         """
1986
1987         include = element.get(u"include", None)
1988         if not include:
1989             logging.warning(u"No tests to include, skipping the element.")
1990             return None
1991
1992         if params is None:
1993             params = element.get(u"parameters", None)
1994             if params and u"type" not in params:
1995                 params.append(u"type")
1996
1997         cores = element.get(u"core", None)
1998         if cores:
1999             tests = list()
2000             for core in cores:
2001                 for test in include:
2002                     tests.append(test.format(core=core))
2003         else:
2004             tests = include
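         # e.g. with cores [u"1c", u"2c"], an include item such as
         # u".*-{core}-ethip4-ip4base-ndrpdr" (a hypothetical test name
         # template) expands to one regular expression per core.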
2005
2006         data = pd.Series(dtype="float64")
2007         try:
2008             for job, builds in element[u"data"].items():
2009                 data[job] = pd.Series(dtype="float64")
2010                 for build in builds:
2011                     data[job][str(build)] = pd.Series(dtype="float64")
2012                     for test in tests:
2013                         try:
2014                             reg_ex = re.compile(str(test).lower())
2015                             for test_id in self.data[job][
2016                                     str(build)][data_set].keys():
2017                                 if re.match(reg_ex, str(test_id).lower()):
2018                                     test_data = self.data[job][
2019                                         str(build)][data_set][test_id]
2020                                     data[job][str(build)][test_id] = \
2021                                         pd.Series(dtype="float64")
2022                                     if params is None:
2023                                         for param, val in test_data.items():
2024                                             data[job][str(build)][test_id]\
2025                                                 [param] = val
2026                                     else:
2027                                         for param in params:
2028                                             try:
2029                                                 data[job][str(build)][
2030                                                     test_id][param] = \
2031                                                     test_data[param]
2032                                             except KeyError:
2033                                                 data[job][str(build)][
2034                                                     test_id][param] = u"No Data"
2035                         except KeyError as err:
2036                             if continue_on_error:
2037                                 logging.debug(repr(err))
2038                                 continue
2039                             logging.error(repr(err))
2040                             return None
2041             return data
2042
2043         except (KeyError, IndexError, ValueError) as err:
2044             logging.error(
2045                 f"Missing mandatory parameter in the element "
2046                 f"specification: {repr(err)}"
2047             )
2048             return None
2049         except AttributeError as err:
2050             logging.error(repr(err))
2051             return None
2052
2053     @staticmethod
2054     def merge_data(data):
2055         """Merge data from multiple jobs and builds into a simple structure.
2056
2057         The output data structure is:
2058
2059         - test (suite) 1 ID:
2060           - param 1
2061           - param 2
2062           ...
2063           - param n
2064         ...
2065         - test (suite) n ID:
2066         ...
2067
2068         :param data: Data to merge.
2069         :type data: pandas.Series
2070         :returns: Merged data.
2071         :rtype: pandas.Series
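
         For illustration: if the same test ID occurs in more than one build,
         the occurrence processed last overwrites the earlier ones, as the
         merged data is keyed by test ID only.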
2072         """
2073
2074         logging.info(u"    Merging data ...")
2075
2076         merged_data = pd.Series(dtype="float64")
2077         for builds in data.values:
2078             for item in builds.values:
2079                 for item_id, item_data in item.items():
2080                     merged_data[item_id] = item_data
2081         return merged_data
2082
2083     def print_all_oper_data(self):
2084         """Print all operational data to the console.
2085         """
2086
2087         for job in self._input_data.values:
2088             for build in job.values:
2089                 for test_id, test_data in build[u"tests"].items():
2090                     print(f"{test_id}")
2091                     if test_data.get(u"show-run", None) is None:
2092                         continue
2093                     for dut_name, data in test_data[u"show-run"].items():
2094                         if data.get(u"runtime", None) is None:
2095                             continue
2096                         runtime = loads(data[u"runtime"])
2097                         try:
2098                             threads_nr = len(runtime[0][u"clocks"])
2099                         except (IndexError, KeyError):
2100                             continue
2101                         threads = OrderedDict(
2102                             {idx: list() for idx in range(threads_nr)})
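                             # Normalise clocks per unit of work: prefer clocks
                             # per vector, fall back to clocks per call, then
                             # per suspend; 0.0 when the node did no work.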
2103                         for item in runtime:
2104                             for idx in range(threads_nr):
2105                                 if item[u"vectors"][idx] > 0:
2106                                     clocks = item[u"clocks"][idx] / \
2107                                              item[u"vectors"][idx]
2108                                 elif item[u"calls"][idx] > 0:
2109                                     clocks = item[u"clocks"][idx] / \
2110                                              item[u"calls"][idx]
2111                                 elif item[u"suspends"][idx] > 0:
2112                                     clocks = item[u"clocks"][idx] / \
2113                                              item[u"suspends"][idx]
2114                                 else:
2115                                     clocks = 0.0
2116
2117                                 if item[u"calls"][idx] > 0:
2118                                     vectors_call = item[u"vectors"][idx] / \
2119                                                    item[u"calls"][idx]
2120                                 else:
2121                                     vectors_call = 0.0
2122
2123                                 if int(item[u"calls"][idx]) + int(
2124                                         item[u"vectors"][idx]) + \
2125                                         int(item[u"suspends"][idx]):
2126                                     threads[idx].append([
2127                                         item[u"name"],
2128                                         item[u"calls"][idx],
2129                                         item[u"vectors"][idx],
2130                                         item[u"suspends"][idx],
2131                                         clocks,
2132                                         vectors_call
2133                                     ])
2134
2135                         print(f"Host IP: {data.get(u'host', '')}, "
2136                               f"Socket: {data.get(u'socket', '')}")
2137                         for thread_nr, thread in threads.items():
2138                             txt_table = prettytable.PrettyTable(
2139                                 (
2140                                     u"Name",
2141                                     u"Nr of Calls",
2142                                     u"Nr of Packets",
2143                                     u"Suspends",
2144                                     u"Cycles per Packet",
2145                                     u"Average Vector Size"
2146                                 )
2147                             )
2148                             avg = 0.0
2149                             for row in thread:
2150                                 txt_table.add_row(row)
2151                                 avg += row[-1]
2152                             if len(thread) == 0:
2153                                 avg = u""
2154                             else:
2155                                 avg = f", Average Vector Size per Node: " \
2156                                       f"{(avg / len(thread)):.2f}"
2157                             th_name = u"main" if thread_nr == 0 \
2158                                 else f"worker_{thread_nr}"
2159                             print(f"{dut_name}, {th_name}{avg}")
2160                             txt_table.float_format = u".2"
2161                             txt_table.align = u"r"
2162                             txt_table.align[u"Name"] = u"l"
2163                             print(f"{txt_table.get_string()}\n")