# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Data pre-processing

- extract data from output.xml files generated by Jenkins jobs and store in
  pandas' Series,
- provide access to the data,
- filter the data using tags.
"""

import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove, walk, listdir
from os.path import isfile, isdir, join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from json.decoder import JSONDecodeError

import hdrh.histogram
import hdrh.codec
import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file
from pal_errors import PresentationError


# Separator used in file names
SEPARATOR = u"__"


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    }
                }
            },

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            },

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            },

            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """

    REGEX_PLR_RATE = re.compile(
        r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
        r'PLRsearch upper bound::?\s(\d+.\d+)'
    )
    REGEX_NDRPDR_RATE = re.compile(
        r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:\s(\d+.\d+).*\n'
        r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:\s(\d+.\d+)'
    )
    REGEX_NDRPDR_GBPS = re.compile(
        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:.*,\s(\d+.\d+)'
    )
    REGEX_PERF_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    )
    REGEX_CPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
    )
    REGEX_PPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
    )
    REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')

    REGEX_VSAP_MSG_INFO = re.compile(
        r'Transfer Rate: (\d*.\d*).*\n'
        r'Latency: (\d*.\d*).*\n'
        r'Completed requests: (\d*).*\n'
        r'Failed requests: (\d*).*\n'
        r'Total data transferred: (\d*).*\n'
        r'Connection [cr]ps rate:\s*(\d*.\d*)'
    )

    # Needed for CPS and PPS tests
    REGEX_NDRPDR_LAT_BASE = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
    )
    REGEX_NDRPDR_LAT = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
    )

    REGEX_VERSION_VPP = re.compile(
        r"(return STDOUT Version:\s*|"
        r"VPP Version:\s*|VPP version:\s*)(.*)"
    )
    REGEX_VERSION_DPDK = re.compile(
        r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
    )
    REGEX_TCP = re.compile(
        r'Total\s(rps|cps|throughput):\s(\d*).*$'
    )
    REGEX_MRR = re.compile(
        r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
        r'tx\s(\d*),\srx\s(\d*)'
    )
    REGEX_BMRR = re.compile(
        r'.*trial results.*: \[(.*)\]'
    )
    REGEX_RECONF_LOSS = re.compile(
        r'Packets lost due to reconfig: (\d*)'
    )
    REGEX_RECONF_TIME = re.compile(
        r'Implied time lost: (\d*.[\de-]*)'
    )
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')

    REGEX_SH_RUN_HOST = re.compile(
        r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
    )
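
    # A minimal usage sketch (not executed in this module) of the patterns
    # above, with a synthetic message shaped like the robot output that
    # REGEX_NDRPDR_RATE expects:
    #
    #   msg = (
    #       "NDR_LOWER: 12.5 Mpps (unconstrained)\nsomething\n"
    #       "NDR_UPPER: 12.6 Mpps\n"
    #       "PDR_LOWER: 13.1 Mpps (unconstrained)\nsomething\n"
    #       "PDR_UPPER: 13.2 Mpps"
    #   )
    #   groups = re.search(ExecutionChecker.REGEX_NDRPDR_RATE, msg)
    #   groups.group(1)  # -> "12.5" (NDR lower bound)
    #   groups.group(4)  # -> "13.2" (PDR upper bound)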

    def __init__(self, metadata, mapping, ignore, process_oper):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :param process_oper: If True, operational data (show run, telemetry) is
            processed.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        :type process_oper: bool
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TC long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        self._process_oper = process_oper

        # Number of PAPI History messages found:
        # 0 - no message
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._conf_history_lookup_nr = 0

        self._sh_run_counter = 0
        self._telemetry_kw_counter = 0
        self._telemetry_msg_counter = 0

        # Test ID of currently processed test - the lowercase full path to
        # the test
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed,
            u"test-telemetry": self._get_telemetry
        }
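
        # Dispatch sketch: a start_*_kw() handler sets self._msg_type (e.g.
        # u"vpp-version") and each visited message is then routed through
        # this table by start_message() below:
        #
        #   if self._msg_type:
        #       self.parse_msg[self._msg_type](msg)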

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_data_from_mrr_test_msg(self, msg):
        """Get info from message of MRR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
        if not groups or groups.lastindex != 1:
            return u"Test Failed."

        try:
            data = groups.group(1).split(u", ")
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        out_str = u"["
        try:
            for item in data:
                out_str += f"{(float(item) / 1e6):.2f}, "
            return out_str[:-2] + u"]"
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."
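
    # Illustrative transformation (synthetic values): an MRR message ending
    # in "[10000000.0, 10250000.0]" is reduced to "[10.00, 10.25]", i.e. the
    # per-trial rates are scaled from pps to Mpps and rounded to two decimals.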

    def _get_data_from_cps_test_msg(self, msg):
        """Get info from message of NDRPDR CPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 2:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
                f"2. {(float(groups.group(2)) / 1e6):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_pps_test_msg(self, msg):
        """Get info from message of NDRPDR PPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 4:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
                f"{float(groups.group(2)):5.2f}\n"
                f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
                f"{float(groups.group(4)):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_perf_test_msg(self, msg):
        """Get info from message of NDRPDR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
        if not groups or groups.lastindex != 10:
            return u"Test Failed."

        try:
            data = {
                u"ndr_low": float(groups.group(1)),
                u"ndr_low_b": float(groups.group(2)),
                u"pdr_low": float(groups.group(3)),
                u"pdr_low_b": float(groups.group(4)),
                u"pdr_lat_90_1": groups.group(5),
                u"pdr_lat_90_2": groups.group(6),
                u"pdr_lat_50_1": groups.group(7),
                u"pdr_lat_50_2": groups.group(8),
                u"pdr_lat_10_1": groups.group(9),
                u"pdr_lat_10_2": groups.group(10),
            }
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        def _process_lat(in_str_1, in_str_2):
            """Extract P50, P90 and P99 latencies or min, avg, max values from
            latency string.

            :param in_str_1: Latency string for one direction produced by robot
                framework.
            :param in_str_2: Latency string for second direction produced by
                robot framework.
            :type in_str_1: str
            :type in_str_2: str
            :returns: Processed latency string or None if a problem occurs.
            :rtype: tuple
            """
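            # The 4th field of each latency string is a base64-encoded HDR
            # histogram. A round-trip sketch of the hdrh API relied on below
            # (synthetic values; not executed in this module):
            #
            #   hist = hdrh.histogram.HdrHistogram(1, 10**7, 2)
            #   hist.record_value(125)
            #   encoded = hist.encode()
            #   decoded = hdrh.histogram.HdrHistogram.decode(encoded)
            #   decoded.get_value_at_percentile(50.0)  # ~125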
            in_list_1 = in_str_1.split('/', 3)
            in_list_2 = in_str_2.split('/', 3)

            if len(in_list_1) != 4 and len(in_list_2) != 4:
                return None

            # Pad the base64 string to a multiple of 4 characters.
            in_list_1[3] += u"=" * ((4 - len(in_list_1[3])) % 4)
            try:
                hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
            except hdrh.codec.HdrLengthException:
                hdr_lat_1 = None

            in_list_2[3] += u"=" * ((4 - len(in_list_2[3])) % 4)
            try:
                hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
            except hdrh.codec.HdrLengthException:
                hdr_lat_2 = None

            if hdr_lat_1 and hdr_lat_2:
                hdr_lat = (
                    hdr_lat_1.get_value_at_percentile(50.0),
                    hdr_lat_1.get_value_at_percentile(90.0),
                    hdr_lat_1.get_value_at_percentile(99.0),
                    hdr_lat_2.get_value_at_percentile(50.0),
                    hdr_lat_2.get_value_at_percentile(90.0),
                    hdr_lat_2.get_value_at_percentile(99.0)
                )
                if all(hdr_lat):
                    return hdr_lat

            hdr_lat = (
                int(in_list_1[0]), int(in_list_1[1]), int(in_list_1[2]),
                int(in_list_2[0]), int(in_list_2[1]), int(in_list_2[2])
            )
            for item in hdr_lat:
                if item in (-1, 4294967295, 0):
                    return None
            return hdr_lat

        try:
            out_msg = (
                f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
                f"{data[u'ndr_low_b']:5.2f}"
                f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
                f"{data[u'pdr_low_b']:5.2f}"
            )
            latency = (
                _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
                _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
                _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
            )
            if all(latency):
                max_len = len(str(max((max(item) for item in latency))))
                max_len = 4 if max_len < 4 else max_len

                for idx, lat in enumerate(latency):
                    if not idx:
                        out_msg += u"\n"
                    out_msg += (
                        f"\n{idx + 3}. "
                        f"{lat[0]:{max_len}d} "
                        f"{lat[1]:{max_len}d} "
                        f"{lat[2]:{max_len}d}      "
                        f"{lat[3]:{max_len}d} "
                        f"{lat[4]:{max_len}d} "
                        f"{lat[5]:{max_len}d} "
                    )

            return out_msg

        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"return STDOUT Version:") or \
                msg.message.count(u"VPP Version:") or \
                msg.message.count(u"VPP version:"):
            self._version = str(
                re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
            )
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(
                r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
                u"",
                msg.message,
                count=1
            ).replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if not msg.message.count(u"stats runtime"):
            return

        # Temporary solution
        if self._sh_run_counter > 1:
            return

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = dict()

        groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""

        dut = u"dut{nr}".format(
            nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)

        self._data[u'tests'][self._test_id][u'show-run'][dut] = \
            copy.copy(
                {
                    u"host": host,
                    u"socket": sock,
                    u"runtime": str(msg.message).replace(u' ', u'').
                                replace(u'\n', u'').replace(u"'", u'"').
                                replace(u'b"', u'"').replace(u'u"', u'"').
                                split(u":", 1)[1]
                }
            )

    def _get_telemetry(self, msg):
        """Called when extraction of VPP telemetry data is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._telemetry_kw_counter > 1:
            return
        if not msg.message.count(u"# TYPE vpp_runtime_calls"):
            return

        if u"telemetry-show-run" not in \
                self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()

        self._telemetry_msg_counter += 1
        groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""
        runtime = {
            u"source_type": u"node",
            u"source_id": host,
            u"msg_type": u"metric",
            u"log_level": u"INFO",
            u"timestamp": msg.timestamp,
            u"msg": u"show_runtime",
            u"host": host,
            u"socket": sock,
            u"data": list()
        }
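
        # Each relevant line is Prometheus-style exposition text (synthetic
        # example):
        #   vpp_runtime_calls{name="ip4-lookup",state="active",thread_name="vpp_wk_0"} 123 1613775651
        # and is parsed below into name="calls", value="123",
        # timestamp="1613775651", with the "name" label renamed to
        # "graph_node".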
        for line in msg.message.splitlines():
            if not line.startswith(u"vpp_runtime_"):
                continue
            try:
                params, value, timestamp = line.rsplit(u" ", maxsplit=2)
                cut = params.index(u"{")
                name = params[:cut].split(u"_", maxsplit=2)[-1]
                labels = eval(
                    u"dict" + params[cut:].replace('{', '(').replace('}', ')')
                )
                labels[u"graph_node"] = labels.pop(u"name")
                runtime[u"data"].append(
                    {
                        u"name": name,
                        u"value": value,
                        u"timestamp": timestamp,
                        u"labels": labels
                    }
                )
            except (TypeError, ValueError, IndexError):
                continue
        self._data[u'tests'][self._test_id][u'telemetry-show-run']\
            [f"dut{self._telemetry_msg_counter}"] = copy.copy(
                {
                    u"host": host,
                    u"socket": sock,
                    u"runtime": runtime
                }
            )

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_throughput_gbps(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
        test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        gbps = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_GBPS, msg)

        if groups is not None:
            try:
                gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
                gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
                gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
                gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return gbps, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0,
            u"avg": -1.0,
            u"max": -1.0,
            u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"LAT0": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR10": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR50": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR90": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
        }

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)
        if groups is None:
            groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
        if groups is None:
            return latency, u"FAIL"

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with min, avg and max as floats and hdrh as a
                string.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
            """
            in_list = in_str.split('/', 3)

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

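        # Example (synthetic): process_latency(u"1/2/4/HISTFAAAA...") returns
        # {u"min": 1.0, u"avg": 2.0, u"max": 4.0, u"hdrh": u"HISTFAAAA..."}.
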
        try:
            latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
            latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
            latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
            latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
            if groups.lastindex == 4:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        try:
            latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
            latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
            latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
            latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
            latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
            latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
            latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
            latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
            if groups.lastindex == 12:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        return latency, u"FAIL"

    @staticmethod
    def _get_hoststack_data(msg, tags):
        """Get data from the hoststack test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
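        # The message is expected to be near-JSON with single quotes; the
        # normalisation below turns, e.g. (synthetic):
        #   "{'bits_per_second': 1000.0}"  ->  '{"bits_per_second":1000.0}'
        # which json.loads() accepts.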
        result = dict()
        status = u"FAIL"

        msg = msg.replace(u"'", u'"').replace(u" ", u"")
        if u"LDPRELOAD" in tags:
            try:
                result = loads(msg)
                status = u"PASS"
            except JSONDecodeError:
                pass
        elif u"VPPECHO" in tags:
            try:
                msg_lst = msg.replace(u"}{", u"} {").split(u" ")
                result = dict(
                    client=loads(msg_lst[0]),
                    server=loads(msg_lst[1])
                )
                status = u"PASS"
            except (JSONDecodeError, IndexError):
                pass

        return result, status

    def _get_vsap_data(self, msg, tags):
        """Get data from the vsap test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
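        # Sketch of the message shape REGEX_VSAP_MSG_INFO expects (synthetic
        # values):
        #   Transfer Rate: 1234.5 ...
        #   Latency: 1.2 ...
        #   Completed requests: 1000 ...
        #   Failed requests: 0 ...
        #   Total data transferred: 123456 ...
        #   Connection cps rate: 567.8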
        result = dict()
        status = u"FAIL"

        groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
        if groups is not None:
            try:
                result[u"transfer-rate"] = float(groups.group(1)) * 1e3
                result[u"latency"] = float(groups.group(2))
                result[u"completed-requests"] = int(groups.group(3))
                result[u"failed-requests"] = int(groups.group(4))
                result[u"bytes-transferred"] = int(groups.group(5))
                if u"TCP_CPS" in tags:
                    result[u"cps"] = float(groups.group(6))
                elif u"TCP_RPS" in tags:
                    result[u"rps"] = float(groups.group(6))
                else:
                    return result, status
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return result, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        self._data[u"suites"][suite.longname.lower().
                              replace(u'"', u"'").
                              replace(u" ", u"_")] = {
                                  u"name": suite.name.lower(),
                                  u"doc": suite.doc,
                                  u"parent": parent_name,
                                  u"level": len(suite.longname.split(u"."))
                              }

        suite.setup.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.body.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        self._sh_run_counter = 0
        self._telemetry_kw_counter = 0
        self._telemetry_msg_counter = 0

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            logging.debug(
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
                f"{name}"
            )
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, u"", name)

        test_result[u"parent"] = test.parent.name.lower()
        test_result[u"tags"] = tags
        test_result[u"doc"] = test.doc
        test_result[u"type"] = u""
        test_result[u"status"] = test.status
        test_result[u"starttime"] = test.starttime
        test_result[u"endtime"] = test.endtime

        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
                    test_result[u"msg"] = self._get_data_from_pps_test_msg(
                        test.message)
                elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                    test_result[u"msg"] = self._get_data_from_cps_test_msg(
                        test.message)
                else:
                    test_result[u"msg"] = self._get_data_from_perf_test_msg(
                        test.message)
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                test_result[u"msg"] = self._get_data_from_mrr_test_msg(
                    test.message)
            else:
                test_result[u"msg"] = test.message
        else:
            test_result[u"msg"] = test.message

        if u"PERFTEST" in tags and u"TREX" not in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
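            # Illustrative rename (synthetic): with the single tag u"2T1C"
            # present, a test id containing "-1c-" becomes "-2t1c-", e.g.
            # "...-64b-1c-ethip4-ip4base" -> "...-64b-2t1c-ethip4-ip4base".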
            tag_count = 0
            tag_tc = str()
            for tag in test_result[u"tags"]:
                groups = re.search(self.REGEX_TC_TAG, tag)
                if groups:
                    tag_count += 1
                    tag_tc = tag

            if tag_count == 1:
                self._test_id = re.sub(
                    self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                    self._test_id, count=1
                )
                test_result[u"name"] = re.sub(
                    self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                    test_result[u"name"], count=1
                )
            else:
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                logging.debug(
                    f"The test {self._test_id} has none or more than one "
                    f"multi-threading tag.\n"
                    f"Tags: {test_result[u'tags']}"
                )
                return

        if u"DEVICETEST" in tags:
            test_result[u"type"] = u"DEVICETEST"
        elif u"NDRPDR" in tags:
            if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                test_result[u"type"] = u"CPS"
            else:
                test_result[u"type"] = u"NDRPDR"
            if test.status == u"PASS":
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"gbps"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput_gbps(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
        elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
            if u"MRR" in tags:
                test_result[u"type"] = u"MRR"
            else:
                test_result[u"type"] = u"BMRR"
            if test.status == u"PASS":
                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [
                        float(item.strip().replace(u"'", u""))
                        for item in items_str.split(",")
                    ]
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"samples"] = items_float
                    test_result[u"result"][u"receive-rate"] = stats.avg
                    test_result[u"result"][u"receive-stdev"] = stats.stdev
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
        elif u"SOAK" in tags:
            test_result[u"type"] = u"SOAK"
            if test.status == u"PASS":
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
        elif u"LDP_NGINX" in tags:
            test_result[u"type"] = u"LDP_NGINX"
            test_result[u"result"], test_result[u"status"] = \
                self._get_vsap_data(test.message, tags)
        elif u"HOSTSTACK" in tags:
            test_result[u"type"] = u"HOSTSTACK"
            if test.status == u"PASS":
                test_result[u"result"], test_result[u"status"] = \
                    self._get_hoststack_data(test.message, tags)
        elif u"RECONF" in tags:
            test_result[u"type"] = u"RECONF"
            if test.status == u"PASS":
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
        else:
            test_result[u"status"] = u"FAIL"

        self._data[u"tests"][self._test_id] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == u"setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == u"teardown":
                self.visit_teardown_kw(keyword)
            else:
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.body:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if not self._process_oper:
            return

        if test_kw.name.count(u"Run Telemetry On All Duts"):
            self._msg_type = u"test-telemetry"
            self._telemetry_kw_counter += 1
        elif test_kw.name.count(u"Show Runtime On All Duts"):
            self._msg_type = u"test-show-runtime"
            self._sh_run_counter += 1
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.body:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
                not self._version:
            self._msg_type = u"dpdk-version"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.body:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        if teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Get required information from the
        message, e.g. the VPP version, using the handler selected by the
        current message type.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """


class InputData:
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """
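
    # Access sketch following the structure above (the job name, build number
    # and for_output value are hypothetical):
    #   in_data = InputData(spec, u"report")
    #   # ... after the builds are downloaded and parsed ...
    #   tests = in_data.tests(u"csit-vpp-perf-report-iterative", u"42")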
1437
1438     def __init__(self, spec, for_output):
1439         """Initialization.
1440
1441         :param spec: Specification.
1442         :param for_output: Output to be generated from downloaded data.
1443         :type spec: Specification
1444         :type for_output: str
1445         """
1446
1447         # Specification:
1448         self._cfg = spec
1449
1450         self._for_output = for_output
1451
1452         # Data store:
1453         self._input_data = pd.Series(dtype="object")
1454
1455     @property
1456     def data(self):
1457         """Getter - Input data.
1458
1459         :returns: Input data
1460         :rtype: pandas.Series
1461         """
1462         return self._input_data
1463
1464     def metadata(self, job, build):
1465         """Getter - metadata
1466
1467         :param job: Job which metadata we want.
1468         :param build: Build which metadata we want.
1469         :type job: str
1470         :type build: str
1471         :returns: Metadata
1472         :rtype: pandas.Series
1473         """
1474         return self.data[job][build][u"metadata"]
1475
1476     def suites(self, job, build):
1477         """Getter - suites
1478
1479         :param job: Job which suites we want.
1480         :param build: Build which suites we want.
1481         :type job: str
1482         :type build: str
1483         :returns: Suites.
1484         :rtype: pandas.Series
1485         """
1486         return self.data[job][str(build)][u"suites"]
1487
1488     def tests(self, job, build):
1489         """Getter - tests
1490
1491         :param job: Job whose tests we want.
1492         :param build: Build whose tests we want.
1493         :type job: str
1494         :type build: str
1495         :returns: Tests.
1496         :rtype: pandas.Series
1497         """
1498         return self.data[job][str(build)][u"tests"]
1499
1500     def _parse_tests(self, job, build):
1501         """Process data from a Robot output.xml file and return JSON-structured
1502         data.
1503 
1504         :param job: Name of the job whose build output data will be processed.
1505         :param build: The build whose output data will be processed.
1506         :type job: str
1507         :type build: dict
1508         :returns: JSON data structure.
1509         :rtype: dict
1510         """
1511
1512         metadata = {
1513             u"job": job,
1514             u"build": build
1515         }
1516
1517         with open(build[u"file-name"], u'r') as data_file:
1518             try:
1519                 result = ExecutionResult(data_file)
1520             except errors.DataError as err:
1521                 logging.error(
1522                     f"Error occurred while parsing output.xml: {repr(err)}"
1523                 )
1524                 return None
1525
1526         process_oper = False
1527         if u"-vpp-perf-report-coverage-" in job:
1528             process_oper = True
1529         # elif u"-vpp-perf-report-iterative-" in job:
1530         #     # Exceptions for TBs where we do not have coverage data:
1531         #     for item in (u"-2n-icx", ):
1532         #         if item in job:
1533         #             process_oper = True
1534         #             break
1535         checker = ExecutionChecker(
1536             metadata, self._cfg.mapping, self._cfg.ignore, process_oper
1537         )
1538         result.visit(checker)
1539
1540         checker.data[u"metadata"][u"tests_total"] = \
1541             result.statistics.total.total
1542         checker.data[u"metadata"][u"tests_passed"] = \
1543             result.statistics.total.passed
1544         checker.data[u"metadata"][u"tests_failed"] = \
1545             result.statistics.total.failed
1546         checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
1547         checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]
1548
1549         return checker.data
1550
1551     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1552         """Download and parse the input data file.
1553
1554         :param job: Name of the Jenkins job which generated the processed input
1555             file.
1556         :param build: Information about the Jenkins build which generated the
1557             processed input file.
1558         :param repeat: Repeat the download the specified number of times if not
1559             successful.
1560         :param pid: PID of the process executing this method.
1561         :type job: str
1562         :type build: dict
1563         :type repeat: int
1564         :type pid: int
1565         :returns: Dictionary with the parsed data, the processing state, the
                 job and build identification, and the u"last" flag.
             :rtype: dict
             """
1566
1567         logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1568
1569         state = u"failed"
1570         success = False
1571         data = None
1572         do_repeat = repeat
1573         while do_repeat:
1574             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1575             if success:
1576                 break
1577             do_repeat -= 1
1578         if not success:
1579             logging.error(
1580                 f"Failed to download the input data file for the job {job}, "
1581                 f"build {build[u'build']}, or the downloaded file is damaged. "
1582                 f"Skipped."
1583             )
1584         if success:
1585             logging.info(f"  Processing data from build {build[u'build']}")
1586             data = self._parse_tests(job, build)
1587             if data is None:
1588                 logging.error(
1589                     f"Input data file from the job {job}, build "
1590                     f"{build[u'build']} is damaged. Skipped."
1591                 )
1592             else:
1593                 state = u"processed"
1594
1595             try:
1596                 remove(build[u"file-name"])
1597             except OSError as err:
1598                 logging.error(
1599                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1600                 )
1601
1602         # If the time-period is defined in the specification file, remove all
1603         # files which are outside the time period.
1604         is_last = False
1605         timeperiod = self._cfg.environment.get(u"time-period", None)
1606         if timeperiod and data:
1607             now = dt.utcnow()
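                 # timedelta() interprets the bare number as days.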
1608             timeperiod = timedelta(int(timeperiod))
1609             metadata = data.get(u"metadata", None)
1610             if metadata:
1611                 generated = metadata.get(u"generated", None)
1612                 if generated:
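                         # u"generated" is Robot's suite endtime truncated to
                         # 14 characters, e.g. u"20220101 12:34".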
1613                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1614                     if (now - generated) > timeperiod:
1615                         # Remove the data and the file:
1616                         state = u"removed"
1617                         data = None
1618                         is_last = True
1619                         logging.info(
1620                             f"  The build {job}/{build[u'build']} is "
1621                             f"outdated, will be removed."
1622                         )
1623         return {
1624             u"data": data,
1625             u"state": state,
1626             u"job": job,
1627             u"build": build,
1628             u"last": is_last
1629         }
1630
1631     def download_and_parse_data(self, repeat=1):
1632         """Download the input data files, parse input data from input files and
1633         store in pandas' Series.
1634
1635         :param repeat: Repeat the download the specified number of times if not
1636             successful.
1637         :type repeat: int
1638         """
1639
1640         logging.info(u"Downloading and parsing input files ...")
1641
1642         for job, builds in self._cfg.input.items():
1643             for build in builds:
1644
1645                 result = self._download_and_parse_build(job, build, repeat)
1646                 if result[u"last"]:
1647                     break
1648                 build_nr = result[u"build"][u"build"]
1649
1650                 if result[u"data"]:
1651                     data = result[u"data"]
1652                     build_data = pd.Series({
1653                         u"metadata": pd.Series(
1654                             list(data[u"metadata"].values()),
1655                             index=list(data[u"metadata"].keys())
1656                         ),
1657                         u"suites": pd.Series(
1658                             list(data[u"suites"].values()),
1659                             index=list(data[u"suites"].keys())
1660                         ),
1661                         u"tests": pd.Series(
1662                             list(data[u"tests"].values()),
1663                             index=list(data[u"tests"].keys())
1664                         )
1665                     })
1666
1667                     if self._input_data.get(job, None) is None:
1668                         self._input_data[job] = pd.Series(dtype="object")
1669                     self._input_data[job][str(build_nr)] = build_data
1670                     self._cfg.set_input_file_name(
1671                         job, build_nr, result[u"build"][u"file-name"]
1672                     )
1673                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1674
1675                 mem_alloc = \
1676                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1677                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1678
1679         logging.info(u"Done.")
1680
1681         msg = u"Successful downloads from the sources:\n"
1682         for source in self._cfg.environment[u"data-sources"]:
1683             if source[u"successful-downloads"]:
1684                 msg += (
1685                     f"{source[u'url']}/{source[u'path']}/"
1686                     f"{source[u'file-name']}: "
1687                     f"{source[u'successful-downloads']}\n"
1688                 )
1689         logging.info(msg)
1690
1691     def process_local_file(self, local_file, job=u"local", build_nr=1,
1692                            replace=True):
1693         """Process local XML file given as a command-line parameter.
1694
1695         :param local_file: The file to process.
1696         :param job: Job name.
1697         :param build_nr: Build number.
1698         :param replace: If True, the information about jobs and builds is
1699             replaced by the new one, otherwise the new jobs and builds are
1700             added.
1701         :type local_file: str
1702         :type job: str
1703         :type build_nr: int
1704         :type replace: bool
1705         :raises: PresentationError if the file does not exist or parsing fails.
1706         """
1707         if not isfile(local_file):
1708             raise PresentationError(f"The file {local_file} does not exist.")
1709
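             # If the file name stem is numeric (e.g. u"15.xml"), use it as the
             # build number; otherwise keep the build_nr given by the caller.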
1710         try:
1711             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1712         except (IndexError, ValueError):
1713             pass
1714
1715         build = {
1716             u"build": build_nr,
1717             u"status": u"failed",
1718             u"file-name": local_file
1719         }
1720         if replace:
1721             self._cfg.input = dict()
1722         self._cfg.add_build(job, build)
1723
1724         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1725         data = self._parse_tests(job, build)
1726         if data is None:
1727             raise PresentationError(
1728                 f"Error occurred while parsing the file {local_file}"
1729             )
1730
1731         build_data = pd.Series({
1732             u"metadata": pd.Series(
1733                 list(data[u"metadata"].values()),
1734                 index=list(data[u"metadata"].keys())
1735             ),
1736             u"suites": pd.Series(
1737                 list(data[u"suites"].values()),
1738                 index=list(data[u"suites"].keys())
1739             ),
1740             u"tests": pd.Series(
1741                 list(data[u"tests"].values()),
1742                 index=list(data[u"tests"].keys())
1743             )
1744         })
1745
1746         if self._input_data.get(job, None) is None:
1747             self._input_data[job] = pd.Series(dtype="object")
1748         self._input_data[job][str(build_nr)] = build_data
1749
1750         self._cfg.set_input_state(job, build_nr, u"processed")
1751
1752     def process_local_directory(self, local_dir, replace=True):
1753         """Process a local directory with XML file(s). The directory is
1754         processed as a 'job' and the XML files in it as builds.
1755         If the given directory contains only sub-directories, these
1756         sub-directories are processed as jobs and the corresponding XML files
1757         as builds of their jobs.
1758
1759         :param local_dir: Local directory to process.
1760         :param replace: If True, the information about jobs and builds is
1761             replaced by the new one, otherwise the new jobs and builds are
1762             added.
1763         :type local_dir: str
1764         :type replace: bool
             :raises: PresentationError if the directory does not exist, is
                 empty, or contains both files and sub-directories.
1765         """
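             # Supported layouts (illustrative paths):
             #     local_dir/1.xml, local_dir/2.xml           -> one job: local_dir
             #     local_dir/jobA/1.xml, local_dir/jobB/1.xml -> jobs: jobA, jobB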
1766         if not isdir(local_dir):
1767             raise PresentationError(
1768                 f"The directory {local_dir} does not exist."
1769             )
1770
1771         # Check if the given directory includes only files, or only directories
1772         _, dirnames, filenames = next(walk(local_dir))
1773
1774         if filenames and not dirnames:
1775             filenames.sort()
1776             # local_builds:
1777             # key: dir (job) name, value: list of file names (builds)
1778             local_builds = {
1779                 local_dir: [join(local_dir, name) for name in filenames]
1780             }
1781
1782         elif dirnames and not filenames:
1783             dirnames.sort()
1784             # local_builds:
1785             # key: dir (job) name, value: list of file names (builds)
1786             local_builds = dict()
1787             for dirname in dirnames:
1788                 builds = [
1789                     join(local_dir, dirname, name)
1790                     for name in listdir(join(local_dir, dirname))
1791                     if isfile(join(local_dir, dirname, name))
1792                 ]
1793                 if builds:
1794                     local_builds[dirname] = sorted(builds)
1795
1796         elif not filenames and not dirnames:
1797             raise PresentationError(f"The directory {local_dir} is empty.")
1798         else:
1799             raise PresentationError(
1800                 f"The directory {local_dir} can include only files or only "
1801                 f"directories, not both.\nThe directory {local_dir} includes "
1802                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1803             )
1804
1805         if replace:
1806             self._cfg.input = dict()
1807
1808         for job, files in local_builds.items():
1809             for idx, local_file in enumerate(files):
1810                 self.process_local_file(local_file, job, idx + 1, replace=False)
1811
1812     @staticmethod
1813     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1814         """Return the index of character in the string which is the end of tag.
1815
1816         :param tag_filter: The string where the end of tag is being searched.
1817         :param start: The index where the searching is started.
1818         :param closer: The character which is the tag closer.
1819         :type tag_filter: str
1820         :type start: int
1821         :type closer: str
1822         :returns: The index of the tag closer, or None if no closer is found.
1823         :rtype: int
1824         """
1825         try:
1826             idx_opener = tag_filter.index(closer, start)
1827             return tag_filter.index(closer, idx_opener + 1)
1828         except ValueError:
1829             return None
1830
1831     @staticmethod
1832     def _condition(tag_filter):
1833         """Create a conditional statement from the given tag filter.
1834
1835         :param tag_filter: Filter based on tags from the element specification.
1836         :type tag_filter: str
1837         :returns: Conditional statement which can be evaluated.
1838         :rtype: str
1839         """
1840         index = 0
1841         while True:
1842             index = InputData._end_of_tag(tag_filter, index)
1843             if index is None:
1844                 return tag_filter
1845             index += 1
1846             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1847
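     # Illustrative transformation (hypothetical filter string): _condition()
     # turns
     #     "'NDRPDR' and '1C' and not 'VHOST'"
     # into the evaluable expression
     #     "'NDRPDR' in tags and '1C' in tags and not 'VHOST' in tags"
     # which filter_data() below evaluates against each test's tags.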
1848     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1849                     continue_on_error=False):
1850         """Filter required data from the given jobs and builds.
1851
1852         The output data structure is:
1853         - job 1
1854           - build 1
1855             - test (or suite) 1 ID:
1856               - param 1
1857               - param 2
1858               ...
1859               - param n
1860             ...
1861             - test (or suite) n ID:
1862             ...
1863           ...
1864           - build n
1865         ...
1866         - job n
1867
1868         :param element: Element which will use the filtered data.
1869         :param params: Parameters which will be included in the output. If None,
1870             all parameters are included.
1871         :param data: If not None, this data is used instead of data specified
1872             in the element.
1873         :param data_set: The set of data to be filtered: tests, suites,
1874             metadata.
1875         :param continue_on_error: If True, continue when an error occurs while
1876             reading the data; the affected item is then left empty.
1877         :type element: pandas.Series
1878         :type params: list
1879         :type data: dict
1880         :type data_set: str
1881         :type continue_on_error: bool
1882         :returns: Filtered data.
1883         :rtype: pandas.Series
1884         """
1885
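             # Illustrative element (hypothetical keys and values; real elements
             # come from the specification file):
             #     element = {
             #         u"filter": u"'NDRPDR' and '1C'",
             #         u"parameters": [u"throughput", u"latency"],
             #         u"data": {u"csit-vpp-perf": [u"123", u"124"]}
             #     }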
1886         try:
1887             if data_set == "suites":
1888                 cond = u"True"
1889             elif element[u"filter"] in (u"all", u"template"):
1890                 cond = u"True"
1891             else:
1892                 cond = InputData._condition(element[u"filter"])
1893             logging.debug(f"   Filter: {cond}")
1894         except KeyError:
1895             logging.error(u"  No filter defined.")
1896             return None
1897
1898         if params is None:
1899             params = element.get(u"parameters", None)
1900             if params:
1901                 params.extend((u"type", u"status"))
1902
1903         data_to_filter = data if data else element[u"data"]
1904         data = pd.Series(dtype="object")
1905         try:
1906             for job, builds in data_to_filter.items():
1907                 data[job] = pd.Series(dtype="object")
1908                 for build in builds:
1909                     data[job][str(build)] = pd.Series(dtype="object")
1910                     try:
1911                         data_dict = dict(
1912                             self.data[job][str(build)][data_set].items())
1913                     except KeyError:
1914                         if continue_on_error:
1915                             continue
1916                         return None
1917
1918                     for test_id, test_data in data_dict.items():
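                             # Evaluate the generated tag condition against the
                             # test's tags.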
1919                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1920                             data[job][str(build)][test_id] = \
1921                                 pd.Series(dtype="object")
1922                             if params is None:
1923                                 for param, val in test_data.items():
1924                                     data[job][str(build)][test_id][param] = val
1925                             else:
1926                                 for param in params:
1927                                     try:
1928                                         data[job][str(build)][test_id][param] =\
1929                                             test_data[param]
1930                                     except KeyError:
1931                                         data[job][str(build)][test_id][param] =\
1932                                             u"No Data"
1933             return data
1934
1935         except (KeyError, IndexError, ValueError) as err:
1936             logging.error(
1937                 f"Missing mandatory parameter in the element specification: "
1938                 f"{repr(err)}"
1939             )
1940             return None
1941         except AttributeError as err:
1942             logging.error(repr(err))
1943             return None
1944         except SyntaxError as err:
1945             logging.error(
1946                 f"The filter {cond} is not correct. Check if all tags are "
1947                 f"enclosed by apostrophes.\n{repr(err)}"
1948             )
1949             return None
1950
1951     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1952                              continue_on_error=False):
1953         """Filter required data from the given jobs and builds by test name.
1954
1955         The output data structure is:
1956         - job 1
1957           - build 1
1958             - test (or suite) 1 ID:
1959               - param 1
1960               - param 2
1961               ...
1962               - param n
1963             ...
1964             - test (or suite) n ID:
1965             ...
1966           ...
1967           - build n
1968         ...
1969         - job n
1970
1971         :param element: Element which will use the filtered data.
1972         :param params: Parameters which will be included in the output. If None,
1973             all parameters are included.
1974         :param data_set: The set of data to be filtered: tests, suites,
1975             metadata.
1976         :param continue_on_error: If True, continue when an error occurs while
1977             reading the data; the affected item is then left empty.
1978         :type element: pandas.Series
1979         :type params: list
1980         :type data_set: str
1981         :type continue_on_error: bool
1982         :returns: Filtered data.
1983         :rtype: pandas.Series
1984         """
1985
1986         include = element.get(u"include", None)
1987         if not include:
1988             logging.warning(u"No tests to include, skipping the element.")
1989             return None
1990
1991         if params is None:
1992             params = element.get(u"parameters", None)
1993             if params and u"type" not in params:
1994                 params.append(u"type")
1995
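             # Expand the {core} placeholder in the test name patterns, e.g. a
             # hypothetical pattern u"ip4base-{core}" with cores [u"1C", u"2C"]
             # yields u"ip4base-1C" and u"ip4base-2C".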
1996         cores = element.get(u"core", None)
1997         if cores:
1998             tests = list()
1999             for core in cores:
2000                 for test in include:
2001                     tests.append(test.format(core=core))
2002         else:
2003             tests = include
2004
2005         data = pd.Series(dtype="object")
2006         try:
2007             for job, builds in element[u"data"].items():
2008                 data[job] = pd.Series(dtype="object")
2009                 for build in builds:
2010                     data[job][str(build)] = pd.Series(dtype="object")
2011                     for test in tests:
2012                         try:
2013                             reg_ex = re.compile(str(test).lower())
2014                             for test_id in self.data[job][
2015                                     str(build)][data_set].keys():
2016                                 if re.match(reg_ex, str(test_id).lower()):
2017                                     test_data = self.data[job][
2018                                         str(build)][data_set][test_id]
2019                                     data[job][str(build)][test_id] = \
2020                                         pd.Series(dtype="object")
2021                                     if params is None:
2022                                         for param, val in test_data.items():
2023                                             data[job][str(build)][test_id]\
2024                                                 [param] = val
2025                                     else:
2026                                         for param in params:
2027                                             try:
2028                                                 data[job][str(build)][
2029                                                     test_id][param] = \
2030                                                     test_data[param]
2031                                             except KeyError:
2032                                                 data[job][str(build)][
2033                                                     test_id][param] = u"No Data"
2034                         except KeyError as err:
2035                             if continue_on_error:
2036                                 logging.debug(repr(err))
2037                                 continue
2038                             logging.error(repr(err))
2039                             return None
2040             return data
2041
2042         except (KeyError, IndexError, ValueError) as err:
2043             logging.error(
2044                 f"Missing mandatory parameter in the element "
2045                 f"specification: {repr(err)}"
2046             )
2047             return None
2048         except AttributeError as err:
2049             logging.error(repr(err))
2050             return None
2051
2052     @staticmethod
2053     def merge_data(data):
2054         """Merge data from multiple jobs and builds into a simple data structure.
2055
2056         The output data structure is:
2057
2058         - test (suite) 1 ID:
2059           - param 1
2060           - param 2
2061           ...
2062           - param n
2063         ...
2064         - test (suite) n ID:
2065         ...
2066
2067         :param data: Data to merge.
2068         :type data: pandas.Series
2069         :returns: Merged data.
2070         :rtype: pandas.Series
2071         """
2072
2073         logging.info(u"    Merging data ...")
2074
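             # If the same item ID occurs in more than one build, the item
             # processed last wins.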
2075         merged_data = pd.Series(dtype="object")
2076         for builds in data.values:
2077             for item in builds.values:
2078                 for item_id, item_data in item.items():
2079                     merged_data[item_id] = item_data
2080         return merged_data
2081
2082     def print_all_oper_data(self):
2083         """Print all operational data to console.
2084         """
2085
2086         for job in self._input_data.values:
2087             for build in job.values:
2088                 for test_id, test_data in build[u"tests"].items():
2089                     print(f"{test_id}")
2090                     if test_data.get(u"show-run", None) is None:
2091                         continue
2092                     for dut_name, data in test_data[u"show-run"].items():
2093                         if data.get(u"runtime", None) is None:
2094                             continue
2095                         runtime = loads(data[u"runtime"])
2096                         try:
2097                             threads_nr = len(runtime[0][u"clocks"])
2098                         except (IndexError, KeyError):
2099                             continue
2100                         threads = OrderedDict(
2101                             {idx: list() for idx in range(threads_nr)})
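                             # For each node, derive the average clocks per unit
                             # of work, preferring vectors, then calls, then
                             # suspends.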
2102                         for item in runtime:
2103                             for idx in range(threads_nr):
2104                                 if item[u"vectors"][idx] > 0:
2105                                     clocks = item[u"clocks"][idx] / \
2106                                              item[u"vectors"][idx]
2107                                 elif item[u"calls"][idx] > 0:
2108                                     clocks = item[u"clocks"][idx] / \
2109                                              item[u"calls"][idx]
2110                                 elif item[u"suspends"][idx] > 0:
2111                                     clocks = item[u"clocks"][idx] / \
2112                                              item[u"suspends"][idx]
2113                                 else:
2114                                     clocks = 0.0
2115
2116                                 if item[u"calls"][idx] > 0:
2117                                     vectors_call = item[u"vectors"][idx] / \
2118                                                    item[u"calls"][idx]
2119                                 else:
2120                                     vectors_call = 0.0
2121
2122                                 if int(item[u"calls"][idx]) + int(
2123                                         item[u"vectors"][idx]) + \
2124                                         int(item[u"suspends"][idx]):
2125                                     threads[idx].append([
2126                                         item[u"name"],
2127                                         item[u"calls"][idx],
2128                                         item[u"vectors"][idx],
2129                                         item[u"suspends"][idx],
2130                                         clocks,
2131                                         vectors_call
2132                                     ])
2133
2134                         print(f"Host IP: {data.get(u'host', '')}, "
2135                               f"Socket: {data.get(u'socket', '')}")
2136                         for thread_nr, thread in threads.items():
2137                             txt_table = prettytable.PrettyTable(
2138                                 (
2139                                     u"Name",
2140                                     u"Nr of Calls",
2141                                     u"Nr of Packets",
2142                                     u"Suspends",
2143                                     u"Cycles per Packet",
2144                                     u"Average Vector Size"
2145                                 )
2146                             )
2147                             avg = 0.0
2148                             for row in thread:
2149                                 txt_table.add_row(row)
2150                                 avg += row[-1]
2151                             if len(thread) == 0:
2152                                 avg = u""
2153                             else:
2154                                 avg = f", Average Vector Size per Node: " \
2155                                       f"{(avg / len(thread)):.2f}"
2156                             th_name = u"main" if thread_nr == 0 \
2157                                 else f"worker_{thread_nr}"
2158                             print(f"{dut_name}, {th_name}{avg}")
2159                             txt_table.float_format = u".2"
2160                             txt_table.align = u"r"
2161                             txt_table.align[u"Name"] = u"l"
2162                             print(f"{txt_table.get_string()}\n")