# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """

import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove, walk, listdir
from os.path import isfile, isdir, join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from json.decoder import JSONDecodeError

import hdrh.histogram
import hdrh.codec
import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file
from pal_errors import PresentationError


# Separator used in file names
SEPARATOR = u"__"


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a JSON structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            }
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        }
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    }
                }
            }

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            }

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            }

            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            }
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        }
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
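
    Example usage (a minimal sketch mirroring InputData._parse_tests; the
    file name is hypothetical):

        result = ExecutionResult(u"output.xml")
        checker = ExecutionChecker(
            metadata={u"job": u"job-name", u"build": u"1"},
            mapping=dict(), ignore=list(), process_oper=False
        )
        result.visit(checker)
        parsed = checker.data  # dict with "metadata", "suites" and "tests"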
207     """
208
    REGEX_PLR_RATE = re.compile(
        r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
        r'PLRsearch upper bound::?\s(\d+.\d+)'
    )
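    # REGEX_NDRPDR_RATE and REGEX_NDRPDR_GBPS below expect the multi-line
    # rendering produced by the NDRPDR keywords, roughly of the shape
    # (illustrative values, not verbatim CSIT output):
    #   NDR_LOWER: 12345678.9 ..., 10.1 ...
    #   ...
    #   NDR_UPPER: ...
    # followed by analogous PDR_LOWER / PDR_UPPER lines.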
    REGEX_NDRPDR_RATE = re.compile(
        r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:\s(\d+.\d+).*\n'
        r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:\s(\d+.\d+)'
    )
    REGEX_NDRPDR_GBPS = re.compile(
        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:.*,\s(\d+.\d+)'
    )
    REGEX_PERF_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    )
    REGEX_CPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
    )
    REGEX_PPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
    )
    REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')

    REGEX_VSAP_MSG_INFO = re.compile(
        r'Transfer Rate: (\d*.\d*).*\n'
        r'Latency: (\d*.\d*).*\n'
        r'Completed requests: (\d*).*\n'
        r'Failed requests: (\d*).*\n'
        r'Total data transferred: (\d*).*\n'
        r'Connection [cr]ps rate:\s*(\d*.\d*)'
    )

    # Needed for CPS and PPS tests
    REGEX_NDRPDR_LAT_BASE = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
    )
    REGEX_NDRPDR_LAT = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
    )

    REGEX_VERSION_VPP = re.compile(
        r"(VPP Version:\s*|VPP version:\s*)(.*)"
    )
    REGEX_VERSION_DPDK = re.compile(
        r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
    )
    REGEX_TCP = re.compile(
        r'Total\s(rps|cps|throughput):\s(\d*).*$'
    )
    REGEX_MRR = re.compile(
        r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
        r'tx\s(\d*),\srx\s(\d*)'
    )
    REGEX_BMRR = re.compile(
        r'.*trial results.*: \[(.*)\]'
    )
    REGEX_RECONF_LOSS = re.compile(
        r'Packets lost due to reconfig: (\d*)'
    )
    REGEX_RECONF_TIME = re.compile(
        r'Implied time lost: (\d*.[\de-]*)'
    )
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')

    REGEX_SH_RUN_HOST = re.compile(
        r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
    )

    def __init__(self, metadata, mapping, ignore, process_oper):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) one.
        :param ignore: List of TCs to be ignored.
        :param process_oper: If True, operational data (show run, telemetry) is
            processed.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        :type process_oper: bool
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        self._process_oper = process_oper

        # Number of PAPI History messages found:
        # 0 - no message
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._conf_history_lookup_nr = 0

        self._sh_run_counter = 0
        self._telemetry_kw_counter = 0
        self._telemetry_msg_counter = 0

        # Test ID of the currently processed test - the lowercase full path
        # to the test
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed,
            u"test-telemetry": self._get_telemetry
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_data_from_mrr_test_msg(self, msg):
        """Get info from message of MRR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
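
        Example (a sketch): a message whose bracketed part is
        "[1000000.0, 2000000.0]" is rendered as "[1.00, 2.00]",
        i.e. the per-trial rates are scaled down by 1e6.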
387         """
388
389         groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
390         if not groups or groups.lastindex != 1:
391             return u"Test Failed."
392
393         try:
394             data = groups.group(1).split(u", ")
395         except (AttributeError, IndexError, ValueError, KeyError):
396             return u"Test Failed."
397
398         out_str = u"["
399         try:
400             for item in data:
401                 out_str += f"{(float(item) / 1e6):.2f}, "
402             return out_str[:-2] + u"]"
403         except (AttributeError, IndexError, ValueError, KeyError):
404             return u"Test Failed."
405
    def _get_data_from_cps_test_msg(self, msg):
        """Get info from message of NDRPDR CPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 2:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
                f"2. {(float(groups.group(2)) / 1e6):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_pps_test_msg(self, msg):
        """Get info from message of NDRPDR PPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 4:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
                f"{float(groups.group(2)):5.2f}\n"
                f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
                f"{float(groups.group(4)):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_perf_test_msg(self, msg):
        """Get info from message of NDRPDR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
        if not groups or groups.lastindex != 10:
            return u"Test Failed."

        try:
            data = {
                u"ndr_low": float(groups.group(1)),
                u"ndr_low_b": float(groups.group(2)),
                u"pdr_low": float(groups.group(3)),
                u"pdr_low_b": float(groups.group(4)),
                u"pdr_lat_90_1": groups.group(5),
                u"pdr_lat_90_2": groups.group(6),
                u"pdr_lat_50_1": groups.group(7),
                u"pdr_lat_50_2": groups.group(8),
                u"pdr_lat_10_1": groups.group(9),
                u"pdr_lat_10_2": groups.group(10),
            }
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        def _process_lat(in_str_1, in_str_2):
            """Extract P50, P90 and P99 latencies or min, avg, max values from
            latency string.

            :param in_str_1: Latency string for one direction produced by robot
                framework.
            :param in_str_2: Latency string for second direction produced by
                robot framework.
            :type in_str_1: str
            :type in_str_2: str
            :returns: Processed latency string or None if a problem occurs.
            :rtype: tuple
            """
            in_list_1 = in_str_1.split('/', 3)
            in_list_2 = in_str_2.split('/', 3)

            if len(in_list_1) != 4 or len(in_list_2) != 4:
                return None

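            # The hdrh part is base64 without padding; restore the padding
            # before decoding. A failed decode falls back to the plain
            # min/avg/max integers below.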
            in_list_1[3] += u"=" * ((4 - len(in_list_1[3]) % 4) % 4)
            try:
                hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
            except hdrh.codec.HdrLengthException:
                hdr_lat_1 = None

            in_list_2[3] += u"=" * ((4 - len(in_list_2[3]) % 4) % 4)
            try:
                hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
            except hdrh.codec.HdrLengthException:
                hdr_lat_2 = None

            if hdr_lat_1 and hdr_lat_2:
                hdr_lat = (
                    hdr_lat_1.get_value_at_percentile(50.0),
                    hdr_lat_1.get_value_at_percentile(90.0),
                    hdr_lat_1.get_value_at_percentile(99.0),
                    hdr_lat_2.get_value_at_percentile(50.0),
                    hdr_lat_2.get_value_at_percentile(90.0),
                    hdr_lat_2.get_value_at_percentile(99.0)
                )
                if all(hdr_lat):
                    return hdr_lat

            hdr_lat = (
                int(in_list_1[0]), int(in_list_1[1]), int(in_list_1[2]),
                int(in_list_2[0]), int(in_list_2[1]), int(in_list_2[2])
            )
            for item in hdr_lat:
                if item in (-1, 4294967295, 0):
                    return None
            return hdr_lat

        try:
            out_msg = (
                f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
                f"{data[u'ndr_low_b']:5.2f}"
                f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
                f"{data[u'pdr_low_b']:5.2f}"
            )
            latency = (
                _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
                _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
                _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
            )
            if all(latency):
                max_len = len(str(max((max(item) for item in latency))))
                max_len = 4 if max_len < 4 else max_len

                for idx, lat in enumerate(latency):
                    if not idx:
                        out_msg += u"\n"
                    out_msg += (
                        f"\n{idx + 3}. "
                        f"{lat[0]:{max_len}d} "
                        f"{lat[1]:{max_len}d} "
                        f"{lat[2]:{max_len}d}      "
                        f"{lat[3]:{max_len}d} "
                        f"{lat[4]:{max_len}d} "
                        f"{lat[5]:{max_len}d} "
                    )

            return out_msg

        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"VPP version:") or \
                msg.message.count(u"VPP Version:"):
            self._version = str(
                re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
            )
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None
            logging.info(self._version)

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(
                r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
                u"",
                msg.message,
                count=1
            ).replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if not msg.message.count(u"stats runtime"):
            return

        # Temporary solution
        if self._sh_run_counter > 1:
            return

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = dict()

        groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""

        dut = u"dut{nr}".format(
            nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)

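        # Normalise the Python-repr'd runtime structure into JSON-ish text
        # and keep only the payload following the first colon of the message.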
        self._data[u'tests'][self._test_id][u'show-run'][dut] = \
            copy.copy(
                {
                    u"host": host,
                    u"socket": sock,
                    u"runtime": str(msg.message).replace(u' ', u'').
                                replace(u'\n', u'').replace(u"'", u'"').
                                replace(u'b"', u'"').replace(u'u"', u'"').
                                split(u":", 1)[1]
                }
            )

    def _get_telemetry(self, msg):
        """Called when extraction of VPP telemetry data is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._telemetry_kw_counter > 1:
            return
        if not msg.message.count(u"# TYPE vpp_runtime_calls"):
            return

        if u"telemetry-show-run" not in \
                self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()

        self._telemetry_msg_counter += 1
        groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""
        runtime = {
            u"source_type": u"node",
            u"source_id": host,
            u"msg_type": u"metric",
            u"log_level": u"INFO",
            u"timestamp": msg.timestamp,
            u"msg": u"show_runtime",
            u"host": host,
            u"socket": sock,
            u"data": list()
        }
        for line in msg.message.splitlines():
            if not line.startswith(u"vpp_runtime_"):
                continue
            try:
                params, value, timestamp = line.rsplit(u" ", maxsplit=2)
                cut = params.index(u"{")
                name = params[:cut].split(u"_", maxsplit=2)[-1]
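                # Convert the Prometheus-style label set, e.g. (illustrative)
                # {name="ip4-lookup",state="active"}, into a dict by
                # evaluating it as a dict(...) call.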
                labels = eval(
                    u"dict" + params[cut:].replace('{', '(').replace('}', ')')
                )
                labels[u"graph_node"] = labels.pop(u"name")
                runtime[u"data"].append(
                    {
                        u"name": name,
                        u"value": value,
                        u"timestamp": timestamp,
                        u"labels": labels
                    }
                )
            except (TypeError, ValueError, IndexError):
                continue
        self._data[u'tests'][self._test_id][u'telemetry-show-run']\
            [f"dut{self._telemetry_msg_counter}"] = copy.copy(
                {
                    u"host": host,
                    u"socket": sock,
                    u"runtime": runtime
                }
            )

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_throughput_gbps(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
        test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        gbps = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_GBPS, msg)

        if groups is not None:
            try:
                gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
                gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
                gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
                gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return gbps, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0,
            u"avg": -1.0,
            u"max": -1.0,
            u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"LAT0": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR10": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR50": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR90": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
        }

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)
        if groups is None:
            groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
        if groups is None:
            return latency, u"FAIL"

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with min, avg, max as floats and hdrh as str.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
            """
            in_list = in_str.split('/', 3)

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

        try:
            latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
            latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
            latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
            latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
            if groups.lastindex == 4:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

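        # With the full REGEX_NDRPDR_LAT match (12 groups), groups 5..12 are
        # mapped to PDR90, PDR50, PDR10 and LAT0 in the order the latency
        # lines appear in the test message.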
        try:
            latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
            latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
            latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
            latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
            latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
            latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
            latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
            latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
            if groups.lastindex == 12:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        return latency, u"FAIL"

    @staticmethod
    def _get_hoststack_data(msg, tags):
        """Get data from the hoststack test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        result = dict()
        status = u"FAIL"

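        # The message carries a Python-repr'd dict (two concatenated dicts
        # for VPPECHO); swap quote characters and drop spaces so it parses
        # as JSON below.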
        msg = msg.replace(u"'", u'"').replace(u" ", u"")
        if u"LDPRELOAD" in tags:
            try:
                result = loads(msg)
                status = u"PASS"
            except JSONDecodeError:
                pass
        elif u"VPPECHO" in tags:
            try:
                msg_lst = msg.replace(u"}{", u"} {").split(u" ")
                result = dict(
                    client=loads(msg_lst[0]),
                    server=loads(msg_lst[1])
                )
                status = u"PASS"
            except (JSONDecodeError, IndexError):
                pass

        return result, status

    def _get_vsap_data(self, msg, tags):
        """Get data from the vsap test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        result = dict()
        status = u"FAIL"

        groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
        if groups is not None:
            try:
                result[u"transfer-rate"] = float(groups.group(1)) * 1e3
                result[u"latency"] = float(groups.group(2))
                result[u"completed-requests"] = int(groups.group(3))
                result[u"failed-requests"] = int(groups.group(4))
                result[u"bytes-transferred"] = int(groups.group(5))
999                 if u"TCP_CPS"in tags:
1000                     result[u"cps"] = float(groups.group(6))
1001                 elif u"TCP_RPS" in tags:
1002                     result[u"rps"] = float(groups.group(6))
1003                 else:
1004                     return result, status
1005                 status = u"PASS"
1006             except (IndexError, ValueError):
1007                 pass
1008
1009         return result, status
1010
1011     def visit_suite(self, suite):
1012         """Implements traversing through the suite and its direct children.
1013
1014         :param suite: Suite to process.
1015         :type suite: Suite
1016         :returns: Nothing.
1017         """
1018         if self.start_suite(suite) is not False:
1019             suite.suites.visit(self)
1020             suite.tests.visit(self)
1021             self.end_suite(suite)
1022
1023     def start_suite(self, suite):
1024         """Called when suite starts.
1025
1026         :param suite: Suite to process.
1027         :type suite: Suite
1028         :returns: Nothing.
1029         """
1030
1031         try:
1032             parent_name = suite.parent.name
1033         except AttributeError:
1034             return
1035
1036         self._data[u"suites"][suite.longname.lower().
1037                               replace(u'"', u"'").
1038                               replace(u" ", u"_")] = {
1039                                   u"name": suite.name.lower(),
1040                                   u"doc": suite.doc,
1041                                   u"parent": parent_name,
1042                                   u"level": len(suite.longname.split(u"."))
1043                               }
1044
1045         suite.setup.visit(self)
1046
1047     def end_suite(self, suite):
1048         """Called when suite ends.
1049
1050         :param suite: Suite to process.
1051         :type suite: Suite
1052         :returns: Nothing.
1053         """
1054
1055     def visit_test(self, test):
1056         """Implements traversing through the test.
1057
1058         :param test: Test to process.
1059         :type test: Test
1060         :returns: Nothing.
1061         """
1062         if self.start_test(test) is not False:
1063             test.body.visit(self)
1064             self.end_test(test)
1065
1066     def start_test(self, test):
1067         """Called when test starts.
1068
1069         :param test: Test to process.
1070         :type test: Test
1071         :returns: Nothing.
1072         """
1073
1074         self._sh_run_counter = 0
1075         self._telemetry_kw_counter = 0
1076         self._telemetry_msg_counter = 0
1077
1078         longname_orig = test.longname.lower()
1079
1080         # Check the ignore list
1081         if longname_orig in self._ignore:
1082             return
1083
1084         tags = [str(tag) for tag in test.tags]
1085         test_result = dict()
1086
1087         # Change the TC long name and name if defined in the mapping table
1088         longname = self._mapping.get(longname_orig, None)
1089         if longname is not None:
1090             name = longname.split(u'.')[-1]
1091             logging.debug(
1092                 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1093                 f"{name}"
1094             )
1095         else:
1096             longname = longname_orig
1097             name = test.name.lower()
1098
1099         # Remove TC number from the TC long name (backward compatibility):
1100         self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
1101         # Remove TC number from the TC name (not needed):
1102         test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
1103
1104         test_result[u"parent"] = test.parent.name.lower()
1105         test_result[u"tags"] = tags
1106         test_result["doc"] = test.doc
1107         test_result[u"type"] = u""
1108         test_result[u"status"] = test.status
1109         test_result[u"starttime"] = test.starttime
1110         test_result[u"endtime"] = test.endtime
1111
1112         if test.status == u"PASS":
1113             if u"NDRPDR" in tags:
1114                 if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
1115                     test_result[u"msg"] = self._get_data_from_pps_test_msg(
1116                         test.message)
1117                 elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1118                     test_result[u"msg"] = self._get_data_from_cps_test_msg(
1119                         test.message)
1120                 else:
1121                     test_result[u"msg"] = self._get_data_from_perf_test_msg(
1122                         test.message)
1123             elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1124                 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1125                     test.message)
1126             else:
1127                 test_result[u"msg"] = test.message
1128         else:
1129             test_result[u"msg"] = test.message
1130
1131         if u"PERFTEST" in tags and u"TREX" not in tags:
1132             # Replace info about cores (e.g. -1c-) with the info about threads
1133             # and cores (e.g. -1t1c-) in the long test case names and in the
1134             # test case names if necessary.
1135             tag_count = 0
1136             tag_tc = str()
1137             for tag in test_result[u"tags"]:
1138                 groups = re.search(self.REGEX_TC_TAG, tag)
1139                 if groups:
1140                     tag_count += 1
1141                     tag_tc = tag
1142
1143             if tag_count == 1:
1144                 self._test_id = re.sub(
1145                     self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1146                     self._test_id, count=1
1147                 )
1148                 test_result[u"name"] = re.sub(
1149                     self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1150                     test_result["name"], count=1
1151                 )
1152             else:
1153                 test_result[u"status"] = u"FAIL"
1154                 self._data[u"tests"][self._test_id] = test_result
                logging.debug(
                    f"The test {self._test_id} has none or more than one "
                    f"multi-threading tag.\n"
                    f"Tags: {test_result[u'tags']}"
                )
                return

        if u"DEVICETEST" in tags:
            test_result[u"type"] = u"DEVICETEST"
        elif u"NDRPDR" in tags:
            if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                test_result[u"type"] = u"CPS"
            else:
                test_result[u"type"] = u"NDRPDR"
            if test.status == u"PASS":
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"gbps"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput_gbps(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
        elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
            if u"MRR" in tags:
                test_result[u"type"] = u"MRR"
            else:
                test_result[u"type"] = u"BMRR"
            if test.status == u"PASS":
                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [
                        float(item.strip().replace(u"'", u""))
                        for item in items_str.split(",")
                    ]
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"samples"] = items_float
                    test_result[u"result"][u"receive-rate"] = stats.avg
                    test_result[u"result"][u"receive-stdev"] = stats.stdev
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
        elif u"SOAK" in tags:
            test_result[u"type"] = u"SOAK"
            if test.status == u"PASS":
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
        elif u"LDP_NGINX" in tags:
            test_result[u"type"] = u"LDP_NGINX"
            test_result[u"result"], test_result[u"status"] = \
                self._get_vsap_data(test.message, tags)
        elif u"HOSTSTACK" in tags:
            test_result[u"type"] = u"HOSTSTACK"
            if test.status == u"PASS":
                test_result[u"result"], test_result[u"status"] = \
                    self._get_hoststack_data(test.message, tags)
        elif u"RECONF" in tags:
            test_result[u"type"] = u"RECONF"
            if test.status == u"PASS":
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
        else:
            test_result[u"status"] = u"FAIL"

        self._data[u"tests"][self._test_id] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == u"setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == u"teardown":
                self.visit_teardown_kw(keyword)
            else:
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.body:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if not self._process_oper:
            return

        if test_kw.name.count(u"Run Telemetry On All Duts"):
            self._msg_type = u"test-telemetry"
            self._telemetry_kw_counter += 1
        elif test_kw.name.count(u"Show Runtime On All Duts"):
            self._msg_type = u"test-show-runtime"
            self._sh_run_counter += 1
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
1317         """Implements traversing through the teardown keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.setup:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
1330         """Called when teardown keyword starts. Default implementation does
1331         nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
                not self._version:
            self._msg_type = u"dpdk-version"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.body:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
1371         """Called when teardown keyword starts

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        if teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Get required information from messages:
        - VPP version.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """


class InputData:
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
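
    Example access (a sketch; the job name and build number are hypothetical
    and the builds must have been downloaded and parsed first):

        in_data = InputData(spec, u"report")
        tests = in_data.tests(u"csit-vpp-perf-report-iterative", u"1")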
1435     """
1436
1437     def __init__(self, spec, for_output):
1438         """Initialization.
1439
1440         :param spec: Specification.
1441         :param for_output: Output to be generated from downloaded data.
1442         :type spec: Specification
1443         :type for_output: str
1444         """
1445
1446         # Specification:
1447         self._cfg = spec
1448
1449         self._for_output = for_output
1450
1451         # Data store:
1452         self._input_data = pd.Series(dtype="float64")
1453
1454     @property
1455     def data(self):
1456         """Getter - Input data.
1457
1458         :returns: Input data
1459         :rtype: pandas.Series
1460         """
1461         return self._input_data
1462
1463     def metadata(self, job, build):
1464         """Getter - metadata
1465
1466         :param job: Job which metadata we want.
1467         :param build: Build which metadata we want.
1468         :type job: str
1469         :type build: str
1470         :returns: Metadata
1471         :rtype: pandas.Series
1472         """
1473         return self.data[job][build][u"metadata"]
1474
1475     def suites(self, job, build):
1476         """Getter - suites
1477
1478         :param job: Job which suites we want.
1479         :param build: Build which suites we want.
1480         :type job: str
1481         :type build: str
1482         :returns: Suites.
1483         :rtype: pandas.Series
1484         """
1485         return self.data[job][str(build)][u"suites"]
1486
1487     def tests(self, job, build):
1488         """Getter - tests
1489
1490         :param job: Job which tests we want.
1491         :param build: Build which tests we want.
1492         :type job: str
1493         :type build: str
1494         :returns: Tests.
1495         :rtype: pandas.Series
1496         """
1497         return self.data[job][build][u"tests"]
1498
1499     def _parse_tests(self, job, build):
1500         """Process data from robot output.xml file and return JSON structured
1501         data.
1502
1503         :param job: Name of the job whose output data will be processed.
1504         :param build: The build whose output data will be processed.
1505         :type job: str
1506         :type build: dict
1507         :returns: JSON data structure.
1508         :rtype: dict
1509         """
1510
1511         metadata = {
1512             u"job": job,
1513             u"build": build
1514         }
1515
1516         with open(build[u"file-name"], u'r') as data_file:
1517             try:
1518                 result = ExecutionResult(data_file)
1519             except errors.DataError as err:
1520                 logging.error(
1521                     f"Error occurred while parsing output.xml: {repr(err)}"
1522                 )
1523                 return None
1524
1525         process_oper = False
1526         if u"-vpp-perf-report-coverage-" in job:
1527             process_oper = True
1528         # elif u"-vpp-perf-report-iterative-" in job:
1529         #     # Exceptions for TBs where we do not have coverage data:
1530         #     for item in (u"-2n-icx", ):
1531         #         if item in job:
1532         #             process_oper = True
1533         #             break
1534         checker = ExecutionChecker(
1535             metadata, self._cfg.mapping, self._cfg.ignore, process_oper
1536         )
1537         result.visit(checker)
1538
1539         checker.data[u"metadata"][u"tests_total"] = \
1540             result.statistics.total.total
1541         checker.data[u"metadata"][u"tests_passed"] = \
1542             result.statistics.total.passed
1543         checker.data[u"metadata"][u"tests_failed"] = \
1544             result.statistics.total.failed
1545         checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
1546         checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]
1547
1548         return checker.data
1549
1550     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1551         """Download and parse the input data file.
1552
1553         :param job: Name of the Jenkins job which generated the processed
1554             input file.
1555         :param build: Information about the Jenkins build which generated the
1556             processed input file.
1557         :param repeat: Repeat the download the specified number of times if
1558             not successful.
1559         :param pid: PID of the process executing this method.
1560         :type job: str
1561         :type build: dict
1562         :type repeat: int
1563         :type pid: int
1564         """
1565
1566         logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1567
1568         state = u"failed"
1569         success = False
1570         data = None
1571         do_repeat = repeat
1572         while do_repeat:
1573             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1574             if success:
1575                 break
1576             do_repeat -= 1
1577         if not success:
1578             logging.error(
1579                 f"It is not possible to download the input data file from the "
1580                 f"job {job}, build {build[u'build']}, or it is damaged. "
1581                 f"Skipped."
1582             )
1583         if success:
1584             logging.info(f"  Processing data from build {build[u'build']}")
1585             data = self._parse_tests(job, build)
1586             if data is None:
1587                 logging.error(
1588                     f"Input data file from the job {job}, build "
1589                     f"{build[u'build']} is damaged. Skipped."
1590                 )
1591             else:
1592                 state = u"processed"
1593
1594             try:
1595                 remove(build[u"file-name"])
1596             except OSError as err:
1597                 logging.error(
1598                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1599                 )
1600
1601         # If the time-period is defined in the specification file, drop the
1602         # data of builds generated outside that time period.
1603         is_last = False
1604         timeperiod = self._cfg.environment.get(u"time-period", None)
1605         if timeperiod and data:
1606             now = dt.utcnow()
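            # timedelta() interprets its first positional argument as days.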
1607             timeperiod = timedelta(int(timeperiod))
1608             metadata = data.get(u"metadata", None)
1609             if metadata:
1610                 generated = metadata.get(u"generated", None)
1611                 if generated:
1612                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1613                     if (now - generated) > timeperiod:
1614                         # Remove the data and the file:
1615                         state = u"removed"
1616                         data = None
1617                         is_last = True
1618                         logging.info(
1619                             f"  The build {job}/{build[u'build']} is "
1620                             f"outdated, will be removed."
1621                         )
1622         return {
1623             u"data": data,
1624             u"state": state,
1625             u"job": job,
1626             u"build": build,
1627             u"last": is_last
1628         }
1629
1630     def download_and_parse_data(self, repeat=1):
1631         """Download the input data files, parse input data from input files and
1632         store in pandas' Series.
1633
1634         :param repeat: Repeat the download the specified number of times if
1635             not successful.
1636         :type repeat: int
1637         """
1638
1639         logging.info(u"Downloading and parsing input files ...")
1640
1641         for job, builds in self._cfg.input.items():
1642             for build in builds:
1643
1644                 result = self._download_and_parse_build(job, build, repeat)
1645                 if result[u"last"]:
1646                     break
1647                 build_nr = result[u"build"][u"build"]
1648
1649                 if result[u"data"]:
1650                     data = result[u"data"]
1651                     build_data = pd.Series({
1652                         u"metadata": pd.Series(
1653                             list(data[u"metadata"].values()),
1654                             index=list(data[u"metadata"].keys())
1655                         ),
1656                         u"suites": pd.Series(
1657                             list(data[u"suites"].values()),
1658                             index=list(data[u"suites"].keys())
1659                         ),
1660                         u"tests": pd.Series(
1661                             list(data[u"tests"].values()),
1662                             index=list(data[u"tests"].keys())
1663                         )
1664                     })
1665
1666                     if self._input_data.get(job, None) is None:
1667                         self._input_data[job] = pd.Series(dtype="float64")
1668                     self._input_data[job][str(build_nr)] = build_data
1669                     self._cfg.set_input_file_name(
1670                         job, build_nr, result[u"build"][u"file-name"]
1671                     )
1672                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1673
1674                 mem_alloc = \
1675                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1676                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1677
1678         logging.info(u"Done.")
1679
1680         msg = u"Successful downloads from the sources:\n"
1681         for source in self._cfg.environment[u"data-sources"]:
1682             if source[u"successful-downloads"]:
1683                 msg += (
1684                     f"{source[u'url']}/{source[u'path']}/"
1685                     f"{source[u'file-name']}: "
1686                     f"{source[u'successful-downloads']}\n"
1687                 )
1688         logging.info(msg)
1689
1690     def process_local_file(self, local_file, job=u"local", build_nr=1,
1691                            replace=True):
1692         """Process local XML file given as a command-line parameter.
1693
1694         :param local_file: The file to process.
1695         :param job: Job name.
1696         :param build_nr: Build number; a numeric file name overrides it.
1697         :param replace: If True, the information about jobs and builds is
1698             replaced by the new one, otherwise the new jobs and builds are
1699             added.
1700         :type local_file: str
1701         :type job: str
1702         :type build_nr: int
1703         :type replace: bool
1704         :raises: PresentationError if an error occurs.
1705         """
1706         if not isfile(local_file):
1707             raise PresentationError(f"The file {local_file} does not exist.")
1708
1709         try:
1710             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1711         except (IndexError, ValueError):
1712             pass
1713
1714         build = {
1715             u"build": build_nr,
1716             u"status": u"failed",
1717             u"file-name": local_file
1718         }
1719         if replace:
1720             self._cfg.input = dict()
1721         self._cfg.add_build(job, build)
1722
1723         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1724         data = self._parse_tests(job, build)
1725         if data is None:
1726             raise PresentationError(
1727                 f"Error occurred while parsing the file {local_file}"
1728             )
1729
1730         build_data = pd.Series({
1731             u"metadata": pd.Series(
1732                 list(data[u"metadata"].values()),
1733                 index=list(data[u"metadata"].keys())
1734             ),
1735             u"suites": pd.Series(
1736                 list(data[u"suites"].values()),
1737                 index=list(data[u"suites"].keys())
1738             ),
1739             u"tests": pd.Series(
1740                 list(data[u"tests"].values()),
1741                 index=list(data[u"tests"].keys())
1742             )
1743         })
1744
1745         if self._input_data.get(job, None) is None:
1746             self._input_data[job] = pd.Series(dtype="float64")
1747         self._input_data[job][str(build_nr)] = build_data
1748
1749         self._cfg.set_input_state(job, build_nr, u"processed")
1750
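    # Usage sketch (hypothetical path; the build number 15 is then parsed
    # from the file name):
    #
    #     in_data.process_local_file(u"/tmp/xmls/15.xml")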
1751     def process_local_directory(self, local_dir, replace=True):
1752         """Process local directory with XML file(s). The directory is processed
1753         as a 'job' and the XML files in it as builds.
1754         If the given directory contains only sub-directories, these
1755         sub-directories are processed as jobs and the corresponding XML files
1756         as builds of their respective jobs.
1757
1758         :param local_dir: Local directory to process.
1759         :param replace: If True, the information about jobs and builds is
1760             replaced by the new one, otherwise the new jobs and builds are
1761             added.
1762         :type local_dir: str
1763         :type replace: bool
1764         """
1765         if not isdir(local_dir):
1766             raise PresentationError(
1767                 f"The directory {local_dir} does not exist."
1768             )
1769
1770         # Check if the given directory includes only files, or only directories
1771         _, dirnames, filenames = next(walk(local_dir))
1772
1773         if filenames and not dirnames:
1774             filenames.sort()
1775             # local_builds:
1776             # key: dir (job) name, value: list of file names (builds)
1777             local_builds = {
1778                 local_dir: [join(local_dir, name) for name in filenames]
1779             }
1780
1781         elif dirnames and not filenames:
1782             dirnames.sort()
1783             # local_builds:
1784             # key: dir (job) name, value: list of file names (builds)
1785             local_builds = dict()
1786             for dirname in dirnames:
1787                 builds = [
1788                     join(local_dir, dirname, name)
1789                     for name in listdir(join(local_dir, dirname))
1790                     if isfile(join(local_dir, dirname, name))
1791                 ]
1792                 if builds:
1793                     local_builds[dirname] = sorted(builds)
1794
1795         elif not filenames and not dirnames:
1796             raise PresentationError(f"The directory {local_dir} is empty.")
1797         else:
1798             raise PresentationError(
1799                 f"The directory {local_dir} can include only files or only "
1800                 f"directories, not both.\nThe directory {local_dir} includes "
1801                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1802             )
1803
1804         if replace:
1805             self._cfg.input = dict()
1806
1807         for job, files in local_builds.items():
1808             for idx, local_file in enumerate(files):
1809                 self.process_local_file(local_file, job, idx + 1, replace=False)
1810
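    # Layout sketch (hypothetical paths):
    #
    #     flat:   /tmp/xmls/1.xml, /tmp/xmls/2.xml
    #             -> one job u"/tmp/xmls" with builds 1 and 2
    #     nested: /tmp/xmls/jobA/1.xml, /tmp/xmls/jobB/1.xml
    #             -> jobs u"jobA" and u"jobB", one build each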
1811     @staticmethod
1812     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1813         """Return the index of the character which closes the tag.
1814
1815         :param tag_filter: The string where the end of tag is being searched.
1816         :param start: The index where the searching is started.
1817         :param closer: The character which is the tag closer.
1818         :type tag_filter: str
1819         :type start: int
1820         :type closer: str
1821         :returns: The index of the tag closer, or None if no tag is found.
1822         :rtype: int
1823         """
1824         try:
1825             idx_opener = tag_filter.index(closer, start)
1826             return tag_filter.index(closer, idx_opener + 1)
1827         except ValueError:
1828             return None
1829
1830     @staticmethod
1831     def _condition(tag_filter):
1832         """Create a conditional statement from the given tag filter.
1833
1834         :param tag_filter: Filter based on tags from the element specification.
1835         :type tag_filter: str
1836         :returns: Conditional statement which can be evaluated.
1837         :rtype: str
1838         """
1839         index = 0
1840         while True:
1841             index = InputData._end_of_tag(tag_filter, index)
1842             if index is None:
1843                 return tag_filter
1844             index += 1
1845             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1846
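    # Transformation sketch: _condition() inserts u" in tags" after each
    # closing apostrophe found by _end_of_tag(), e.g. (hypothetical filter):
    #
    #     "'NDRPDR' and not 'NIC_Intel-X710'"
    #     -> "'NDRPDR' in tags and not 'NIC_Intel-X710' in tags"
    #
    # The result is evaluated later with the test's tags bound to `tags`.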
1847     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1848                     continue_on_error=False):
1849         """Filter required data from the given jobs and builds.
1850
1851         The output data structure is:
1852         - job 1
1853           - build 1
1854             - test (or suite) 1 ID:
1855               - param 1
1856               - param 2
1857               ...
1858               - param n
1859             ...
1860             - test (or suite) n ID:
1861             ...
1862           ...
1863           - build n
1864         ...
1865         - job n
1866
1867         :param element: Element which will use the filtered data.
1868         :param params: Parameters which will be included in the output. If None,
1869             all parameters are included.
1870         :param data: If not None, this data is used instead of data specified
1871             in the element.
1872         :param data_set: The set of data to be filtered: tests, suites,
1873             metadata.
1874         :param continue_on_error: Continue if there is an error while reading
1875             the data. The item will be empty then.
1876         :type element: pandas.Series
1877         :type params: list
1878         :type data: dict
1879         :type data_set: str
1880         :type continue_on_error: bool
1881         :returns: Filtered data.
1882         :rtype: pandas.Series
1883         """
1884
1885         try:
1886             if data_set == "suites":
1887                 cond = u"True"
1888             elif element[u"filter"] in (u"all", u"template"):
1889                 cond = u"True"
1890             else:
1891                 cond = InputData._condition(element[u"filter"])
1892             logging.debug(f"   Filter: {cond}")
1893         except KeyError:
1894             logging.error(u"  No filter defined.")
1895             return None
1896
1897         if params is None:
1898             params = element.get(u"parameters", None)
1899             if params:
1900                 params.extend((u"type", u"status"))
1901
1902         data_to_filter = data if data else element[u"data"]
1903         data = pd.Series(dtype="float64")
1904         try:
1905             for job, builds in data_to_filter.items():
1906                 data[job] = pd.Series(dtype="float64")
1907                 for build in builds:
1908                     data[job][str(build)] = pd.Series(dtype="float64")
1909                     try:
1910                         data_dict = dict(
1911                             self.data[job][str(build)][data_set].items())
1912                     except KeyError:
1913                         if continue_on_error:
1914                             continue
1915                         return None
1916
1917                     for test_id, test_data in data_dict.items():
1918                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1919                             data[job][str(build)][test_id] = \
1920                                 pd.Series(dtype="float64")
1921                             if params is None:
1922                                 for param, val in test_data.items():
1923                                     data[job][str(build)][test_id][param] = val
1924                             else:
1925                                 for param in params:
1926                                     try:
1927                                         data[job][str(build)][test_id][param] =\
1928                                             test_data[param]
1929                                     except KeyError:
1930                                         data[job][str(build)][test_id][param] =\
1931                                             u"No Data"
1932             return data
1933
1934         except (KeyError, IndexError, ValueError) as err:
1935             logging.error(
1936                 f"Missing mandatory parameter in the element specification: "
1937                 f"{repr(err)}"
1938             )
1939             return None
1940         except AttributeError as err:
1941             logging.error(repr(err))
1942             return None
1943         except SyntaxError as err:
1944             logging.error(
1945                 f"The filter {cond} is not correct. Check if all tags are "
1946                 f"enclosed by apostrophes.\n{repr(err)}"
1947             )
1948             return None
1949
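    # Usage sketch (hypothetical element specification; assumes the job and
    # build were already downloaded and parsed):
    #
    #     element = pd.Series({
    #         u"filter": u"'NDRPDR' and '64B'",
    #         u"parameters": [u"throughput"],
    #         u"data": {u"csit-vpp-perf-report-iterative": [u"42"]}
    #     })
    #     filtered = in_data.filter_data(element)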
1950     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1951                              continue_on_error=False):
1952         """Filter required data from the given jobs and builds.
1953
1954         The output data structure is:
1955         - job 1
1956           - build 1
1957             - test (or suite) 1 ID:
1958               - param 1
1959               - param 2
1960               ...
1961               - param n
1962             ...
1963             - test (or suite) n ID:
1964             ...
1965           ...
1966           - build n
1967         ...
1968         - job n
1969
1970         :param element: Element which will use the filtered data.
1971         :param params: Parameters which will be included in the output. If
1972             None, all parameters are included.
1973         :param data_set: The set of data to be filtered: tests, suites,
1974             metadata.
1975         :param continue_on_error: Continue if there is an error while reading
1976             the data. The item will be empty then.
1977         :type element: pandas.Series
1978         :type params: list
1979         :type data_set: str
1980         :type continue_on_error: bool
1981         :returns: Filtered data.
1982         :rtype: pandas.Series
1983         """
1984
1985         include = element.get(u"include", None)
1986         if not include:
1987             logging.warning(u"No tests to include, skipping the element.")
1988             return None
1989
1990         if params is None:
1991             params = element.get(u"parameters", None)
1992             if params and u"type" not in params:
1993                 params.append(u"type")
1994
1995         cores = element.get(u"core", None)
1996         if cores:
1997             tests = list()
1998             for core in cores:
1999                 for test in include:
2000                     tests.append(test.format(core=core))
2001         else:
2002             tests = include
2003
2004         data = pd.Series(dtype="float64")
2005         try:
2006             for job, builds in element[u"data"].items():
2007                 data[job] = pd.Series(dtype="float64")
2008                 for build in builds:
2009                     data[job][str(build)] = pd.Series(dtype="float64")
2010                     for test in tests:
2011                         try:
2012                             reg_ex = re.compile(str(test).lower())
2013                             for test_id in self.data[job][
2014                                     str(build)][data_set].keys():
2015                                 if re.match(reg_ex, str(test_id).lower()):
2016                                     test_data = self.data[job][
2017                                         str(build)][data_set][test_id]
2018                                     data[job][str(build)][test_id] = \
2019                                         pd.Series(dtype="float64")
2020                                     if params is None:
2021                                         for param, val in test_data.items():
2022                                             data[job][str(build)][test_id]\
2023                                                 [param] = val
2024                                     else:
2025                                         for param in params:
2026                                             try:
2027                                                 data[job][str(build)][
2028                                                     test_id][param] = \
2029                                                     test_data[param]
2030                                             except KeyError:
2031                                                 data[job][str(build)][
2032                                                     test_id][param] = u"No Data"
2033                         except KeyError as err:
2034                             if continue_on_error:
2035                                 logging.debug(repr(err))
2036                                 continue
2037                             logging.error(repr(err))
2038                             return None
2039             return data
2040
2041         except (KeyError, IndexError, ValueError) as err:
2042             logging.error(
2043                 f"Missing mandatory parameter in the element "
2044                 f"specification: {repr(err)}"
2045             )
2046             return None
2047         except AttributeError as err:
2048             logging.error(repr(err))
2049             return None
2050
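    # Usage sketch: u"core" expands a u"{core}" placeholder in each include
    # pattern before the regex match (hypothetical names):
    #
    #     element = pd.Series({
    #         u"include": [u".*-{core}-ethip4-ip4base-ndrpdr"],
    #         u"core": [u"1c", u"2c"],
    #         u"data": {u"csit-vpp-perf-report-coverage": [u"7"]}
    #     })
    #     filtered = in_data.filter_tests_by_name(element)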
2051     @staticmethod
2052     def merge_data(data):
2053         """Merge data from multiple jobs and builds into one data structure.
2054
2055         The output data structure is:
2056
2057         - test (suite) 1 ID:
2058           - param 1
2059           - param 2
2060           ...
2061           - param n
2062         ...
2063         - test (suite) n ID:
2064         ...
2065
2066         :param data: Data to merge.
2067         :type data: pandas.Series
2068         :returns: Merged data.
2069         :rtype: pandas.Series
2070         """
2071
2072         logging.info(u"    Merging data ...")
2073
2074         merged_data = pd.Series(dtype="float64")
2075         for builds in data.values:
2076             for item in builds.values:
2077                 for item_id, item_data in item.items():
2078                     merged_data[item_id] = item_data
2079         return merged_data
2080
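    # Note: on duplicate test IDs across jobs and builds, the last one
    # processed wins, as merged_data[item_id] is simply overwritten.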
2081     def print_all_oper_data(self):
2082         """Print all operational data to console.
2083         """
2084
2085         for job in self._input_data.values:
2086             for build in job.values:
2087                 for test_id, test_data in build[u"tests"].items():
2088                     print(f"{test_id}")
2089                     if test_data.get(u"show-run", None) is None:
2090                         continue
2091                     for dut_name, data in test_data[u"show-run"].items():
2092                         if data.get(u"runtime", None) is None:
2093                             continue
2094                         runtime = loads(data[u"runtime"])
2095                         try:
2096                             threads_nr = len(runtime[0][u"clocks"])
2097                         except (IndexError, KeyError):
2098                             continue
2099                         threads = OrderedDict(
2100                             {idx: list() for idx in range(threads_nr)})
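                        # Per node: normalize clocks per vector when vectors
                        # were processed, else per call, else per suspend;
                        # otherwise report 0.0.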
2101                         for item in runtime:
2102                             for idx in range(threads_nr):
2103                                 if item[u"vectors"][idx] > 0:
2104                                     clocks = item[u"clocks"][idx] / \
2105                                              item[u"vectors"][idx]
2106                                 elif item[u"calls"][idx] > 0:
2107                                     clocks = item[u"clocks"][idx] / \
2108                                              item[u"calls"][idx]
2109                                 elif item[u"suspends"][idx] > 0:
2110                                     clocks = item[u"clocks"][idx] / \
2111                                              item[u"suspends"][idx]
2112                                 else:
2113                                     clocks = 0.0
2114
2115                                 if item[u"calls"][idx] > 0:
2116                                     vectors_call = item[u"vectors"][idx] / \
2117                                                    item[u"calls"][idx]
2118                                 else:
2119                                     vectors_call = 0.0
2120
2121                                 if int(item[u"calls"][idx]) + int(
2122                                         item[u"vectors"][idx]) + \
2123                                         int(item[u"suspends"][idx]):
2124                                     threads[idx].append([
2125                                         item[u"name"],
2126                                         item[u"calls"][idx],
2127                                         item[u"vectors"][idx],
2128                                         item[u"suspends"][idx],
2129                                         clocks,
2130                                         vectors_call
2131                                     ])
2132
2133                         print(f"Host IP: {data.get(u'host', '')}, "
2134                               f"Socket: {data.get(u'socket', '')}")
2135                         for thread_nr, thread in threads.items():
2136                             txt_table = prettytable.PrettyTable(
2137                                 (
2138                                     u"Name",
2139                                     u"Nr of Vectors",
2140                                     u"Nr of Packets",
2141                                     u"Suspends",
2142                                     u"Cycles per Packet",
2143                                     u"Average Vector Size"
2144                                 )
2145                             )
2146                             avg = 0.0
2147                             for row in thread:
2148                                 txt_table.add_row(row)
2149                                 avg += row[-1]
2150                             if len(thread) == 0:
2151                                 avg = u""
2152                             else:
2153                                 avg = f", Average Vector Size per Node: " \
2154                                       f"{(avg / len(thread)):.2f}"
2155                             th_name = u"main" if thread_nr == 0 \
2156                                 else f"worker_{thread_nr}"
2157                             print(f"{dut_name}, {th_name}{avg}")
2158                             txt_table.float_format = u".2"
2159                             txt_table.align = u"r"
2160                             txt_table.align[u"Name"] = u"l"
2161                             print(f"{txt_table.get_string()}\n")