PAL: Optimize specification of elements
resources/tools/presentation/input_data_parser.py
# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Data pre-processing

- extract data from output.xml files generated by Jenkins jobs and store in
  pandas' Series,
- provide access to the data,
- filter the data using tags.
"""

import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove, walk, listdir
from os.path import isfile, isdir, join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from json.decoder import JSONDecodeError

import hdrh.histogram
import hdrh.codec
import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file
from pal_errors import PresentationError


# Separator used in file names
SEPARATOR = u"__"
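# Example (illustrative; the actual file names are composed by the download
# code, e.g. in input_data_files.py):
#
#     file_name = SEPARATOR.join((job, str(build_nr), u"output.xml"))
#     # -> u"csit-vpp-perf-mrr-daily-master__42__output.xml"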


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    }
                }
            }

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            }

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            }

            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
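
    For example (illustrative only), an ID can look like
    "tests.vpp.perf.ip4.10ge2p1x710-ethip4-ip4base-ndrpdr.64b-2t1c-ethip4-ip4base-ndrpdr".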
    """

    REGEX_PLR_RATE = re.compile(
        r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
        r'PLRsearch upper bound::?\s(\d+.\d+)'
    )
    REGEX_NDRPDR_RATE = re.compile(
        r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:\s(\d+.\d+).*\n'
        r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:\s(\d+.\d+)'
    )
    REGEX_NDRPDR_GBPS = re.compile(
        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:.*,\s(\d+.\d+)'
    )
    REGEX_PERF_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    )
    REGEX_CPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
    )
    REGEX_PPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
    )
    REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')

    # Needed for CPS and PPS tests
    REGEX_NDRPDR_LAT_BASE = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
    )
    REGEX_NDRPDR_LAT = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
    )

    REGEX_VERSION_VPP = re.compile(
        r"(return STDOUT Version:\s*|"
        r"VPP Version:\s*|VPP version:\s*)(.*)"
    )
    REGEX_VERSION_DPDK = re.compile(
        r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
    )
    REGEX_TCP = re.compile(
        r'Total\s(rps|cps|throughput):\s(\d*).*$'
    )
    REGEX_MRR = re.compile(
        r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
        r'tx\s(\d*),\srx\s(\d*)'
    )
    REGEX_BMRR = re.compile(
        r'.*trial results.*: \[(.*)\]'
    )
    REGEX_RECONF_LOSS = re.compile(
        r'Packets lost due to reconfig: (\d*)'
    )
    REGEX_RECONF_TIME = re.compile(
        r'Implied time lost: (\d*.[\de-]*)'
    )
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')

    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        # Number of PAPI History messages found:
        # 0 - no message
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._conf_history_lookup_nr = 0

        self._sh_run_counter = 0

        # Test ID of the currently processed test, i.e. the lowercase full
        # path to the test
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"timestamp": self._get_timestamp,
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            # TODO: Remove when not needed:
            u"teardown-vat-history": self._get_vat_history,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_data_from_mrr_test_msg(self, msg):
        """Get info from message of MRR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or u"Test Failed." if a problem occurs.
        :rtype: str
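
        Example (illustrative message text; the exact wording comes from the
        MRR test itself)::

            msg = u"...trial results: [12000000.0, 12500000.0]"
            # _get_data_from_mrr_test_msg(msg) -> u"[12.00, 12.50]"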
        """

        groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
        if not groups or groups.lastindex != 1:
            return u"Test Failed."

        try:
            data = groups.group(1).split(u", ")
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        out_str = u"["
        try:
            for item in data:
                out_str += f"{(float(item) / 1e6):.2f}, "
            return out_str[:-2] + u"]"
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_cps_test_msg(self, msg):
        """Get info from message of NDRPDR CPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
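
        Example (illustrative): for group(1) == u"500000.0" and
        group(2) == u"450000.0" the returned message reads
        "1.  0.50" on the first line and "2.  0.45" on the second.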
        """

        groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 2:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
                f"2. {(float(groups.group(2)) / 1e6):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_pps_test_msg(self, msg):
        """Get info from message of NDRPDR PPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 4:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
                f"{float(groups.group(2)):5.2f}\n"
                f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
                f"{float(groups.group(4)):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_perf_test_msg(self, msg):
        """Get info from message of NDRPDR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
        if not groups or groups.lastindex != 10:
            return u"Test Failed."

        try:
            data = {
                u"ndr_low": float(groups.group(1)),
                u"ndr_low_b": float(groups.group(2)),
                u"pdr_low": float(groups.group(3)),
                u"pdr_low_b": float(groups.group(4)),
                u"pdr_lat_90_1": groups.group(5),
                u"pdr_lat_90_2": groups.group(6),
                u"pdr_lat_50_1": groups.group(7),
                u"pdr_lat_50_2": groups.group(8),
                u"pdr_lat_10_1": groups.group(9),
                u"pdr_lat_10_2": groups.group(10),
            }
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        def _process_lat(in_str_1, in_str_2):
            """Extract min, avg, max values from latency string.

            :param in_str_1: Latency string for one direction produced by robot
                framework.
            :param in_str_2: Latency string for second direction produced by
                robot framework.
            :type in_str_1: str
            :type in_str_2: str
            :returns: Processed latency string or None if a problem occurs.
            :rtype: tuple
            """
            in_list_1 = in_str_1.split('/', 3)
            in_list_2 = in_str_2.split('/', 3)

            if len(in_list_1) != 4 and len(in_list_2) != 4:
                return None

            in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
            try:
                hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
            except hdrh.codec.HdrLengthException:
                return None

            in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
            try:
                hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
            except hdrh.codec.HdrLengthException:
                return None

            if hdr_lat_1 and hdr_lat_2:
                hdr_lat = (
                    hdr_lat_1.get_value_at_percentile(50.0),
                    hdr_lat_1.get_value_at_percentile(90.0),
                    hdr_lat_1.get_value_at_percentile(99.0),
                    hdr_lat_2.get_value_at_percentile(50.0),
                    hdr_lat_2.get_value_at_percentile(90.0),
                    hdr_lat_2.get_value_at_percentile(99.0)
                )

                if all(hdr_lat):
                    return hdr_lat

            return None

        try:
            out_msg = (
                f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
                f"{data[u'ndr_low_b']:5.2f}"
                f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
                f"{data[u'pdr_low_b']:5.2f}"
            )
            latency = (
                _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
                _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
                _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
            )
            if all(latency):
                max_len = len(str(max((max(item) for item in latency))))
                max_len = 4 if max_len < 4 else max_len

                for idx, lat in enumerate(latency):
                    if not idx:
                        out_msg += u"\n"
                    out_msg += (
                        f"\n{idx + 3}. "
                        f"{lat[0]:{max_len}d} "
                        f"{lat[1]:{max_len}d} "
                        f"{lat[2]:{max_len}d}      "
                        f"{lat[3]:{max_len}d} "
                        f"{lat[4]:{max_len}d} "
                        f"{lat[5]:{max_len}d} "
                    )

            return out_msg

        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"return STDOUT Version:") or \
                msg.message.count(u"VPP Version:") or \
                msg.message.count(u"VPP version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
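
        Example (illustrative): a robot timestamp such as
        u"20210101 12:30:45.123" is truncated by [:14] to u"20210101 12:30".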
        """

        self._timestamp = msg.timestamp[:14]
        self._data[u"metadata"][u"generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        TODO: Remove when not needed.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          r"VAT command history:", u"",
                          msg.message, count=1).replace(u'\n', u' |br| ').\
                replace(u'"', u"'")

            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          r"PAPI command history:", u"",
                          msg.message, count=1).replace(u'\n', u' |br| ').\
                replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if not msg.message.count(u"stats runtime"):
            return

        # Temporary solution
        if self._sh_run_counter > 1:
            return

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = dict()

        groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""

        runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
                        replace(u"'", u'"').replace(u'b"', u'"').
                        replace(u'u"', u'"').split(u":", 1)[1])
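
        # Note: the replace() chain above turns the Python-repr-like content
        # of the message (single quotes, b"..."/u"..." string prefixes) into
        # strict JSON, so that json.loads() can parse the part after the
        # first colon.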

        try:
            threads_nr = len(runtime[0][u"clocks"])
        except (IndexError, KeyError):
            return

        dut = u"DUT{nr}".format(
            nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)

        oper = {
            u"host": host,
            u"socket": sock,
            u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
        }

        for item in runtime:
            for idx in range(threads_nr):
                if item[u"vectors"][idx] > 0:
                    clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
                elif item[u"calls"][idx] > 0:
                    clocks = item[u"clocks"][idx] / item[u"calls"][idx]
                elif item[u"suspends"][idx] > 0:
                    clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
                else:
                    clocks = 0.0

                if item[u"calls"][idx] > 0:
                    vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
                else:
                    vectors_call = 0.0

                if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
                        int(item[u"suspends"][idx]):
                    oper[u"threads"][idx].append([
                        item[u"name"],
                        item[u"calls"][idx],
                        item[u"vectors"][idx],
                        item[u"suspends"][idx],
                        clocks,
                        vectors_call
                    ])

        self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
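
        Example (illustrative)::

            # For a message with rates
            #   NDR_LOWER: 12000000.0 ... / NDR_UPPER: 12100000.0 ...
            #   PDR_LOWER: 12200000.0 ... / PDR_UPPER: 12300000.0 ...
            # the return value is
            #   ({u"NDR": {u"LOWER": 12000000.0, u"UPPER": 12100000.0},
            #     u"PDR": {u"LOWER": 12200000.0, u"UPPER": 12300000.0}},
            #    u"PASS")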
        """

        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_throughput_gbps(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
        test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        gbps = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_GBPS, msg)

        if groups is not None:
            try:
                gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
                gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
                gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
                gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return gbps, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0,
            u"avg": -1.0,
            u"max": -1.0,
            u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"LAT0": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR10": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR50": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR90": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
        }

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)
        if groups is None:
            groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
        if groups is None:
            return latency, u"FAIL"

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with min, avg and max as floats, and hdrh as str.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
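
            Example (illustrative)::

                process_latency(u"1/2/5/<base64 hdrh data>")
                # -> {u"min": 1.0, u"avg": 2.0, u"max": 5.0,
                #     u"hdrh": u"<base64 hdrh data>"}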
            """
            in_list = in_str.split('/', 3)

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

        try:
            latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
            latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
            latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
            latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
            if groups.lastindex == 4:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        try:
            latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
            latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
            latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
            latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
            latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
            latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
            latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
            latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
            if groups.lastindex == 12:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        # TODO: Remove when not needed
        latency[u"NDR10"] = {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        }
        latency[u"NDR50"] = {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        }
        latency[u"NDR90"] = {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        }
        try:
            latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
            latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
            latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
            latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
            latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
            latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
            latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
            latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
            latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
            latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
            latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
            latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
            latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
            latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
            return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        return latency, u"FAIL"

    @staticmethod
    def _get_hoststack_data(msg, tags):
        """Get data from the hoststack test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
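
        Example (illustrative): for an LDPRELOAD test whose message is a
        single JSON-like object (single quotes), the quotes are normalised
        and the object is parsed with json.loads(); for a VPPECHO test the
        message holds two such objects, parsed into the "client" and
        "server" keys.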
        """
        result = dict()
        status = u"FAIL"

        msg = msg.replace(u"'", u'"').replace(u" ", u"")
        if u"LDPRELOAD" in tags:
            try:
                result = loads(msg)
                status = u"PASS"
            except JSONDecodeError:
                pass
        elif u"VPPECHO" in tags:
            try:
                msg_lst = msg.replace(u"}{", u"} {").split(u" ")
                result = dict(
                    client=loads(msg_lst[0]),
                    server=loads(msg_lst[1])
                )
                status = u"PASS"
            except (JSONDecodeError, IndexError):
                pass

        return result, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'*[', u' |br| *[').\
            replace(u"*", u"**").\
            replace(u' |br| *[', u'*[', 1)

        self._data[u"suites"][suite.longname.lower().
                              replace(u'"', u"'").
                              replace(u" ", u"_")] = {
                                  u"name": suite.name.lower(),
                                  u"doc": doc_str,
                                  u"parent": parent_name,
                                  u"level": len(suite.longname.split(u"."))
                              }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        self._sh_run_counter = 0

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            logging.debug(
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
                f"{name}"
            )
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(
            self.REGEX_TC_NUMBER, u"", longname.replace(u"snat", u"nat")
        )
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(
            self.REGEX_TC_NUMBER, u"", name.replace(u"snat", u"nat")
        )

        test_result[u"parent"] = test.parent.name.lower().\
            replace(u"snat", u"nat")
        test_result[u"tags"] = tags
        test_result[u"doc"] = test.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'[', u' |br| [').\
            replace(u' |br| [', u'[', 1)
        test_result[u"type"] = u"FUNC"
        test_result[u"status"] = test.status

        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
                    test_result[u"msg"] = self._get_data_from_pps_test_msg(
                        test.message).replace(u'\n', u' |br| '). \
                        replace(u'\r', u'').replace(u'"', u"'")
                elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                    test_result[u"msg"] = self._get_data_from_cps_test_msg(
                        test.message).replace(u'\n', u' |br| '). \
                        replace(u'\r', u'').replace(u'"', u"'")
                else:
                    test_result[u"msg"] = self._get_data_from_perf_test_msg(
                        test.message).replace(u'\n', u' |br| ').\
                        replace(u'\r', u'').replace(u'"', u"'")
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                test_result[u"msg"] = self._get_data_from_mrr_test_msg(
                    test.message).replace(u'\n', u' |br| ').\
                    replace(u'\r', u'').replace(u'"', u"'")
            else:
                test_result[u"msg"] = test.message.replace(u'\n', u' |br| ').\
                    replace(u'\r', u'').replace(u'"', u"'")
        else:
            test_result[u"msg"] = u"Test Failed."

        if u"PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
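            # Example (illustrative): with the tag u"2T1C" present, a test ID
            # ending in "-1c-ndrpdr" is rewritten to end in "-2t1c-ndrpdr".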
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
            if not groups:
                tag_count = 0
                tag_tc = str()
                for tag in test_result[u"tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_id = re.sub(
                        self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                        self._test_id, count=1
                    )
                    test_result[u"name"] = re.sub(
                        self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                        test_result[u"name"], count=1
                    )
                else:
                    test_result[u"status"] = u"FAIL"
                    self._data[u"tests"][self._test_id] = test_result
                    logging.debug(
                        f"The test {self._test_id} has none or more than one "
                        f"multi-threading tag.\n"
                        f"Tags: {test_result[u'tags']}"
                    )
                    return

        if test.status == u"PASS":
            if u"DEVICETEST" in tags:
                test_result[u"type"] = u"DEVICETEST"
            elif u"NDRPDR" in tags:
                if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                    test_result[u"type"] = u"CPS"
                else:
                    test_result[u"type"] = u"NDRPDR"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"gbps"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput_gbps(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                if u"MRR" in tags:
                    test_result[u"type"] = u"MRR"
                else:
                    test_result[u"type"] = u"BMRR"

                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [
                        float(item.strip().replace(u"'", u""))
                        for item in items_str.split(",")
                    ]
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"samples"] = items_float
                    test_result[u"result"][u"receive-rate"] = stats.avg
                    test_result[u"result"][u"receive-stdev"] = stats.stdev
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
            elif u"SOAK" in tags:
                test_result[u"type"] = u"SOAK"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
            elif u"HOSTSTACK" in tags:
                test_result[u"type"] = u"HOSTSTACK"
                test_result[u"result"], test_result[u"status"] = \
                    self._get_hoststack_data(test.message, tags)
            elif u"TCP" in tags:
                test_result[u"type"] = u"TCP"
                groups = re.search(self.REGEX_TCP, test.message)
                test_result[u"result"] = int(groups.group(2))
            elif u"RECONF" in tags:
                test_result[u"type"] = u"RECONF"
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
            else:
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                return

        self._data[u"tests"][self._test_id] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == u"setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == u"teardown":
                self.visit_teardown_kw(keyword)
            else:
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count(u"Show Runtime On All Duts") or \
                test_kw.name.count(u"Show Runtime Counters On All Duts") or \
                test_kw.name.count(u"Vpp Show Runtime On All Duts"):
            self._msg_type = u"test-show-runtime"
            self._sh_run_counter += 1
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
                not self._version:
            self._msg_type = u"dpdk-version"
        elif setup_kw.name.count(u"Set Global Variable") \
                and not self._timestamp:
            self._msg_type = u"timestamp"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count(u"Show Vat History On All Duts"):
            # TODO: Remove when not needed:
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Get required information from messages:
        - VPP version.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """


class InputData:
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
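
    Example of accessing the parsed data (illustrative job/build values)::

        in_data = InputData(spec)
        # ... after the builds have been downloaded and parsed ...
        tests = in_data.tests(u"csit-vpp-perf-report-iterative-2101", u"55")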
1432     """
1433
1434     def __init__(self, spec):
1435         """Initialization.
1436
1437         :param spec: Specification.
1438         :type spec: Specification
1439         """
1440
1441         # Specification:
1442         self._cfg = spec
1443
1444         # Data store:
1445         self._input_data = pd.Series()
1446
1447     @property
1448     def data(self):
1449         """Getter - Input data.
1450
1451         :returns: Input data
1452         :rtype: pandas.Series
1453         """
1454         return self._input_data
1455
1456     def metadata(self, job, build):
1457         """Getter - metadata
1458
1459         :param job: Job whose metadata we want.
1460         :param build: Build whose metadata we want.
1461         :type job: str
1462         :type build: str
1463         :returns: Metadata
1464         :rtype: pandas.Series
1465         """
1466         return self.data[job][str(build)][u"metadata"]
1467
1468     def suites(self, job, build):
1469         """Getter - suites
1470
1471         :param job: Job whose suites we want.
1472         :param build: Build whose suites we want.
1473         :type job: str
1474         :type build: str
1475         :returns: Suites.
1476         :rtype: pandas.Series
1477         """
1478         return self.data[job][str(build)][u"suites"]
1479
1480     def tests(self, job, build):
1481         """Getter - tests
1482
1483         :param job: Job whose tests we want.
1484         :param build: Build whose tests we want.
1485         :type job: str
1486         :type build: str
1487         :returns: Tests.
1488         :rtype: pandas.Series
1489         """
1490         return self.data[job][str(build)][u"tests"]
1491
1492     def _parse_tests(self, job, build):
1493         """Process data from the robot output.xml file and return JSON
1494         structured data.
1495 
1496         :param job: The name of the job whose build output data is processed.
1497         :param build: The build whose output data is processed.
1498         :type job: str
1499         :type build: dict
1500         :returns: JSON data structure.
1501         :rtype: dict
1502         """
1503
1504         metadata = {
1505             u"job": job,
1506             u"build": build
1507         }
1508
1509         with open(build[u"file-name"], u'r') as data_file:
1510             try:
1511                 result = ExecutionResult(data_file)
1512             except errors.DataError as err:
1513                 logging.error(
1514                     f"Error occurred while parsing output.xml: {repr(err)}"
1515                 )
1516                 return None
1517         checker = ExecutionChecker(metadata, self._cfg.mapping,
1518                                    self._cfg.ignore)
1519         result.visit(checker)
1520
1521         return checker.data
1522
1523     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1524         """Download and parse the input data file.
1525
1526         :param job: Name of the Jenkins job which generated the processed input
1527             file.
1528         :param build: Information about the Jenkins build which generated the
1529             processed input file.
1530         :param repeat: Repeat the download the specified number of times if not
1531             successful.
1532         :param pid: PID of the process executing this method.
1533         :type job: str
1534         :type build: dict
1535         :type repeat: int
1536         :type pid: int
1537         """
1538
1539         logging.info(f"  Processing the job/build: {job}: {build[u'build']}")
1540
1541         state = u"failed"
1542         success = False
1543         data = None
1544         do_repeat = repeat
1545         while do_repeat:
1546             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1547             if success:
1548                 break
1549             do_repeat -= 1
1550         if not success:
1551             logging.error(
1552                 f"Failed to download the input data file from the job "
1553                 f"{job}, build {build[u'build']}, or the file is damaged. "
1554                 f"Skipped."
1555             )
1556         if success:
1557             logging.info(f"    Processing data from build {build[u'build']}")
1558             data = self._parse_tests(job, build)
1559             if data is None:
1560                 logging.error(
1561                     f"Input data file from the job {job}, build "
1562                     f"{build[u'build']} is damaged. Skipped."
1563                 )
1564             else:
1565                 state = u"processed"
1566
1567             try:
1568                 remove(build[u"file-name"])
1569             except OSError as err:
1570                 logging.error(
1571                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1572                 )
1573
1574         # If the time-period is defined in the specification file, remove all
1575         # files which are outside the time period.
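        # Note: once a build falls outside the time period, it is flagged as
        # the last one (u"last") and the caller stops processing the job's
        # remaining builds; this assumes the builds are ordered so that all
        # the following ones are outside the period as well.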
1576         is_last = False
1577         timeperiod = self._cfg.input.get(u"time-period", None)
1578         if timeperiod and data:
1579             now = dt.utcnow()
1580             timeperiod = timedelta(days=int(timeperiod))
1581             metadata = data.get(u"metadata", None)
1582             if metadata:
1583                 generated = metadata.get(u"generated", None)
1584                 if generated:
1585                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1586                     if (now - generated) > timeperiod:
1587                         # Remove the data and the file:
1588                         state = u"removed"
1589                         data = None
1590                         is_last = True
1591                         logging.info(
1592                             f"    The build {job}/{build[u'build']} is "
1593                             f"outdated and will be removed."
1594                         )
1595         logging.info(u"  Done.")
1596
1597         return {
1598             u"data": data,
1599             u"state": state,
1600             u"job": job,
1601             u"build": build,
1602             u"last": is_last
1603         }
1604
1605     def download_and_parse_data(self, repeat=1):
1606         """Download the input data files, parse input data from input files and
1607         store in pandas' Series.
1608
1609         :param repeat: Repeat the download the specified number of times if not
1610             successful.
1611         :type repeat: int
1612         """
1613
1614         logging.info(u"Downloading and parsing input files ...")
1615
1616         for job, builds in self._cfg.builds.items():
1617             for build in builds:
1618
1619                 result = self._download_and_parse_build(job, build, repeat)
1620                 if result[u"last"]:
1621                     break
1622                 build_nr = result[u"build"][u"build"]
1623
1624                 if result[u"data"]:
1625                     data = result[u"data"]
1626                     build_data = pd.Series({
1627                         u"metadata": pd.Series(
1628                             list(data[u"metadata"].values()),
1629                             index=list(data[u"metadata"].keys())
1630                         ),
1631                         u"suites": pd.Series(
1632                             list(data[u"suites"].values()),
1633                             index=list(data[u"suites"].keys())
1634                         ),
1635                         u"tests": pd.Series(
1636                             list(data[u"tests"].values()),
1637                             index=list(data[u"tests"].keys())
1638                         )
1639                     })
1640
1641                     if self._input_data.get(job, None) is None:
1642                         self._input_data[job] = pd.Series()
1643                     self._input_data[job][str(build_nr)] = build_data
1644
1645                     self._cfg.set_input_file_name(
1646                         job, build_nr, result[u"build"][u"file-name"])
1647
1648                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1649
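                # On Linux, ru_maxrss is reported in kilobytes, so dividing
                # by 1000 gives an approximate value in megabytes.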
1650                 mem_alloc = \
1651                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1652                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1653
1654         logging.info(u"Done.")
1655
1656     def process_local_file(self, local_file, job=u"local", build_nr=1,
1657                            replace=True):
1658         """Process local XML file given as a command-line parameter.
1659
1660         :param local_file: The file to process.
1661         :param job: Job name.
1662         :param build_nr: Build number.
1663         :param replace: If True, the information about jobs and builds is
1664             replaced by the new one; otherwise, the new jobs and builds are
1665             added.
1666         :type local_file: str
1667         :type job: str
1668         :type build_nr: int
1669         :type replace: bool
1670         :raises: PresentationError if an error occurs.
1671         """
1672         if not isfile(local_file):
1673             raise PresentationError(f"The file {local_file} does not exist.")
1674
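        # If the local file is named like <build_nr>.xml (e.g. 1234.xml), use
        # that number as the build number instead of the supplied default.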
1675         try:
1676             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1677         except (IndexError, ValueError):
1678             pass
1679
1680         build = {
1681             u"build": build_nr,
1682             u"status": u"failed",
1683             u"file-name": local_file
1684         }
1685         if replace:
1686             self._cfg.builds = dict()
1687         self._cfg.add_build(job, build)
1688
1689         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1690         data = self._parse_tests(job, build)
1691         if data is None:
1692             raise PresentationError(
1693                 f"Error occurred while parsing the file {local_file}"
1694             )
1695
1696         build_data = pd.Series({
1697             u"metadata": pd.Series(
1698                 list(data[u"metadata"].values()),
1699                 index=list(data[u"metadata"].keys())
1700             ),
1701             u"suites": pd.Series(
1702                 list(data[u"suites"].values()),
1703                 index=list(data[u"suites"].keys())
1704             ),
1705             u"tests": pd.Series(
1706                 list(data[u"tests"].values()),
1707                 index=list(data[u"tests"].keys())
1708             )
1709         })
1710
1711         if self._input_data.get(job, None) is None:
1712             self._input_data[job] = pd.Series()
1713         self._input_data[job][str(build_nr)] = build_data
1714
1715         self._cfg.set_input_state(job, build_nr, u"processed")
1716
1717     def process_local_directory(self, local_dir, replace=True):
1718         """Process a local directory with XML file(s). The directory is
1719         processed as a 'job' and the XML files in it as builds.
1720         If the given directory contains only sub-directories, these
1721         sub-directories are processed as jobs and the corresponding XML files
1722         as builds of their jobs.
1723
1724         :param local_dir: Local directory to process.
1725         :param replace: If True, the information about jobs and builds is
1726             replaced by the new one; otherwise, the new jobs and builds are
1727             added.
1728         :type local_dir: str
1729         :type replace: bool
1730         """
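        # Illustrative layouts (file and directory names are hypothetical):
        #
        #   local_dir/1.xml, local_dir/2.xml
        #       -> one job (local_dir) with builds 1 and 2
        #   local_dir/job_a/1.xml, local_dir/job_b/1.xml
        #       -> jobs job_a and job_b, each with its own builds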
1731         if not isdir(local_dir):
1732             raise PresentationError(
1733                 f"The directory {local_dir} does not exist."
1734             )
1735
1736         # Check if the given directory includes only files, or only directories
1737         _, dirnames, filenames = next(walk(local_dir))
1738
1739         if filenames and not dirnames:
1740             filenames.sort()
1741             # local_builds:
1742             # key: dir (job) name, value: list of file names (builds)
1743             local_builds = {
1744                 local_dir: [join(local_dir, name) for name in filenames]
1745             }
1746
1747         elif dirnames and not filenames:
1748             dirnames.sort()
1749             # local_builds:
1750             # key: dir (job) name, value: list of file names (builds)
1751             local_builds = dict()
1752             for dirname in dirnames:
1753                 builds = [
1754                     join(local_dir, dirname, name)
1755                     for name in listdir(join(local_dir, dirname))
1756                     if isfile(join(local_dir, dirname, name))
1757                 ]
1758                 if builds:
1759                     local_builds[dirname] = sorted(builds)
1760
1761         elif not filenames and not dirnames:
1762             raise PresentationError(f"The directory {local_dir} is empty.")
1763         else:
1764             raise PresentationError(
1765                 f"The directory {local_dir} can include only files or only "
1766                 f"directories, not both.\nThe directory {local_dir} includes "
1767                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1768             )
1769
1770         if replace:
1771             self._cfg.builds = dict()
1772
1773         for job, files in local_builds.items():
1774             for idx, local_file in enumerate(files):
1775                 self.process_local_file(local_file, job, idx + 1, replace=False)
1776
1777     @staticmethod
1778     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1779         """Return the index of the character which closes the tag.
1780 
1781         :param tag_filter: The string in which the end of the tag is searched.
1782         :param start: The index where the search starts.
1783         :param closer: The character which closes the tag.
1784         :type tag_filter: str
1785         :type start: int
1786         :type closer: str
1787         :returns: The index of the tag closer, or None if not found.
1788         :rtype: int
1789         """
1790         try:
1791             idx_opener = tag_filter.index(closer, start)
1792             return tag_filter.index(closer, idx_opener + 1)
1793         except ValueError:
1794             return None
1795
1796     @staticmethod
1797     def _condition(tag_filter):
1798         """Create a conditional statement from the given tag filter.
1799
1800         :param tag_filter: Filter based on tags from the element specification.
1801         :type tag_filter: str
1802         :returns: Conditional statement which can be evaluated.
1803         :rtype: str
1804         """
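        # Illustrative transformation: the filter "'64B' and '1T1C'" becomes
        # "'64B' in tags and '1T1C' in tags", which filter_data() later
        # eval()-uates with the test's tags bound to the name u"tags".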
1805         index = 0
1806         while True:
1807             index = InputData._end_of_tag(tag_filter, index)
1808             if index is None:
1809                 return tag_filter
1810             index += 1
1811             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1812
1813     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1814                     continue_on_error=False):
1815         """Filter required data from the given jobs and builds.
1816
1817         The output data structure is:
1818         - job 1
1819           - build 1
1820             - test (or suite) 1 ID:
1821               - param 1
1822               - param 2
1823               ...
1824               - param n
1825             ...
1826             - test (or suite) n ID:
1827             ...
1828           ...
1829           - build n
1830         ...
1831         - job n
1832
1833         :param element: Element which will use the filtered data.
1834         :param params: Parameters which will be included in the output. If None,
1835             all parameters are included.
1836         :param data: If not None, this data is used instead of data specified
1837             in the element.
1838         :param data_set: The set of data to be filtered: tests, suites,
1839             metadata.
1840         :param continue_on_error: Continue if an error occurs while reading
1841             the data; the item will then be empty.
1842         :type element: pandas.Series
1843         :type params: list
1844         :type data: dict
1845         :type data_set: str
1846         :type continue_on_error: bool
1847         :returns: Filtered data.
1848         :rtype: pandas.Series
1849         """
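        # Evaluation sketch (the tags are hypothetical): with the element
        # filter "'NDRPDR' and '64B'", cond becomes
        # "'NDRPDR' in tags and '64B' in tags", and a test passes when e.g.
        #   eval(cond, {u"tags": [u"NDRPDR", u"64B", u"1T1C"]})  # -> True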
1850
1851         try:
1852             if data_set == u"suites":
1853                 cond = u"True"
1854             elif element[u"filter"] in (u"all", u"template"):
1855                 cond = u"True"
1856             else:
1857                 cond = InputData._condition(element[u"filter"])
1858             logging.debug(f"   Filter: {cond}")
1859         except KeyError:
1860             logging.error(u"  No filter defined.")
1861             return None
1862
1863         if params is None:
1864             params = element.get(u"parameters", None)
1865             if params and u"type" not in params:
1866                 params.append(u"type")
1867
1868         data_to_filter = data if data else element[u"data"]
1869         data = pd.Series()
1870         try:
1871             for job, builds in data_to_filter.items():
1872                 data[job] = pd.Series()
1873                 for build in builds:
1874                     data[job][str(build)] = pd.Series()
1875                     try:
1876                         data_dict = dict(
1877                             self.data[job][str(build)][data_set].items())
1878                     except KeyError:
1879                         if continue_on_error:
1880                             continue
1881                         return None
1882
1883                     for test_id, test_data in data_dict.items():
1884                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1885                             data[job][str(build)][test_id] = pd.Series()
1886                             if params is None:
1887                                 for param, val in test_data.items():
1888                                     data[job][str(build)][test_id][param] = val
1889                             else:
1890                                 for param in params:
1891                                     try:
1892                                         data[job][str(build)][test_id][param] =\
1893                                             test_data[param]
1894                                     except KeyError:
1895                                         data[job][str(build)][test_id][param] =\
1896                                             u"No Data"
1897             return data
1898
1899         except (KeyError, IndexError, ValueError) as err:
1900             logging.error(
1901                 f"Missing mandatory parameter in the element specification: "
1902                 f"{repr(err)}"
1903             )
1904             return None
1905         except AttributeError as err:
1906             logging.error(repr(err))
1907             return None
1908         except SyntaxError as err:
1909             logging.error(
1910                 f"The filter {cond} is not correct. Check if all tags are "
1911                 f"enclosed by apostrophes.\n{repr(err)}"
1912             )
1913             return None
1914
1915     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1916                              continue_on_error=False):
1917         """Filter required data from the given jobs and builds.
1918
1919         The output data structure is:
1920         - job 1
1921           - build 1
1922             - test (or suite) 1 ID:
1923               - param 1
1924               - param 2
1925               ...
1926               - param n
1927             ...
1928             - test (or suite) n ID:
1929             ...
1930           ...
1931           - build n
1932         ...
1933         - job n
1934
1935         :param element: Element which will use the filtered data.
1936         :param params: Parameters which will be included in the output. If None,
1937             all parameters are included.
1938         :param data_set: The set of data to be filtered: tests, suites,
1939             metadata.
1940         :param continue_on_error: Continue if an error occurs while reading
1941             the data; the item will then be empty.
1942         :type element: pandas.Series
1943         :type params: list
1944         :type data_set: str
1945         :type continue_on_error: bool
1946         :returns: Filtered data.
1947         :rtype: pandas.Series
1948         """
1949
1950         include = element.get(u"include", None)
1951         if not include:
1952             logging.warning(u"No tests to include, skipping the element.")
1953             return None
1954
1955         if params is None:
1956             params = element.get(u"parameters", None)
1957             if params and u"type" not in params:
1958                 params.append(u"type")
1959
1960         cores = element.get(u"core", None)
1961         if cores:
1962             tests = list()
1963             for core in cores:
1964                 for test in include:
1965                     tests.append(test.format(core=core))
1966         else:
1967             tests = include
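        # Illustrative: with include = [u".*-{core}-ethip4-ip4base-mrr"]
        # (a hypothetical pattern) and core = [u"1c", u"2c"], tests becomes
        # two regular expressions, one per core, each later matched against
        # the test IDs.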
1968
1969         data = pd.Series()
1970         try:
1971             for job, builds in element[u"data"].items():
1972                 data[job] = pd.Series()
1973                 for build in builds:
1974                     data[job][str(build)] = pd.Series()
1975                     for test in tests:
1976                         try:
1977                             reg_ex = re.compile(str(test).lower())
1978                             for test_id in self.data[job][
1979                                     str(build)][data_set].keys():
1980                                 if re.match(reg_ex, str(test_id).lower()):
1981                                     test_data = self.data[job][
1982                                         str(build)][data_set][test_id]
1983                                     data[job][str(build)][test_id] = pd.Series()
1984                                     if params is None:
1985                                         for param, val in test_data.items():
1986                                             data[job][str(build)][test_id]\
1987                                                 [param] = val
1988                                     else:
1989                                         for param in params:
1990                                             try:
1991                                                 data[job][str(build)][
1992                                                     test_id][param] = \
1993                                                     test_data[param]
1994                                             except KeyError:
1995                                                 data[job][str(build)][
1996                                                     test_id][param] = u"No Data"
1997                         except KeyError as err:
1998                             if continue_on_error:
1999                                 logging.debug(repr(err))
2000                                 continue
2001                             logging.error(repr(err))
2002                             return None
2003             return data
2004
2005         except (KeyError, IndexError, ValueError) as err:
2006             logging.error(
2007                 f"Missing mandatory parameter in the element "
2008                 f"specification: {repr(err)}"
2009             )
2010             return None
2011         except AttributeError as err:
2012             logging.error(repr(err))
2013             return None
2014
2015     @staticmethod
2016     def merge_data(data):
2017         """Merge data from multiple jobs and builds into a simple data structure.
2018
2019         The output data structure is:
2020
2021         - test (suite) 1 ID:
2022           - param 1
2023           - param 2
2024           ...
2025           - param n
2026         ...
2027         - test (suite) n ID:
2028         ...
2029
2030         :param data: Data to merge.
2031         :type data: pandas.Series
2032         :returns: Merged data.
2033         :rtype: pandas.Series
2034         """
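        # Note: if the same test (suite) ID appears in more than one job or
        # build, the last occurrence processed overwrites the previous ones.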
2035
2036         logging.info(u"    Merging data ...")
2037
2038         merged_data = pd.Series()
2039         for builds in data.values:
2040             for item in builds.values:
2041                 for item_id, item_data in item.items():
2042                     merged_data[item_id] = item_data
2043         return merged_data
2044
2045     def print_all_oper_data(self):
2046         """Print all operational data to the console.
2047         """
2048
2049         tbl_hdr = (
2050             u"Name",
2051             u"Nr of Vectors",
2052             u"Nr of Packets",
2053             u"Suspends",
2054             u"Cycles per Packet",
2055             u"Average Vector Size"
2056         )
2057
2058         for job in self._input_data.values:
2059             for build in job.values:
2060                 for test_id, test_data in build[u"tests"].items():
2061                     print(f"{test_id}")
2062                     if test_data.get(u"show-run", None) is None:
2063                         continue
2064                     for dut_name, data in test_data[u"show-run"].items():
2065                         if data.get(u"threads", None) is None:
2066                             continue
2067                         print(f"Host IP: {data.get(u'host', '')}, "
2068                               f"Socket: {data.get(u'socket', '')}")
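                        # One table is printed per thread; the last column
                        # (Average Vector Size) is additionally averaged
                        # across the thread's rows.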
2069                         for thread_nr, thread in data[u"threads"].items():
2070                             txt_table = prettytable.PrettyTable(tbl_hdr)
2071                             avg = 0.0
2072                             for row in thread:
2073                                 txt_table.add_row(row)
2074                                 avg += row[-1]
2075                             if len(thread) == 0:
2076                                 avg = u""
2077                             else:
2078                                 avg = f", Average Vector Size per Node: " \
2079                                       f"{(avg / len(thread)):.2f}"
2080                             th_name = u"main" if thread_nr == 0 \
2081                                 else f"worker_{thread_nr}"
2082                             print(f"{dut_name}, {th_name}{avg}")
2083                             txt_table.float_format = u".2"
2084                             txt_table.align = u"r"
2085                             txt_table.align[u"Name"] = u"l"
2086                             print(f"{txt_table.get_string()}\n")