# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """

import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove, walk, listdir
from os.path import isfile, isdir, join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from json.decoder import JSONDecodeError

import hdrh.histogram
import hdrh.codec
import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file
from pal_errors import PresentationError


# Separator used in file names
SEPARATOR = u"__"


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            }
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        }
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    }
                }
            }

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            }

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            }

            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            }
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        }
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """
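
    # Example of reading one parsed NDRPDR result from the structure above
    # (illustrative; `test_id` is a hypothetical lowercase full test path):
    #
    #     ndr_lower = checker.data[u"tests"][test_id][
    #         u"throughput"][u"NDR"][u"LOWER"]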

    REGEX_PLR_RATE = re.compile(
        r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
        r'PLRsearch upper bound::?\s(\d+.\d+)'
    )
    REGEX_NDRPDR_RATE = re.compile(
        r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:\s(\d+.\d+).*\n'
        r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:\s(\d+.\d+)'
    )
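
    # Shape of the message text REGEX_NDRPDR_RATE above expects (hypothetical
    # fragment derived from the pattern itself, not copied from a real log):
    #
    #     NDR_LOWER: 12345678.9 <rest of line>
    #     <one intervening line>
    #     NDR_UPPER: 12345680.1 <rest of line>
    #     PDR_LOWER: 23456789.0 <rest of line>
    #     <one intervening line>
    #     PDR_UPPER: 23456790.2 <rest of line>
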
    REGEX_NDRPDR_GBPS = re.compile(
        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:.*,\s(\d+.\d+)'
    )
    REGEX_PERF_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    )
    REGEX_CPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
    )
    REGEX_PPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
    )
    REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')

    # Needed for CPS and PPS tests
    REGEX_NDRPDR_LAT_BASE = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
    )
    REGEX_NDRPDR_LAT = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
    )

    REGEX_VERSION_VPP = re.compile(
        r"(return STDOUT Version:\s*|"
        r"VPP Version:\s*|VPP version:\s*)(.*)"
    )
    REGEX_VERSION_DPDK = re.compile(
        r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
    )
    REGEX_TCP = re.compile(
        r'Total\s(rps|cps|throughput):\s(\d*).*$'
    )
    REGEX_MRR = re.compile(
        r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
        r'tx\s(\d*),\srx\s(\d*)'
    )
    REGEX_BMRR = re.compile(
        r'Maximum Receive Rate trial results .*: \[(.*)\]'
    )
    REGEX_RECONF_LOSS = re.compile(
        r'Packets lost due to reconfig: (\d*)'
    )
    REGEX_RECONF_TIME = re.compile(
        r'Implied time lost: (\d*.[\de-]*)'
    )
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
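
    # Strings the test-case regexes above are meant to match (hypothetical
    # examples, derived from the patterns themselves):
    #     REGEX_TC_TAG:      u"2T1C"   - a threads/cores tag
    #     REGEX_TC_NAME_OLD: u"-2t1c-" - old name infix (threads and cores)
    #     REGEX_TC_NAME_NEW: u"-1c-"   - new name infix (cores only)
    #     REGEX_TC_NUMBER:   u"tc01-"  - test-case number prefix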

    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        # Number of PAPI History messages found:
        # 0 - no message
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._conf_history_lookup_nr = 0

        self._sh_run_counter = 0

        # Test ID of currently processed test - the lowercase full path to
        # the test
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"timestamp": self._get_timestamp,
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            # TODO: Remove when not needed:
            u"teardown-vat-history": self._get_vat_history,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_data_from_mrr_test_msg(self, msg):
        """Get info from message of MRR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
        if not groups or groups.lastindex != 1:
            return u"Test Failed."

        try:
            data = groups.group(1).split(u", ")
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        out_str = u"["
        try:
            for item in data:
                out_str += f"{(float(item) / 1e6):.2f}, "
            return out_str[:-2] + u"]"
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

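    # Example of the transformation performed by _get_data_from_mrr_test_msg()
    # above (illustrative): an input message ending in
    # u"[10000000.0, 12000000.0]" yields u"[10.00, 12.00]", i.e. the bracketed
    # per-trial receive rates scaled from pps to Mpps.
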
    def _get_data_from_cps_test_msg(self, msg):
        """Get info from message of NDRPDR CPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 2:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
                f"2. {(float(groups.group(2)) / 1e6):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_pps_test_msg(self, msg):
        """Get info from message of NDRPDR PPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 4:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
                f"{float(groups.group(2)):5.2f}\n"
                f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
                f"{float(groups.group(4)):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_perf_test_msg(self, msg):
        """Get info from message of NDRPDR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
        if not groups or groups.lastindex != 10:
            return u"Test Failed."

        try:
            data = {
                u"ndr_low": float(groups.group(1)),
                u"ndr_low_b": float(groups.group(2)),
                u"pdr_low": float(groups.group(3)),
                u"pdr_low_b": float(groups.group(4)),
                u"pdr_lat_90_1": groups.group(5),
                u"pdr_lat_90_2": groups.group(6),
                u"pdr_lat_50_1": groups.group(7),
                u"pdr_lat_50_2": groups.group(8),
                u"pdr_lat_10_1": groups.group(9),
                u"pdr_lat_10_2": groups.group(10),
            }
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        def _process_lat(in_str_1, in_str_2):
            """Extract min, avg, max values from latency string.

            :param in_str_1: Latency string for one direction produced by robot
                framework.
            :param in_str_2: Latency string for second direction produced by
                robot framework.
            :type in_str_1: str
            :type in_str_2: str
            :returns: Processed latency string or None if a problem occurs.
            :rtype: tuple
            """
            in_list_1 = in_str_1.split('/', 3)
            in_list_2 = in_str_2.split('/', 3)

            if len(in_list_1) != 4 or len(in_list_2) != 4:
                return None

            # Pad the base64-encoded hdrh string to a multiple of four
            # characters, as required by the base64 decoder.
            in_list_1[3] += u"=" * ((4 - len(in_list_1[3]) % 4) % 4)
            try:
                hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
            except hdrh.codec.HdrLengthException:
                return None

            in_list_2[3] += u"=" * ((4 - len(in_list_2[3]) % 4) % 4)
            try:
                hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
            except hdrh.codec.HdrLengthException:
                return None

            if hdr_lat_1 and hdr_lat_2:
                hdr_lat = (
                    hdr_lat_1.get_value_at_percentile(50.0),
                    hdr_lat_1.get_value_at_percentile(90.0),
                    hdr_lat_1.get_value_at_percentile(99.0),
                    hdr_lat_2.get_value_at_percentile(50.0),
                    hdr_lat_2.get_value_at_percentile(90.0),
                    hdr_lat_2.get_value_at_percentile(99.0)
                )

                if all(hdr_lat):
                    return hdr_lat

            return None

        try:
            out_msg = (
                f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
                f"{data[u'ndr_low_b']:5.2f}"
                f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
                f"{data[u'pdr_low_b']:5.2f}"
            )
            latency = (
                _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
                _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
                _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
            )
            if all(latency):
                max_len = len(str(max((max(item) for item in latency))))
                max_len = 4 if max_len < 4 else max_len

                for idx, lat in enumerate(latency):
                    if not idx:
                        out_msg += u"\n"
                    out_msg += (
                        f"\n{idx + 3}. "
                        f"{lat[0]:{max_len}d} "
                        f"{lat[1]:{max_len}d} "
                        f"{lat[2]:{max_len}d}      "
                        f"{lat[3]:{max_len}d} "
                        f"{lat[4]:{max_len}d} "
                        f"{lat[5]:{max_len}d} "
                    )

            return out_msg

        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

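    # Illustrative sketch of the hdrh handling in _process_lat() above,
    # assuming `hdrh_b64` (hypothetical variable) holds the fourth
    # "/"-separated field of a latency string:
    #
    #     padded = hdrh_b64 + u"=" * ((4 - len(hdrh_b64) % 4) % 4)
    #     hist = hdrh.histogram.HdrHistogram.decode(padded)
    #     p50 = hist.get_value_at_percentile(50.0)
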
    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"return STDOUT Version:") or \
                msg.message.count(u"VPP Version:") or \
                msg.message.count(u"VPP version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        self._timestamp = msg.timestamp[:14]
        self._data[u"metadata"][u"generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        TODO: Remove when not needed.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          r"VAT command history:", u"",
                          msg.message, count=1).replace(u'\n', u' |br| ').\
                replace(u'"', u"'")

            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          r"PAPI command history:", u"",
                          msg.message, count=1).replace(u'\n', u' |br| ').\
                replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI
        command Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if not msg.message.count(u"stats runtime"):
            return

        # Temporary solution
        if self._sh_run_counter > 1:
            return

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = dict()

        groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""

        runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
                        replace(u"'", u'"').replace(u'b"', u'"').
                        replace(u'u"', u'"').split(u":", 1)[1])

        try:
            threads_nr = len(runtime[0][u"clocks"])
        except (IndexError, KeyError):
            return

        dut = u"DUT{nr}".format(
            nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)

        oper = {
            u"host": host,
            u"socket": sock,
            u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
        }

        for item in runtime:
            for idx in range(threads_nr):
                if item[u"vectors"][idx] > 0:
                    clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
                elif item[u"calls"][idx] > 0:
                    clocks = item[u"clocks"][idx] / item[u"calls"][idx]
                elif item[u"suspends"][idx] > 0:
                    clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
                else:
                    clocks = 0.0

                if item[u"calls"][idx] > 0:
                    vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
                else:
                    vectors_call = 0.0

                if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
                        int(item[u"suspends"][idx]):
                    oper[u"threads"][idx].append([
                        item[u"name"],
                        item[u"calls"][idx],
                        item[u"vectors"][idx],
                        item[u"suspends"][idx],
                        clocks,
                        vectors_call
                    ])

        self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)

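    # Shape of the per-DUT record stored above (illustrative; the host and
    # socket values are hypothetical):
    #
    #     {
    #         u"host": u"10.0.0.1",
    #         u"socket": u"/run/vpp/api.sock",
    #         u"threads": {0: [[name, calls, vectors, suspends,
    #                           clocks_per_item, vectors_per_call], ...], ...}
    #     }
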
    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_throughput_gbps(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
        test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        gbps = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_GBPS, msg)

        if groups is not None:
            try:
                gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
                gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
                gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
                gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return gbps, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0,
            u"avg": -1.0,
            u"max": -1.0,
            u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"LAT0": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR10": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR50": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR90": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
        }

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)
        if groups is None:
            groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
        if groups is None:
            return latency, u"FAIL"

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with corresponding keys; min, avg and max are
                floats, hdrh is a string.
            :rtype: dict
            :raises IndexError: If in_str does not have enough substrings.
            :raises ValueError: If a substring does not convert to float.
            """
            in_list = in_str.split('/', 3)

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

        try:
            latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
            latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
            latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
            latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
            if groups.lastindex == 4:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        try:
            latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
            latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
            latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
            latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
            latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
            latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
            latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
            latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
            if groups.lastindex == 12:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        # TODO: Remove when not needed
        latency[u"NDR10"] = {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        }
        latency[u"NDR50"] = {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        }
        latency[u"NDR90"] = {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        }
        try:
            latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
            latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
            latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
            latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
            latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
            latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
            latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
            latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
            latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
            latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
            latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
            latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
            latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
            latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
            return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        return latency, u"FAIL"

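    # The three accepted shapes above (illustrative summary): 4 groups match
    # REGEX_NDRPDR_LAT_BASE (NDR/PDR min/avg/max/hdrh only), 12 groups match
    # the full REGEX_NDRPDR_LAT (plus PDR10/PDR50/PDR90 and LAT0), and the
    # 18-group branch is kept for backward compatibility with an older
    # message format that also carried NDR10/NDR50/NDR90.
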
    @staticmethod
    def _get_hoststack_data(msg, tags):
        """Get data from the hoststack test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        result = dict()
        status = u"FAIL"

        msg = msg.replace(u"'", u'"').replace(u" ", u"")
        if u"LDPRELOAD" in tags:
            try:
                result = loads(msg)
                status = u"PASS"
            except JSONDecodeError:
                pass
        elif u"VPPECHO" in tags:
            try:
                msg_lst = msg.replace(u"}{", u"} {").split(u" ")
                result = dict(
                    client=loads(msg_lst[0]),
                    server=loads(msg_lst[1])
                )
                status = u"PASS"
            except (JSONDecodeError, IndexError):
                pass

        return result, status

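    # The two message shapes handled above (hypothetical minimal examples,
    # derived from the parsing logic rather than from real test output):
    #     LDPRELOAD: one JSON object, e.g. u"{'rate': 1}"
    #     VPPECHO:   two concatenated objects, e.g. u"{'a': 1}{'b': 2}",
    #                parsed into {"client": ..., "server": ...}
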
    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'*[', u' |br| *[').\
            replace(u"*", u"**").\
            replace(u' |br| *[', u'*[', 1)

        self._data[u"suites"][suite.longname.lower().
                              replace(u'"', u"'").
                              replace(u" ", u"_")] = {
                                  u"name": suite.name.lower(),
                                  u"doc": doc_str,
                                  u"parent": parent_name,
                                  u"level": len(suite.longname.split(u"."))
                              }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        self._sh_run_counter = 0

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            logging.debug(
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
                f"{name}"
            )
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(
            self.REGEX_TC_NUMBER, u"", longname.replace(u"snat", u"nat")
        )
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(
            self.REGEX_TC_NUMBER, u"", name.replace(u"snat", u"nat")
        )

        test_result[u"parent"] = test.parent.name.lower().\
            replace(u"snat", u"nat")
        test_result[u"tags"] = tags
        test_result[u"doc"] = test.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'[', u' |br| [').\
            replace(u' |br| [', u'[', 1)
        test_result[u"type"] = u"FUNC"
        test_result[u"status"] = test.status

        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
                    test_result[u"msg"] = self._get_data_from_pps_test_msg(
                        test.message).replace(u'\n', u' |br| '). \
                        replace(u'\r', u'').replace(u'"', u"'")
                elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                    test_result[u"msg"] = self._get_data_from_cps_test_msg(
                        test.message).replace(u'\n', u' |br| '). \
                        replace(u'\r', u'').replace(u'"', u"'")
                else:
                    test_result[u"msg"] = self._get_data_from_perf_test_msg(
                        test.message).replace(u'\n', u' |br| ').\
                        replace(u'\r', u'').replace(u'"', u"'")
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                test_result[u"msg"] = self._get_data_from_mrr_test_msg(
                    test.message).replace(u'\n', u' |br| ').\
                    replace(u'\r', u'').replace(u'"', u"'")
            else:
                test_result[u"msg"] = test.message.replace(u'\n', u' |br| ').\
                    replace(u'\r', u'').replace(u'"', u"'")
        else:
            test_result[u"msg"] = u"Test Failed."

        if u"PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
            if not groups:
                tag_count = 0
                tag_tc = str()
                for tag in test_result[u"tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_id = re.sub(
                        self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                        self._test_id, count=1
                    )
                    test_result[u"name"] = re.sub(
                        self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                        test_result[u"name"], count=1
                    )
                else:
                    test_result[u"status"] = u"FAIL"
                    self._data[u"tests"][self._test_id] = test_result
                    logging.debug(
                        f"The test {self._test_id} has none or more than one "
                        f"multi-threading tag.\n"
                        f"Tags: {test_result[u'tags']}"
                    )
                    return

        if test.status == u"PASS":
            logging.info(self._test_id)
            logging.info(tags)
            if u"DEVICETEST" in tags:
                test_result[u"type"] = u"DEVICETEST"
            elif u"NDRPDR" in tags:
                if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                    test_result[u"type"] = u"CPS"
                else:
                    test_result[u"type"] = u"NDRPDR"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"gbps"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput_gbps(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
                logging.info(test_result[u"throughput"])
                logging.info(test_result[u"gbps"])
                logging.info(test_result[u"latency"])
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                if u"MRR" in tags:
                    test_result[u"type"] = u"MRR"
                else:
                    test_result[u"type"] = u"BMRR"

                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [
                        float(item.strip()) for item in items_str.split(",")
                    ]
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"receive-rate"] = stats.avg
                    test_result[u"result"][u"receive-stdev"] = stats.stdev
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
                logging.info(test_result[u"result"][u"receive-rate"])
            elif u"SOAK" in tags:
                test_result[u"type"] = u"SOAK"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
            elif u"HOSTSTACK" in tags:
                test_result[u"type"] = u"HOSTSTACK"
                test_result[u"result"], test_result[u"status"] = \
                    self._get_hoststack_data(test.message, tags)
            elif u"TCP" in tags:
                test_result[u"type"] = u"TCP"
                groups = re.search(self.REGEX_TCP, test.message)
                test_result[u"result"] = int(groups.group(2))
            elif u"RECONF" in tags:
                test_result[u"type"] = u"RECONF"
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
            else:
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                return

        self._data[u"tests"][self._test_id] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == u"setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == u"teardown":
                self.visit_teardown_kw(keyword)
            else:
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count(u"Show Runtime On All Duts") or \
                test_kw.name.count(u"Show Runtime Counters On All Duts") or \
                test_kw.name.count(u"Vpp Show Runtime On All Duts"):
            self._msg_type = u"test-show-runtime"
            self._sh_run_counter += 1
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
                not self._version:
            self._msg_type = u"dpdk-version"
        elif setup_kw.name.count(u"Set Global Variable") \
                and not self._timestamp:
            self._msg_type = u"timestamp"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count(u"Show Vat History On All Duts"):
            # TODO: Remove when not needed:
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Dispatches the message to the parse
        method selected by the current message type (e.g. VPP version).

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """


class InputData:
    """Input data.

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """
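
    # Access pattern (illustrative sketch; the job name and build number are
    # hypothetical, and the builds must have been downloaded and parsed first):
    #
    #     in_data = InputData(spec)
    #     tests = in_data.tests(u"csit-vpp-perf-verify-job", u"123")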

    def __init__(self, spec):
        """Initialization.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data.
        :rtype: pandas.Series
        """
        return self._input_data

    def metadata(self, job, build):
        """Getter - metadata

        :param job: Job whose metadata we want.
        :param build: Build whose metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata
        :rtype: pandas.Series
        """
        return self.data[job][str(build)][u"metadata"]

    def suites(self, job, build):
        """Getter - suites

        :param job: Job whose suites we want.
        :param build: Build whose suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """
        return self.data[job][str(build)][u"suites"]

    def tests(self, job, build):
        """Getter - tests

        :param job: Job whose tests we want.
        :param build: Build whose tests we want.
        :type job: str
        :type build: str
        :returns: Tests.
        :rtype: pandas.Series
        """
        return self.data[job][str(build)][u"tests"]

    def _parse_tests(self, job, build):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of the job whose build output data will be
            processed.
        :param build: The build whose output data will be processed.
        :type job: str
        :type build: dict
        :returns: JSON data structure, or None if the file cannot be parsed.
        :rtype: dict
        """

        metadata = {
            u"job": job,
            u"build": build
        }

        with open(build[u"file-name"], u'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                logging.error(
                    f"Error occurred while parsing output.xml: {repr(err)}"
                )
                return None
        checker = ExecutionChecker(metadata, self._cfg.mapping,
                                   self._cfg.ignore)
        result.visit(checker)

        return checker.data

    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :param pid: PID of the process executing this method.
        :type job: str
        :type build: dict
        :type repeat: int
        :type pid: int
        :returns: Parsed data together with the state of processing and
            information about the processed build.
        :rtype: dict
        """

        logging.info(f"  Processing the job/build: {job}: {build[u'build']}")

        state = u"failed"
        success = False
        data = None
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid)
            if success:
                break
            do_repeat -= 1
        if not success:
            logging.error(
                f"Failed to download the input data file from the job {job}, "
                f"build {build[u'build']}, or the file is damaged. Skipped."
            )
        else:
            logging.info(f"    Processing data from build {build[u'build']}")
            data = self._parse_tests(job, build)
            if data is None:
                logging.error(
                    f"Input data file from the job {job}, build "
                    f"{build[u'build']} is damaged. Skipped."
                )
            else:
                state = u"processed"

            try:
                remove(build[u"file-name"])
            except OSError as err:
                logging.error(
                    f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
                )

        # If the time period (in days) is defined in the specification file,
        # remove all builds which are outside the time period.
        is_last = False
        timeperiod = self._cfg.input.get(u"time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get(u"metadata", None)
            if metadata:
                generated = metadata.get(u"generated", None)
                if generated:
                    generated = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = u"removed"
                        data = None
                        is_last = True
                        logging.info(
                            f"    The build {job}/{build[u'build']} is "
                            f"outdated and will be removed."
                        )
        logging.info(u"  Done.")

        return {
            u"data": data,
            u"state": state,
            u"job": job,
            u"build": build,
            u"last": is_last
        }

    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """

        logging.info(u"Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                if result[u"last"]:
                    break
                build_nr = result[u"build"][u"build"]

                if result[u"data"]:
                    data = result[u"data"]
                    build_data = pd.Series({
                        u"metadata": pd.Series(
                            list(data[u"metadata"].values()),
                            index=list(data[u"metadata"].keys())
                        ),
                        u"suites": pd.Series(
                            list(data[u"suites"].values()),
                            index=list(data[u"suites"].keys())
                        ),
                        u"tests": pd.Series(
                            list(data[u"tests"].values()),
                            index=list(data[u"tests"].keys())
                        )
                    })

                    if self._input_data.get(job, None) is None:
                        self._input_data[job] = pd.Series()
                    self._input_data[job][str(build_nr)] = build_data

                    self._cfg.set_input_file_name(
                        job, build_nr, result[u"build"][u"file-name"])

                self._cfg.set_input_state(job, build_nr, result[u"state"])

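                # ru_maxrss is reported in kilobytes on Linux, so dividing by
                # 1000 gives an approximate value in megabytes.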
                mem_alloc = \
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
                logging.info(f"Memory allocation: {mem_alloc:.0f}MB")

        logging.info(u"Done.")

    def process_local_file(self, local_file, job=u"local", build_nr=1,
                           replace=True):
        """Process local XML file given as a command-line parameter.

        :param local_file: The file to process.
        :param job: Job name.
        :param build_nr: Build number.
        :param replace: If True, the information about jobs and builds is
            replaced by the new one, otherwise the new jobs and builds are
            added.
        :type local_file: str
        :type job: str
        :type build_nr: int
        :type replace: bool
        :raises: PresentationError if an error occurs.
        """
        if not isfile(local_file):
            raise PresentationError(f"The file {local_file} does not exist.")

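        # If the file name stem is a number (e.g. 1.xml), use it as the build
        # number; otherwise keep the build_nr passed in by the caller.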
        try:
            build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
        except (IndexError, ValueError):
            pass

        build = {
            u"build": build_nr,
            u"status": u"failed",
            u"file-name": local_file
        }
        if replace:
            self._cfg.builds = dict()
        self._cfg.add_build(job, build)

        logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
        data = self._parse_tests(job, build)
        if data is None:
            raise PresentationError(
                f"Error occurred while parsing the file {local_file}"
            )

        build_data = pd.Series({
            u"metadata": pd.Series(
                list(data[u"metadata"].values()),
                index=list(data[u"metadata"].keys())
            ),
            u"suites": pd.Series(
                list(data[u"suites"].values()),
                index=list(data[u"suites"].keys())
            ),
            u"tests": pd.Series(
                list(data[u"tests"].values()),
                index=list(data[u"tests"].keys())
            )
        })

        if self._input_data.get(job, None) is None:
            self._input_data[job] = pd.Series()
        self._input_data[job][str(build_nr)] = build_data

        self._cfg.set_input_state(job, build_nr, u"processed")

    def process_local_directory(self, local_dir, replace=True):
        """Process a local directory with XML file(s). The directory is
        processed as a 'job' and the XML files in it as builds.
        If the given directory contains only sub-directories, these
        sub-directories are processed as jobs and the corresponding XML files
        as builds of their respective jobs.

        :param local_dir: Local directory to process.
        :param replace: If True, the information about jobs and builds is
            replaced by the new one, otherwise the new jobs and builds are
            added.
        :type local_dir: str
        :type replace: bool
        :raises: PresentationError if the directory does not exist, is empty,
            or mixes files and sub-directories.
        """
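        # Supported layouts (names are illustrative):
        #
        #     local_dir/1.xml, local_dir/2.xml             -> job "local_dir"
        #     local_dir/job_a/1.xml, local_dir/job_b/1.xml -> jobs "job_a"
        #                                                     and "job_b"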
        if not isdir(local_dir):
            raise PresentationError(
                f"The directory {local_dir} does not exist."
            )

        # Check if the given directory includes only files, or only directories
        _, dirnames, filenames = next(walk(local_dir))

        if filenames and not dirnames:
            filenames.sort()
            # local_builds:
            # key: dir (job) name, value: list of file names (builds)
            local_builds = {
                local_dir: [join(local_dir, name) for name in filenames]
            }

        elif dirnames and not filenames:
            dirnames.sort()
            # local_builds:
            # key: dir (job) name, value: list of file names (builds)
            local_builds = dict()
            for dirname in dirnames:
                builds = [
                    join(local_dir, dirname, name)
                    for name in listdir(join(local_dir, dirname))
                    if isfile(join(local_dir, dirname, name))
                ]
                if builds:
                    local_builds[dirname] = sorted(builds)

        elif not filenames and not dirnames:
            raise PresentationError(f"The directory {local_dir} is empty.")
        else:
            raise PresentationError(
                f"The directory {local_dir} can include only files or only "
                f"directories, not both.\nThe directory {local_dir} includes "
                f"file(s):\n{filenames}\nand directories:\n{dirnames}"
            )

        if replace:
            self._cfg.builds = dict()

        for job, files in local_builds.items():
            for idx, local_file in enumerate(files):
                self.process_local_file(local_file, job, idx + 1, replace=False)

    @staticmethod
    def _end_of_tag(tag_filter, start=0, closer=u"'"):
        """Return the index of the character in the string which closes the
        tag.

        :param tag_filter: The string in which the end of the tag is searched.
        :param start: The index where the search starts.
        :param closer: The character which closes the tag.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer, or None if no tag is found.
        :rtype: int
        """
        try:
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None

    @staticmethod
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
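
        Example (each quoted tag gets " in tags" appended after its closing
        quote):

            "'NDRPDR' and '1T1C'" -> "'NDRPDR' in tags and '1T1C' in tags"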
        """
        index = 0
        while True:
            index = InputData._end_of_tag(tag_filter, index)
            if index is None:
                return tag_filter
            index += 1
            tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]

    def filter_data(self, element, params=None, data=None, data_set=u"tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:
        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data: If not None, this data is used instead of data specified
            in the element.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """

        try:
            if data_set == u"suites":
                cond = u"True"
            elif element[u"filter"] in (u"all", u"template"):
                cond = u"True"
            else:
                cond = InputData._condition(element[u"filter"])
            logging.debug(f"   Filter: {cond}")
        except KeyError:
            logging.error(u"  No filter defined.")
            return None

        if params is None:
            params = element.get(u"parameters", None)
            if params:
                params.append(u"type")

        data_to_filter = data if data else element[u"data"]
        data = pd.Series()
        try:
            for job, builds in data_to_filter.items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_dict = dict(
                            self.data[job][str(build)][data_set].items())
                    except KeyError:
                        if continue_on_error:
                            continue
                        return None

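                    # Evaluate the condition generated by _condition() with
                    # the test's tags bound to the name "tags".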
                    for test_id, test_data in data_dict.items():
                        if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
                            data[job][str(build)][test_id] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_id][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_id][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_id][param] =\
                                            u"No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(
                f"Missing mandatory parameter in the element specification: "
                f"{repr(err)}"
            )
            return None
        except AttributeError as err:
            logging.error(repr(err))
            return None
        except SyntaxError as err:
            logging.error(
                f"The filter {cond} is not correct. Check if all tags are "
                f"enclosed by apostrophes.\n{repr(err)}"
            )
            return None

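    # Illustrative element specification consumed by filter_data() above (all
    # keys and values are hypothetical):
    #
    #     element = pd.Series({
    #         u"filter": u"'NDRPDR' and '1T1C'",
    #         u"parameters": [u"throughput"],
    #         u"data": {u"job-name": [1, 2]}
    #     })
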
    def filter_tests_by_name(self, element, params=None, data_set=u"tests",
                             continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:
        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """

        include = element.get(u"include", None)
        if not include:
            logging.warning(u"No tests to include, skipping the element.")
            return None

        if params is None:
            params = element.get(u"parameters", None)
            if params:
                params.append(u"type")

        data = pd.Series()
        try:
            for job, builds in element[u"data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    for test in include:
                        try:
                            reg_ex = re.compile(str(test).lower())
                            for test_id in self.data[job][
                                    str(build)][data_set].keys():
                                if reg_ex.match(str(test_id).lower()):
                                    test_data = self.data[job][
                                        str(build)][data_set][test_id]
                                    data[job][str(build)][test_id] = pd.Series()
                                    if params is None:
                                        for param, val in test_data.items():
                                            data[job][str(build)][test_id]\
                                                [param] = val
                                    else:
                                        for param in params:
                                            try:
                                                data[job][str(build)][
                                                    test_id][param] = \
                                                    test_data[param]
                                            except KeyError:
                                                data[job][str(build)][
                                                    test_id][param] = u"No Data"
                        except KeyError as err:
                            if continue_on_error:
                                logging.debug(repr(err))
                                continue
                            logging.error(repr(err))
                            return None
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(
                f"Missing mandatory parameter in the element "
                f"specification: {repr(err)}"
            )
            return None
        except AttributeError as err:
            logging.error(repr(err))
            return None

    @staticmethod
    def merge_data(data):
        """Merge data from multiple jobs and builds into a simple data
        structure.

        The output data structure is:

        - test (suite) 1 ID:
          - param 1
          - param 2
          ...
          - param n
        ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
        """

        logging.info(u"    Merging data ...")

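        # When the same item ID appears in more than one build, the entry
        # from the later build overwrites the earlier one.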
        merged_data = pd.Series()
        for builds in data.values:
            for item in builds.values:
                for item_id, item_data in item.items():
                    merged_data[item_id] = item_data
        return merged_data

    def print_all_oper_data(self):
        """Print all operational data to console."""

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for job in self._input_data.values:
            for build in job.values:
                for test_id, test_data in build[u"tests"].items():
                    print(f"{test_id}")
                    if test_data.get(u"show-run", None) is None:
                        continue
                    for dut_name, data in test_data[u"show-run"].items():
                        if data.get(u"threads", None) is None:
                            continue
                        print(f"Host IP: {data.get(u'host', '')}, "
                              f"Socket: {data.get(u'socket', '')}")
                        for thread_nr, thread in data[u"threads"].items():
                            txt_table = prettytable.PrettyTable(tbl_hdr)
                            avg = 0.0
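                            # Sum the last column (average vector size) over
                            # all rows (nodes) in this thread's table.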
                            for row in thread:
                                txt_table.add_row(row)
                                avg += row[-1]
                            if len(thread) == 0:
                                avg = u""
                            else:
                                avg = f", Average Vector Size per Node: " \
                                      f"{(avg / len(thread)):.2f}"
                            th_name = u"main" if thread_nr == 0 \
                                else f"worker_{thread_nr}"
                            print(f"{dut_name}, {th_name}{avg}")
                            txt_table.float_format = u".2"
                            txt_table.align = u"r"
                            txt_table.align[u"Name"] = u"l"
                            print(f"{txt_table.get_string()}\n")