resources/tools/presentation/input_data_parser.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data,
19 - filter the data using tags.
20 """
21
22 import re
23 import copy
24 import resource
25 import logging
26
27 from collections import OrderedDict
28 from os import remove, walk, listdir
29 from os.path import isfile, isdir, join
30 from datetime import datetime as dt
31 from datetime import timedelta
32 from json import loads
33 from json.decoder import JSONDecodeError
34
35 import hdrh.histogram
36 import hdrh.codec
37 import prettytable
38 import pandas as pd
39
40 from robot.api import ExecutionResult, ResultVisitor
41 from robot import errors
42
43 from resources.libraries.python import jumpavg
44 from input_data_files import download_and_unzip_data_file
45 from pal_errors import PresentationError
46
47
48 # Separator used in file names
49 SEPARATOR = u"__"
50
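# A minimal sketch of how this separator composes file names (the job and
# build values below are hypothetical; the real naming is defined by the
# download code):
#
# >>> SEPARATOR.join((u"csit-vpp-perf-report", u"123", u"output.xml"))
# 'csit-vpp-perf-report__123__output.xml'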
51
52 class ExecutionChecker(ResultVisitor):
53     """Class to traverse through the test suite structure.
54
55     The functionality implemented in this class generates a JSON structure:
56
57     Performance tests:
58
59     {
60         "metadata": {
61             "generated": "Timestamp",
62             "version": "SUT version",
63             "job": "Jenkins job name",
64             "build": "Information about the build"
65         },
66         "suites": {
67             "Suite long name 1": {
68                 "name": Suite name,
69                 "doc": "Suite 1 documentation",
70                 "parent": "Suite 1 parent",
71                 "level": "Level of the suite in the suite hierarchy"
72             }
73             "Suite long name N": {
74                 "name": Suite name,
75                 "doc": "Suite N documentation",
76                 "parent": "Suite 2 parent",
77                 "level": "Level of the suite in the suite hierarchy"
78             }
79         }
80         "tests": {
81             # NDRPDR tests:
82             "ID": {
83                 "name": "Test name",
84                 "parent": "Name of the parent of the test",
85                 "doc": "Test documentation",
86                 "msg": "Test message",
87                 "conf-history": "DUT1 and DUT2 VAT History",
88                 "show-run": "Show Run",
89                 "tags": ["tag 1", "tag 2", "tag n"],
90                 "type": "NDRPDR",
91                 "status": "PASS" | "FAIL",
92                 "throughput": {
93                     "NDR": {
94                         "LOWER": float,
95                         "UPPER": float
96                     },
97                     "PDR": {
98                         "LOWER": float,
99                         "UPPER": float
100                     }
101                 },
102                 "latency": {
103                     "NDR": {
104                         "direction1": {
105                             "min": float,
106                             "avg": float,
107                             "max": float,
108                             "hdrh": str
109                         },
110                         "direction2": {
111                             "min": float,
112                             "avg": float,
113                             "max": float,
114                             "hdrh": str
115                         }
116                     },
117                     "PDR": {
118                         "direction1": {
119                             "min": float,
120                             "avg": float,
121                             "max": float,
122                             "hdrh": str
123                         },
124                         "direction2": {
125                             "min": float,
126                             "avg": float,
127                             "max": float,
128                             "hdrh": str
129                         }
130                     }
131                 }
132             },
133
134             # TCP tests:
135             "ID": {
136                 "name": "Test name",
137                 "parent": "Name of the parent of the test",
138                 "doc": "Test documentation",
139                 "msg": "Test message",
140                 "tags": ["tag 1", "tag 2", "tag n"],
141                 "type": "TCP",
142                 "status": "PASS" | "FAIL",
143                 "result": int
144             },
145
146             # MRR, BMRR tests:
147             "ID": {
148                 "name": "Test name",
149                 "parent": "Name of the parent of the test",
150                 "doc": "Test documentation",
151                 "msg": "Test message",
152                 "tags": ["tag 1", "tag 2", "tag n"],
153                 "type": "MRR" | "BMRR",
154                 "status": "PASS" | "FAIL",
155                 "result": {
156                     "receive-rate": float,
157                     # Average of a list, computed using AvgStdevStats.
158                     # In CSIT-1180, replace with List[float].
159                 }
160             },
161
162             "ID" {
163                 # next test
164             }
165         }
166     }
167
168
169     Functional tests:
170
171     {
172         "metadata": {  # Optional
173             "version": "VPP version",
174             "job": "Jenkins job name",
175             "build": "Information about the build"
176         },
177         "suites": {
178             "Suite name 1": {
179                 "doc": "Suite 1 documentation",
180                 "parent": "Suite 1 parent",
181                 "level": "Level of the suite in the suite hierarchy"
182             },
183             "Suite name N": {
184                 "doc": "Suite N documentation",
185                 "parent": "Suite N parent",
186                 "level": "Level of the suite in the suite hierarchy"
187             }
188         },
189         "tests": {
190             "ID": {
191                 "name": "Test name",
192                 "parent": "Name of the parent of the test",
193                 "doc": "Test documentation",
194                 "msg": "Test message",
195                 "tags": ["tag 1", "tag 2", "tag n"],
196                 "conf-history": "DUT1 and DUT2 VAT History",
197                 "show-run": "Show Run",
198                 "status": "PASS" | "FAIL"
199             },
200             "ID" {
201                 # next test
202             }
203         }
204     }
205
206     .. note:: ID is the lowercase full path to the test.
207     """
208
209     REGEX_PLR_RATE = re.compile(
210         r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
211         r'PLRsearch upper bound::?\s(\d+.\d+)'
212     )
213     REGEX_NDRPDR_RATE = re.compile(
214         r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
215         r'NDR_UPPER:\s(\d+.\d+).*\n'
216         r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
217         r'PDR_UPPER:\s(\d+.\d+)'
218     )
219     REGEX_NDRPDR_GBPS = re.compile(
220         r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
221         r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
222         r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
223         r'PDR_UPPER:.*,\s(\d+.\d+)'
224     )
225     REGEX_PERF_MSG_INFO = re.compile(
226         r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
227         r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
228         r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
229         r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
230         r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
231     )
232     REGEX_CPS_MSG_INFO = re.compile(
233         r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
234         r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
235     )
236     REGEX_PPS_MSG_INFO = re.compile(
237         r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
238         r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
239     )
240     REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
241
242     # Needed for CPS and PPS tests
243     REGEX_NDRPDR_LAT_BASE = re.compile(
244         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
245         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
246     )
247     REGEX_NDRPDR_LAT = re.compile(
248         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
249         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
250         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
251         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
252         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
253         r'Latency.*\[\'(.*)\', \'(.*)\'\]'
254     )
255
256     REGEX_VERSION_VPP = re.compile(
257         r"(return STDOUT Version:\s*|"
258         r"VPP Version:\s*|VPP version:\s*)(.*)"
259     )
260     REGEX_VERSION_DPDK = re.compile(
261         r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
262     )
263     REGEX_TCP = re.compile(
264         r'Total\s(rps|cps|throughput):\s(\d*).*$'
265     )
266     REGEX_MRR = re.compile(
267         r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
268         r'tx\s(\d*),\srx\s(\d*)'
269     )
270     REGEX_BMRR = re.compile(
271         r'.*trial results.*: \[(.*)\]'
272     )
273     REGEX_RECONF_LOSS = re.compile(
274         r'Packets lost due to reconfig: (\d*)'
275     )
276     REGEX_RECONF_TIME = re.compile(
277         r'Implied time lost: (\d*.[\de-]*)'
278     )
279     REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
280
281     REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
282
283     REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
284
285     REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
286
287     REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
288
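    # Illustrative sketch of how the test-case regexes above are applied in
    # start_test() (the test names below are hypothetical):
    #
    # >>> re.sub(ExecutionChecker.REGEX_TC_NUMBER, u"",
    # ...        u"tc01-64b-1c-ethip4-ip4base-ndrpdr")
    # '64b-1c-ethip4-ip4base-ndrpdr'
    # >>> re.sub(ExecutionChecker.REGEX_TC_NAME_NEW, u"-2t1c-",
    # ...        u"64b-1c-ethip4-ip4base-ndrpdr", count=1)
    # '64b-2t1c-ethip4-ip4base-ndrpdr'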
289     def __init__(self, metadata, mapping, ignore):
290         """Initialisation.
291
292         :param metadata: Key-value pairs to be included in "metadata" part of
293             JSON structure.
294         :param mapping: Mapping of the old names of test cases to the new
295             (actual) ones.
296         :param ignore: List of TCs to be ignored.
297         :type metadata: dict
298         :type mapping: dict
299         :type ignore: list
300         """
301
302         # Type of message to parse out from the test messages
303         self._msg_type = None
304
305         # VPP version
306         self._version = None
307
308         # Timestamp
309         self._timestamp = None
310
311         # Testbed. The testbed is identified by TG node IP address.
312         self._testbed = None
313
314         # Mapping of TCs long names
315         self._mapping = mapping
316
317         # Ignore list
318         self._ignore = ignore
319
320         # Number of PAPI History messages found:
321         # 0 - no message
322         # 1 - PAPI History of DUT1
323         # 2 - PAPI History of DUT2
324         self._conf_history_lookup_nr = 0
325
326         self._sh_run_counter = 0
327
328         # Test ID of the currently processed test - the lowercase full path
329         # to the test
330         self._test_id = None
331
332         # The main data structure
333         self._data = {
334             u"metadata": OrderedDict(),
335             u"suites": OrderedDict(),
336             u"tests": OrderedDict()
337         }
338
339         # Save the provided metadata
340         for key, val in metadata.items():
341             self._data[u"metadata"][key] = val
342
343         # Dictionary defining the methods used to parse different types of
344         # messages
345         self.parse_msg = {
346             u"timestamp": self._get_timestamp,
347             u"vpp-version": self._get_vpp_version,
348             u"dpdk-version": self._get_dpdk_version,
349             # TODO: Remove when not needed:
350             u"teardown-vat-history": self._get_vat_history,
351             u"teardown-papi-history": self._get_papi_history,
352             u"test-show-runtime": self._get_show_run,
353             u"testbed": self._get_testbed
354         }
355
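    # The dispatch table above works together with start_message(): the
    # keyword visitors set self._msg_type and each subsequent Robot message
    # object is routed to the matching parser. A minimal sketch (metadata
    # values are hypothetical):
    #
    # >>> checker = ExecutionChecker(metadata={}, mapping={}, ignore=[])
    # >>> checker._msg_type = u"timestamp"  # normally set by start_setup_kw
    # >>> checker.parse_msg[checker._msg_type]  # doctest: +ELLIPSIS
    # <bound method ExecutionChecker._get_timestamp of ...>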
356     @property
357     def data(self):
358         """Getter - Data parsed from the XML file.
359
360         :returns: Data parsed from the XML file.
361         :rtype: dict
362         """
363         return self._data
364
365     def _get_data_from_mrr_test_msg(self, msg):
366         """Get info from message of MRR performance tests.
367
368         :param msg: Message to be processed.
369         :type msg: str
370         :returns: Processed message or "Test Failed." if a problem occurs.
371         :rtype: str
372         """
373
374         groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
375         if not groups or groups.lastindex != 1:
376             return u"Test Failed."
377
378         try:
379             data = groups.group(1).split(u", ")
380         except (AttributeError, IndexError, ValueError, KeyError):
381             return u"Test Failed."
382
383         out_str = u"["
384         try:
385             for item in data:
386                 out_str += f"{(float(item) / 1e6):.2f}, "
387             return out_str[:-2] + u"]"
388         except (AttributeError, IndexError, ValueError, KeyError):
389             return u"Test Failed."
390
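    # A minimal usage sketch (the message text is hypothetical; only the
    # bracketed list of per-trial packet rates matters to the regex):
    #
    # >>> checker = ExecutionChecker(metadata={}, mapping={}, ignore=[])
    # >>> checker._get_data_from_mrr_test_msg(
    # ...     u"Maximum Receive Rate trial results [10000000.0, 12500000.0]")
    # '[10.00, 12.50]'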
391     def _get_data_from_cps_test_msg(self, msg):
392         """Get info from message of NDRPDR CPS tests.
393
394         :param msg: Message to be processed.
395         :type msg: str
396         :returns: Processed message or "Test Failed." if a problem occurs.
397         :rtype: str
398         """
399
400         groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
401         if not groups or groups.lastindex != 2:
402             return u"Test Failed."
403
404         try:
405             return (
406                 f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
407                 f"2. {(float(groups.group(2)) / 1e6):5.2f}"
408             )
409         except (AttributeError, IndexError, ValueError, KeyError):
410             return u"Test Failed."
411
412     def _get_data_from_pps_test_msg(self, msg):
413         """Get info from message of NDRPDR PPS tests.
414
415         :param msg: Message to be processed.
416         :type msg: str
417         :returns: Processed message or "Test Failed." if a problem occurs.
418         :rtype: str
419         """
420
421         groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
422         if not groups or groups.lastindex != 4:
423             return u"Test Failed."
424
425         try:
426             return (
427                 f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
428                 f"{float(groups.group(2)):5.2f}\n"
429                 f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
430                 f"{float(groups.group(4)):5.2f}"
431             )
432         except (AttributeError, IndexError, ValueError, KeyError):
433             return u"Test Failed."
434
435     def _get_data_from_perf_test_msg(self, msg):
436         """Get info from message of NDRPDR performance tests.
437
438         :param msg: Message to be processed.
439         :type msg: str
440         :returns: Processed message or "Test Failed." if a problem occurs.
441         :rtype: str
442         """
443
444         groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
445         if not groups or groups.lastindex != 10:
446             return u"Test Failed."
447
448         try:
449             data = {
450                 u"ndr_low": float(groups.group(1)),
451                 u"ndr_low_b": float(groups.group(2)),
452                 u"pdr_low": float(groups.group(3)),
453                 u"pdr_low_b": float(groups.group(4)),
454                 u"pdr_lat_90_1": groups.group(5),
455                 u"pdr_lat_90_2": groups.group(6),
456                 u"pdr_lat_50_1": groups.group(7),
457                 u"pdr_lat_50_2": groups.group(8),
458                 u"pdr_lat_10_1": groups.group(9),
459                 u"pdr_lat_10_2": groups.group(10),
460             }
461         except (AttributeError, IndexError, ValueError, KeyError):
462             return u"Test Failed."
463
464         def _process_lat(in_str_1, in_str_2):
465             """Extract min, avg, max values from latency string.
466
467             :param in_str_1: Latency string for one direction produced by robot
468                 framework.
469             :param in_str_2: Latency string for second direction produced by
470                 robot framework.
471             :type in_str_1: str
472             :type in_str_2: str
473             :returns: Tuple of latency percentiles or None if a problem occurs.
474             :rtype: tuple
475             """
476             in_list_1 = in_str_1.split('/', 3)
477             in_list_2 = in_str_2.split('/', 3)
478
479             if len(in_list_1) != 4 or len(in_list_2) != 4:
480                 return None
481
482             in_list_1[3] += u"=" * ((4 - len(in_list_1[3]) % 4) % 4)
483             try:
484                 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
485             except hdrh.codec.HdrLengthException:
486                 return None
487
488             in_list_2[3] += u"=" * ((4 - len(in_list_2[3]) % 4) % 4)
489             try:
490                 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
491             except hdrh.codec.HdrLengthException:
492                 return None
493
494             if hdr_lat_1 and hdr_lat_2:
495                 hdr_lat = (
496                     hdr_lat_1.get_value_at_percentile(50.0),
497                     hdr_lat_1.get_value_at_percentile(90.0),
498                     hdr_lat_1.get_value_at_percentile(99.0),
499                     hdr_lat_2.get_value_at_percentile(50.0),
500                     hdr_lat_2.get_value_at_percentile(90.0),
501                     hdr_lat_2.get_value_at_percentile(99.0)
502                 )
503
504                 if all(hdr_lat):
505                     return hdr_lat
506
507             return None
508
509         try:
510             out_msg = (
511                 f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
512                 f"{data[u'ndr_low_b']:5.2f}"
513                 f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
514                 f"{data[u'pdr_low_b']:5.2f}"
515             )
516             latency = (
517                 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
518                 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
519                 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
520             )
521             if all(latency):
522                 max_len = len(str(max((max(item) for item in latency))))
523                 max_len = 4 if max_len < 4 else max_len
524
525                 for idx, lat in enumerate(latency):
526                     if not idx:
527                         out_msg += u"\n"
528                     out_msg += (
529                         f"\n{idx + 3}. "
530                         f"{lat[0]:{max_len}d} "
531                         f"{lat[1]:{max_len}d} "
532                         f"{lat[2]:{max_len}d}      "
533                         f"{lat[3]:{max_len}d} "
534                         f"{lat[4]:{max_len}d} "
535                         f"{lat[5]:{max_len}d} "
536                     )
537
538             return out_msg
539
540         except (AttributeError, IndexError, ValueError, KeyError):
541             return u"Test Failed."
542
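    # The hdrh part of a latency string is base64 without padding;
    # _process_lat() above pads it to a multiple of four characters before
    # decoding. A sketch with a hypothetical payload:
    #
    # >>> payload = u"HISTFAAAACl4"[:-1]     # length 11, 11 % 4 == 3
    # >>> payload += u"=" * ((4 - len(payload) % 4) % 4)
    # >>> len(payload) % 4
    # 0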
543     def _get_testbed(self, msg):
544         """Called when extraction of testbed IP is required.
545         The testbed is identified by TG node IP address.
546
547         :param msg: Message to process.
548         :type msg: Message
549         :returns: Nothing.
550         """
551
552         if msg.message.count(u"Setup of TG node") or \
553                 msg.message.count(u"Setup of node TG host"):
554             reg_tg_ip = re.compile(
555                 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
556             try:
557                 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
558             except (KeyError, ValueError, IndexError, AttributeError):
559                 pass
560             finally:
561                 self._data[u"metadata"][u"testbed"] = self._testbed
562                 self._msg_type = None
563
564     def _get_vpp_version(self, msg):
565         """Called when extraction of VPP version is required.
566
567         :param msg: Message to process.
568         :type msg: Message
569         :returns: Nothing.
570         """
571
572         if msg.message.count(u"return STDOUT Version:") or \
573                 msg.message.count(u"VPP Version:") or \
574                 msg.message.count(u"VPP version:"):
575             self._version = str(
576                 re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
577             )
578             self._data[u"metadata"][u"version"] = self._version
579             self._msg_type = None
580
581     def _get_dpdk_version(self, msg):
582         """Called when extraction of DPDK version is required.
583
584         :param msg: Message to process.
585         :type msg: Message
586         :returns: Nothing.
587         """
588
589         if msg.message.count(u"DPDK Version:"):
590             try:
591                 self._version = str(re.search(
592                     self.REGEX_VERSION_DPDK, msg.message).group(2))
593                 self._data[u"metadata"][u"version"] = self._version
594             except IndexError:
595                 pass
596             finally:
597                 self._msg_type = None
598
599     def _get_timestamp(self, msg):
600         """Called when extraction of timestamp is required.
601
602         :param msg: Message to process.
603         :type msg: Message
604         :returns: Nothing.
605         """
606
607         self._timestamp = msg.timestamp[:14]
608         self._data[u"metadata"][u"generated"] = self._timestamp
609         self._msg_type = None
610
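    # Robot message timestamps look like u"20210401 12:34:56.789"
    # (hypothetical value); the [:14] slice above keeps the date and the
    # hours and minutes:
    #
    # >>> u"20210401 12:34:56.789"[:14]
    # '20210401 12:34'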
611     def _get_vat_history(self, msg):
612         """Called when extraction of VAT command history is required.
613
614         TODO: Remove when not needed.
615
616         :param msg: Message to process.
617         :type msg: Message
618         :returns: Nothing.
619         """
620         if msg.message.count(u"VAT command history:"):
621             self._conf_history_lookup_nr += 1
622             if self._conf_history_lookup_nr == 1:
623                 self._data[u"tests"][self._test_id][u"conf-history"] = str()
624             else:
625                 self._msg_type = None
626             text = re.sub(
627                 r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} VAT command history:",
628                 u"",
629                 msg.message,
630                 count=1
631             ).replace(u'\n', u' |br| ').replace(u'"', u"'")
632
633             self._data[u"tests"][self._test_id][u"conf-history"] += (
634                 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
635             )
636
637     def _get_papi_history(self, msg):
638         """Called when extraction of PAPI command history is required.
639
640         :param msg: Message to process.
641         :type msg: Message
642         :returns: Nothing.
643         """
644         if msg.message.count(u"PAPI command history:"):
645             self._conf_history_lookup_nr += 1
646             if self._conf_history_lookup_nr == 1:
647                 self._data[u"tests"][self._test_id][u"conf-history"] = str()
648             else:
649                 self._msg_type = None
650             text = re.sub(
651                 r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
652                 u"",
653                 msg.message,
654                 count=1
655             ).replace(u'\n', u' |br| ').replace(u'"', u"'")
656             self._data[u"tests"][self._test_id][u"conf-history"] += (
657                 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
658             )
659
660     def _get_show_run(self, msg):
661         """Called when extraction of VPP operational data (output of CLI command
662         Show Runtime) is required.
663
664         :param msg: Message to process.
665         :type msg: Message
666         :returns: Nothing.
667         """
668
669         if not msg.message.count(u"stats runtime"):
670             return
671
672         # Temporary solution
673         if self._sh_run_counter > 1:
674             return
675
676         if u"show-run" not in self._data[u"tests"][self._test_id].keys():
677             self._data[u"tests"][self._test_id][u"show-run"] = dict()
678
679         groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
680         if not groups:
681             return
682         try:
683             host = groups.group(1)
684         except (AttributeError, IndexError):
685             host = u""
686         try:
687             sock = groups.group(2)
688         except (AttributeError, IndexError):
689             sock = u""
690
691         runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
692                         replace(u"'", u'"').replace(u'b"', u'"').
693                         replace(u'u"', u'"').split(u":", 1)[1])
694
695         try:
696             threads_nr = len(runtime[0][u"clocks"])
697         except (IndexError, KeyError):
698             return
699
700         dut = u"DUT{nr}".format(
701             nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
702
703         oper = {
704             u"host": host,
705             u"socket": sock,
706             u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
707         }
708
709         for item in runtime:
710             for idx in range(threads_nr):
711                 if item[u"vectors"][idx] > 0:
712                     clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
713                 elif item[u"calls"][idx] > 0:
714                     clocks = item[u"clocks"][idx] / item[u"calls"][idx]
715                 elif item[u"suspends"][idx] > 0:
716                     clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
717                 else:
718                     clocks = 0.0
719
720                 if item[u"calls"][idx] > 0:
721                     vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
722                 else:
723                     vectors_call = 0.0
724
725                 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
726                         int(item[u"suspends"][idx]):
727                     oper[u"threads"][idx].append([
728                         item[u"name"],
729                         item[u"calls"][idx],
730                         item[u"vectors"][idx],
731                         item[u"suspends"][idx],
732                         clocks,
733                         vectors_call
734                     ])
735
736         self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
737
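    # Sketch of the per-node statistics derived above from one runtime
    # entry (all numbers are hypothetical):
    #
    # >>> item = {u"name": u"ip4-input", u"calls": [100], u"vectors": [2560],
    # ...         u"suspends": [0], u"clocks": [256000]}
    # >>> item[u"clocks"][0] / item[u"vectors"][0]   # clocks per packet
    # 100.0
    # >>> item[u"vectors"][0] / item[u"calls"][0]    # vectors per call
    # 25.6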
738     def _get_ndrpdr_throughput(self, msg):
739         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
740         message.
741
742         :param msg: The test message to be parsed.
743         :type msg: str
744         :returns: Parsed data as a dict and the status (PASS/FAIL).
745         :rtype: tuple(dict, str)
746         """
747
748         throughput = {
749             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
750             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
751         }
752         status = u"FAIL"
753         groups = re.search(self.REGEX_NDRPDR_RATE, msg)
754
755         if groups is not None:
756             try:
757                 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
758                 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
759                 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
760                 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
761                 status = u"PASS"
762             except (IndexError, ValueError):
763                 pass
764
765         return throughput, status
766
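    # A minimal usage sketch; the message is hypothetical but follows the
    # line layout expected by REGEX_NDRPDR_RATE:
    #
    # >>> checker = ExecutionChecker(metadata={}, mapping={}, ignore=[])
    # >>> msg = (u"NDR_LOWER: 1.0 pps\nx\nNDR_UPPER: 2.0 pps\n"
    # ...        u"PDR_LOWER: 3.0 pps\nx\nPDR_UPPER: 4.0 pps")
    # >>> checker._get_ndrpdr_throughput(msg)
    # ({'NDR': {'LOWER': 1.0, 'UPPER': 2.0}, 'PDR': {'LOWER': 3.0, 'UPPER': 4.0}}, 'PASS')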
767     def _get_ndrpdr_throughput_gbps(self, msg):
768         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
769         test message.
770
771         :param msg: The test message to be parsed.
772         :type msg: str
773         :returns: Parsed data as a dict and the status (PASS/FAIL).
774         :rtype: tuple(dict, str)
775         """
776
777         gbps = {
778             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
779             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
780         }
781         status = u"FAIL"
782         groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
783
784         if groups is not None:
785             try:
786                 gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
787                 gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
788                 gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
789                 gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
790                 status = u"PASS"
791             except (IndexError, ValueError):
792                 pass
793
794         return gbps, status
795
796     def _get_plr_throughput(self, msg):
797         """Get PLRsearch lower bound and PLRsearch upper bound from the test
798         message.
799
800         :param msg: The test message to be parsed.
801         :type msg: str
802         :returns: Parsed data as a dict and the status (PASS/FAIL).
803         :rtype: tuple(dict, str)
804         """
805
806         throughput = {
807             u"LOWER": -1.0,
808             u"UPPER": -1.0
809         }
810         status = u"FAIL"
811         groups = re.search(self.REGEX_PLR_RATE, msg)
812
813         if groups is not None:
814             try:
815                 throughput[u"LOWER"] = float(groups.group(1))
816                 throughput[u"UPPER"] = float(groups.group(2))
817                 status = u"PASS"
818             except (IndexError, ValueError):
819                 pass
820
821         return throughput, status
822
823     def _get_ndrpdr_latency(self, msg):
824         """Get LATENCY from the test message.
825
826         :param msg: The test message to be parsed.
827         :type msg: str
828         :returns: Parsed data as a dict and the status (PASS/FAIL).
829         :rtype: tuple(dict, str)
830         """
831         latency_default = {
832             u"min": -1.0,
833             u"avg": -1.0,
834             u"max": -1.0,
835             u"hdrh": u""
836         }
837         latency = {
838             u"NDR": {
839                 u"direction1": copy.copy(latency_default),
840                 u"direction2": copy.copy(latency_default)
841             },
842             u"PDR": {
843                 u"direction1": copy.copy(latency_default),
844                 u"direction2": copy.copy(latency_default)
845             },
846             u"LAT0": {
847                 u"direction1": copy.copy(latency_default),
848                 u"direction2": copy.copy(latency_default)
849             },
850             u"PDR10": {
851                 u"direction1": copy.copy(latency_default),
852                 u"direction2": copy.copy(latency_default)
853             },
854             u"PDR50": {
855                 u"direction1": copy.copy(latency_default),
856                 u"direction2": copy.copy(latency_default)
857             },
858             u"PDR90": {
859                 u"direction1": copy.copy(latency_default),
860                 u"direction2": copy.copy(latency_default)
861             },
862         }
863
864         groups = re.search(self.REGEX_NDRPDR_LAT, msg)
865         if groups is None:
866             groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
867         if groups is None:
868             return latency, u"FAIL"
869
870         def process_latency(in_str):
871             """Return object with parsed latency values.
872
873             TODO: Define class for the return type.
874
875             :param in_str: Input string, min/avg/max/hdrh format.
876             :type in_str: str
877             :returns: Dict with min/avg/max as floats and hdrh as str.
878             :rtype: dict
879             :throws IndexError: If in_str does not have enough substrings.
880             :throws ValueError: If a substring does not convert to float.
881             """
882             in_list = in_str.split('/', 3)
883
884             rval = {
885                 u"min": float(in_list[0]),
886                 u"avg": float(in_list[1]),
887                 u"max": float(in_list[2]),
888                 u"hdrh": u""
889             }
890
891             if len(in_list) == 4:
892                 rval[u"hdrh"] = str(in_list[3])
893
894             return rval
895
896         try:
897             latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
898             latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
899             latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
900             latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
901             if groups.lastindex == 4:
902                 return latency, u"PASS"
903         except (IndexError, ValueError):
904             pass
905
906         try:
907             latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
908             latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
909             latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
910             latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
911             latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
912             latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
913             latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
914             latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
915             if groups.lastindex == 12:
916                 return latency, u"PASS"
917         except (IndexError, ValueError):
918             pass
919
920         # TODO: Remove when not needed
921         latency[u"NDR10"] = {
922             u"direction1": copy.copy(latency_default),
923             u"direction2": copy.copy(latency_default)
924         }
925         latency[u"NDR50"] = {
926             u"direction1": copy.copy(latency_default),
927             u"direction2": copy.copy(latency_default)
928         }
929         latency[u"NDR90"] = {
930             u"direction1": copy.copy(latency_default),
931             u"direction2": copy.copy(latency_default)
932         }
933         try:
934             latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
935             latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
936             latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
937             latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
938             latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
939             latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
940             latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
941             latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
942             latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
943             latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
944             latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
945             latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
946             latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
947             latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
948             return latency, u"PASS"
949         except (IndexError, ValueError):
950             pass
951
952         return latency, u"FAIL"
953
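    # The latency groups parsed above come as "min/avg/max/hdrh" strings; a
    # sketch of the split performed by process_latency() (values are
    # hypothetical, hdrh payload omitted):
    #
    # >>> in_list = u"10/15/25/".split('/', 3)
    # >>> float(in_list[0]), float(in_list[1]), float(in_list[2])
    # (10.0, 15.0, 25.0)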
954     @staticmethod
955     def _get_hoststack_data(msg, tags):
956         """Get data from the hoststack test message.
957
958         :param msg: The test message to be parsed.
959         :param tags: Test tags.
960         :type msg: str
961         :type tags: list
962         :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
963         :rtype: tuple(dict, str)
964         """
965         result = dict()
966         status = u"FAIL"
967
968         msg = msg.replace(u"'", u'"').replace(u" ", u"")
969         if u"LDPRELOAD" in tags:
970             try:
971                 result = loads(msg)
972                 status = u"PASS"
973             except JSONDecodeError:
974                 pass
975         elif u"VPPECHO" in tags:
976             try:
977                 msg_lst = msg.replace(u"}{", u"} {").split(u" ")
978                 result = dict(
979                     client=loads(msg_lst[0]),
980                     server=loads(msg_lst[1])
981                 )
982                 status = u"PASS"
983             except (JSONDecodeError, IndexError):
984                 pass
985
986         return result, status
987
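    # A minimal usage sketch for the LDPRELOAD branch (the message content
    # is hypothetical; quotes and spaces are normalised before parsing):
    #
    # >>> ExecutionChecker._get_hoststack_data("{'bits_per_second': 5.0}",
    # ...                                      [u"LDPRELOAD"])
    # ({'bits_per_second': 5.0}, 'PASS')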
988     def visit_suite(self, suite):
989         """Implements traversing through the suite and its direct children.
990
991         :param suite: Suite to process.
992         :type suite: Suite
993         :returns: Nothing.
994         """
995         if self.start_suite(suite) is not False:
996             suite.suites.visit(self)
997             suite.tests.visit(self)
998             self.end_suite(suite)
999
1000     def start_suite(self, suite):
1001         """Called when suite starts.
1002
1003         :param suite: Suite to process.
1004         :type suite: Suite
1005         :returns: Nothing.
1006         """
1007
1008         try:
1009             parent_name = suite.parent.name
1010         except AttributeError:
1011             return
1012
1013         doc_str = suite.doc.\
1014             replace(u'"', u"'").\
1015             replace(u'\n', u' ').\
1016             replace(u'\r', u'').\
1017             replace(u'*[', u' |br| *[').\
1018             replace(u"*", u"**").\
1019             replace(u' |br| *[', u'*[', 1)
1020
1021         self._data[u"suites"][suite.longname.lower().
1022                               replace(u'"', u"'").
1023                               replace(u" ", u"_")] = {
1024                                   u"name": suite.name.lower(),
1025                                   u"doc": doc_str,
1026                                   u"parent": parent_name,
1027                                   u"level": len(suite.longname.split(u"."))
1028                               }
1029
1030         suite.keywords.visit(self)
1031
1032     def end_suite(self, suite):
1033         """Called when suite ends.
1034
1035         :param suite: Suite to process.
1036         :type suite: Suite
1037         :returns: Nothing.
1038         """
1039
1040     def visit_test(self, test):
1041         """Implements traversing through the test.
1042
1043         :param test: Test to process.
1044         :type test: Test
1045         :returns: Nothing.
1046         """
1047         if self.start_test(test) is not False:
1048             test.keywords.visit(self)
1049             self.end_test(test)
1050
1051     def start_test(self, test):
1052         """Called when test starts.
1053
1054         :param test: Test to process.
1055         :type test: Test
1056         :returns: Nothing.
1057         """
1058
1059         self._sh_run_counter = 0
1060
1061         longname_orig = test.longname.lower()
1062
1063         # Check the ignore list
1064         if longname_orig in self._ignore:
1065             return
1066
1067         tags = [str(tag) for tag in test.tags]
1068         test_result = dict()
1069
1070         # Change the TC long name and name if defined in the mapping table
1071         longname = self._mapping.get(longname_orig, None)
1072         if longname is not None:
1073             name = longname.split(u'.')[-1]
1074             logging.debug(
1075                 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1076                 f"{name}"
1077             )
1078         else:
1079             longname = longname_orig
1080             name = test.name.lower()
1081
1082         # Remove TC number from the TC long name (backward compatibility):
1083         self._test_id = re.sub(
1084             self.REGEX_TC_NUMBER, u"", longname.replace(u"snat", u"nat")
1085         )
1086         # Remove TC number from the TC name (not needed):
1087         test_result[u"name"] = re.sub(
1088             self.REGEX_TC_NUMBER, "", name.replace(u"snat", u"nat")
1089         )
1090
1091         test_result[u"parent"] = test.parent.name.lower().\
1092             replace(u"snat", u"nat")
1093         test_result[u"tags"] = tags
1094         test_result["doc"] = test.doc.\
1095             replace(u'"', u"'").\
1096             replace(u'\n', u' ').\
1097             replace(u'\r', u'').\
1098             replace(u'[', u' |br| [').\
1099             replace(u' |br| [', u'[', 1)
1100         test_result[u"type"] = u"FUNC"
1101         test_result[u"status"] = test.status
1102
1103         if test.status == u"PASS":
1104             if u"NDRPDR" in tags:
1105                 if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
1106                     test_result[u"msg"] = self._get_data_from_pps_test_msg(
1107                         test.message).replace(u'\n', u' |br| '). \
1108                         replace(u'\r', u'').replace(u'"', u"'")
1109                 elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1110                     test_result[u"msg"] = self._get_data_from_cps_test_msg(
1111                         test.message).replace(u'\n', u' |br| '). \
1112                         replace(u'\r', u'').replace(u'"', u"'")
1113                 else:
1114                     test_result[u"msg"] = self._get_data_from_perf_test_msg(
1115                         test.message).replace(u'\n', u' |br| ').\
1116                         replace(u'\r', u'').replace(u'"', u"'")
1117             elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1118                 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1119                     test.message).replace(u'\n', u' |br| ').\
1120                     replace(u'\r', u'').replace(u'"', u"'")
1121             else:
1122                 test_result[u"msg"] = test.message.replace(u'\n', u' |br| ').\
1123                     replace(u'\r', u'').replace(u'"', u"'")
1124         else:
1125             test_result[u"msg"] = u"Test Failed."
1126
1127         if u"PERFTEST" in tags:
1128             # Replace info about cores (e.g. -1c-) with the info about threads
1129             # and cores (e.g. -1t1c-) in the long test case names and in the
1130             # test case names if necessary.
1131             groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
1132             if not groups:
1133                 tag_count = 0
1134                 tag_tc = str()
1135                 for tag in test_result[u"tags"]:
1136                     groups = re.search(self.REGEX_TC_TAG, tag)
1137                     if groups:
1138                         tag_count += 1
1139                         tag_tc = tag
1140
1141                 if tag_count == 1:
1142                     self._test_id = re.sub(
1143                         self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1144                         self._test_id, count=1
1145                     )
1146                     test_result[u"name"] = re.sub(
1147                         self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1148                         test_result["name"], count=1
1149                     )
1150                 else:
1151                     test_result[u"status"] = u"FAIL"
1152                     self._data[u"tests"][self._test_id] = test_result
1153                     logging.debug(
1154                         f"The test {self._test_id} has no or more than one "
1155                         f"multi-threading tags.\n"
1156                         f"Tags: {test_result[u'tags']}"
1157                     )
1158                     return
1159
1160         if test.status == u"PASS":
1161             if u"DEVICETEST" in tags:
1162                 test_result[u"type"] = u"DEVICETEST"
1163             elif u"NDRPDR" in tags:
1164                 if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1165                     test_result[u"type"] = u"CPS"
1166                 else:
1167                     test_result[u"type"] = u"NDRPDR"
1168                 test_result[u"throughput"], test_result[u"status"] = \
1169                     self._get_ndrpdr_throughput(test.message)
1170                 test_result[u"gbps"], test_result[u"status"] = \
1171                     self._get_ndrpdr_throughput_gbps(test.message)
1172                 test_result[u"latency"], test_result[u"status"] = \
1173                     self._get_ndrpdr_latency(test.message)
1174             elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1175                 if u"MRR" in tags:
1176                     test_result[u"type"] = u"MRR"
1177                 else:
1178                     test_result[u"type"] = u"BMRR"
1179
1180                 test_result[u"result"] = dict()
1181                 groups = re.search(self.REGEX_BMRR, test.message)
1182                 if groups is not None:
1183                     items_str = groups.group(1)
1184                     items_float = [
1185                         float(item.strip().replace(u"'", u""))
1186                         for item in items_str.split(",")
1187                     ]
1188                     # Use whole list in CSIT-1180.
1189                     stats = jumpavg.AvgStdevStats.for_runs(items_float)
1190                     test_result[u"result"][u"samples"] = items_float
1191                     test_result[u"result"][u"receive-rate"] = stats.avg
1192                     test_result[u"result"][u"receive-stdev"] = stats.stdev
1193                 else:
1194                     groups = re.search(self.REGEX_MRR, test.message)
1195                     test_result[u"result"][u"receive-rate"] = \
1196                         float(groups.group(3)) / float(groups.group(1))
1197             elif u"SOAK" in tags:
1198                 test_result[u"type"] = u"SOAK"
1199                 test_result[u"throughput"], test_result[u"status"] = \
1200                     self._get_plr_throughput(test.message)
1201             elif u"HOSTSTACK" in tags:
1202                 test_result[u"type"] = u"HOSTSTACK"
1203                 test_result[u"result"], test_result[u"status"] = \
1204                     self._get_hoststack_data(test.message, tags)
1205             elif u"TCP" in tags:
1206                 test_result[u"type"] = u"TCP"
1207                 groups = re.search(self.REGEX_TCP, test.message)
1208                 test_result[u"result"] = int(groups.group(2))
1209             elif u"RECONF" in tags:
1210                 test_result[u"type"] = u"RECONF"
1211                 test_result[u"result"] = None
1212                 try:
1213                     grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1214                     grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1215                     test_result[u"result"] = {
1216                         u"loss": int(grps_loss.group(1)),
1217                         u"time": float(grps_time.group(1))
1218                     }
1219                 except (AttributeError, IndexError, ValueError, TypeError):
1220                     test_result[u"status"] = u"FAIL"
1221             else:
1222                 test_result[u"status"] = u"FAIL"
1223                 self._data[u"tests"][self._test_id] = test_result
1224                 return
1225
1226         self._data[u"tests"][self._test_id] = test_result
1227
1228     def end_test(self, test):
1229         """Called when test ends.
1230
1231         :param test: Test to process.
1232         :type test: Test
1233         :returns: Nothing.
1234         """
1235
1236     def visit_keyword(self, keyword):
1237         """Implements traversing through the keyword and its child keywords.
1238
1239         :param keyword: Keyword to process.
1240         :type keyword: Keyword
1241         :returns: Nothing.
1242         """
1243         if self.start_keyword(keyword) is not False:
1244             self.end_keyword(keyword)
1245
1246     def start_keyword(self, keyword):
1247         """Called when keyword starts. Default implementation does nothing.
1248
1249         :param keyword: Keyword to process.
1250         :type keyword: Keyword
1251         :returns: Nothing.
1252         """
1253         try:
1254             if keyword.type == u"setup":
1255                 self.visit_setup_kw(keyword)
1256             elif keyword.type == u"teardown":
1257                 self.visit_teardown_kw(keyword)
1258             else:
1259                 self.visit_test_kw(keyword)
1260         except AttributeError:
1261             pass
1262
1263     def end_keyword(self, keyword):
1264         """Called when keyword ends. Default implementation does nothing.
1265
1266         :param keyword: Keyword to process.
1267         :type keyword: Keyword
1268         :returns: Nothing.
1269         """
1270
1271     def visit_test_kw(self, test_kw):
1272         """Implements traversing through the test keyword and its child
1273         keywords.
1274
1275         :param test_kw: Keyword to process.
1276         :type test_kw: Keyword
1277         :returns: Nothing.
1278         """
1279         for keyword in test_kw.keywords:
1280             if self.start_test_kw(keyword) is not False:
1281                 self.visit_test_kw(keyword)
1282                 self.end_test_kw(keyword)
1283
1284     def start_test_kw(self, test_kw):
1285         """Called when test keyword starts. Default implementation does
1286         nothing.
1287
1288         :param test_kw: Keyword to process.
1289         :type test_kw: Keyword
1290         :returns: Nothing.
1291         """
1292         if test_kw.name.count(u"Show Runtime On All Duts") or \
1293                 test_kw.name.count(u"Show Runtime Counters On All Duts") or \
1294                 test_kw.name.count(u"Vpp Show Runtime On All Duts"):
1295             self._msg_type = u"test-show-runtime"
1296             self._sh_run_counter += 1
1297         else:
1298             return
1299         test_kw.messages.visit(self)
1300
1301     def end_test_kw(self, test_kw):
1302         """Called when keyword ends. Default implementation does nothing.
1303
1304         :param test_kw: Keyword to process.
1305         :type test_kw: Keyword
1306         :returns: Nothing.
1307         """
1308
1309     def visit_setup_kw(self, setup_kw):
1310         """Implements traversing through the teardown keyword and its child
1311         keywords.
1312
1313         :param setup_kw: Keyword to process.
1314         :type setup_kw: Keyword
1315         :returns: Nothing.
1316         """
1317         for keyword in setup_kw.keywords:
1318             if self.start_setup_kw(keyword) is not False:
1319                 self.visit_setup_kw(keyword)
1320                 self.end_setup_kw(keyword)
1321
1322     def start_setup_kw(self, setup_kw):
1323         """Called when teardown keyword starts. Default implementation does
1324         nothing.
1325
1326         :param setup_kw: Keyword to process.
1327         :type setup_kw: Keyword
1328         :returns: Nothing.
1329         """
1330         if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1331                 and not self._version:
1332             self._msg_type = u"vpp-version"
1333         elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
1334                 not self._version:
1335             self._msg_type = u"dpdk-version"
1336         elif setup_kw.name.count(u"Set Global Variable") \
1337                 and not self._timestamp:
1338             self._msg_type = u"timestamp"
1339         elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1340             self._msg_type = u"testbed"
1341         else:
1342             return
1343         setup_kw.messages.visit(self)
1344
1345     def end_setup_kw(self, setup_kw):
1346         """Called when keyword ends. Default implementation does nothing.
1347
1348         :param setup_kw: Keyword to process.
1349         :type setup_kw: Keyword
1350         :returns: Nothing.
1351         """
1352
1353     def visit_teardown_kw(self, teardown_kw):
1354         """Implements traversing through the teardown keyword and its child
1355         keywords.
1356
1357         :param teardown_kw: Keyword to process.
1358         :type teardown_kw: Keyword
1359         :returns: Nothing.
1360         """
1361         for keyword in teardown_kw.keywords:
1362             if self.start_teardown_kw(keyword) is not False:
1363                 self.visit_teardown_kw(keyword)
1364                 self.end_teardown_kw(keyword)
1365
1366     def start_teardown_kw(self, teardown_kw):
1367         """Called when teardown keyword starts
1368
1369         :param teardown_kw: Keyword to process.
1370         :type teardown_kw: Keyword
1371         :returns: Nothing.
1372         """
1373
1374         if teardown_kw.name.count(u"Show Vat History On All Duts"):
1375             # TODO: Remove when not needed:
1376             self._conf_history_lookup_nr = 0
1377             self._msg_type = u"teardown-vat-history"
1378             teardown_kw.messages.visit(self)
1379         elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1380             self._conf_history_lookup_nr = 0
1381             self._msg_type = u"teardown-papi-history"
1382             teardown_kw.messages.visit(self)
1383
1384     def end_teardown_kw(self, teardown_kw):
1385         """Called when keyword ends. Default implementation does nothing.
1386
1387         :param teardown_kw: Keyword to process.
1388         :type teardown_kw: Keyword
1389         :returns: Nothing.
1390         """
1391
1392     def visit_message(self, msg):
1393         """Implements visiting the message.
1394
1395         :param msg: Message to process.
1396         :type msg: Message
1397         :returns: Nothing.
1398         """
1399         if self.start_message(msg) is not False:
1400             self.end_message(msg)
1401
1402     def start_message(self, msg):
1403         """Called when message starts. Get required information from messages:
1404         - VPP version.
1405
1406         :param msg: Message to process.
1407         :type msg: Message
1408         :returns: Nothing.
1409         """
1410         if self._msg_type:
1411             self.parse_msg[self._msg_type](msg)
1412
1413     def end_message(self, msg):
1414         """Called when message ends. Default implementation does nothing.
1415
1416         :param msg: Message to process.
1417         :type msg: Message
1418         :returns: Nothing.
1419         """
1420
1421
1422 class InputData:
1423     """Input data
1424
1425     The data is extracted from output.xml files generated by Jenkins jobs and
1426     stored in pandas' DataFrames.
1427
1428     The data structure:
1429     - job name
1430       - build number
1431         - metadata
1432           (as described in ExecutionChecker documentation)
1433         - suites
1434           (as described in ExecutionChecker documentation)
1435         - tests
1436           (as described in ExecutionChecker documentation)
1437     """
1438
1439     def __init__(self, spec):
1440         """Initialization.
1441
1442         :param spec: Specification.
1443         :type spec: Specification
1444         """
1445
1446         # Specification:
1447         self._cfg = spec
1448
1449         # Data store:
1450         self._input_data = pd.Series()
1451
1452     @property
1453     def data(self):
1454         """Getter - Input data.
1455
1456         :returns: Input data
1457         :rtype: pandas.Series
1458         """
1459         return self._input_data
1460
    def metadata(self, job, build):
        """Getter - metadata

        :param job: Job whose metadata we want.
        :param build: Build whose metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata.
        :rtype: pandas.Series
        """
        return self.data[job][str(build)][u"metadata"]
1472
1473     def suites(self, job, build):
1474         """Getter - suites
1475
        :param job: Job whose suites we want.
        :param build: Build whose suites we want.
1478         :type job: str
1479         :type build: str
1480         :returns: Suites.
1481         :rtype: pandas.Series
1482         """
1483         return self.data[job][str(build)][u"suites"]
1484
    def tests(self, job, build):
        """Getter - tests

        :param job: Job whose tests we want.
        :param build: Build whose tests we want.
        :type job: str
        :type build: str
        :returns: Tests.
        :rtype: pandas.Series
        """
        return self.data[job][str(build)][u"tests"]
1496
1497     def _parse_tests(self, job, build):
1498         """Process data from robot output.xml file and return JSON structured
1499         data.
1500
        :param job: The name of the job whose build output data will be
            processed.
        :param build: The build whose output data will be processed.
1503         :type job: str
1504         :type build: dict
1505         :returns: JSON data structure.
1506         :rtype: dict
1507         """
1508
1509         metadata = {
1510             u"job": job,
1511             u"build": build
1512         }
1513
1514         with open(build[u"file-name"], u'r') as data_file:
1515             try:
1516                 result = ExecutionResult(data_file)
1517             except errors.DataError as err:
1518                 logging.error(
1519                     f"Error occurred while parsing output.xml: {repr(err)}"
1520                 )
1521                 return None
1522         checker = ExecutionChecker(
1523             metadata, self._cfg.mapping, self._cfg.ignore
1524         )
1525         result.visit(checker)
1526
1527         return checker.data
1528
1529     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1530         """Download and parse the input data file.
1531
1532         :param pid: PID of the process executing this method.
1533         :param job: Name of the Jenkins job which generated the processed input
1534             file.
1535         :param build: Information about the Jenkins build which generated the
1536             processed input file.
1537         :param repeat: Repeat the download specified number of times if not
1538             successful.
1539         :type pid: int
1540         :type job: str
1541         :type build: dict
1542         :type repeat: int
1543         """
1544
1545         logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1546
1547         state = u"failed"
1548         success = False
1549         data = None
1550         do_repeat = repeat
1551         while do_repeat:
1552             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1553             if success:
1554                 break
1555             do_repeat -= 1
        if not success:
            logging.error(
                f"Failed to download the input data file from the job {job}, "
                f"build {build[u'build']}, or the file is damaged. Skipping."
            )
1562         if success:
1563             logging.info(f"  Processing data from build {build[u'build']}")
1564             data = self._parse_tests(job, build)
1565             if data is None:
1566                 logging.error(
1567                     f"Input data file from the job {job}, build "
1568                     f"{build[u'build']} is damaged. Skipped."
1569                 )
1570             else:
1571                 state = u"processed"
1572
1573             try:
1574                 remove(build[u"file-name"])
1575             except OSError as err:
1576                 logging.error(
1577                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1578                 )
1579
1580         # If the time-period is defined in the specification file, remove all
1581         # files which are outside the time period.
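        # Hitting an outdated build also sets the u"last" flag, which tells
        # the caller to stop processing this job's remaining builds
        # (presumably on the assumption that they are older still).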
1582         is_last = False
1583         timeperiod = self._cfg.environment.get(u"time-period", None)
1584         if timeperiod and data:
1585             now = dt.utcnow()
1586             timeperiod = timedelta(int(timeperiod))
1587             metadata = data.get(u"metadata", None)
1588             if metadata:
1589                 generated = metadata.get(u"generated", None)
1590                 if generated:
1591                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1592                     if (now - generated) > timeperiod:
1593                         # Remove the data and the file:
1594                         state = u"removed"
1595                         data = None
1596                         is_last = True
1597                         logging.info(
1598                             f"  The build {job}/{build[u'build']} is "
1599                             f"outdated, will be removed."
1600                         )
1601         return {
1602             u"data": data,
1603             u"state": state,
1604             u"job": job,
1605             u"build": build,
1606             u"last": is_last
1607         }
1608
1609     def download_and_parse_data(self, repeat=1):
1610         """Download the input data files, parse input data from input files and
1611         store in pandas' Series.
1612
1613         :param repeat: Repeat the download specified number of times if not
1614             successful.
1615         :type repeat: int
1616         """
1617
1618         logging.info(u"Downloading and parsing input files ...")
1619
1620         for job, builds in self._cfg.input.items():
1621             for build in builds:
1622
1623                 result = self._download_and_parse_build(job, build, repeat)
1624                 if result[u"last"]:
1625                     break
1626                 build_nr = result[u"build"][u"build"]
1627
1628                 if result[u"data"]:
1629                     data = result[u"data"]
1630                     build_data = pd.Series({
1631                         u"metadata": pd.Series(
1632                             list(data[u"metadata"].values()),
1633                             index=list(data[u"metadata"].keys())
1634                         ),
1635                         u"suites": pd.Series(
1636                             list(data[u"suites"].values()),
1637                             index=list(data[u"suites"].keys())
1638                         ),
1639                         u"tests": pd.Series(
1640                             list(data[u"tests"].values()),
1641                             index=list(data[u"tests"].keys())
1642                         )
1643                     })
1644
1645                     if self._input_data.get(job, None) is None:
1646                         self._input_data[job] = pd.Series()
1647                     self._input_data[job][str(build_nr)] = build_data
1648                     self._cfg.set_input_file_name(
1649                         job, build_nr, result[u"build"][u"file-name"]
1650                     )
1651                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1652
1653                 mem_alloc = \
1654                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1655                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1656
1657         logging.info(u"Done.")
1658
1659         msg = f"Successful downloads from the sources:\n"
1660         for source in self._cfg.environment[u"data-sources"]:
1661             if source[u"successful-downloads"]:
1662                 msg += (
1663                     f"{source[u'url']}/{source[u'path']}/"
1664                     f"{source[u'file-name']}: "
1665                     f"{source[u'successful-downloads']}\n"
1666                 )
1667         logging.info(msg)
1668
1669     def process_local_file(self, local_file, job=u"local", build_nr=1,
1670                            replace=True):
1671         """Process local XML file given as a command-line parameter.
1672
1673         :param local_file: The file to process.
1674         :param job: Job name.
1675         :param build_nr: Build number.
1676         :param replace: If True, the information about jobs and builds is
1677             replaced by the new one, otherwise the new jobs and builds are
1678             added.
1679         :type local_file: str
1680         :type job: str
1681         :type build_nr: int
1682         :type replace: bool
1683         :raises: PresentationError if an error occurs.
1684         """
1685         if not isfile(local_file):
1686             raise PresentationError(f"The file {local_file} does not exist.")
1687
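        # If the file name is a number (e.g. /tmp/42.xml), use it as the
        # build number; otherwise keep the build_nr given as the parameter.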
1688         try:
1689             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1690         except (IndexError, ValueError):
1691             pass
1692
1693         build = {
1694             u"build": build_nr,
1695             u"status": u"failed",
1696             u"file-name": local_file
1697         }
1698         if replace:
1699             self._cfg.input = dict()
1700         self._cfg.add_build(job, build)
1701
1702         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1703         data = self._parse_tests(job, build)
1704         if data is None:
1705             raise PresentationError(
1706                 f"Error occurred while parsing the file {local_file}"
1707             )
1708
1709         build_data = pd.Series({
1710             u"metadata": pd.Series(
1711                 list(data[u"metadata"].values()),
1712                 index=list(data[u"metadata"].keys())
1713             ),
1714             u"suites": pd.Series(
1715                 list(data[u"suites"].values()),
1716                 index=list(data[u"suites"].keys())
1717             ),
1718             u"tests": pd.Series(
1719                 list(data[u"tests"].values()),
1720                 index=list(data[u"tests"].keys())
1721             )
1722         })
1723
1724         if self._input_data.get(job, None) is None:
1725             self._input_data[job] = pd.Series()
1726         self._input_data[job][str(build_nr)] = build_data
1727
1728         self._cfg.set_input_state(job, build_nr, u"processed")
1729
1730     def process_local_directory(self, local_dir, replace=True):
1731         """Process local directory with XML file(s). The directory is processed
1732         as a 'job' and the XML files in it as builds.
1733         If the given directory contains only sub-directories, these
1734         sub-directories processed as jobs and corresponding XML files as builds
1735         of their job.
1736
1737         :param local_dir: Local directory to process.
1738         :param replace: If True, the information about jobs and builds is
1739             replaced by the new one, otherwise the new jobs and builds are
1740             added.
1741         :type local_dir: str
1742         :type replace: bool
1743         """
1744         if not isdir(local_dir):
1745             raise PresentationError(
1746                 f"The directory {local_dir} does not exist."
1747             )
1748
1749         # Check if the given directory includes only files, or only directories
1750         _, dirnames, filenames = next(walk(local_dir))
1751
1752         if filenames and not dirnames:
1753             filenames.sort()
1754             # local_builds:
1755             # key: dir (job) name, value: list of file names (builds)
1756             local_builds = {
1757                 local_dir: [join(local_dir, name) for name in filenames]
1758             }
1759
1760         elif dirnames and not filenames:
1761             dirnames.sort()
1762             # local_builds:
1763             # key: dir (job) name, value: list of file names (builds)
1764             local_builds = dict()
1765             for dirname in dirnames:
1766                 builds = [
1767                     join(local_dir, dirname, name)
1768                     for name in listdir(join(local_dir, dirname))
1769                     if isfile(join(local_dir, dirname, name))
1770                 ]
1771                 if builds:
1772                     local_builds[dirname] = sorted(builds)
1773
1774         elif not filenames and not dirnames:
1775             raise PresentationError(f"The directory {local_dir} is empty.")
1776         else:
1777             raise PresentationError(
1778                 f"The directory {local_dir} can include only files or only "
1779                 f"directories, not both.\nThe directory {local_dir} includes "
1780                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1781             )
1782
1783         if replace:
1784             self._cfg.input = dict()
1785
1786         for job, files in local_builds.items():
1787             for idx, local_file in enumerate(files):
1788                 self.process_local_file(local_file, job, idx + 1, replace=False)
1789
1790     @staticmethod
1791     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1792         """Return the index of character in the string which is the end of tag.
1793
1794         :param tag_filter: The string where the end of tag is being searched.
1795         :param start: The index where the searching is stated.
1796         :param closer: The character which is the tag closer.
1797         :type tag_filter: str
1798         :type start: int
1799         :type closer: str
1800         :returns: The index of the tag closer.
1801         :rtype: int
1802         """
1803         try:
1804             idx_opener = tag_filter.index(closer, start)
1805             return tag_filter.index(closer, idx_opener + 1)
1806         except ValueError:
1807             return None
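        # Example (illustrative): with tag_filter = "'NDRPDR' and '1C'" and
        # start = 0, the first index() call finds the opening apostrophe at
        # index 0 and the method returns 7, the index of the apostrophe
        # closing 'NDRPDR'.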
1808
1809     @staticmethod
1810     def _condition(tag_filter):
1811         """Create a conditional statement from the given tag filter.
1812
1813         :param tag_filter: Filter based on tags from the element specification.
1814         :type tag_filter: str
1815         :returns: Conditional statement which can be evaluated.
1816         :rtype: str
1817         """
1818         index = 0
1819         while True:
1820             index = InputData._end_of_tag(tag_filter, index)
1821             if index is None:
1822                 return tag_filter
1823             index += 1
1824             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1825
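    # Example (illustrative): _condition("'NDRPDR' and not '2C'") returns
    # "'NDRPDR' in tags and not '2C' in tags", which filter_data() below
    # evaluates with eval() against each test's tag set.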
1826     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1827                     continue_on_error=False):
1828         """Filter required data from the given jobs and builds.
1829
1830         The output data structure is:
1831         - job 1
1832           - build 1
1833             - test (or suite) 1 ID:
1834               - param 1
1835               - param 2
1836               ...
1837               - param n
1838             ...
1839             - test (or suite) n ID:
1840             ...
1841           ...
1842           - build n
1843         ...
1844         - job n
1845
1846         :param element: Element which will use the filtered data.
1847         :param params: Parameters which will be included in the output. If None,
1848             all parameters are included.
1849         :param data: If not None, this data is used instead of data specified
1850             in the element.
1851         :param data_set: The set of data to be filtered: tests, suites,
1852             metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty in that case.
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """
1863
1864         try:
1865             if data_set == "suites":
1866                 cond = u"True"
1867             elif element[u"filter"] in (u"all", u"template"):
1868                 cond = u"True"
1869             else:
1870                 cond = InputData._condition(element[u"filter"])
1871             logging.debug(f"   Filter: {cond}")
1872         except KeyError:
1873             logging.error(u"  No filter defined.")
1874             return None
1875
1876         if params is None:
1877             params = element.get(u"parameters", None)
            if params and u"type" not in params:
                params.append(u"type")
1880
1881         data_to_filter = data if data else element[u"data"]
1882         data = pd.Series()
1883         try:
1884             for job, builds in data_to_filter.items():
1885                 data[job] = pd.Series()
1886                 for build in builds:
1887                     data[job][str(build)] = pd.Series()
1888                     try:
1889                         data_dict = dict(
1890                             self.data[job][str(build)][data_set].items())
1891                     except KeyError:
1892                         if continue_on_error:
1893                             continue
1894                         return None
1895
1896                     for test_id, test_data in data_dict.items():
1897                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1898                             data[job][str(build)][test_id] = pd.Series()
1899                             if params is None:
1900                                 for param, val in test_data.items():
1901                                     data[job][str(build)][test_id][param] = val
1902                             else:
1903                                 for param in params:
1904                                     try:
1905                                         data[job][str(build)][test_id][param] =\
1906                                             test_data[param]
1907                                     except KeyError:
1908                                         data[job][str(build)][test_id][param] =\
1909                                             u"No Data"
1910             return data
1911
1912         except (KeyError, IndexError, ValueError) as err:
1913             logging.error(
1914                 f"Missing mandatory parameter in the element specification: "
1915                 f"{repr(err)}"
1916             )
1917             return None
1918         except AttributeError as err:
1919             logging.error(repr(err))
1920             return None
1921         except SyntaxError as err:
1922             logging.error(
1923                 f"The filter {cond} is not correct. Check if all tags are "
1924                 f"enclosed by apostrophes.\n{repr(err)}"
1925             )
1926             return None
1927
1928     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1929                              continue_on_error=False):
1930         """Filter required data from the given jobs and builds.
1931
1932         The output data structure is:
1933         - job 1
1934           - build 1
1935             - test (or suite) 1 ID:
1936               - param 1
1937               - param 2
1938               ...
1939               - param n
1940             ...
1941             - test (or suite) n ID:
1942             ...
1943           ...
1944           - build n
1945         ...
1946         - job n
1947
1948         :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty in that case.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
1961         """
1962
1963         include = element.get(u"include", None)
1964         if not include:
1965             logging.warning(u"No tests to include, skipping the element.")
1966             return None
1967
1968         if params is None:
1969             params = element.get(u"parameters", None)
1970             if params and u"type" not in params:
1971                 params.append(u"type")
1972
1973         cores = element.get(u"core", None)
1974         if cores:
1975             tests = list()
1976             for core in cores:
1977                 for test in include:
1978                     tests.append(test.format(core=core))
1979         else:
1980             tests = include
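        # Illustrative expansion (hypothetical test name template): with
        # cores = [u"1C", u"2C"] and include = [u"64b-{core}-l2xcbase"],
        # tests becomes [u"64b-1C-l2xcbase", u"64b-2C-l2xcbase"].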
1981
1982         data = pd.Series()
1983         try:
1984             for job, builds in element[u"data"].items():
1985                 data[job] = pd.Series()
1986                 for build in builds:
1987                     data[job][str(build)] = pd.Series()
1988                     for test in tests:
1989                         try:
1990                             reg_ex = re.compile(str(test).lower())
1991                             for test_id in self.data[job][
1992                                     str(build)][data_set].keys():
1993                                 if re.match(reg_ex, str(test_id).lower()):
1994                                     test_data = self.data[job][
1995                                         str(build)][data_set][test_id]
1996                                     data[job][str(build)][test_id] = pd.Series()
1997                                     if params is None:
1998                                         for param, val in test_data.items():
1999                                             data[job][str(build)][test_id]\
2000                                                 [param] = val
2001                                     else:
2002                                         for param in params:
2003                                             try:
2004                                                 data[job][str(build)][
2005                                                     test_id][param] = \
2006                                                     test_data[param]
2007                                             except KeyError:
2008                                                 data[job][str(build)][
2009                                                     test_id][param] = u"No Data"
2010                         except KeyError as err:
2011                             if continue_on_error:
2012                                 logging.debug(repr(err))
2013                                 continue
2014                             logging.error(repr(err))
2015                             return None
2016             return data
2017
2018         except (KeyError, IndexError, ValueError) as err:
2019             logging.error(
2020                 f"Missing mandatory parameter in the element "
2021                 f"specification: {repr(err)}"
2022             )
2023             return None
2024         except AttributeError as err:
2025             logging.error(repr(err))
2026             return None
2027
2028     @staticmethod
2029     def merge_data(data):
2030         """Merge data from more jobs and builds to a simple data structure.
2031
2032         The output data structure is:
2033
2034         - test (suite) 1 ID:
2035           - param 1
2036           - param 2
2037           ...
2038           - param n
2039         ...
2040         - test (suite) n ID:
2041         ...
2042
2043         :param data: Data to merge.
2044         :type data: pandas.Series
2045         :returns: Merged data.
2046         :rtype: pandas.Series
2047         """
2048
2049         logging.info(u"    Merging data ...")
2050
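        # Note: if the same item ID occurs in more than one build, the
        # occurrence from the last processed build overwrites the others.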
2051         merged_data = pd.Series()
2052         for builds in data.values:
2053             for item in builds.values:
2054                 for item_id, item_data in item.items():
2055                     merged_data[item_id] = item_data
2056         return merged_data
2057
2058     def print_all_oper_data(self):
2059         """Print all operational data to console.
2060         """
2061
2062         tbl_hdr = (
2063             u"Name",
2064             u"Nr of Vectors",
2065             u"Nr of Packets",
2066             u"Suspends",
2067             u"Cycles per Packet",
2068             u"Average Vector Size"
2069         )
2070
2071         for job in self._input_data.values:
2072             for build in job.values:
2073                 for test_id, test_data in build[u"tests"].items():
2074                     print(f"{test_id}")
2075                     if test_data.get(u"show-run", None) is None:
2076                         continue
2077                     for dut_name, data in test_data[u"show-run"].items():
2078                         if data.get(u"threads", None) is None:
2079                             continue
2080                         print(f"Host IP: {data.get(u'host', '')}, "
2081                               f"Socket: {data.get(u'socket', '')}")
2082                         for thread_nr, thread in data[u"threads"].items():
2083                             txt_table = prettytable.PrettyTable(tbl_hdr)
2084                             avg = 0.0
2085                             for row in thread:
2086                                 txt_table.add_row(row)
2087                                 avg += row[-1]
                            if not thread:
2089                                 avg = u""
2090                             else:
2091                                 avg = f", Average Vector Size per Node: " \
2092                                       f"{(avg / len(thread)):.2f}"
2093                             th_name = u"main" if thread_nr == 0 \
2094                                 else f"worker_{thread_nr}"
2095                             print(f"{dut_name}, {th_name}{avg}")
2096                             txt_table.float_format = u".2"
2097                             txt_table.align = u"r"
2098                             txt_table.align[u"Name"] = u"l"
2099                             print(f"{txt_table.get_string()}\n")