resources/tools/presentation/input_data_parser.py
# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Data pre-processing

- extract data from output.xml files generated by Jenkins jobs and store in
  pandas' Series,
- filter the data using tags,
- provide access to the data.
"""

import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove, walk, listdir
from os.path import isfile, isdir, join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from json.decoder import JSONDecodeError

import hdrh.histogram
import hdrh.codec
import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file
from pal_errors import PresentationError


# Separator used in file names
SEPARATOR = u"__"


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    }
                }
            },

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            },

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            },

            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """

    REGEX_PLR_RATE = re.compile(
        r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
        r'PLRsearch upper bound::?\s(\d+.\d+)'
    )
    REGEX_NDRPDR_RATE = re.compile(
        r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:\s(\d+.\d+).*\n'
        r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:\s(\d+.\d+)'
    )
    REGEX_NDRPDR_GBPS = re.compile(
        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:.*,\s(\d+.\d+)'
    )
    REGEX_PERF_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    )
    REGEX_CPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
    )
    REGEX_PPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
    )
    REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')

    # Needed for CPS and PPS tests
    REGEX_NDRPDR_LAT_BASE = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
    )
    REGEX_NDRPDR_LAT = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
    )

    REGEX_VERSION_VPP = re.compile(
        r"(return STDOUT Version:\s*|"
        r"VPP Version:\s*|VPP version:\s*)(.*)"
    )
    REGEX_VERSION_DPDK = re.compile(
        r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
    )
    REGEX_TCP = re.compile(
        r'Total\s(rps|cps|throughput):\s(\d*).*$'
    )
    REGEX_MRR = re.compile(
        r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
        r'tx\s(\d*),\srx\s(\d*)'
    )
    REGEX_BMRR = re.compile(
        r'.*trial results.*: \[(.*)\]'
    )
    REGEX_RECONF_LOSS = re.compile(
        r'Packets lost due to reconfig: (\d*)'
    )
    REGEX_RECONF_TIME = re.compile(
        r'Implied time lost: (\d*.[\de-]*)'
    )
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')

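    # Illustrative strings matched by the tag/name regexes above:
    #   REGEX_TC_TAG:      u"2T1C"
    #   REGEX_TC_NAME_OLD: u"-2t1c-"
    #   REGEX_TC_NAME_NEW: u"-2c-"
    #   REGEX_TC_NUMBER:   u"tc01-"
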
    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        # Number of PAPI History messages found:
        # 0 - no message
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._conf_history_lookup_nr = 0

        self._sh_run_counter = 0

        # Test ID of the currently processed test - the lowercase full path
        # to the test.
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"timestamp": self._get_timestamp,
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            # TODO: Remove when not needed:
            u"teardown-vat-history": self._get_vat_history,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed
        }
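        # start_message() uses self._msg_type as the key into this table, so
        # setting the type in a start_*_kw() hook routes all subsequent
        # messages of that keyword to the matching parser.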

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_data_from_mrr_test_msg(self, msg):
        """Get info from message of MRR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
        if not groups or groups.lastindex != 1:
            return u"Test Failed."

        try:
            data = groups.group(1).split(u", ")
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        out_str = u"["
        try:
            for item in data:
                out_str += f"{(float(item) / 1e6):.2f}, "
            return out_str[:-2] + u"]"
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

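    # Illustrative call (invented numbers):
    #   _get_data_from_mrr_test_msg(u"... [12000000.0, 12100000.0]")
    #   returns u"[12.00, 12.10]"
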
    def _get_data_from_cps_test_msg(self, msg):
        """Get info from message of NDRPDR CPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 2:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
                f"2. {(float(groups.group(2)) / 1e6):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_pps_test_msg(self, msg):
        """Get info from message of NDRPDR PPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 4:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
                f"{float(groups.group(2)):5.2f}\n"
                f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
                f"{float(groups.group(4)):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_perf_test_msg(self, msg):
        """Get info from message of NDRPDR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
        if not groups or groups.lastindex != 10:
            return u"Test Failed."

        try:
            data = {
                u"ndr_low": float(groups.group(1)),
                u"ndr_low_b": float(groups.group(2)),
                u"pdr_low": float(groups.group(3)),
                u"pdr_low_b": float(groups.group(4)),
                u"pdr_lat_90_1": groups.group(5),
                u"pdr_lat_90_2": groups.group(6),
                u"pdr_lat_50_1": groups.group(7),
                u"pdr_lat_50_2": groups.group(8),
                u"pdr_lat_10_1": groups.group(9),
                u"pdr_lat_10_2": groups.group(10),
            }
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        def _process_lat(in_str_1, in_str_2):
            """Extract min, avg, max values from latency string.

            :param in_str_1: Latency string for one direction produced by robot
                framework.
            :param in_str_2: Latency string for second direction produced by
                robot framework.
            :type in_str_1: str
            :type in_str_2: str
            :returns: Processed latency string or None if a problem occurs.
            :rtype: tuple
            """
            in_list_1 = in_str_1.split('/', 3)
            in_list_2 = in_str_2.split('/', 3)

            if len(in_list_1) != 4 or len(in_list_2) != 4:
                return None

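            # The hdrh payload may arrive without its base64 '=' padding;
            # restore it before decoding.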
            in_list_1[3] += u"=" * (-len(in_list_1[3]) % 4)
            try:
                hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
            except hdrh.codec.HdrLengthException:
                return None

            in_list_2[3] += u"=" * (-len(in_list_2[3]) % 4)
            try:
                hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
            except hdrh.codec.HdrLengthException:
                return None

            if hdr_lat_1 and hdr_lat_2:
                hdr_lat = (
                    hdr_lat_1.get_value_at_percentile(50.0),
                    hdr_lat_1.get_value_at_percentile(90.0),
                    hdr_lat_1.get_value_at_percentile(99.0),
                    hdr_lat_2.get_value_at_percentile(50.0),
                    hdr_lat_2.get_value_at_percentile(90.0),
                    hdr_lat_2.get_value_at_percentile(99.0)
                )

                if all(hdr_lat):
                    return hdr_lat

            return None

        try:
            out_msg = (
                f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
                f"{data[u'ndr_low_b']:5.2f}"
                f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
                f"{data[u'pdr_low_b']:5.2f}"
            )
            latency = (
                _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
                _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
                _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
            )
            if all(latency):
                max_len = len(str(max((max(item) for item in latency))))
                max_len = 4 if max_len < 4 else max_len

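                # Latency rows continue the numbering of the two throughput
                # rows above, hence idx + 3.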
                for idx, lat in enumerate(latency):
                    if not idx:
                        out_msg += u"\n"
                    out_msg += (
                        f"\n{idx + 3}. "
                        f"{lat[0]:{max_len}d} "
                        f"{lat[1]:{max_len}d} "
                        f"{lat[2]:{max_len}d}      "
                        f"{lat[3]:{max_len}d} "
                        f"{lat[4]:{max_len}d} "
                        f"{lat[5]:{max_len}d} "
                    )

            return out_msg

        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"return STDOUT Version:") or \
                msg.message.count(u"VPP Version:") or \
                msg.message.count(u"VPP version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        self._timestamp = msg.timestamp[:14]
        self._data[u"metadata"][u"generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        TODO: Remove when not needed.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          r"VAT command history:", u"",
                          msg.message, count=1).replace(u'\n', u' |br| ').\
                replace(u'"', u"'")

            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          r"PAPI command history:", u"",
                          msg.message, count=1).replace(u'\n', u' |br| ').\
                replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if not msg.message.count(u"stats runtime"):
            return

        # Temporary solution
        if self._sh_run_counter > 1:
            return

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = dict()

        groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""

        runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
                        replace(u"'", u'"').replace(u'b"', u'"').
                        replace(u'u"', u'"').split(u":", 1)[1])

        try:
            threads_nr = len(runtime[0][u"clocks"])
        except (IndexError, KeyError):
            return

        dut = u"DUT{nr}".format(
            nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)

        oper = {
            u"host": host,
            u"socket": sock,
            u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
        }

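        # For each graph node, derive average clocks per unit of work,
        # preferring vectors, then calls, then suspends as the denominator.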
        for item in runtime:
            for idx in range(threads_nr):
                if item[u"vectors"][idx] > 0:
                    clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
                elif item[u"calls"][idx] > 0:
                    clocks = item[u"clocks"][idx] / item[u"calls"][idx]
                elif item[u"suspends"][idx] > 0:
                    clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
                else:
                    clocks = 0.0

                if item[u"calls"][idx] > 0:
                    vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
                else:
                    vectors_call = 0.0

                if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
                        int(item[u"suspends"][idx]):
                    oper[u"threads"][idx].append([
                        item[u"name"],
                        item[u"calls"][idx],
                        item[u"vectors"][idx],
                        item[u"suspends"][idx],
                        clocks,
                        vectors_call
                    ])

        self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
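        # REGEX_NDRPDR_RATE expects a message fragment shaped like
        # (illustrative values):
        #   NDR_LOWER: 12345678.9 pps ...
        #   ...
        #   NDR_UPPER: 12345680.1 pps ...
        #   PDR_LOWER: 23456789.0 pps ...
        #   ...
        #   PDR_UPPER: 23456790.2 pps ...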
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_throughput_gbps(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
        test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        gbps = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_GBPS, msg)

        if groups is not None:
            try:
                gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
                gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
                gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
                gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return gbps, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0,
            u"avg": -1.0,
            u"max": -1.0,
            u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"LAT0": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR10": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR50": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR90": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
        }

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)
        if groups is None:
            groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
        if groups is None:
            return latency, u"FAIL"

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with float values for min/avg/max and str for hdrh.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
            """
            in_list = in_str.split('/', 3)

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

        try:
            latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
            latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
            latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
            latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
            if groups.lastindex == 4:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        try:
            latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
            latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
            latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
            latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
            latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
            latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
            latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
            latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
            if groups.lastindex == 12:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        # TODO: Remove when not needed
        latency[u"NDR10"] = {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        }
        latency[u"NDR50"] = {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        }
        latency[u"NDR90"] = {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        }
        try:
            latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
            latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
            latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
            latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
            latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
            latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
            latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
            latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
            latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
            latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
            latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
            latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
            latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
            latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
            return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        return latency, u"FAIL"

    @staticmethod
    def _get_hoststack_data(msg, tags):
        """Get data from the hoststack test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        result = dict()
        status = u"FAIL"

        msg = msg.replace(u"'", u'"').replace(u" ", u"")
        if u"LDPRELOAD" in tags:
            try:
                result = loads(msg)
                status = u"PASS"
            except JSONDecodeError:
                pass
        elif u"VPPECHO" in tags:
            try:
                msg_lst = msg.replace(u"}{", u"} {").split(u" ")
                result = dict(
                    client=loads(msg_lst[0]),
                    server=loads(msg_lst[1])
                )
                status = u"PASS"
            except (JSONDecodeError, IndexError):
                pass

        return result, status

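    # For LDPRELOAD tests the whole message is a single JSON object; for
    # VPPECHO it is two concatenated objects (client, server) split on "} {".
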
    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'*[', u' |br| *[').\
            replace(u"*", u"**").\
            replace(u' |br| *[', u'*[', 1)

        self._data[u"suites"][suite.longname.lower().
                              replace(u'"', u"'").
                              replace(u" ", u"_")] = {
                                  u"name": suite.name.lower(),
                                  u"doc": doc_str,
                                  u"parent": parent_name,
                                  u"level": len(suite.longname.split(u"."))
                              }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        self._sh_run_counter = 0

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            logging.debug(
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
                f"{name}"
            )
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(
            self.REGEX_TC_NUMBER, u"", longname.replace(u"snat", u"nat")
        )
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(
            self.REGEX_TC_NUMBER, u"", name.replace(u"snat", u"nat")
        )

        test_result[u"parent"] = test.parent.name.lower().\
            replace(u"snat", u"nat")
        test_result[u"tags"] = tags
        test_result[u"doc"] = test.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'[', u' |br| [').\
            replace(u' |br| [', u'[', 1)
        test_result[u"type"] = u"FUNC"
        test_result[u"status"] = test.status

        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
                    test_result[u"msg"] = self._get_data_from_pps_test_msg(
                        test.message).replace(u'\n', u' |br| '). \
                        replace(u'\r', u'').replace(u'"', u"'")
                elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                    test_result[u"msg"] = self._get_data_from_cps_test_msg(
                        test.message).replace(u'\n', u' |br| '). \
                        replace(u'\r', u'').replace(u'"', u"'")
                else:
                    test_result[u"msg"] = self._get_data_from_perf_test_msg(
                        test.message).replace(u'\n', u' |br| ').\
                        replace(u'\r', u'').replace(u'"', u"'")
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                test_result[u"msg"] = self._get_data_from_mrr_test_msg(
                    test.message).replace(u'\n', u' |br| ').\
                    replace(u'\r', u'').replace(u'"', u"'")
            else:
                test_result[u"msg"] = test.message.replace(u'\n', u' |br| ').\
                    replace(u'\r', u'').replace(u'"', u"'")
        else:
            test_result[u"msg"] = u"Test Failed."

        if u"PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
            if not groups:
                tag_count = 0
                tag_tc = str()
                for tag in test_result[u"tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_id = re.sub(
                        self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                        self._test_id, count=1
                    )
                    test_result[u"name"] = re.sub(
                        self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                        test_result[u"name"], count=1
                    )
                else:
                    test_result[u"status"] = u"FAIL"
                    self._data[u"tests"][self._test_id] = test_result
                    logging.debug(
                        f"The test {self._test_id} has none or more than one "
                        f"multi-threading tag.\n"
                        f"Tags: {test_result[u'tags']}"
                    )
                    return

        if test.status == u"PASS":
            if u"DEVICETEST" in tags:
                test_result[u"type"] = u"DEVICETEST"
            elif u"NDRPDR" in tags:
                if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                    test_result[u"type"] = u"CPS"
                else:
                    test_result[u"type"] = u"NDRPDR"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"gbps"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput_gbps(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                if u"MRR" in tags:
                    test_result[u"type"] = u"MRR"
                else:
                    test_result[u"type"] = u"BMRR"

                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [
                        float(item.strip().replace(u"'", u""))
                        for item in items_str.split(",")
                    ]
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"receive-rate"] = stats.avg
                    test_result[u"result"][u"receive-stdev"] = stats.stdev
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
            elif u"SOAK" in tags:
                test_result[u"type"] = u"SOAK"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
            elif u"HOSTSTACK" in tags:
                test_result[u"type"] = u"HOSTSTACK"
                test_result[u"result"], test_result[u"status"] = \
                    self._get_hoststack_data(test.message, tags)
            elif u"TCP" in tags:
                test_result[u"type"] = u"TCP"
                groups = re.search(self.REGEX_TCP, test.message)
                test_result[u"result"] = int(groups.group(2))
            elif u"RECONF" in tags:
                test_result[u"type"] = u"RECONF"
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
            else:
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                return

        self._data[u"tests"][self._test_id] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == u"setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == u"teardown":
                self.visit_teardown_kw(keyword)
            else:
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count(u"Show Runtime On All Duts") or \
                test_kw.name.count(u"Show Runtime Counters On All Duts") or \
                test_kw.name.count(u"Vpp Show Runtime On All Duts"):
            self._msg_type = u"test-show-runtime"
            self._sh_run_counter += 1
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
                not self._version:
            self._msg_type = u"dpdk-version"
        elif setup_kw.name.count(u"Set Global Variable") \
                and not self._timestamp:
            self._msg_type = u"timestamp"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count(u"Show Vat History On All Duts"):
            # TODO: Remove when not needed:
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Get required information from messages:
        - VPP version.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """


class InputData:
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """

    def __init__(self, spec):
        """Initialization.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data
        :rtype: pandas.Series
        """
        return self._input_data

1455     def metadata(self, job, build):
1456         """Getter - metadata
1457
1458         :param job: Job whose metadata we want.
1459         :param build: Build whose metadata we want.
1460         :type job: str
1461         :type build: str
1462         :returns: Metadata.
1463         :rtype: pandas.Series
1464         """
1465         return self.data[job][str(build)][u"metadata"]
1466
1467     def suites(self, job, build):
1468         """Getter - suites
1469
1470         :param job: Job whose suites we want.
1471         :param build: Build whose suites we want.
1472         :type job: str
1473         :type build: str
1474         :returns: Suites.
1475         :rtype: pandas.Series
1476         """
1477         return self.data[job][str(build)][u"suites"]
1478
1479     def tests(self, job, build):
1480         """Getter - tests
1481
1482         :param job: Job whose tests we want.
1483         :param build: Build whose tests we want.
1484         :type job: str
1485         :type build: str
1486         :returns: Tests.
1487         :rtype: pandas.Series
1488         """
1489         return self.data[job][str(build)][u"tests"]
1490
1491     def _parse_tests(self, job, build):
1492         """Process data from the robot output.xml file and return JSON
1493         structured data.
1494
1495         :param job: Name of the job whose build output data will be processed.
1496         :param build: The build whose output data will be processed.
1497         :type job: str
1498         :type build: dict
1499         :returns: JSON data structure.
1500         :rtype: dict
1501         """
1502
1503         metadata = {
1504             u"job": job,
1505             u"build": build
1506         }
1507
1508         with open(build[u"file-name"], u'r') as data_file:
1509             try:
1510                 result = ExecutionResult(data_file)
1511             except errors.DataError as err:
1512                 logging.error(
1513                     f"Error occurred while parsing output.xml: {repr(err)}"
1514                 )
1515                 return None
1516         checker = ExecutionChecker(metadata, self._cfg.mapping,
1517                                    self._cfg.ignore)
1518         result.visit(checker)
1519
1520         return checker.data
1521
1522     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1523         """Download and parse the input data file.
1524
1525         :param job: Name of the Jenkins job which generated the processed
1526             input file.
1527         :param build: Information about the Jenkins build which generated
1528             the processed input file.
1529         :param repeat: Repeat the download the specified number of times
1530             if it is not successful.
1531         :param pid: PID of the process executing this method.
1532         :type job: str
1533         :type build: dict
1534         :type repeat: int
1535         :type pid: int
1536         """
1537
1538         logging.info(f"  Processing the job/build: {job}: {build[u'build']}")
1539
1540         state = u"failed"
1541         success = False
1542         data = None
1543         do_repeat = repeat
1544         while do_repeat:
1545             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1546             if success:
1547                 break
1548             do_repeat -= 1
1549         if not success:
1550             logging.error(
1551                 f"The input data file from the job {job}, build "
1552                 f"{build[u'build']} could not be downloaded, or it is "
1553                 f"damaged. Skipped."
1554             )
1555         if success:
1556             logging.info(f"    Processing data from build {build[u'build']}")
1557             data = self._parse_tests(job, build)
1558             if data is None:
1559                 logging.error(
1560                     f"Input data file from the job {job}, build "
1561                     f"{build[u'build']} is damaged. Skipped."
1562                 )
1563             else:
1564                 state = u"processed"
1565
1566             try:
1567                 remove(build[u"file-name"])
1568             except OSError as err:
1569                 logging.error(
1570                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1571                 )
1572
1573         # If the time-period is defined in the specification file, remove all
1574         # files which are outside the time period.
1575         is_last = False
1576         timeperiod = self._cfg.input.get(u"time-period", None)
1577         if timeperiod and data:
1578             now = dt.utcnow()
1579             timeperiod = timedelta(int(timeperiod))
1580             metadata = data.get(u"metadata", None)
1581             if metadata:
1582                 generated = metadata.get(u"generated", None)
1583                 if generated:
1584                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1585                     if (now - generated) > timeperiod:
1586                         # Remove the data and the file:
1587                         state = u"removed"
1588                         data = None
1589                         is_last = True
1590                         logging.info(
1591                             f"    The build {job}/{build[u'build']} is "
1592                             f"outdated, will be removed."
1593                         )
1594         logging.info(u"  Done.")
1595
1596         return {
1597             u"data": data,
1598             u"state": state,
1599             u"job": job,
1600             u"build": build,
1601             u"last": is_last
1602         }
1603
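    # download_and_parse_data() below consumes this dictionary: u"data" and
    # u"state" are stored per build, while u"last" == True makes the caller
    # stop iterating over the remaining (older) builds of the same job.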
1604     def download_and_parse_data(self, repeat=1):
1605         """Download the input data files, parse input data from input files and
1606         store in pandas' Series.
1607
1608         :param repeat: Repeat the download the specified number of times
1609             if it is not successful.
1610         :type repeat: int
1611         """
1612
1613         logging.info(u"Downloading and parsing input files ...")
1614
1615         for job, builds in self._cfg.builds.items():
1616             for build in builds:
1617
1618                 result = self._download_and_parse_build(job, build, repeat)
1619                 if result[u"last"]:
1620                     break
1621                 build_nr = result[u"build"][u"build"]
1622
1623                 if result[u"data"]:
1624                     data = result[u"data"]
1625                     build_data = pd.Series({
1626                         u"metadata": pd.Series(
1627                             list(data[u"metadata"].values()),
1628                             index=list(data[u"metadata"].keys())
1629                         ),
1630                         u"suites": pd.Series(
1631                             list(data[u"suites"].values()),
1632                             index=list(data[u"suites"].keys())
1633                         ),
1634                         u"tests": pd.Series(
1635                             list(data[u"tests"].values()),
1636                             index=list(data[u"tests"].keys())
1637                         )
1638                     })
1639
1640                     if self._input_data.get(job, None) is None:
1641                         self._input_data[job] = pd.Series()
1642                     self._input_data[job][str(build_nr)] = build_data
1643
1644                     self._cfg.set_input_file_name(
1645                         job, build_nr, result[u"build"][u"file-name"])
1646
1647                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1648
1649                 mem_alloc = \
1650                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1651                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1652
1653         logging.info(u"Done.")
1654
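    # The loop above expects self._cfg.builds to map job names to lists of
    # build dictionaries; an illustrative shape (values hypothetical):
    #
    #     {
    #         u"csit-vpp-perf-report": [
    #             {u"build": 42, u"status": u"failed",
    #              u"file-name": u"output.xml"},
    #         ]
    #     }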
1655     def process_local_file(self, local_file, job=u"local", build_nr=1,
1656                            replace=True):
1657         """Process local XML file given as a command-line parameter.
1658
1659         :param local_file: The file to process.
1660         :param job: Job name.
1661         :param build_nr: Build number.
1662         :param replace: If True, the information about jobs and builds is
1663             replaced by the new one; otherwise the new jobs and builds are
1664             added.
1665         :type local_file: str
1666         :type job: str
1667         :type build_nr: int
1668         :type replace: bool
1669         :raises: PresentationError if an error occurs.
1670         """
1671         if not isfile(local_file):
1672             raise PresentationError(f"The file {local_file} does not exist.")
1673
1674         try:
1675             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1676         except (IndexError, ValueError):
1677             pass
1678
1679         build = {
1680             u"build": build_nr,
1681             u"status": u"failed",
1682             u"file-name": local_file
1683         }
1684         if replace:
1685             self._cfg.builds = dict()
1686         self._cfg.add_build(job, build)
1687
1688         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1689         data = self._parse_tests(job, build)
1690         if data is None:
1691             raise PresentationError(
1692                 f"Error occurred while parsing the file {local_file}"
1693             )
1694
1695         build_data = pd.Series({
1696             u"metadata": pd.Series(
1697                 list(data[u"metadata"].values()),
1698                 index=list(data[u"metadata"].keys())
1699             ),
1700             u"suites": pd.Series(
1701                 list(data[u"suites"].values()),
1702                 index=list(data[u"suites"].keys())
1703             ),
1704             u"tests": pd.Series(
1705                 list(data[u"tests"].values()),
1706                 index=list(data[u"tests"].keys())
1707             )
1708         })
1709
1710         if self._input_data.get(job, None) is None:
1711             self._input_data[job] = pd.Series()
1712         self._input_data[job][str(build_nr)] = build_data
1713
1714         self._cfg.set_input_state(job, build_nr, u"processed")
1715
1716     def process_local_directory(self, local_dir, replace=True):
1717         """Process a local directory with XML file(s). The directory is
1718         processed as a 'job' and the XML files in it as builds.
1719         If the given directory contains only sub-directories, these
1720         sub-directories are processed as jobs and the corresponding XML
1721         files as builds of their respective jobs.
1722
1723         :param local_dir: Local directory to process.
1724         :param replace: If True, the information about jobs and builds is
1725             replaced by the new one; otherwise the new jobs and builds are
1726             added.
1727         :type local_dir: str
1728         :type replace: bool
         :raises: PresentationError if the directory does not exist, is empty,
             or mixes files with sub-directories.
1729         """
1730         if not isdir(local_dir):
1731             raise PresentationError(
1732                 f"The directory {local_dir} does not exist."
1733             )
1734
1735         # Check if the given directory includes only files, or only directories
1736         _, dirnames, filenames = next(walk(local_dir))
1737
1738         if filenames and not dirnames:
1739             filenames.sort()
1740             # local_builds:
1741             # key: dir (job) name, value: list of file names (builds)
1742             local_builds = {
1743                 local_dir: [join(local_dir, name) for name in filenames]
1744             }
1745
1746         elif dirnames and not filenames:
1747             dirnames.sort()
1748             # local_builds:
1749             # key: dir (job) name, value: list of file names (builds)
1750             local_builds = dict()
1751             for dirname in dirnames:
1752                 builds = [
1753                     join(local_dir, dirname, name)
1754                     for name in listdir(join(local_dir, dirname))
1755                     if isfile(join(local_dir, dirname, name))
1756                 ]
1757                 if builds:
1758                     local_builds[dirname] = sorted(builds)
1759
1760         elif not filenames and not dirnames:
1761             raise PresentationError(f"The directory {local_dir} is empty.")
1762         else:
1763             raise PresentationError(
1764                 f"The directory {local_dir} can include only files or only "
1765                 f"directories, not both.\nThe directory {local_dir} includes "
1766                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1767             )
1768
1769         if replace:
1770             self._cfg.builds = dict()
1771
1772         for job, files in local_builds.items():
1773             for idx, local_file in enumerate(files):
1774                 self.process_local_file(local_file, job, idx + 1, replace=False)
1775
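    # Two directory layouts are accepted by process_local_directory() above
    # (names illustrative): a flat directory, which becomes a single job
    # whose XML files are its builds, or one level of sub-directories, each
    # becoming a job of its own:
    #
    #     local/1.xml, local/2.xml      -> job "local", builds 1 and 2
    #     local/job_a/1.xml,
    #     local/job_b/1.xml             -> jobs "job_a" and "job_b"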
1776     @staticmethod
1777     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1778         """Return the index of the character which closes the tag.
1779
1780         :param tag_filter: The string in which the end of the tag is searched.
1781         :param start: The index where the searching is started.
1782         :param closer: The character which closes the tag.
1783         :type tag_filter: str
1784         :type start: int
1785         :type closer: str
1786         :returns: The index of the tag closer, or None if not found.
1787         :rtype: int
1788         """
1789         try:
1790             idx_opener = tag_filter.index(closer, start)
1791             return tag_filter.index(closer, idx_opener + 1)
1792         except ValueError:
1793             return None
1794
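    # Worked example: with the default closer u"'", calling
    # _end_of_tag(u"'NDRPDR' and '64B'") finds the apostrophe opening
    # 'NDRPDR' at index 0 and returns the index of the one closing it, 7.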
1795     @staticmethod
1796     def _condition(tag_filter):
1797         """Create a conditional statement from the given tag filter.
1798
1799         :param tag_filter: Filter based on tags from the element specification.
1800         :type tag_filter: str
1801         :returns: Conditional statement which can be evaluated.
1802         :rtype: str
1803         """
1804         index = 0
1805         while True:
1806             index = InputData._end_of_tag(tag_filter, index)
1807             if index is None:
1808                 return tag_filter
1809             index += 1
1810             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1811
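    # Worked example: _condition(u"'NDRPDR' and '64B'") appends u" in tags"
    # after each closed tag, yielding
    # u"'NDRPDR' in tags and '64B' in tags", which filter_data() below
    # evaluates with eval() against each test's tags.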
1812     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1813                     continue_on_error=False):
1814         """Filter required data from the given jobs and builds.
1815
1816         The output data structure is:
1817         - job 1
1818           - build 1
1819             - test (or suite) 1 ID:
1820               - param 1
1821               - param 2
1822               ...
1823               - param n
1824             ...
1825             - test (or suite) n ID:
1826             ...
1827           ...
1828           - build n
1829         ...
1830         - job n
1831
1832         :param element: Element which will use the filtered data.
1833         :param params: Parameters which will be included in the output. If None,
1834             all parameters are included.
1835         :param data: If not None, this data is used instead of data specified
1836             in the element.
1837         :param data_set: The set of data to be filtered: tests, suites,
1838             metadata.
1839         :param continue_on_error: Continue if there is an error while
1840             reading the data. The item will be empty then.
1841         :type element: pandas.Series
1842         :type params: list
1843         :type data: dict
1844         :type data_set: str
1845         :type continue_on_error: bool
1846         :returns: Filtered data.
1847         :rtype: pandas.Series
1848         """
1849
1850         try:
1851             if data_set == u"suites":
1852                 cond = u"True"
1853             elif element[u"filter"] in (u"all", u"template"):
1854                 cond = u"True"
1855             else:
1856                 cond = InputData._condition(element[u"filter"])
1857             logging.debug(f"   Filter: {cond}")
1858         except KeyError:
1859             logging.error(u"  No filter defined.")
1860             return None
1861
1862         if params is None:
1863             params = element.get(u"parameters", None)
1864             if params:
1865                 params.append(u"type")
1866
1867         data_to_filter = data if data else element[u"data"]
1868         data = pd.Series()
1869         try:
1870             for job, builds in data_to_filter.items():
1871                 data[job] = pd.Series()
1872                 for build in builds:
1873                     data[job][str(build)] = pd.Series()
1874                     try:
1875                         data_dict = dict(
1876                             self.data[job][str(build)][data_set].items())
1877                     except KeyError:
1878                         if continue_on_error:
1879                             continue
1880                         return None
1881
1882                     for test_id, test_data in data_dict.items():
1883                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1884                             data[job][str(build)][test_id] = pd.Series()
1885                             if params is None:
1886                                 for param, val in test_data.items():
1887                                     data[job][str(build)][test_id][param] = val
1888                             else:
1889                                 for param in params:
1890                                     try:
1891                                         data[job][str(build)][test_id][param] =\
1892                                             test_data[param]
1893                                     except KeyError:
1894                                         data[job][str(build)][test_id][param] =\
1895                                             u"No Data"
1896             return data
1897
1898         except (KeyError, IndexError, ValueError) as err:
1899             logging.error(
1900                 f"Missing mandatory parameter in the element specification: "
1901                 f"{repr(err)}"
1902             )
1903             return None
1904         except AttributeError as err:
1905             logging.error(repr(err))
1906             return None
1907         except SyntaxError as err:
1908             logging.error(
1909                 f"The filter {cond} is not correct. Check if all tags are "
1910                 f"enclosed by apostrophes.\n{repr(err)}"
1911             )
1912             return None
1913
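    # A minimal element sketch that filter_data() can consume (all values
    # illustrative):
    #
    #     element = pd.Series({
    #         u"filter": u"'NDRPDR' and '64B'",
    #         u"parameters": [u"throughput", u"tags"],
    #         u"data": {u"csit-vpp-perf-report": [42, 43]}
    #     })
    #     filtered = in_data.filter_data(element, data_set=u"tests")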
1914     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1915                              continue_on_error=False):
1916         """Filter required data from the given jobs and builds.
1917
1918         The output data structure is:
1919         - job 1
1920           - build 1
1921             - test (or suite) 1 ID:
1922               - param 1
1923               - param 2
1924               ...
1925               - param n
1926             ...
1927             - test (or suite) n ID:
1928             ...
1929           ...
1930           - build n
1931         ...
1932         - job n
1933
1934         :param element: Element which will use the filtered data.
1935         :param params: Parameters which will be included in the output. If
1936             None, all parameters are included.
1937         :param data_set: The set of data to be filtered: tests, suites,
1938             metadata.
1939         :param continue_on_error: Continue if there is an error while
1940             reading the data. The item will be empty then.
1941         :type element: pandas.Series
1942         :type params: list
1943         :type data_set: str
1944         :type continue_on_error: bool
1945         :returns: Filtered data.
1946         :rtype: pandas.Series
1947         """
1948
1949         include = element.get(u"include", None)
1950         if not include:
1951             logging.warning(u"No tests to include, skipping the element.")
1952             return None
1953
1954         if params is None:
1955             params = element.get(u"parameters", None)
1956             if params:
1957                 params.append(u"type")
1958
1959         data = pd.Series()
1960         try:
1961             for job, builds in element[u"data"].items():
1962                 data[job] = pd.Series()
1963                 for build in builds:
1964                     data[job][str(build)] = pd.Series()
1965                     for test in include:
1966                         try:
1967                             reg_ex = re.compile(str(test).lower())
1968                             for test_id in self.data[job][
1969                                     str(build)][data_set].keys():
1970                                 if re.match(reg_ex, str(test_id).lower()):
1971                                     test_data = self.data[job][
1972                                         str(build)][data_set][test_id]
1973                                     data[job][str(build)][test_id] = pd.Series()
1974                                     if params is None:
1975                                         for param, val in test_data.items():
1976                                             data[job][str(build)][test_id]\
1977                                                 [param] = val
1978                                     else:
1979                                         for param in params:
1980                                             try:
1981                                                 data[job][str(build)][
1982                                                     test_id][param] = \
1983                                                     test_data[param]
1984                                             except KeyError:
1985                                                 data[job][str(build)][
1986                                                     test_id][param] = u"No Data"
1987                         except KeyError as err:
1988                             if continue_on_error:
1989                                 logging.debug(repr(err))
1990                                 continue
1991                             logging.error(repr(err))
1992                             return None
1993             return data
1994
1995         except (KeyError, IndexError, ValueError) as err:
1996             logging.error(
1997                 f"Missing mandatory parameter in the element "
1998                 f"specification: {repr(err)}"
1999             )
2000             return None
2001         except AttributeError as err:
2002             logging.error(repr(err))
2003             return None
2004
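    # filter_tests_by_name() expects element[u"include"] to be a list of
    # regular expressions matched case-insensitively against test IDs, e.g.
    # (pattern illustrative):
    #
    #     element[u"include"] = [u".*64b-1c-.*-ndrpdr$"]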
2005     @staticmethod
2006     def merge_data(data):
2007         """Merge data from multiple jobs and builds into one data structure.
2008
2009         The output data structure is:
2010
2011         - test (suite) 1 ID:
2012           - param 1
2013           - param 2
2014           ...
2015           - param n
2016         ...
2017         - test (suite) n ID:
2018         ...
2019
2020         :param data: Data to merge.
2021         :type data: pandas.Series
2022         :returns: Merged data.
2023         :rtype: pandas.Series
2024         """
2025
2026         logging.info(u"    Merging data ...")
2027
2028         merged_data = pd.Series()
2029         for builds in data.values:
2030             for item in builds.values:
2031                 for item_id, item_data in item.items():
2032                     merged_data[item_id] = item_data
2033         return merged_data
2034
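    # Note: if the same item ID occurs in more than one build, the build
    # processed last wins, because merged_data[item_id] is simply
    # overwritten on each occurrence.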
2035     def print_all_oper_data(self):
2036         """Print all operational data to the console.
2037         """
2038
2039         tbl_hdr = (
2040             u"Name",
2041             u"Nr of Vectors",
2042             u"Nr of Packets",
2043             u"Suspends",
2044             u"Cycles per Packet",
2045             u"Average Vector Size"
2046         )
2047
2048         for job in self._input_data.values:
2049             for build in job.values:
2050                 for test_id, test_data in build[u"tests"].items():
2051                     print(f"{test_id}")
2052                     if test_data.get(u"show-run", None) is None:
2053                         continue
2054                     for dut_name, data in test_data[u"show-run"].items():
2055                         if data.get(u"threads", None) is None:
2056                             continue
2057                         print(f"Host IP: {data.get(u'host', '')}, "
2058                               f"Socket: {data.get(u'socket', '')}")
2059                         for thread_nr, thread in data[u"threads"].items():
2060                             txt_table = prettytable.PrettyTable(tbl_hdr)
2061                             avg = 0.0
2062                             for row in thread:
2063                                 txt_table.add_row(row)
2064                                 avg += row[-1]
2065                             if len(thread) == 0:
2066                                 avg = u""
2067                             else:
2068                                 avg = f", Average Vector Size per Node: " \
2069                                       f"{(avg / len(thread)):.2f}"
2070                             th_name = u"main" if thread_nr == 0 \
2071                                 else f"worker_{thread_nr}"
2072                             print(f"{dut_name}, {th_name}{avg}")
2073                             txt_table.float_format = u".2"
2074                             txt_table.align = u"r"
2075                             txt_table.align[u"Name"] = u"l"
2076                             print(f"{txt_table.get_string()}\n")