1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data,
19 - filter the data using tags.
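
Example (an illustrative usage sketch; ``spec`` stands for an already parsed
specification object, the job/build identifiers are placeholders, and the
download/parse step is elided)::

    input_data = InputData(spec, u"report")
    tests = input_data.tests(u"job-name", u"build-nr")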
20 """
21
22 import re
23 import copy
24 import resource
25 import logging
26
27 from collections import OrderedDict
28 from os import remove, walk, listdir
29 from os.path import isfile, isdir, join
30 from datetime import datetime as dt
31 from datetime import timedelta
32 from json import loads
33 from json.decoder import JSONDecodeError
34
35 import hdrh.histogram
36 import hdrh.codec
37 import prettytable
38 import pandas as pd
39
40 from robot.api import ExecutionResult, ResultVisitor
41 from robot import errors
42
43 from resources.libraries.python import jumpavg
44 from input_data_files import download_and_unzip_data_file
45 from pal_errors import PresentationError
46
47
48 # Separator used in file names
49 SEPARATOR = u"__"
50
51
52 class ExecutionChecker(ResultVisitor):
53     """Class to traverse through the test suite structure.
54
55     The functionality implemented in this class generates a JSON structure:
56
57     Performance tests:
58
59     {
60         "metadata": {
61             "generated": "Timestamp",
62             "version": "SUT version",
63             "job": "Jenkins job name",
64             "build": "Information about the build"
65         },
66         "suites": {
67             "Suite long name 1": {
68                 "name": "Suite name",
69                 "doc": "Suite 1 documentation",
70                 "parent": "Suite 1 parent",
71                 "level": "Level of the suite in the suite hierarchy"
72             },
73             "Suite long name N": {
74                 "name": "Suite name",
75                 "doc": "Suite N documentation",
76                 "parent": "Suite N parent",
77                 "level": "Level of the suite in the suite hierarchy"
78             }
79         },
80         "tests": {
81             # NDRPDR tests:
82             "ID": {
83                 "name": "Test name",
84                 "parent": "Name of the parent of the test",
85                 "doc": "Test documentation",
86                 "msg": "Test message",
87                 "conf-history": "DUT1 and DUT2 PAPI command history",
88                 "show-run": "Show Run",
89                 "tags": ["tag 1", "tag 2", "tag n"],
90                 "type": "NDRPDR",
91                 "status": "PASS" | "FAIL",
92                 "throughput": {
93                     "NDR": {
94                         "LOWER": float,
95                         "UPPER": float
96                     },
97                     "PDR": {
98                         "LOWER": float,
99                         "UPPER": float
100                     }
101                 },
102                 "latency": {
103                     "NDR": {
104                         "direction1": {
105                             "min": float,
106                             "avg": float,
107                             "max": float,
108                             "hdrh": str
109                         },
110                         "direction2": {
111                             "min": float,
112                             "avg": float,
113                             "max": float,
114                             "hdrh": str
115                         }
116                     },
117                     "PDR": {
118                         "direction1": {
119                             "min": float,
120                             "avg": float,
121                             "max": float,
122                             "hdrh": str
123                         },
124                         "direction2": {
125                             "min": float,
126                             "avg": float,
127                             "max": float,
128                             "hdrh": str
129                         }
130                     }
131                 }
132             }
133
134             # TCP tests:
135             "ID": {
136                 "name": "Test name",
137                 "parent": "Name of the parent of the test",
138                 "doc": "Test documentation",
139                 "msg": "Test message",
140                 "tags": ["tag 1", "tag 2", "tag n"],
141                 "type": "TCP",
142                 "status": "PASS" | "FAIL",
143                 "result": int
144             }
145
146             # MRR, BMRR tests:
147             "ID": {
148                 "name": "Test name",
149                 "parent": "Name of the parent of the test",
150                 "doc": "Test documentation",
151                 "msg": "Test message",
152                 "tags": ["tag 1", "tag 2", "tag n"],
153                 "type": "MRR" | "BMRR",
154                 "status": "PASS" | "FAIL",
155                 "result": {
156                     "receive-rate": float,
157                     # Average of a list, computed using AvgStdevStats.
158                     # In CSIT-1180, replace with List[float].
159                 }
160             }
161
162             "ID": {
163                 # next test
164             }
165         }
166     }
167
168
169     Functional tests:
170
171     {
172         "metadata": {  # Optional
173             "version": "VPP version",
174             "job": "Jenkins job name",
175             "build": "Information about the build"
176         },
177         "suites": {
178             "Suite name 1": {
179                 "doc": "Suite 1 documentation",
180                 "parent": "Suite 1 parent",
181                 "level": "Level of the suite in the suite hierarchy"
182             },
183             "Suite name N": {
184                 "doc": "Suite N documentation",
185                 "parent": "Suite N parent",
186                 "level": "Level of the suite in the suite hierarchy"
187             }
188         },
189         "tests": {
190             "ID": {
191                 "name": "Test name",
192                 "parent": "Name of the parent of the test",
193                 "doc": "Test documentation",
194                 "msg": "Test message",
195                 "tags": ["tag 1", "tag 2", "tag n"],
196                 "conf-history": "DUT1 and DUT2 PAPI command history",
197                 "show-run": "Show Run",
198                 "status": "PASS" | "FAIL"
199             },
200             "ID": {
201                 # next test
202             }
203         }
204     }
205
206     .. note:: ID is the lowercase full path to the test.
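
    An illustrative ID: u"tests.vpp.perf.some_suite.some_test"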
207     """
208
209     REGEX_PLR_RATE = re.compile(
210         r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
211         r'PLRsearch upper bound::?\s(\d+.\d+)'
212     )
213     REGEX_NDRPDR_RATE = re.compile(
214         r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
215         r'NDR_UPPER:\s(\d+.\d+).*\n'
216         r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
217         r'PDR_UPPER:\s(\d+.\d+)'
218     )
219     REGEX_NDRPDR_GBPS = re.compile(
220         r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
221         r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
222         r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
223         r'PDR_UPPER:.*,\s(\d+.\d+)'
224     )
225     REGEX_PERF_MSG_INFO = re.compile(
226         r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
227         r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
228         r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
229         r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
230         r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
231     )
232     REGEX_CPS_MSG_INFO = re.compile(
233         r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
234         r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
235     )
236     REGEX_PPS_MSG_INFO = re.compile(
237         r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
238         r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
239     )
240     REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
241
242     REGEX_VSAP_MSG_INFO = re.compile(
243         r'Transfer Rate: (\d*.\d*).*\n'
244         r'Latency: (\d*.\d*).*\n'
245         r'Completed requests: (\d*).*\n'
246         r'Failed requests: (\d*).*\n'
247         r'Total data transferred: (\d*).*\n'
248         r'Connection [cr]ps rate:\s*(\d*.\d*)'
249     )
250
251     # Needed for CPS and PPS tests
252     REGEX_NDRPDR_LAT_BASE = re.compile(
253         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
254         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
255     )
256     REGEX_NDRPDR_LAT = re.compile(
257         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
258         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
259         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
260         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
261         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
262         r'Latency.*\[\'(.*)\', \'(.*)\'\]'
263     )
264
265     REGEX_VERSION_VPP = re.compile(
266         r"(return STDOUT Version:\s*|"
267         r"VPP Version:\s*|VPP version:\s*)(.*)"
268     )
269     REGEX_VERSION_DPDK = re.compile(
270         r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
271     )
272     REGEX_TCP = re.compile(
273         r'Total\s(rps|cps|throughput):\s(\d*).*$'
274     )
275     REGEX_MRR = re.compile(
276         r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
277         r'tx\s(\d*),\srx\s(\d*)'
278     )
279     REGEX_BMRR = re.compile(
280         r'.*trial results.*: \[(.*)\]'
281     )
282     REGEX_RECONF_LOSS = re.compile(
283         r'Packets lost due to reconfig: (\d*)'
284     )
285     REGEX_RECONF_TIME = re.compile(
286         r'Implied time lost: (\d*.[\de-]*)'
287     )
288     REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
289
290     REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
291
292     REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
293
294     REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
295
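    # Matches e.g. (illustrative): hostname="10.0.0.1",hook="vpp1"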
296     REGEX_SH_RUN_HOST = re.compile(
297         r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
298     )
299
300     def __init__(self, metadata, mapping, ignore, for_output):
301         """Initialisation.
302
303         :param metadata: Key-value pairs to be included in "metadata" part of
304             JSON structure.
305         :param mapping: Mapping of the old names of test cases to the new
306             (actual) ones.
307         :param ignore: List of TCs to be ignored.
308         :param for_output: Output to be generated from downloaded data.
309         :type metadata: dict
310         :type mapping: dict
311         :type ignore: list
312         :type for_output: str
313         """
314
315         # Type of message to parse out from the test messages
316         self._msg_type = None
317
318         # VPP version
319         self._version = None
320
321         # Timestamp
322         self._timestamp = None
323
324         # Testbed. The testbed is identified by the TG node IP address.
325         self._testbed = None
326
327         # Mapping of TCs long names
328         self._mapping = mapping
329
330         # Ignore list
331         self._ignore = ignore
332
333         self._for_output = for_output
334
335         # Number of PAPI History messages found:
336         # 0 - no message
337         # 1 - PAPI History of DUT1
338         # 2 - PAPI History of DUT2
339         self._conf_history_lookup_nr = 0
340
341         self._sh_run_counter = 0
342         self._telemetry_kw_counter = 0
343         self._telemetry_msg_counter = 0
344
345         # Test ID of the currently processed test: the lowercase full path
346         # to the test.
347         self._test_id = None
348
349         # The main data structure
350         self._data = {
351             u"metadata": OrderedDict(),
352             u"suites": OrderedDict(),
353             u"tests": OrderedDict()
354         }
355
356         # Save the provided metadata
357         for key, val in metadata.items():
358             self._data[u"metadata"][key] = val
359
360         # Dictionary defining the methods used to parse different types of
361         # messages
362         self.parse_msg = {
363             u"vpp-version": self._get_vpp_version,
364             u"dpdk-version": self._get_dpdk_version,
365             u"teardown-papi-history": self._get_papi_history,
366             u"test-show-runtime": self._get_show_run,
367             u"testbed": self._get_testbed,
368             u"test-telemetry": self._get_telemetry
369         }
370
371     @property
372     def data(self):
373         """Getter - Data parsed from the XML file.
374
375         :returns: Data parsed from the XML file.
376         :rtype: dict
377         """
378         return self._data
379
380     def _get_data_from_mrr_test_msg(self, msg):
381         """Get info from message of MRR performance tests.
382
383         :param msg: Message to be processed.
384         :type msg: str
385         :returns: Processed message or u"Test Failed." if a problem occurs.
386         :rtype: str
387         """
388
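        # E.g. (illustrative): a message containing u"[10000000.0, 10200000.0]"
        # is rendered as u"[10.00, 10.20]" (each value divided by 1e6).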
389         groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
390         if not groups or groups.lastindex != 1:
391             return u"Test Failed."
392
393         try:
394             data = groups.group(1).split(u", ")
395         except (AttributeError, IndexError, ValueError, KeyError):
396             return u"Test Failed."
397
398         out_str = u"["
399         try:
400             for item in data:
401                 out_str += f"{(float(item) / 1e6):.2f}, "
402             return out_str[:-2] + u"]"
403         except (AttributeError, IndexError, ValueError, KeyError):
404             return u"Test Failed."
405
406     def _get_data_from_cps_test_msg(self, msg):
407         """Get info from message of NDRPDR CPS tests.
408
409         :param msg: Message to be processed.
410         :type msg: str
411         :returns: Processed message or "Test Failed." if a problem occurs.
412         :rtype: str
413         """
414
415         groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
416         if not groups or groups.lastindex != 2:
417             return u"Test Failed."
418
419         try:
420             return (
421                 f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
422                 f"2. {(float(groups.group(2)) / 1e6):5.2f}"
423             )
424         except (AttributeError, IndexError, ValueError, KeyError):
425             return u"Test Failed."
426
427     def _get_data_from_pps_test_msg(self, msg):
428         """Get info from message of NDRPDR PPS tests.
429
430         :param msg: Message to be processed.
431         :type msg: str
432         :returns: Processed message or "Test Failed." if a problem occurs.
433         :rtype: str
434         """
435
436         groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
437         if not groups or groups.lastindex != 4:
438             return u"Test Failed."
439
440         try:
441             return (
442                 f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
443                 f"{float(groups.group(2)):5.2f}\n"
444                 f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
445                 f"{float(groups.group(4)):5.2f}"
446             )
447         except (AttributeError, IndexError, ValueError, KeyError):
448             return u"Test Failed."
449
450     def _get_data_from_perf_test_msg(self, msg):
451         """Get info from message of NDRPDR performance tests.
452
453         :param msg: Message to be processed.
454         :type msg: str
455         :returns: Processed message or "Test Failed." if a problem occurs.
456         :rtype: str
457         """
458
459         groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
460         if not groups or groups.lastindex != 10:
461             return u"Test Failed."
462
463         try:
464             data = {
465                 u"ndr_low": float(groups.group(1)),
466                 u"ndr_low_b": float(groups.group(2)),
467                 u"pdr_low": float(groups.group(3)),
468                 u"pdr_low_b": float(groups.group(4)),
469                 u"pdr_lat_90_1": groups.group(5),
470                 u"pdr_lat_90_2": groups.group(6),
471                 u"pdr_lat_50_1": groups.group(7),
472                 u"pdr_lat_50_2": groups.group(8),
473                 u"pdr_lat_10_1": groups.group(9),
474                 u"pdr_lat_10_2": groups.group(10),
475             }
476         except (AttributeError, IndexError, ValueError, KeyError):
477             return u"Test Failed."
478
479         def _process_lat(in_str_1, in_str_2):
480             """Extract P50, P90 and P99 latencies or min, avg, max values from
481             latency string.
482
483             :param in_str_1: Latency string for one direction produced by
484                 Robot Framework.
485             :param in_str_2: Latency string for the second direction
486                 produced by Robot Framework.
487             :type in_str_1: str
488             :type in_str_2: str
489             :returns: Processed latency string or None if a problem occurs.
490             :rtype: tuple
491             """
492             in_list_1 = in_str_1.split('/', 3)
493             in_list_2 = in_str_2.split('/', 3)
494
495             if len(in_list_1) != 4 or len(in_list_2) != 4:
496                 return None
497
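            # Pad the base64-encoded hdrh strings to a multiple of four
            # characters so that hdrh decoding does not fail on length.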
498             in_list_1[3] += u"=" * ((4 - len(in_list_1[3]) % 4) % 4)
499             try:
500                 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
501             except hdrh.codec.HdrLengthException:
502                 hdr_lat_1 = None
503
504             in_list_2[3] += u"=" * ((4 - len(in_list_2[3]) % 4) % 4)
505             try:
506                 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
507             except hdrh.codec.HdrLengthException:
508                 hdr_lat_2 = None
509
510             if hdr_lat_1 and hdr_lat_2:
511                 hdr_lat = (
512                     hdr_lat_1.get_value_at_percentile(50.0),
513                     hdr_lat_1.get_value_at_percentile(90.0),
514                     hdr_lat_1.get_value_at_percentile(99.0),
515                     hdr_lat_2.get_value_at_percentile(50.0),
516                     hdr_lat_2.get_value_at_percentile(90.0),
517                     hdr_lat_2.get_value_at_percentile(99.0)
518                 )
519                 if all(hdr_lat):
520                     return hdr_lat
521
522             hdr_lat = (
523                 in_list_1[0], in_list_1[1], in_list_1[2],
524                 in_list_2[0], in_list_2[1], in_list_2[2]
525             )
526             for item in hdr_lat:
527                 if item in (u"-1", u"4294967295", u"0"):
528                     return None
529             return hdr_lat
530
531         try:
532             out_msg = (
533                 f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
534                 f"{data[u'ndr_low_b']:5.2f}"
535                 f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
536                 f"{data[u'pdr_low_b']:5.2f}"
537             )
538             latency = (
539                 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
540                 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
541                 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
542             )
543             if all(latency):
544                 max_len = len(str(max((max(item) for item in latency))))
545                 max_len = 4 if max_len < 4 else max_len
546
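                # Rows 3-5 list the six latency values (three per direction)
                # for 10%, 50% and 90% PDR, width-aligned to max_len.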
547                 for idx, lat in enumerate(latency):
548                     if not idx:
549                         out_msg += u"\n"
550                     out_msg += (
551                         f"\n{idx + 3}. "
552                         f"{lat[0]:{max_len}d} "
553                         f"{lat[1]:{max_len}d} "
554                         f"{lat[2]:{max_len}d}      "
555                         f"{lat[3]:{max_len}d} "
556                         f"{lat[4]:{max_len}d} "
557                         f"{lat[5]:{max_len}d} "
558                     )
559
560             return out_msg
561
562         except (AttributeError, IndexError, ValueError, KeyError):
563             return u"Test Failed."
564
565     def _get_testbed(self, msg):
566         """Called when extraction of testbed IP is required.
567         The testbed is identified by the TG node IP address.
568
569         :param msg: Message to process.
570         :type msg: Message
571         :returns: Nothing.
572         """
573
574         if msg.message.count(u"Setup of TG node") or \
575                 msg.message.count(u"Setup of node TG host"):
576             reg_tg_ip = re.compile(
577                 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
578             try:
579                 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
580             except (KeyError, ValueError, IndexError, AttributeError):
581                 pass
582             finally:
583                 self._data[u"metadata"][u"testbed"] = self._testbed
584                 self._msg_type = None
585
586     def _get_vpp_version(self, msg):
587         """Called when extraction of VPP version is required.
588
589         :param msg: Message to process.
590         :type msg: Message
591         :returns: Nothing.
592         """
593
594         if msg.message.count(u"return STDOUT Version:") or \
595                 msg.message.count(u"VPP Version:") or \
596                 msg.message.count(u"VPP version:"):
597             self._version = str(
598                 re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
599             )
600             self._data[u"metadata"][u"version"] = self._version
601             self._msg_type = None
602
603     def _get_dpdk_version(self, msg):
604         """Called when extraction of DPDK version is required.
605
606         :param msg: Message to process.
607         :type msg: Message
608         :returns: Nothing.
609         """
610
611         if msg.message.count(u"DPDK Version:"):
612             try:
613                 self._version = str(re.search(
614                     self.REGEX_VERSION_DPDK, msg.message).group(2))
615                 self._data[u"metadata"][u"version"] = self._version
616             except IndexError:
617                 pass
618             finally:
619                 self._msg_type = None
620
621     def _get_papi_history(self, msg):
622         """Called when extraction of PAPI command history is required.
623
624         :param msg: Message to process.
625         :type msg: Message
626         :returns: Nothing.
627         """
628         if msg.message.count(u"PAPI command history:"):
629             self._conf_history_lookup_nr += 1
630             if self._conf_history_lookup_nr == 1:
631                 self._data[u"tests"][self._test_id][u"conf-history"] = str()
632             else:
633                 self._msg_type = None
634             text = re.sub(
635                 r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
636                 u"",
637                 msg.message,
638                 count=1
639             ).replace(u'"', u"'")
640             self._data[u"tests"][self._test_id][u"conf-history"] += (
641                 f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
642             )
643
644     def _get_show_run(self, msg):
645         """Called when extraction of VPP operational data (output of CLI command
646         Show Runtime) is required.
647
648         :param msg: Message to process.
649         :type msg: Message
650         :returns: Nothing.
651         """
652
653         if not msg.message.count(u"stats runtime"):
654             return
655
656         # Temporary solution
657         if self._sh_run_counter > 1:
658             return
659
660         if u"show-run" not in self._data[u"tests"][self._test_id].keys():
661             self._data[u"tests"][self._test_id][u"show-run"] = dict()
662
663         groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
664         if not groups:
665             return
666         try:
667             host = groups.group(1)
668         except (AttributeError, IndexError):
669             host = u""
670         try:
671             sock = groups.group(2)
672         except (AttributeError, IndexError):
673             sock = u""
674
675         dut = u"dut{nr}".format(
676             nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
677
678         self._data[u'tests'][self._test_id][u'show-run'][dut] = \
679             copy.copy(
680                 {
681                     u"host": host,
682                     u"socket": sock,
683                     u"runtime": str(msg.message).replace(u' ', u'').
684                                 replace(u'\n', u'').replace(u"'", u'"').
685                                 replace(u'b"', u'"').replace(u'u"', u'"').
686                                 split(u":", 1)[1]
687                 }
688             )
689
690     def _get_telemetry(self, msg):
691         """Called when extraction of VPP telemetry data is required.
692
693         :param msg: Message to process.
694         :type msg: Message
695         :returns: Nothing.
696         """
697
698         if self._telemetry_kw_counter > 1:
699             return
700         if not msg.message.count(u"# TYPE vpp_runtime_calls"):
701             return
702
703         if u"telemetry-show-run" not in \
704                 self._data[u"tests"][self._test_id].keys():
705             self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()
706
707         self._telemetry_msg_counter += 1
708         groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
709         if not groups:
710             return
711         try:
712             host = groups.group(1)
713         except (AttributeError, IndexError):
714             host = u""
715         try:
716             sock = groups.group(2)
717         except (AttributeError, IndexError):
718             sock = u""
719         runtime = {
720             u"source_type": u"node",
721             u"source_id": host,
722             u"msg_type": u"metric",
723             u"log_level": u"INFO",
724             u"timestamp": msg.timestamp,
725             u"msg": u"show_runtime",
726             u"host": host,
727             u"socket": sock,
728             u"data": list()
729         }
730         for line in msg.message.splitlines():
731             if not line.startswith(u"vpp_runtime_"):
732                 continue
733             try:
734                 params, value, timestamp = line.rsplit(u" ", maxsplit=2)
735                 cut = params.index(u"{")
736                 name = params[:cut].split(u"_", maxsplit=2)[-1]
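                # Rewrite the Prometheus-style label block {key="value",...}
                # into dict(key="value", ...) and evaluate it to get a dict.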
737                 labels = eval(
738                     u"dict" + params[cut:].replace('{', '(').replace('}', ')')
739                 )
740                 labels[u"graph_node"] = labels.pop(u"name")
741                 runtime[u"data"].append(
742                     {
743                         u"name": name,
744                         u"value": value,
745                         u"timestamp": timestamp,
746                         u"labels": labels
747                     }
748                 )
749             except (TypeError, ValueError, IndexError):
750                 continue
751         self._data[u'tests'][self._test_id][u'telemetry-show-run']\
752             [f"dut{self._telemetry_msg_counter}"] = copy.copy(
753                 {
754                     u"host": host,
755                     u"socket": sock,
756                     u"runtime": runtime
757                 }
758             )
759
760     def _get_ndrpdr_throughput(self, msg):
761         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
762         message.
763
764         :param msg: The test message to be parsed.
765         :type msg: str
766         :returns: Parsed data as a dict and the status (PASS/FAIL).
767         :rtype: tuple(dict, str)
768         """
769
770         throughput = {
771             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
772             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
773         }
774         status = u"FAIL"
775         groups = re.search(self.REGEX_NDRPDR_RATE, msg)
776
777         if groups is not None:
778             try:
779                 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
780                 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
781                 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
782                 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
783                 status = u"PASS"
784             except (IndexError, ValueError):
785                 pass
786
787         return throughput, status
788
789     def _get_ndrpdr_throughput_gbps(self, msg):
790         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
791         test message.
792
793         :param msg: The test message to be parsed.
794         :type msg: str
795         :returns: Parsed data as a dict and the status (PASS/FAIL).
796         :rtype: tuple(dict, str)
797         """
798
799         gbps = {
800             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
801             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
802         }
803         status = u"FAIL"
804         groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
805
806         if groups is not None:
807             try:
808                 gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
809                 gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
810                 gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
811                 gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
812                 status = u"PASS"
813             except (IndexError, ValueError):
814                 pass
815
816         return gbps, status
817
818     def _get_plr_throughput(self, msg):
819         """Get PLRsearch lower bound and PLRsearch upper bound from the test
820         message.
821
822         :param msg: The test message to be parsed.
823         :type msg: str
824         :returns: Parsed data as a dict and the status (PASS/FAIL).
825         :rtype: tuple(dict, str)
826         """
827
828         throughput = {
829             u"LOWER": -1.0,
830             u"UPPER": -1.0
831         }
832         status = u"FAIL"
833         groups = re.search(self.REGEX_PLR_RATE, msg)
834
835         if groups is not None:
836             try:
837                 throughput[u"LOWER"] = float(groups.group(1))
838                 throughput[u"UPPER"] = float(groups.group(2))
839                 status = u"PASS"
840             except (IndexError, ValueError):
841                 pass
842
843         return throughput, status
844
845     def _get_ndrpdr_latency(self, msg):
846         """Get LATENCY from the test message.
847
848         :param msg: The test message to be parsed.
849         :type msg: str
850         :returns: Parsed data as a dict and the status (PASS/FAIL).
851         :rtype: tuple(dict, str)
852         """
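        # Each captured latency group is a string in min/avg/max/hdrh format,
        # e.g. (illustrative) u"10/15/25/" when the hdrh part is empty.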
853         latency_default = {
854             u"min": -1.0,
855             u"avg": -1.0,
856             u"max": -1.0,
857             u"hdrh": u""
858         }
859         latency = {
860             u"NDR": {
861                 u"direction1": copy.copy(latency_default),
862                 u"direction2": copy.copy(latency_default)
863             },
864             u"PDR": {
865                 u"direction1": copy.copy(latency_default),
866                 u"direction2": copy.copy(latency_default)
867             },
868             u"LAT0": {
869                 u"direction1": copy.copy(latency_default),
870                 u"direction2": copy.copy(latency_default)
871             },
872             u"PDR10": {
873                 u"direction1": copy.copy(latency_default),
874                 u"direction2": copy.copy(latency_default)
875             },
876             u"PDR50": {
877                 u"direction1": copy.copy(latency_default),
878                 u"direction2": copy.copy(latency_default)
879             },
880             u"PDR90": {
881                 u"direction1": copy.copy(latency_default),
882                 u"direction2": copy.copy(latency_default)
883             },
884         }
885
886         groups = re.search(self.REGEX_NDRPDR_LAT, msg)
887         if groups is None:
888             groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
889         if groups is None:
890             return latency, u"FAIL"
891
892         def process_latency(in_str):
893             """Return object with parsed latency values.
894
895             TODO: Define class for the return type.
896
897             :param in_str: Input string, min/avg/max/hdrh format.
898             :type in_str: str
899             :returns: Dict with min, avg, max (floats) and hdrh (str).
900             :rtype: dict
901             :raises IndexError: If in_str does not have enough substrings.
902             :raises ValueError: If a substring does not convert to float.
903             """
904             in_list = in_str.split('/', 3)
905
906             rval = {
907                 u"min": float(in_list[0]),
908                 u"avg": float(in_list[1]),
909                 u"max": float(in_list[2]),
910                 u"hdrh": u""
911             }
912
913             if len(in_list) == 4:
914                 rval[u"hdrh"] = str(in_list[3])
915
916             return rval
917
918         try:
919             latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
920             latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
921             latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
922             latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
923             if groups.lastindex == 4:
924                 return latency, u"PASS"
925         except (IndexError, ValueError):
926             pass
927
928         try:
929             latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
930             latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
931             latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
932             latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
933             latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
934             latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
935             latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
936             latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
937             if groups.lastindex == 12:
938                 return latency, u"PASS"
939         except (IndexError, ValueError):
940             pass
941
942         return latency, u"FAIL"
943
944     @staticmethod
945     def _get_hoststack_data(msg, tags):
946         """Get data from the hoststack test message.
947
948         :param msg: The test message to be parsed.
949         :param tags: Test tags.
950         :type msg: str
951         :type tags: list
952         :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
953         :rtype: tuple(dict, str)
954         """
955         result = dict()
956         status = u"FAIL"
957
958         msg = msg.replace(u"'", u'"').replace(u" ", u"")
959         if u"LDPRELOAD" in tags:
960             try:
961                 result = loads(msg)
962                 status = u"PASS"
963             except JSONDecodeError:
964                 pass
965         elif u"VPPECHO" in tags:
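            # The VPPECHO message holds two concatenated JSON objects (client
            # and server); insert a space so they can be split and parsed.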
966             try:
967                 msg_lst = msg.replace(u"}{", u"} {").split(u" ")
968                 result = dict(
969                     client=loads(msg_lst[0]),
970                     server=loads(msg_lst[1])
971                 )
972                 status = u"PASS"
973             except (JSONDecodeError, IndexError):
974                 pass
975
976         return result, status
977
978     def _get_vsap_data(self, msg, tags):
979         """Get data from the vsap test message.
980
981         :param msg: The test message to be parsed.
982         :param tags: Test tags.
983         :type msg: str
984         :type tags: list
985         :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
986         :rtype: tuple(dict, str)
987         """
988         result = dict()
989         status = u"FAIL"
990
991         groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
992         if groups is not None:
993             try:
994                 result[u"transfer-rate"] = float(groups.group(1)) * 1e3
995                 result[u"latency"] = float(groups.group(2))
996                 result[u"completed-requests"] = int(groups.group(3))
997                 result[u"failed-requests"] = int(groups.group(4))
998                 result[u"bytes-transferred"] = int(groups.group(5))
999                 if u"TCP_CPS" in tags:
1000                     result[u"cps"] = float(groups.group(6))
1001                 elif u"TCP_RPS" in tags:
1002                     result[u"rps"] = float(groups.group(6))
1003                 else:
1004                     return result, status
1005                 status = u"PASS"
1006             except (IndexError, ValueError):
1007                 pass
1008
1009         return result, status
1010
1011     def visit_suite(self, suite):
1012         """Implements traversing through the suite and its direct children.
1013
1014         :param suite: Suite to process.
1015         :type suite: Suite
1016         :returns: Nothing.
1017         """
1018         if self.start_suite(suite) is not False:
1019             suite.suites.visit(self)
1020             suite.tests.visit(self)
1021             self.end_suite(suite)
1022
1023     def start_suite(self, suite):
1024         """Called when suite starts.
1025
1026         :param suite: Suite to process.
1027         :type suite: Suite
1028         :returns: Nothing.
1029         """
1030
1031         try:
1032             parent_name = suite.parent.name
1033         except AttributeError:
1034             return
1035
1036         self._data[u"suites"][suite.longname.lower().
1037                               replace(u'"', u"'").
1038                               replace(u" ", u"_")] = {
1039                                   u"name": suite.name.lower(),
1040                                   u"doc": suite.doc,
1041                                   u"parent": parent_name,
1042                                   u"level": len(suite.longname.split(u"."))
1043                               }
1044
1045         suite.keywords.visit(self)
1046
1047     def end_suite(self, suite):
1048         """Called when suite ends.
1049
1050         :param suite: Suite to process.
1051         :type suite: Suite
1052         :returns: Nothing.
1053         """
1054
1055     def visit_test(self, test):
1056         """Implements traversing through the test.
1057
1058         :param test: Test to process.
1059         :type test: Test
1060         :returns: Nothing.
1061         """
1062         if self.start_test(test) is not False:
1063             test.keywords.visit(self)
1064             self.end_test(test)
1065
1066     def start_test(self, test):
1067         """Called when test starts.
1068
1069         :param test: Test to process.
1070         :type test: Test
1071         :returns: Nothing.
1072         """
1073
1074         self._sh_run_counter = 0
1075         self._telemetry_kw_counter = 0
1076         self._telemetry_msg_counter = 0
1077
1078         longname_orig = test.longname.lower()
1079
1080         # Check the ignore list
1081         if longname_orig in self._ignore:
1082             return
1083
1084         tags = [str(tag) for tag in test.tags]
1085         test_result = dict()
1086
1087         # Change the TC long name and name if defined in the mapping table
1088         longname = self._mapping.get(longname_orig, None)
1089         if longname is not None:
1090             name = longname.split(u'.')[-1]
1091             logging.debug(
1092                 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1093                 f"{name}"
1094             )
1095         else:
1096             longname = longname_orig
1097             name = test.name.lower()
1098
1099         # Remove TC number from the TC long name (backward compatibility):
1100         self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
1101         # Remove TC number from the TC name (not needed):
1102         test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, u"", name)
1103
1104         test_result[u"parent"] = test.parent.name.lower()
1105         test_result[u"tags"] = tags
1106         test_result[u"doc"] = test.doc
1107         test_result[u"type"] = u""
1108         test_result[u"status"] = test.status
1109         test_result[u"starttime"] = test.starttime
1110         test_result[u"endtime"] = test.endtime
1111
1112         if test.status == u"PASS":
1113             if u"NDRPDR" in tags:
1114                 if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
1115                     test_result[u"msg"] = self._get_data_from_pps_test_msg(
1116                         test.message)
1117                 elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1118                     test_result[u"msg"] = self._get_data_from_cps_test_msg(
1119                         test.message)
1120                 else:
1121                     test_result[u"msg"] = self._get_data_from_perf_test_msg(
1122                         test.message)
1123             elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1124                 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1125                     test.message)
1126             else:
1127                 test_result[u"msg"] = test.message
1128         else:
1129             test_result[u"msg"] = test.message
1130
1131         if u"PERFTEST" in tags and u"TREX" not in tags:
1132             # Replace info about cores (e.g. -1c-) with the info about threads
1133             # and cores (e.g. -1t1c-) in the long test case names and in the
1134             # test case names if necessary.
1135             tag_count = 0
1136             tag_tc = str()
1137             for tag in test_result[u"tags"]:
1138                 groups = re.search(self.REGEX_TC_TAG, tag)
1139                 if groups:
1140                     tag_count += 1
1141                     tag_tc = tag
1142
1143             if tag_count == 1:
1144                 self._test_id = re.sub(
1145                     self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1146                     self._test_id, count=1
1147                 )
1148                 test_result[u"name"] = re.sub(
1149                     self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1150                     test_result["name"], count=1
1151                 )
1152             else:
1153                 test_result[u"status"] = u"FAIL"
1154                 self._data[u"tests"][self._test_id] = test_result
1155                 logging.debug(
1156                     f"The test {self._test_id} has no multi-threading tag "
1157                     f"or more than one.\n"
1158                     f"Tags: {test_result[u'tags']}"
1159                 )
1160                 return
1161
1162         if u"DEVICETEST" in tags:
1163             test_result[u"type"] = u"DEVICETEST"
1164         elif u"NDRPDR" in tags:
1165             if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1166                 test_result[u"type"] = u"CPS"
1167             else:
1168                 test_result[u"type"] = u"NDRPDR"
1169             if test.status == u"PASS":
1170                 test_result[u"throughput"], test_result[u"status"] = \
1171                     self._get_ndrpdr_throughput(test.message)
1172                 test_result[u"gbps"], test_result[u"status"] = \
1173                     self._get_ndrpdr_throughput_gbps(test.message)
1174                 test_result[u"latency"], test_result[u"status"] = \
1175                     self._get_ndrpdr_latency(test.message)
1176         elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1177             if u"MRR" in tags:
1178                 test_result[u"type"] = u"MRR"
1179             else:
1180                 test_result[u"type"] = u"BMRR"
1181             if test.status == u"PASS":
1182                 test_result[u"result"] = dict()
1183                 groups = re.search(self.REGEX_BMRR, test.message)
1184                 if groups is not None:
1185                     items_str = groups.group(1)
1186                     items_float = [
1187                         float(item.strip().replace(u"'", u""))
1188                         for item in items_str.split(",")
1189                     ]
1190                     # Use whole list in CSIT-1180.
1191                     stats = jumpavg.AvgStdevStats.for_runs(items_float)
1192                     test_result[u"result"][u"samples"] = items_float
1193                     test_result[u"result"][u"receive-rate"] = stats.avg
1194                     test_result[u"result"][u"receive-stdev"] = stats.stdev
1195                 else:
1196                     groups = re.search(self.REGEX_MRR, test.message)
1197                     test_result[u"result"][u"receive-rate"] = \
1198                         float(groups.group(3)) / float(groups.group(1))
1199         elif u"SOAK" in tags:
1200             test_result[u"type"] = u"SOAK"
1201             if test.status == u"PASS":
1202                 test_result[u"throughput"], test_result[u"status"] = \
1203                     self._get_plr_throughput(test.message)
1204         elif u"HOSTSTACK" in tags:
1205             test_result[u"type"] = u"HOSTSTACK"
1206             if test.status == u"PASS":
1207                 test_result[u"result"], test_result[u"status"] = \
1208                     self._get_hoststack_data(test.message, tags)
1209         elif u"LDP_NGINX" in tags:
1210             test_result[u"type"] = u"LDP_NGINX"
1211             test_result[u"result"], test_result[u"status"] = \
1212                 self._get_vsap_data(test.message, tags)
1213         # elif u"TCP" in tags:  # This might be not used
1214         #     test_result[u"type"] = u"TCP"
1215         #     if test.status == u"PASS":
1216         #         groups = re.search(self.REGEX_TCP, test.message)
1217         #         test_result[u"result"] = int(groups.group(2))
1218         elif u"RECONF" in tags:
1219             test_result[u"type"] = u"RECONF"
1220             if test.status == u"PASS":
1221                 test_result[u"result"] = None
1222                 try:
1223                     grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1224                     grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1225                     test_result[u"result"] = {
1226                         u"loss": int(grps_loss.group(1)),
1227                         u"time": float(grps_time.group(1))
1228                     }
1229                 except (AttributeError, IndexError, ValueError, TypeError):
1230                     test_result[u"status"] = u"FAIL"
1231         else:
1232             test_result[u"status"] = u"FAIL"
1233
1234         self._data[u"tests"][self._test_id] = test_result
1235
1236     def end_test(self, test):
1237         """Called when test ends.
1238
1239         :param test: Test to process.
1240         :type test: Test
1241         :returns: Nothing.
1242         """
1243
1244     def visit_keyword(self, keyword):
1245         """Implements traversing through the keyword and its child keywords.
1246
1247         :param keyword: Keyword to process.
1248         :type keyword: Keyword
1249         :returns: Nothing.
1250         """
1251         if self.start_keyword(keyword) is not False:
1252             self.end_keyword(keyword)
1253
1254     def start_keyword(self, keyword):
1255         """Called when keyword starts. Dispatches by the keyword type.
1256
1257         :param keyword: Keyword to process.
1258         :type keyword: Keyword
1259         :returns: Nothing.
1260         """
1261         try:
1262             if keyword.type == u"setup":
1263                 self.visit_setup_kw(keyword)
1264             elif keyword.type == u"teardown":
1265                 self.visit_teardown_kw(keyword)
1266             else:
1267                 self.visit_test_kw(keyword)
1268         except AttributeError:
1269             pass
1270
1271     def end_keyword(self, keyword):
1272         """Called when keyword ends. Default implementation does nothing.
1273
1274         :param keyword: Keyword to process.
1275         :type keyword: Keyword
1276         :returns: Nothing.
1277         """
1278
1279     def visit_test_kw(self, test_kw):
1280         """Implements traversing through the test keyword and its child
1281         keywords.
1282
1283         :param test_kw: Keyword to process.
1284         :type test_kw: Keyword
1285         :returns: Nothing.
1286         """
1287         for keyword in test_kw.keywords:
1288             if self.start_test_kw(keyword) is not False:
1289                 self.visit_test_kw(keyword)
1290                 self.end_test_kw(keyword)
1291
1292     def start_test_kw(self, test_kw):
1293         """Called when test keyword starts. Sets the message type to be
1294         parsed from the keyword's messages, if any.
1295
1296         :param test_kw: Keyword to process.
1297         :type test_kw: Keyword
1298         :returns: Nothing.
1299         """
1300         if self._for_output == u"trending":
1301             return
1302
1303         if test_kw.name.count(u"Run Telemetry On All Duts"):
1304             self._msg_type = u"test-telemetry"
1305             self._telemetry_kw_counter += 1
1306         elif test_kw.name.count(u"Show Runtime On All Duts"):
1307             self._msg_type = u"test-show-runtime"
1308             self._sh_run_counter += 1
1309         else:
1310             return
1311         test_kw.messages.visit(self)
1312
1313     def end_test_kw(self, test_kw):
1314         """Called when keyword ends. Default implementation does nothing.
1315
1316         :param test_kw: Keyword to process.
1317         :type test_kw: Keyword
1318         :returns: Nothing.
1319         """
1320
1321     def visit_setup_kw(self, setup_kw):
1322         """Implements traversing through the setup keyword and its child
1323         keywords.
1324
1325         :param setup_kw: Keyword to process.
1326         :type setup_kw: Keyword
1327         :returns: Nothing.
1328         """
1329         for keyword in setup_kw.keywords:
1330             if self.start_setup_kw(keyword) is not False:
1331                 self.visit_setup_kw(keyword)
1332                 self.end_setup_kw(keyword)
1333
1334     def start_setup_kw(self, setup_kw):
1335         """Called when setup keyword starts. Sets the message type to be
1336         parsed from the keyword's messages, if any.
1337
1338         :param setup_kw: Keyword to process.
1339         :type setup_kw: Keyword
1340         :returns: Nothing.
1341         """
1342         if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1343                 and not self._version:
1344             self._msg_type = u"vpp-version"
1345         elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
1346                 not self._version:
1347             self._msg_type = u"dpdk-version"
1348         elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1349             self._msg_type = u"testbed"
1350         else:
1351             return
1352         setup_kw.messages.visit(self)
1353
1354     def end_setup_kw(self, setup_kw):
1355         """Called when keyword ends. Default implementation does nothing.
1356
1357         :param setup_kw: Keyword to process.
1358         :type setup_kw: Keyword
1359         :returns: Nothing.
1360         """
1361
1362     def visit_teardown_kw(self, teardown_kw):
1363         """Implements traversing through the teardown keyword and its child
1364         keywords.
1365
1366         :param teardown_kw: Keyword to process.
1367         :type teardown_kw: Keyword
1368         :returns: Nothing.
1369         """
1370         for keyword in teardown_kw.keywords:
1371             if self.start_teardown_kw(keyword) is not False:
1372                 self.visit_teardown_kw(keyword)
1373                 self.end_teardown_kw(keyword)
1374
1375     def start_teardown_kw(self, teardown_kw):
1376         """Called when teardown keyword starts.
1377
1378         :param teardown_kw: Keyword to process.
1379         :type teardown_kw: Keyword
1380         :returns: Nothing.
1381         """
1382         if teardown_kw.name.count(u"Show Papi History On All Duts"):
1383             self._conf_history_lookup_nr = 0
1384             self._msg_type = u"teardown-papi-history"
1385             teardown_kw.messages.visit(self)
1386
1387     def end_teardown_kw(self, teardown_kw):
1388         """Called when keyword ends. Default implementation does nothing.
1389
1390         :param teardown_kw: Keyword to process.
1391         :type teardown_kw: Keyword
1392         :returns: Nothing.
1393         """
1394
1395     def visit_message(self, msg):
1396         """Implements visiting the message.
1397
1398         :param msg: Message to process.
1399         :type msg: Message
1400         :returns: Nothing.
1401         """
1402         if self.start_message(msg) is not False:
1403             self.end_message(msg)
1404
1405     def start_message(self, msg):
1406         """Called when message starts. Parses the message according to the
1407         expected message type, if one is set.
1408
1409         :param msg: Message to process.
1410         :type msg: Message
1411         :returns: Nothing.
1412         """
1413         if self._msg_type:
1414             self.parse_msg[self._msg_type](msg)
1415
1416     def end_message(self, msg):
1417         """Called when message ends. Default implementation does nothing.
1418
1419         :param msg: Message to process.
1420         :type msg: Message
1421         :returns: Nothing.
1422         """
1423
1424
1425 class InputData:
1426     """Input data
1427
1428     The data is extracted from output.xml files generated by Jenkins jobs and
1429     stored in pandas' DataFrames.
1430
1431     The data structure:
1432     - job name
1433       - build number
1434         - metadata
1435           (as described in ExecutionChecker documentation)
1436         - suites
1437           (as described in ExecutionChecker documentation)
1438         - tests
1439           (as described in ExecutionChecker documentation)
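
    Example of accessing the parsed data (an illustrative sketch; the job
    name and build number are placeholders)::

        meta = input_data.metadata(u"job-name", u"build-nr")
        build_tests = input_data.tests(u"job-name", u"build-nr")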
1440     """
1441
1442     def __init__(self, spec, for_output):
1443         """Initialization.
1444
1445         :param spec: Specification.
1446         :param for_output: Output to be generated from downloaded data.
1447         :type spec: Specification
1448         :type for_output: str
1449         """
1450
1451         # Specification:
1452         self._cfg = spec
1453
1454         self._for_output = for_output
1455
1456         # Data store:
1457         self._input_data = pd.Series()
1458
1459     @property
1460     def data(self):
1461         """Getter - Input data.
1462
1463         :returns: Input data
1464         :rtype: pandas.Series
1465         """
1466         return self._input_data
1467
1468     def metadata(self, job, build):
1469         """Getter - metadata
1470
1471         :param job: Job which metadata we want.
1472         :param build: Build which metadata we want.
1473         :type job: str
1474         :type build: str
1475         :returns: Metadata
1476         :rtype: pandas.Series
1477         """
1478         return self.data[job][build][u"metadata"]
1479
1480     def suites(self, job, build):
1481         """Getter - suites
1482
1483         :param job: Job which suites we want.
1484         :param build: Build which suites we want.
1485         :type job: str
1486         :type build: str
1487         :returns: Suites.
1488         :rtype: pandas.Series
1489         """
1490         return self.data[job][str(build)][u"suites"]
1491
1492     def tests(self, job, build):
1493         """Getter - tests
1494
1495         :param job: Job which tests we want.
1496         :param build: Build which tests we want.
1497         :type job: str
1498         :type build: str
1499         :returns: Tests.
1500         :rtype: pandas.Series
1501         """
1502         return self.data[job][str(build)][u"tests"]
1503
1504     def _parse_tests(self, job, build):
1505         """Process data from robot output.xml file and return JSON structured
1506         data.
1507
1508         :param job: Name of the job whose build output data will be processed.
1509         :param build: The build whose output data will be processed.
1510         :type job: str
1511         :type build: dict
1512         :returns: JSON data structure.
1513         :rtype: dict
1514         """
1515
1516         metadata = {
1517             u"job": job,
1518             u"build": build
1519         }
1520
1521         with open(build[u"file-name"], u'r') as data_file:
1522             try:
1523                 result = ExecutionResult(data_file)
1524             except errors.DataError as err:
1525                 logging.error(
1526                     f"Error occurred while parsing output.xml: {repr(err)}"
1527                 )
1528                 return None
1529         checker = ExecutionChecker(
1530             metadata, self._cfg.mapping, self._cfg.ignore, self._for_output
1531         )
1532         result.visit(checker)
1533
1534         checker.data[u"metadata"][u"tests_total"] = \
1535             result.statistics.total.all.total
1536         checker.data[u"metadata"][u"tests_passed"] = \
1537             result.statistics.total.all.passed
1538         checker.data[u"metadata"][u"tests_failed"] = \
1539             result.statistics.total.all.failed
1540         checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
1541         checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]
1542
1543         return checker.data
1544
1545     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1546         """Download and parse the input data file.
1547
1548         :param job: Name of the Jenkins job which generated the processed
1549             input file.
1550         :param build: Information about the Jenkins build which generated the
1551             processed input file.
1552         :param repeat: Repeat the download the specified number of times if
1553             it is not successful.
1554         :param pid: PID of the process executing this method.
1555         :type job: str
1556         :type build: dict
1557         :type repeat: int
1558         :type pid: int
1559         """
1560
1561         logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1562
1563         state = u"failed"
1564         success = False
1565         data = None
1566         do_repeat = repeat
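        # Try the download up to ``repeat`` times, stopping early on success.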
1567         while do_repeat:
1568             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1569             if success:
1570                 break
1571             do_repeat -= 1
1572         if not success:
1573             logging.error(
1574                 f"The input data file from the job {job}, build "
1575                 f"{build[u'build']} could not be downloaded, or it is "
1576                 f"damaged. Skipped."
1577             )
1578         if success:
1579             logging.info(f"  Processing data from build {build[u'build']}")
1580             data = self._parse_tests(job, build)
1581             if data is None:
1582                 logging.error(
1583                     f"Input data file from the job {job}, build "
1584                     f"{build[u'build']} is damaged. Skipped."
1585                 )
1586             else:
1587                 state = u"processed"
1588
1589             try:
1590                 remove(build[u"file-name"])
1591             except OSError as err:
1592                 logging.error(
1593                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1594                 )
1595
1596         # If the time period is defined in the specification file, drop all
1597         # builds which are outside of it.
1598         is_last = False
1599         timeperiod = self._cfg.environment.get(u"time-period", None)
1600         if timeperiod and data:
1601             now = dt.utcnow()
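            # The time period is interpreted as a number of days (the first
            # positional argument of timedelta).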
1602             timeperiod = timedelta(int(timeperiod))
1603             metadata = data.get(u"metadata", None)
1604             if metadata:
1605                 generated = metadata.get(u"generated", None)
1606                 if generated:
1607                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1608                     if (now - generated) > timeperiod:
1609                         # Remove the data and the file:
1610                         state = u"removed"
1611                         data = None
1612                         is_last = True
1613                         logging.info(
1614                             f"  The build {job}/{build[u'build']} is "
1615                             f"outdated, will be removed."
1616                         )
1617         return {
1618             u"data": data,
1619             u"state": state,
1620             u"job": job,
1621             u"build": build,
1622             u"last": is_last
1623         }
1624
1625     def download_and_parse_data(self, repeat=1):
1626         """Download the input data files, parse input data from input files and
1627         store in pandas' Series.
1628
1629         :param repeat: Repeat the download the specified number of times if
1630             it is not successful.
1631         :type repeat: int
1632         """
1633
1634         logging.info(u"Downloading and parsing input files ...")
1635
1636         for job, builds in self._cfg.input.items():
1637             for build in builds:
1638
1639                 result = self._download_and_parse_build(job, build, repeat)
1640                 if result[u"last"]:
1641                     break
1642                 build_nr = result[u"build"][u"build"]
1643
1644                 if result[u"data"]:
1645                     data = result[u"data"]
1646                     build_data = pd.Series({
1647                         u"metadata": pd.Series(
1648                             list(data[u"metadata"].values()),
1649                             index=list(data[u"metadata"].keys())
1650                         ),
1651                         u"suites": pd.Series(
1652                             list(data[u"suites"].values()),
1653                             index=list(data[u"suites"].keys())
1654                         ),
1655                         u"tests": pd.Series(
1656                             list(data[u"tests"].values()),
1657                             index=list(data[u"tests"].keys())
1658                         )
1659                     })
1660
1661                     if self._input_data.get(job, None) is None:
1662                         self._input_data[job] = pd.Series()
1663                     self._input_data[job][str(build_nr)] = build_data
1664                     self._cfg.set_input_file_name(
1665                         job, build_nr, result[u"build"][u"file-name"]
1666                     )
1667                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1668
1669                 mem_alloc = \
1670                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1671                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1672
1673         logging.info(u"Done.")
1674
1675         msg = u"Successful downloads from the sources:\n"
1676         for source in self._cfg.environment[u"data-sources"]:
1677             if source[u"successful-downloads"]:
1678                 msg += (
1679                     f"{source[u'url']}/{source[u'path']}/"
1680                     f"{source[u'file-name']}: "
1681                     f"{source[u'successful-downloads']}\n"
1682                 )
1683         logging.info(msg)
1684
1685     def process_local_file(self, local_file, job=u"local", build_nr=1,
1686                            replace=True):
1687         """Process local XML file given as a command-line parameter.
1688
1689         :param local_file: The file to process.
1690         :param job: Job name.
1691         :param build_nr: Build number; if possible, it is overridden by the
1692             number parsed from the file name.
1693         :param replace: If True, the information about jobs and builds is
1694             replaced; otherwise the new jobs and builds are added.
1695         :type local_file: str
1696         :type job: str
1697         :type build_nr: int
1698         :type replace: bool
1699         :raises: PresentationError if an error occurs.
1700         """
1701         if not isfile(local_file):
1702             raise PresentationError(f"The file {local_file} does not exist.")
1703
1704         try:
1705             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1706         except (IndexError, ValueError):
1707             pass
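        # E.g. a file named ``21.xml`` yields build_nr 21; if the name does
        # not start with an integer, the given build_nr is kept.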
1708
1709         build = {
1710             u"build": build_nr,
1711             u"status": u"failed",
1712             u"file-name": local_file
1713         }
1714         if replace:
1715             self._cfg.input = dict()
1716         self._cfg.add_build(job, build)
1717
1718         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1719         data = self._parse_tests(job, build)
1720         if data is None:
1721             raise PresentationError(
1722                 f"Error occurred while parsing the file {local_file}"
1723             )
1724
1725         build_data = pd.Series({
1726             u"metadata": pd.Series(
1727                 list(data[u"metadata"].values()),
1728                 index=list(data[u"metadata"].keys())
1729             ),
1730             u"suites": pd.Series(
1731                 list(data[u"suites"].values()),
1732                 index=list(data[u"suites"].keys())
1733             ),
1734             u"tests": pd.Series(
1735                 list(data[u"tests"].values()),
1736                 index=list(data[u"tests"].keys())
1737             )
1738         })
1739
1740         if self._input_data.get(job, None) is None:
1741             self._input_data[job] = pd.Series()
1742         self._input_data[job][str(build_nr)] = build_data
1743
1744         self._cfg.set_input_state(job, build_nr, u"processed")
1745
1746     def process_local_directory(self, local_dir, replace=True):
1747         """Process a local directory with XML file(s). The directory is
1748         processed as a 'job' and the XML files in it as builds.
1749         If the given directory contains only sub-directories, they are
1750         processed as jobs and the corresponding XML files as their builds.
1751
1752         :param local_dir: Local directory to process.
1753         :param replace: If True, the information about jobs and builds is
1754             replaced; otherwise the new jobs and builds are added.
1755         :type local_dir: str
1756         :type replace: bool
1757         :raises PresentationError: If the directory does not exist, is
1758             empty, or mixes files and sub-directories.
1759         """
1760         if not isdir(local_dir):
1761             raise PresentationError(
1762                 f"The directory {local_dir} does not exist."
1763             )
1764
1765         # Check if the given directory includes only files, or only directories
1766         _, dirnames, filenames = next(walk(local_dir))
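        # Accepted layouts (hypothetical names):
        #   local_dir/1.xml, local_dir/2.xml ... one job, the files are builds
        #   local_dir/job_a/1.xml, ...       ... sub-directories are jobs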
1767
1768         if filenames and not dirnames:
1769             filenames.sort()
1770             # local_builds:
1771             # key: dir (job) name, value: list of file names (builds)
1772             local_builds = {
1773                 local_dir: [join(local_dir, name) for name in filenames]
1774             }
1775
1776         elif dirnames and not filenames:
1777             dirnames.sort()
1778             # local_builds:
1779             # key: dir (job) name, value: list of file names (builds)
1780             local_builds = dict()
1781             for dirname in dirnames:
1782                 builds = [
1783                     join(local_dir, dirname, name)
1784                     for name in listdir(join(local_dir, dirname))
1785                     if isfile(join(local_dir, dirname, name))
1786                 ]
1787                 if builds:
1788                     local_builds[dirname] = sorted(builds)
1789
1790         elif not filenames and not dirnames:
1791             raise PresentationError(f"The directory {local_dir} is empty.")
1792         else:
1793             raise PresentationError(
1794                 f"The directory {local_dir} can include only files or only "
1795                 f"directories, not both.\nThe directory {local_dir} includes "
1796                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1797             )
1798
1799         if replace:
1800             self._cfg.input = dict()
1801
1802         for job, files in local_builds.items():
1803             for idx, local_file in enumerate(files):
1804                 self.process_local_file(local_file, job, idx + 1, replace=False)
1805
1806     @staticmethod
1807     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1808         """Return the index of the character in the string which closes the tag.
1809
1810         :param tag_filter: The string in which the end of the tag is searched.
1811         :param start: The index where the search starts.
1812         :param closer: The character which closes the tag.
1813         :type tag_filter: str
1814         :type start: int
1815         :type closer: str
1816         :returns: The index of the tag closer, or None if not found.
1817         :rtype: int
1818         """
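        # E.g. for the hypothetical filter u"'NDRPDR' and '64B'" the call
        # _end_of_tag(tag_filter, 0) returns 7, the index of the apostrophe
        # closing 'NDRPDR'.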
1819         try:
1820             idx_opener = tag_filter.index(closer, start)
1821             return tag_filter.index(closer, idx_opener + 1)
1822         except ValueError:
1823             return None
1824
1825     @staticmethod
1826     def _condition(tag_filter):
1827         """Create a conditional statement from the given tag filter.
1828
1829         :param tag_filter: Filter based on tags from the element specification.
1830         :type tag_filter: str
1831         :returns: Conditional statement which can be evaluated.
1832         :rtype: str
1833         """
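        # E.g. the hypothetical filter
        #     u"'NDRPDR' and not ('NIC' or 'DRV')"
        # is rewritten to
        #     u"'NDRPDR' in tags and not ('NIC' in tags or 'DRV' in tags)"
        # which filter_data() evaluates with eval() against the test's tags.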
1834         index = 0
1835         while True:
1836             index = InputData._end_of_tag(tag_filter, index)
1837             if index is None:
1838                 return tag_filter
1839             index += 1
1840             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1841
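    # A sketch of a filter_data() call; the element here is hypothetical,
    # normally it is a pandas.Series built from the specification file:
    #
    #     element = {u"filter": u"'NDRPDR' and '64B'",
    #                u"parameters": [u"throughput"],
    #                u"data": {u"csit-vpp-perf": [u"41", u"42"]}}
    #     filtered = in_data.filter_data(element)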
1842     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1843                     continue_on_error=False):
1844         """Filter required data from the given jobs and builds.
1845
1846         The output data structure is:
1847         - job 1
1848           - build 1
1849             - test (or suite) 1 ID:
1850               - param 1
1851               - param 2
1852               ...
1853               - param n
1854             ...
1855             - test (or suite) n ID:
1856             ...
1857           ...
1858           - build n
1859         ...
1860         - job n
1861
1862         :param element: Element which will use the filtered data.
1863         :param params: Parameters which will be included in the output. If None,
1864             all parameters are included.
1865         :param data: If not None, this data is used instead of data specified
1866             in the element.
1867         :param data_set: The set of data to be filtered: tests, suites,
1868             metadata.
1869         :param continue_on_error: Continue if there is an error while
1870             reading the data. The item will be empty then.
1871         :type element: pandas.Series
1872         :type params: list
1873         :type data: dict
1874         :type data_set: str
1875         :type continue_on_error: bool
1876         :returns: Filtered data.
1877         :rtype: pandas.Series
1878         """
1879
1880         try:
1881             if data_set == u"suites":
1882                 cond = u"True"
1883             elif element[u"filter"] in (u"all", u"template"):
1884                 cond = u"True"
1885             else:
1886                 cond = InputData._condition(element[u"filter"])
1887             logging.debug(f"   Filter: {cond}")
1888         except KeyError:
1889             logging.error(u"  No filter defined.")
1890             return None
1891
1892         if params is None:
1893             params = element.get(u"parameters", None)
1894             if params:
1895                 params.extend((u"type", u"status"))
1896
1897         data_to_filter = data if data else element[u"data"]
1898         data = pd.Series()
1899         try:
1900             for job, builds in data_to_filter.items():
1901                 data[job] = pd.Series()
1902                 for build in builds:
1903                     data[job][str(build)] = pd.Series()
1904                     try:
1905                         data_dict = dict(
1906                             self.data[job][str(build)][data_set].items())
1907                     except KeyError:
1908                         if continue_on_error:
1909                             continue
1910                         return None
1911
1912                     for test_id, test_data in data_dict.items():
1913                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1914                             data[job][str(build)][test_id] = pd.Series()
1915                             if params is None:
1916                                 for param, val in test_data.items():
1917                                     data[job][str(build)][test_id][param] = val
1918                             else:
1919                                 for param in params:
1920                                     try:
1921                                         data[job][str(build)][test_id][param] =\
1922                                             test_data[param]
1923                                     except KeyError:
1924                                         data[job][str(build)][test_id][param] =\
1925                                             u"No Data"
1926             return data
1927
1928         except (KeyError, IndexError, ValueError) as err:
1929             logging.error(
1930                 f"Missing mandatory parameter in the element specification: "
1931                 f"{repr(err)}"
1932             )
1933             return None
1934         except AttributeError as err:
1935             logging.error(repr(err))
1936             return None
1937         except SyntaxError as err:
1938             logging.error(
1939                 f"The filter {cond} is not correct. Check if all tags are "
1940                 f"enclosed by apostrophes.\n{repr(err)}"
1941             )
1942             return None
1943
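    # A sketch of the "include"/"core" expansion done below (hypothetical
    # test-name patterns):
    #
    #     include: [u".*-{core}-ethip4-ip4base-ndrpdr"]
    #     core:    [u"1c", u"2c"]
    #
    # yields two regular expressions, one per core value.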
1944     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1945                              continue_on_error=False):
1946         """Filter required data from the given jobs and builds by test name.
1947
1948         The output data structure is:
1949         - job 1
1950           - build 1
1951             - test (or suite) 1 ID:
1952               - param 1
1953               - param 2
1954               ...
1955               - param n
1956             ...
1957             - test (or suite) n ID:
1958             ...
1959           ...
1960           - build n
1961         ...
1962         - job n
1963
1964         :param element: Element which will use the filtered data.
1965         :param params: Parameters which will be included in the output. If
1966             None, all parameters are included.
1967         :param data_set: The set of data to be filtered: tests, suites,
1968             metadata.
1969         :param continue_on_error: Continue if there is an error while
1970             reading the data. The item will be empty then.
1971         :type element: pandas.Series
1972         :type params: list
1973         :type data_set: str
1974         :type continue_on_error: bool
1975         :returns: Filtered data.
1976         :rtype: pandas.Series
1977         """
1978
1979         include = element.get(u"include", None)
1980         if not include:
1981             logging.warning(u"No tests to include, skipping the element.")
1982             return None
1983
1984         if params is None:
1985             params = element.get(u"parameters", None)
1986             if params and u"type" not in params:
1987                 params.append(u"type")
1988
1989         cores = element.get(u"core", None)
1990         if cores:
1991             tests = list()
1992             for core in cores:
1993                 for test in include:
1994                     tests.append(test.format(core=core))
1995         else:
1996             tests = include
1997
1998         data = pd.Series()
1999         try:
2000             for job, builds in element[u"data"].items():
2001                 data[job] = pd.Series()
2002                 for build in builds:
2003                     data[job][str(build)] = pd.Series()
2004                     for test in tests:
2005                         try:
2006                             reg_ex = re.compile(str(test).lower())
2007                             for test_id in self.data[job][
2008                                     str(build)][data_set].keys():
2009                                 if re.match(reg_ex, str(test_id).lower()):
2010                                     test_data = self.data[job][
2011                                         str(build)][data_set][test_id]
2012                                     data[job][str(build)][test_id] = pd.Series()
2013                                     if params is None:
2014                                         for param, val in test_data.items():
2015                                             data[job][str(build)][test_id]\
2016                                                 [param] = val
2017                                     else:
2018                                         for param in params:
2019                                             try:
2020                                                 data[job][str(build)][
2021                                                     test_id][param] = \
2022                                                     test_data[param]
2023                                             except KeyError:
2024                                                 data[job][str(build)][
2025                                                     test_id][param] = u"No Data"
2026                         except KeyError as err:
2027                             if continue_on_error:
2028                                 logging.debug(repr(err))
2029                                 continue
2030                             logging.error(repr(err))
2031                             return None
2032             return data
2033
2034         except (KeyError, IndexError, ValueError) as err:
2035             logging.error(
2036                 f"Missing mandatory parameter in the element "
2037                 f"specification: {repr(err)}"
2038             )
2039             return None
2040         except AttributeError as err:
2041             logging.error(repr(err))
2042             return None
2043
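    # Merging collapses the job and build levels, e.g. (hypothetical IDs):
    #
    #     job1/build1/test-a, job1/build2/test-b  ->  test-a, test-b
    #
    # If the same item ID occurs in several builds, the last one visited wins.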
2044     @staticmethod
2045     def merge_data(data):
2046         """Merge data from multiple jobs and builds into a simple structure.
2047
2048         The output data structure is:
2049
2050         - test (suite) 1 ID:
2051           - param 1
2052           - param 2
2053           ...
2054           - param n
2055         ...
2056         - test (suite) n ID:
2057         ...
2058
2059         :param data: Data to merge.
2060         :type data: pandas.Series
2061         :returns: Merged data.
2062         :rtype: pandas.Series
2063         """
2064
2065         logging.info(u"    Merging data ...")
2066
2067         merged_data = pd.Series()
2068         for builds in data.values:
2069             for item in builds.values:
2070                 for item_id, item_data in item.items():
2071                     merged_data[item_id] = item_data
2072         return merged_data
2073
2074     def print_all_oper_data(self):
2075         """Print all operational data to the console.
2076         """
2077
2078         for job in self._input_data.values:
2079             for build in job.values:
2080                 for test_id, test_data in build[u"tests"].items():
2081                     print(f"{test_id}")
2082                     if test_data.get(u"show-run", None) is None:
2083                         continue
2084                     for dut_name, data in test_data[u"show-run"].items():
2085                         if data.get(u"runtime", None) is None:
2086                             continue
2087                         runtime = loads(data[u"runtime"])
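                        # "runtime" is a JSON list of records; each record
                        # has a u"name" and per-thread lists under the keys
                        # u"clocks", u"vectors", u"calls" and u"suspends".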
2088                         try:
2089                             threads_nr = len(runtime[0][u"clocks"])
2090                         except (IndexError, KeyError):
2091                             continue
2092                         threads = OrderedDict(
2093                             {idx: list() for idx in range(threads_nr)})
2094                         for item in runtime:
2095                             for idx in range(threads_nr):
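                                # Clocks per vector, falling back to clocks
                                # per call or per suspend when no vectors
                                # were processed in this thread.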
2096                                 if item[u"vectors"][idx] > 0:
2097                                     clocks = item[u"clocks"][idx] / \
2098                                              item[u"vectors"][idx]
2099                                 elif item[u"calls"][idx] > 0:
2100                                     clocks = item[u"clocks"][idx] / \
2101                                              item[u"calls"][idx]
2102                                 elif item[u"suspends"][idx] > 0:
2103                                     clocks = item[u"clocks"][idx] / \
2104                                              item[u"suspends"][idx]
2105                                 else:
2106                                     clocks = 0.0
2107
2108                                 if item[u"calls"][idx] > 0:
2109                                     vectors_call = item[u"vectors"][idx] / \
2110                                                    item[u"calls"][idx]
2111                                 else:
2112                                     vectors_call = 0.0
2113
2114                                 if int(item[u"calls"][idx]) + int(
2115                                         item[u"vectors"][idx]) + \
2116                                         int(item[u"suspends"][idx]):
2117                                     threads[idx].append([
2118                                         item[u"name"],
2119                                         item[u"calls"][idx],
2120                                         item[u"vectors"][idx],
2121                                         item[u"suspends"][idx],
2122                                         clocks,
2123                                         vectors_call
2124                                     ])
2125
2126                         print(f"Host IP: {data.get(u'host', '')}, "
2127                               f"Socket: {data.get(u'socket', '')}")
2128                         for thread_nr, thread in threads.items():
2129                             txt_table = prettytable.PrettyTable(
2130                                 (
2131                                     u"Name",
2132                                     u"Nr of Vectors",
2133                                     u"Nr of Packets",
2134                                     u"Suspends",
2135                                     u"Cycles per Packet",
2136                                     u"Average Vector Size"
2137                                 )
2138                             )
2139                             avg = 0.0
2140                             for row in thread:
2141                                 txt_table.add_row(row)
2142                                 avg += row[-1]
2143                             if len(thread) == 0:
2144                                 avg = u""
2145                             else:
2146                                 avg = f", Average Vector Size per Node: " \
2147                                       f"{(avg / len(thread)):.2f}"
2148                             th_name = u"main" if thread_nr == 0 \
2149                                 else f"worker_{thread_nr}"
2150                             print(f"{dut_name}, {th_name}{avg}")
2151                             txt_table.float_format = u".2"
2152                             txt_table.align = u"r"
2153                             txt_table.align[u"Name"] = u"l"
2154                             print(f"{txt_table.get_string()}\n")