# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """

import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove, walk, listdir
from os.path import isfile, isdir, join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from json.decoder import JSONDecodeError

import hdrh.histogram
import hdrh.codec
import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file
from pal_errors import PresentationError


# Separator used in file names
SEPARATOR = u"__"

class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            }
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        }
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 PAPI command history",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    }
                }
            }

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            }

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            }

            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            }
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        }
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 PAPI command history",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """

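    # A hedged usage sketch of the structure documented above. `result`,
    # `metadata`, `mapping`, `ignore`, `for_output` and `test_id` are assumed
    # names here, not definitions from this module:
    #
    #     checker = ExecutionChecker(metadata, mapping, ignore, for_output)
    #     result.visit(checker)
    #     ndr_lower = \
    #         checker.data[u"tests"][test_id][u"throughput"][u"NDR"][u"LOWER"]
    #     suite_level = checker.data[u"suites"][suite_long_name][u"level"]
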
    REGEX_PLR_RATE = re.compile(
        r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
        r'PLRsearch upper bound::?\s(\d+.\d+)'
    )
    REGEX_NDRPDR_RATE = re.compile(
        r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:\s(\d+.\d+).*\n'
        r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:\s(\d+.\d+)'
    )
    REGEX_NDRPDR_GBPS = re.compile(
        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:.*,\s(\d+.\d+)'
    )
    REGEX_PERF_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    )
    REGEX_CPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
    )
    REGEX_PPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
    )
    REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')

    REGEX_VSAP_MSG_INFO = re.compile(
        r'Transfer Rate: (\d*.\d*).*\n'
        r'Latency: (\d*.\d*).*\n'
        r'Completed requests: (\d*).*\n'
        r'Failed requests: (\d*).*\n'
        r'Total data transferred: (\d*).*\n'
        r'Connection [cr]ps rate:\s*(\d*.\d*)'
    )

    # Needed for CPS and PPS tests
    REGEX_NDRPDR_LAT_BASE = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
    )
    REGEX_NDRPDR_LAT = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
    )

    REGEX_VERSION_VPP = re.compile(
        r"(return STDOUT Version:\s*|"
        r"VPP Version:\s*|VPP version:\s*)(.*)"
    )
    REGEX_VERSION_DPDK = re.compile(
        r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
    )
    REGEX_TCP = re.compile(
        r'Total\s(rps|cps|throughput):\s(\d*).*$'
    )
    REGEX_MRR = re.compile(
        r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
        r'tx\s(\d*),\srx\s(\d*)'
    )
    REGEX_BMRR = re.compile(
        r'.*trial results.*: \[(.*)\]'
    )
    REGEX_RECONF_LOSS = re.compile(
        r'Packets lost due to reconfig: (\d*)'
    )
    REGEX_RECONF_TIME = re.compile(
        r'Implied time lost: (\d*.[\de-]*)'
    )
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')

    REGEX_SH_RUN_HOST = re.compile(
        r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
    )

    def __init__(self, metadata, mapping, ignore, for_output):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :param for_output: Output to be generated from downloaded data.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        :type for_output: str
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        self._for_output = for_output

        # Number of PAPI History messages found:
        # 0 - no message
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._conf_history_lookup_nr = 0

        self._sh_run_counter = 0
        self._telemetry_kw_counter = 0
        self._telemetry_msg_counter = 0

        # Test ID of currently processed test - the lowercase full path to
        # the test
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed,
            u"test-telemetry": self._get_telemetry
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_data_from_mrr_test_msg(self, msg):
        """Get info from message of MRR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
        if not groups or groups.lastindex != 1:
            return u"Test Failed."

        try:
            data = groups.group(1).split(u", ")
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        out_str = u"["
        try:
            for item in data:
                out_str += f"{(float(item) / 1e6):.2f}, "
            return out_str[:-2] + u"]"
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

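    # Hedged example of the conversion above (illustrative values only):
    # a message like
    #     u"... trial results [pkts/sec]: [10000000.0, 12500000.0]"
    # matches REGEX_MRR_MSG_INFO; each sample is scaled from pps to Mpps
    # and formatted to two decimals, yielding u"[10.00, 12.50]".
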
    def _get_data_from_cps_test_msg(self, msg):
        """Get info from message of NDRPDR CPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 2:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
                f"2. {(float(groups.group(2)) / 1e6):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_pps_test_msg(self, msg):
        """Get info from message of NDRPDR PPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 4:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
                f"{float(groups.group(2)):5.2f}\n"
                f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
                f"{float(groups.group(4)):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_perf_test_msg(self, msg):
        """Get info from message of NDRPDR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
        if not groups or groups.lastindex != 10:
            return u"Test Failed."

        try:
            data = {
                u"ndr_low": float(groups.group(1)),
                u"ndr_low_b": float(groups.group(2)),
                u"pdr_low": float(groups.group(3)),
                u"pdr_low_b": float(groups.group(4)),
                u"pdr_lat_90_1": groups.group(5),
                u"pdr_lat_90_2": groups.group(6),
                u"pdr_lat_50_1": groups.group(7),
                u"pdr_lat_50_2": groups.group(8),
                u"pdr_lat_10_1": groups.group(9),
                u"pdr_lat_10_2": groups.group(10),
            }
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        def _process_lat(in_str_1, in_str_2):
            """Extract P50, P90 and P99 latencies or min, avg, max values from
            latency string.

            :param in_str_1: Latency string for one direction produced by robot
                framework.
            :param in_str_2: Latency string for second direction produced by
                robot framework.
            :type in_str_1: str
            :type in_str_2: str
            :returns: Processed latency string or None if a problem occurs.
            :rtype: tuple
            """
            in_list_1 = in_str_1.split('/', 3)
            in_list_2 = in_str_2.split('/', 3)

            if len(in_list_1) != 4 and len(in_list_2) != 4:
                return None

            in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
            try:
                hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
            except hdrh.codec.HdrLengthException:
                hdr_lat_1 = None

            in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
            try:
                hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
            except hdrh.codec.HdrLengthException:
                hdr_lat_2 = None

            if hdr_lat_1 and hdr_lat_2:
                hdr_lat = (
                    hdr_lat_1.get_value_at_percentile(50.0),
                    hdr_lat_1.get_value_at_percentile(90.0),
                    hdr_lat_1.get_value_at_percentile(99.0),
                    hdr_lat_2.get_value_at_percentile(50.0),
                    hdr_lat_2.get_value_at_percentile(90.0),
                    hdr_lat_2.get_value_at_percentile(99.0)
                )
                if all(hdr_lat):
                    return hdr_lat
            else:
                hdr_lat = (
                    in_list_1[0], in_list_1[1], in_list_1[2],
                    in_list_2[0], in_list_2[1], in_list_2[2]
                )
                for item in hdr_lat:
                    if item in (u"-1", u"4294967295", u"0"):
                        return None
                return hdr_lat

            return None

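        # Hedged walk-through of _process_lat() (illustrative input): for
        #     in_str_1 = u"10/15/25/<base64 hdrh data>"
        # the string splits into min/avg/max and the hdrh blob. If both
        # directions' blobs decode, the P50/P90/P99 percentiles of both
        # histograms are returned; otherwise the plain min/avg/max values
        # are used, unless any of them is a sentinel (-1, 0, 4294967295).
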
        try:
            out_msg = (
                f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
                f"{data[u'ndr_low_b']:5.2f}"
                f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
                f"{data[u'pdr_low_b']:5.2f}"
            )
            latency = (
                _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
                _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
                _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
            )
            if all(latency):
                max_len = len(str(max((max(item) for item in latency))))
                max_len = 4 if max_len < 4 else max_len

                for idx, lat in enumerate(latency):
                    if not idx:
                        out_msg += u"\n"
                    out_msg += (
                        f"\n{idx + 3}. "
                        f"{lat[0]:{max_len}d} "
                        f"{lat[1]:{max_len}d} "
                        f"{lat[2]:{max_len}d}      "
                        f"{lat[3]:{max_len}d} "
                        f"{lat[4]:{max_len}d} "
                        f"{lat[5]:{max_len}d} "
                    )

            return out_msg

        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"return STDOUT Version:") or \
                msg.message.count(u"VPP Version:") or \
                msg.message.count(u"VPP version:"):
            self._version = str(
                re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
            )
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(
                r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
                u"",
                msg.message,
                count=1
            ).replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if not msg.message.count(u"stats runtime"):
            return

        # Temporary solution
        if self._sh_run_counter > 1:
            return

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = dict()

        groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""

        dut = u"dut{nr}".format(
            nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)

        self._data[u'tests'][self._test_id][u'show-run'][dut] = \
            copy.copy(
                {
                    u"host": host,
                    u"socket": sock,
                    u"runtime": str(msg.message).replace(u' ', u'').
                                replace(u'\n', u'').replace(u"'", u'"').
                                replace(u'b"', u'"').replace(u'u"', u'"').
                                split(u":", 1)[1]
                }
            )

    def _get_telemetry(self, msg):
        """Called when extraction of VPP telemetry data is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._telemetry_kw_counter > 1:
            return
        if not msg.message.count(u"# TYPE vpp_runtime_calls"):
            return

        if u"telemetry-show-run" not in \
                self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()

        self._telemetry_msg_counter += 1
        groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""
        runtime = {
            u"source_type": u"node",
            u"source_id": host,
            u"msg_type": u"metric",
            u"log_level": u"INFO",
            u"timestamp": msg.timestamp,
            u"msg": u"show_runtime",
            u"host": host,
            u"socket": sock,
            u"data": list()
        }
        for line in msg.message.splitlines():
            if not line.startswith(u"vpp_runtime_"):
                continue
            try:
                params, value, timestamp = line.rsplit(u" ", maxsplit=2)
                cut = params.index(u"{")
                name = params[:cut].split(u"_", maxsplit=2)[-1]
                labels = eval(
                    u"dict" + params[cut:].replace('{', '(').replace('}', ')')
                )
                labels[u"graph_node"] = labels.pop(u"name")
                runtime[u"data"].append(
                    {
                        u"name": name,
                        u"value": value,
                        u"timestamp": timestamp,
                        u"labels": labels
                    }
                )
            except (TypeError, ValueError, IndexError):
                continue
        self._data[u'tests'][self._test_id][u'telemetry-show-run']\
            [f"dut{self._telemetry_msg_counter}"] = copy.copy(
                {
                    u"host": host,
                    u"socket": sock,
                    u"runtime": runtime
                }
            )

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

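    # Illustrative message fragment (hypothetical numbers) matched by
    # REGEX_NDRPDR_RATE above:
    #
    #     NDR_LOWER: 12345678.0 pps ...
    #     <one more line>
    #     NDR_UPPER: 12400000.0 pps ...
    #     PDR_LOWER: 12500000.0 pps ...
    #     <one more line>
    #     PDR_UPPER: 12600000.0 pps ...
    #
    # would yield
    #     ({u"NDR": {u"LOWER": 12345678.0, u"UPPER": 12400000.0},
    #       u"PDR": {u"LOWER": 12500000.0, u"UPPER": 12600000.0}}, u"PASS")
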
    def _get_ndrpdr_throughput_gbps(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
        test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        gbps = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_GBPS, msg)

        if groups is not None:
            try:
                gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
                gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
                gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
                gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return gbps, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0,
            u"avg": -1.0,
            u"max": -1.0,
            u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"LAT0": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR10": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR50": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR90": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
        }

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)
        if groups is None:
            groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
        if groups is None:
            return latency, u"FAIL"

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with corresponding keys, except hdrh float values.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
            """
            in_list = in_str.split('/', 3)

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

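        # Hedged examples with made-up values:
        #     process_latency(u"1/5/10/abc") returns
        #         {u"min": 1.0, u"avg": 5.0, u"max": 10.0, u"hdrh": u"abc"}
        #     process_latency(u"1/5/10") leaves u"hdrh" empty.
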
        try:
            latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
            latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
            latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
            latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
            if groups.lastindex == 4:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        try:
            latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
            latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
            latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
            latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
            latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
            latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
            latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
            latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
            if groups.lastindex == 12:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        return latency, u"FAIL"

    @staticmethod
    def _get_hoststack_data(msg, tags):
        """Get data from the hoststack test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        result = dict()
        status = u"FAIL"

        msg = msg.replace(u"'", u'"').replace(u" ", u"")
        if u"LDPRELOAD" in tags:
            try:
                result = loads(msg)
                status = u"PASS"
            except JSONDecodeError:
                pass
        elif u"VPPECHO" in tags:
            try:
                msg_lst = msg.replace(u"}{", u"} {").split(u" ")
                result = dict(
                    client=loads(msg_lst[0]),
                    server=loads(msg_lst[1])
                )
                status = u"PASS"
            except (JSONDecodeError, IndexError):
                pass

        return result, status

    def _get_vsap_data(self, msg, tags):
        """Get data from the vsap test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        result = dict()
        status = u"FAIL"

        groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
        if groups is not None:
            try:
                result[u"transfer-rate"] = float(groups.group(1)) * 1e3
                result[u"latency"] = float(groups.group(2))
                result[u"completed-requests"] = int(groups.group(3))
                result[u"failed-requests"] = int(groups.group(4))
                result[u"bytes-transferred"] = int(groups.group(5))
                if u"TCP_CPS" in tags:
                    result[u"cps"] = float(groups.group(6))
                elif u"TCP_RPS" in tags:
                    result[u"rps"] = float(groups.group(6))
                else:
                    return result, status
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return result, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        self._data[u"suites"][suite.longname.lower().
                              replace(u'"', u"'").
                              replace(u" ", u"_")] = {
                                  u"name": suite.name.lower(),
                                  u"doc": suite.doc,
                                  u"parent": parent_name,
                                  u"level": len(suite.longname.split(u"."))
                              }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        self._sh_run_counter = 0
        self._telemetry_kw_counter = 0
        self._telemetry_msg_counter = 0

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            logging.debug(
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
                f"{name}"
            )
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, u"", name)

        test_result[u"parent"] = test.parent.name.lower()
        test_result[u"tags"] = tags
        test_result[u"doc"] = test.doc
        test_result[u"type"] = u""
        test_result[u"status"] = test.status
        test_result[u"starttime"] = test.starttime
        test_result[u"endtime"] = test.endtime

        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
                    test_result[u"msg"] = self._get_data_from_pps_test_msg(
                        test.message)
                elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                    test_result[u"msg"] = self._get_data_from_cps_test_msg(
                        test.message)
                else:
                    test_result[u"msg"] = self._get_data_from_perf_test_msg(
                        test.message)
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                test_result[u"msg"] = self._get_data_from_mrr_test_msg(
                    test.message)
            else:
                test_result[u"msg"] = test.message
        else:
            test_result[u"msg"] = test.message

        if u"PERFTEST" in tags and u"TREX" not in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            tag_count = 0
            tag_tc = str()
            for tag in test_result[u"tags"]:
                groups = re.search(self.REGEX_TC_TAG, tag)
                if groups:
                    tag_count += 1
                    tag_tc = tag

            if tag_count == 1:
                self._test_id = re.sub(
                    self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                    self._test_id, count=1
                )
                test_result[u"name"] = re.sub(
                    self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                    test_result[u"name"], count=1
                )
            else:
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                logging.debug(
                    f"The test {self._test_id} has no multi-threading tag or "
                    f"more than one.\n"
                    f"Tags: {test_result[u'tags']}"
                )
                return

        if u"DEVICETEST" in tags:
            test_result[u"type"] = u"DEVICETEST"
        elif u"NDRPDR" in tags:
            if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                test_result[u"type"] = u"CPS"
            else:
                test_result[u"type"] = u"NDRPDR"
            if test.status == u"PASS":
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"gbps"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput_gbps(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
        elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
            if u"MRR" in tags:
                test_result[u"type"] = u"MRR"
            else:
                test_result[u"type"] = u"BMRR"
            if test.status == u"PASS":
                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [
                        float(item.strip().replace(u"'", u""))
                        for item in items_str.split(",")
                    ]
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"samples"] = items_float
                    test_result[u"result"][u"receive-rate"] = stats.avg
                    test_result[u"result"][u"receive-stdev"] = stats.stdev
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
        elif u"SOAK" in tags:
            test_result[u"type"] = u"SOAK"
            if test.status == u"PASS":
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
        elif u"HOSTSTACK" in tags:
            test_result[u"type"] = u"HOSTSTACK"
            if test.status == u"PASS":
                test_result[u"result"], test_result[u"status"] = \
                    self._get_hoststack_data(test.message, tags)
        elif u"LDP_NGINX" in tags:
            test_result[u"type"] = u"LDP_NGINX"
            test_result[u"result"], test_result[u"status"] = \
                self._get_vsap_data(test.message, tags)
        # elif u"TCP" in tags:  # This might be not used
        #     test_result[u"type"] = u"TCP"
        #     if test.status == u"PASS":
        #         groups = re.search(self.REGEX_TCP, test.message)
        #         test_result[u"result"] = int(groups.group(2))
        elif u"RECONF" in tags:
            test_result[u"type"] = u"RECONF"
            if test.status == u"PASS":
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
        else:
            test_result[u"status"] = u"FAIL"

        self._data[u"tests"][self._test_id] = test_result

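    # A minimal sketch (made-up numbers) of a resulting entry for a passed
    # BMRR test, following the branches above:
    #
    #     self._data[u"tests"][test_id] == {
    #         ...,
    #         u"type": u"BMRR",
    #         u"status": u"PASS",
    #         u"result": {
    #             u"samples": [28.3e6, 28.1e6],
    #             u"receive-rate": 28.2e6,   # stats.avg
    #             u"receive-stdev": 0.1e6    # stats.stdev
    #         }
    #     }
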
    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Dispatches according to the keyword
        type (setup, teardown or test keyword).

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == u"setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == u"teardown":
                self.visit_teardown_kw(keyword)
            else:
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if self._for_output == u"trending":
            return

        if test_kw.name.count(u"Run Telemetry On All Duts"):
            self._msg_type = u"test-telemetry"
            self._telemetry_kw_counter += 1
        elif test_kw.name.count(u"Show Runtime On All Duts"):
            self._msg_type = u"test-show-runtime"
            self._sh_run_counter += 1
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
                not self._version:
            self._msg_type = u"dpdk-version"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        if teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Get the required information from the
        message, using the parser selected by the current message type (e.g.
        VPP version, testbed, show runtime).

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """


class InputData:
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """

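    # Hedged usage sketch; the job name and build number are made up:
    #
    #     in_data = InputData(spec, u"trending")
    #     ...  # download and parse the builds first
    #     meta = in_data.metadata(u"csit-vpp-perf-mrr-daily-master", u"42")
    #     tests = in_data.tests(u"csit-vpp-perf-mrr-daily-master", u"42")
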
    def __init__(self, spec, for_output):
        """Initialization.

        :param spec: Specification.
        :param for_output: Output to be generated from downloaded data.
        :type spec: Specification
        :type for_output: str
        """

        # Specification:
        self._cfg = spec

        self._for_output = for_output

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data
        :rtype: pandas.Series
        """
        return self._input_data

1470     def metadata(self, job, build):
1471         """Getter - metadata
1472
1473         :param job: Job which metadata we want.
1474         :param build: Build which metadata we want.
1475         :type job: str
1476         :type build: str
1477         :returns: Metadata
1478         :rtype: pandas.Series
1479         """
1480         return self.data[job][build][u"metadata"]
1481
1482     def suites(self, job, build):
        """Getter - suites.

        :param job: Job whose suites we want.
        :param build: Build whose suites we want.
1487         :type job: str
1488         :type build: str
1489         :returns: Suites.
1490         :rtype: pandas.Series
1491         """
1492         return self.data[job][str(build)][u"suites"]
1493
1494     def tests(self, job, build):
        """Getter - tests.

        :param job: Job whose tests we want.
        :param build: Build whose tests we want.
1499         :type job: str
1500         :type build: str
1501         :returns: Tests.
1502         :rtype: pandas.Series
1503         """
        return self.data[job][str(build)][u"tests"]
1505
1506     def _parse_tests(self, job, build):
        """Process data from the robot output.xml file and return JSON
        structured data.

        :param job: The name of the job whose build output data will be
            processed.
        :param build: The build whose output data will be processed.
1512         :type job: str
1513         :type build: dict
1514         :returns: JSON data structure.
1515         :rtype: dict
1516         """
1517
1518         metadata = {
1519             u"job": job,
1520             u"build": build
1521         }
1522
1523         with open(build[u"file-name"], u'r') as data_file:
1524             try:
1525                 result = ExecutionResult(data_file)
1526             except errors.DataError as err:
1527                 logging.error(
1528                     f"Error occurred while parsing output.xml: {repr(err)}"
1529                 )
1530                 return None
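        # Walk the parsed result tree; ExecutionChecker collects the metadata,
        # suites and tests into the json-like structure described in its
        # docstring.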
1531         checker = ExecutionChecker(
1532             metadata, self._cfg.mapping, self._cfg.ignore, self._for_output
1533         )
1534         result.visit(checker)
1535
1536         checker.data[u"metadata"][u"tests_total"] = \
1537             result.statistics.total.all.total
1538         checker.data[u"metadata"][u"tests_passed"] = \
1539             result.statistics.total.all.passed
1540         checker.data[u"metadata"][u"tests_failed"] = \
1541             result.statistics.total.all.failed
1542         checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
1543         checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]
1544
1545         return checker.data
1546
1547     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1548         """Download and parse the input data file.
1549
        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :param pid: PID of the process executing this method.
        :type job: str
        :type build: dict
        :type repeat: int
        :type pid: int
1561         """
1562
1563         logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1564
1565         state = u"failed"
1566         success = False
1567         data = None
1568         do_repeat = repeat
1569         while do_repeat:
1570             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1571             if success:
1572                 break
1573             do_repeat -= 1
1574         if not success:
1575             logging.error(
                f"It is not possible to download the input data file from the "
                f"job {job}, build {build[u'build']}, or the file is damaged. "
                f"Skipped."
1579             )
1580         if success:
1581             logging.info(f"  Processing data from build {build[u'build']}")
1582             data = self._parse_tests(job, build)
1583             if data is None:
1584                 logging.error(
1585                     f"Input data file from the job {job}, build "
1586                     f"{build[u'build']} is damaged. Skipped."
1587                 )
1588             else:
1589                 state = u"processed"
1590
1591             try:
1592                 remove(build[u"file-name"])
1593             except OSError as err:
1594                 logging.error(
1595                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1596                 )
1597
1598         # If the time-period is defined in the specification file, remove all
1599         # files which are outside the time period.
1600         is_last = False
1601         timeperiod = self._cfg.environment.get(u"time-period", None)
1602         if timeperiod and data:
1603             now = dt.utcnow()
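            # The time period is given in days: timedelta() interprets its
            # first positional argument as a number of days.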
1604             timeperiod = timedelta(int(timeperiod))
1605             metadata = data.get(u"metadata", None)
1606             if metadata:
1607                 generated = metadata.get(u"generated", None)
1608                 if generated:
1609                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1610                     if (now - generated) > timeperiod:
1611                         # Remove the data and the file:
1612                         state = u"removed"
1613                         data = None
1614                         is_last = True
1615                         logging.info(
1616                             f"  The build {job}/{build[u'build']} is "
1617                             f"outdated, will be removed."
1618                         )
1619         return {
1620             u"data": data,
1621             u"state": state,
1622             u"job": job,
1623             u"build": build,
1624             u"last": is_last
1625         }
1626
1627     def download_and_parse_data(self, repeat=1):
1628         """Download the input data files, parse input data from input files and
1629         store in pandas' Series.
1630
1631         :param repeat: Repeat the download specified number of times if not
1632             successful.
1633         :type repeat: int
1634         """
1635
1636         logging.info(u"Downloading and parsing input files ...")
1637
1638         for job, builds in self._cfg.input.items():
1639             for build in builds:
1640
1641                 result = self._download_and_parse_build(job, build, repeat)
1642                 if result[u"last"]:
1643                     break
1644                 build_nr = result[u"build"][u"build"]
1645
1646                 if result[u"data"]:
1647                     data = result[u"data"]
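                    # Wrap metadata, suites and tests in nested pandas Series
                    # so the result is addressable as data[job][build][set].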
1648                     build_data = pd.Series({
1649                         u"metadata": pd.Series(
1650                             list(data[u"metadata"].values()),
1651                             index=list(data[u"metadata"].keys())
1652                         ),
1653                         u"suites": pd.Series(
1654                             list(data[u"suites"].values()),
1655                             index=list(data[u"suites"].keys())
1656                         ),
1657                         u"tests": pd.Series(
1658                             list(data[u"tests"].values()),
1659                             index=list(data[u"tests"].keys())
1660                         )
1661                     })
1662
1663                     if self._input_data.get(job, None) is None:
1664                         self._input_data[job] = pd.Series()
1665                     self._input_data[job][str(build_nr)] = build_data
1666                     self._cfg.set_input_file_name(
1667                         job, build_nr, result[u"build"][u"file-name"]
1668                     )
1669                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1670
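                # ru_maxrss is reported in kilobytes on Linux, so dividing by
                # 1000 gives an approximate value in megabytes.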
1671                 mem_alloc = \
1672                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1673                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1674
1675         logging.info(u"Done.")
1676
        msg = u"Successful downloads from the sources:\n"
1678         for source in self._cfg.environment[u"data-sources"]:
1679             if source[u"successful-downloads"]:
1680                 msg += (
1681                     f"{source[u'url']}/{source[u'path']}/"
1682                     f"{source[u'file-name']}: "
1683                     f"{source[u'successful-downloads']}\n"
1684                 )
1685         logging.info(msg)
1686
1687     def process_local_file(self, local_file, job=u"local", build_nr=1,
1688                            replace=True):
1689         """Process local XML file given as a command-line parameter.
1690
1691         :param local_file: The file to process.
1692         :param job: Job name.
1693         :param build_nr: Build number.
1694         :param replace: If True, the information about jobs and builds is
1695             replaced by the new one, otherwise the new jobs and builds are
1696             added.
1697         :type local_file: str
1698         :type job: str
1699         :type build_nr: int
1700         :type replace: bool
1701         :raises: PresentationError if an error occurs.
1702         """
1703         if not isfile(local_file):
1704             raise PresentationError(f"The file {local_file} does not exist.")
1705
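        # Try to infer the build number from the file name, e.g. a file named
        # "42.xml" gives build 42; otherwise keep the given build_nr.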
1706         try:
1707             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1708         except (IndexError, ValueError):
1709             pass
1710
1711         build = {
1712             u"build": build_nr,
1713             u"status": u"failed",
1714             u"file-name": local_file
1715         }
1716         if replace:
1717             self._cfg.input = dict()
1718         self._cfg.add_build(job, build)
1719
1720         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1721         data = self._parse_tests(job, build)
1722         if data is None:
1723             raise PresentationError(
1724                 f"Error occurred while parsing the file {local_file}"
1725             )
1726
1727         build_data = pd.Series({
1728             u"metadata": pd.Series(
1729                 list(data[u"metadata"].values()),
1730                 index=list(data[u"metadata"].keys())
1731             ),
1732             u"suites": pd.Series(
1733                 list(data[u"suites"].values()),
1734                 index=list(data[u"suites"].keys())
1735             ),
1736             u"tests": pd.Series(
1737                 list(data[u"tests"].values()),
1738                 index=list(data[u"tests"].keys())
1739             )
1740         })
1741
1742         if self._input_data.get(job, None) is None:
1743             self._input_data[job] = pd.Series()
1744         self._input_data[job][str(build_nr)] = build_data
1745
1746         self._cfg.set_input_state(job, build_nr, u"processed")
1747
1748     def process_local_directory(self, local_dir, replace=True):
1749         """Process local directory with XML file(s). The directory is processed
1750         as a 'job' and the XML files in it as builds.
        If the given directory contains only sub-directories, these
        sub-directories are processed as jobs and the corresponding XML files
        as builds of their respective jobs.
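
        Accepted layouts (illustrative):
        - local_dir/1.xml, local_dir/2.xml ... one job, two builds
        - local_dir/job_a/1.xml, local_dir/job_b/1.xml ... two jobs, one
          build each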
1754
1755         :param local_dir: Local directory to process.
1756         :param replace: If True, the information about jobs and builds is
1757             replaced by the new one, otherwise the new jobs and builds are
1758             added.
1759         :type local_dir: str
        :type replace: bool
        :raises: PresentationError if the directory does not exist, is empty,
            or includes both files and sub-directories.
1761         """
1762         if not isdir(local_dir):
1763             raise PresentationError(
1764                 f"The directory {local_dir} does not exist."
1765             )
1766
1767         # Check if the given directory includes only files, or only directories
1768         _, dirnames, filenames = next(walk(local_dir))
1769
1770         if filenames and not dirnames:
1771             filenames.sort()
1772             # local_builds:
1773             # key: dir (job) name, value: list of file names (builds)
1774             local_builds = {
1775                 local_dir: [join(local_dir, name) for name in filenames]
1776             }
1777
1778         elif dirnames and not filenames:
1779             dirnames.sort()
1780             # local_builds:
1781             # key: dir (job) name, value: list of file names (builds)
1782             local_builds = dict()
1783             for dirname in dirnames:
1784                 builds = [
1785                     join(local_dir, dirname, name)
1786                     for name in listdir(join(local_dir, dirname))
1787                     if isfile(join(local_dir, dirname, name))
1788                 ]
1789                 if builds:
1790                     local_builds[dirname] = sorted(builds)
1791
1792         elif not filenames and not dirnames:
1793             raise PresentationError(f"The directory {local_dir} is empty.")
1794         else:
1795             raise PresentationError(
1796                 f"The directory {local_dir} can include only files or only "
1797                 f"directories, not both.\nThe directory {local_dir} includes "
1798                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1799             )
1800
1801         if replace:
1802             self._cfg.input = dict()
1803
1804         for job, files in local_builds.items():
1805             for idx, local_file in enumerate(files):
1806                 self.process_local_file(local_file, job, idx + 1, replace=False)
1807
1808     @staticmethod
1809     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1810         """Return the index of character in the string which is the end of tag.
1811
        :param tag_filter: The string where the end of tag is being searched.
        :param start: The index where the searching is started.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer, or None if no closer is found.
        :rtype: int
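
        Example (illustrative): for the filter "'TAG1' and 'TAG2'" with
        start=0 the method returns 5, the index of the apostrophe which
        closes 'TAG1'.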
1820         """
1821         try:
1822             idx_opener = tag_filter.index(closer, start)
1823             return tag_filter.index(closer, idx_opener + 1)
1824         except ValueError:
1825             return None
1826
1827     @staticmethod
1828     def _condition(tag_filter):
1829         """Create a conditional statement from the given tag filter.
1830
1831         :param tag_filter: Filter based on tags from the element specification.
1832         :type tag_filter: str
1833         :returns: Conditional statement which can be evaluated.
1834         :rtype: str
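
        Example (illustrative): the filter "'NDRPDR' and '2T1C'" is turned
        into "'NDRPDR' in tags and '2T1C' in tags", which filter_data() then
        evaluates with the tags of each test in scope.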
1835         """
1836         index = 0
1837         while True:
1838             index = InputData._end_of_tag(tag_filter, index)
1839             if index is None:
1840                 return tag_filter
1841             index += 1
1842             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1843
1844     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1845                     continue_on_error=False):
1846         """Filter required data from the given jobs and builds.
1847
1848         The output data structure is:
1849         - job 1
1850           - build 1
1851             - test (or suite) 1 ID:
1852               - param 1
1853               - param 2
1854               ...
1855               - param n
1856             ...
1857             - test (or suite) n ID:
1858             ...
1859           ...
1860           - build n
1861         ...
1862         - job n
1863
1864         :param element: Element which will use the filtered data.
1865         :param params: Parameters which will be included in the output. If None,
1866             all parameters are included.
1867         :param data: If not None, this data is used instead of data specified
1868             in the element.
1869         :param data_set: The set of data to be filtered: tests, suites,
1870             metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty then.
1873         :type element: pandas.Series
1874         :type params: list
1875         :type data: dict
1876         :type data_set: str
1877         :type continue_on_error: bool
1878         :returns: Filtered data.
        :rtype: pandas.Series
1880         """
1881
1882         try:
            if data_set == u"suites":
1884                 cond = u"True"
1885             elif element[u"filter"] in (u"all", u"template"):
1886                 cond = u"True"
1887             else:
1888                 cond = InputData._condition(element[u"filter"])
1889             logging.debug(f"   Filter: {cond}")
1890         except KeyError:
1891             logging.error(u"  No filter defined.")
1892             return None
1893
1894         if params is None:
1895             params = element.get(u"parameters", None)
1896             if params:
1897                 params.extend((u"type", u"status"))
1898
1899         data_to_filter = data if data else element[u"data"]
1900         data = pd.Series()
1901         try:
1902             for job, builds in data_to_filter.items():
1903                 data[job] = pd.Series()
1904                 for build in builds:
1905                     data[job][str(build)] = pd.Series()
1906                     try:
1907                         data_dict = dict(
1908                             self.data[job][str(build)][data_set].items())
1909                     except KeyError:
1910                         if continue_on_error:
1911                             continue
1912                         return None
1913
1914                     for test_id, test_data in data_dict.items():
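                        # Evaluate the condition built by _condition(), e.g.
                        # "'NDRPDR' in tags", against this test's tags.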
1915                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1916                             data[job][str(build)][test_id] = pd.Series()
1917                             if params is None:
1918                                 for param, val in test_data.items():
1919                                     data[job][str(build)][test_id][param] = val
1920                             else:
1921                                 for param in params:
1922                                     try:
1923                                         data[job][str(build)][test_id][param] =\
1924                                             test_data[param]
1925                                     except KeyError:
1926                                         data[job][str(build)][test_id][param] =\
1927                                             u"No Data"
1928             return data
1929
1930         except (KeyError, IndexError, ValueError) as err:
1931             logging.error(
1932                 f"Missing mandatory parameter in the element specification: "
1933                 f"{repr(err)}"
1934             )
1935             return None
1936         except AttributeError as err:
1937             logging.error(repr(err))
1938             return None
1939         except SyntaxError as err:
1940             logging.error(
1941                 f"The filter {cond} is not correct. Check if all tags are "
1942                 f"enclosed by apostrophes.\n{repr(err)}"
1943             )
1944             return None
1945
1946     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1947                              continue_on_error=False):
1948         """Filter required data from the given jobs and builds.
1949
1950         The output data structure is:
1951         - job 1
1952           - build 1
1953             - test (or suite) 1 ID:
1954               - param 1
1955               - param 2
1956               ...
1957               - param n
1958             ...
1959             - test (or suite) n ID:
1960             ...
1961           ...
1962           - build n
1963         ...
1964         - job n
1965
        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
1979         """
1980
1981         include = element.get(u"include", None)
1982         if not include:
1983             logging.warning(u"No tests to include, skipping the element.")
1984             return None
1985
1986         if params is None:
1987             params = element.get(u"parameters", None)
1988             if params and u"type" not in params:
1989                 params.append(u"type")
1990
1991         cores = element.get(u"core", None)
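        # If core variants are specified, expand the {core} placeholder in
        # each included test name template, one name per (core, template).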
1992         if cores:
1993             tests = list()
1994             for core in cores:
1995                 for test in include:
1996                     tests.append(test.format(core=core))
1997         else:
1998             tests = include
1999
2000         data = pd.Series()
2001         try:
2002             for job, builds in element[u"data"].items():
2003                 data[job] = pd.Series()
2004                 for build in builds:
2005                     data[job][str(build)] = pd.Series()
2006                     for test in tests:
2007                         try:
2008                             reg_ex = re.compile(str(test).lower())
2009                             for test_id in self.data[job][
2010                                     str(build)][data_set].keys():
2011                                 if re.match(reg_ex, str(test_id).lower()):
2012                                     test_data = self.data[job][
2013                                         str(build)][data_set][test_id]
2014                                     data[job][str(build)][test_id] = pd.Series()
2015                                     if params is None:
2016                                         for param, val in test_data.items():
2017                                             data[job][str(build)][test_id]\
2018                                                 [param] = val
2019                                     else:
2020                                         for param in params:
2021                                             try:
2022                                                 data[job][str(build)][
2023                                                     test_id][param] = \
2024                                                     test_data[param]
2025                                             except KeyError:
2026                                                 data[job][str(build)][
2027                                                     test_id][param] = u"No Data"
2028                         except KeyError as err:
2029                             if continue_on_error:
2030                                 logging.debug(repr(err))
2031                                 continue
2032                             logging.error(repr(err))
2033                             return None
2034             return data
2035
2036         except (KeyError, IndexError, ValueError) as err:
2037             logging.error(
2038                 f"Missing mandatory parameter in the element "
2039                 f"specification: {repr(err)}"
2040             )
2041             return None
2042         except AttributeError as err:
2043             logging.error(repr(err))
2044             return None
2045
2046     @staticmethod
2047     def merge_data(data):
        """Merge data from multiple jobs and builds into a simple data
        structure.
2049
2050         The output data structure is:
2051
2052         - test (suite) 1 ID:
2053           - param 1
2054           - param 2
2055           ...
2056           - param n
2057         ...
2058         - test (suite) n ID:
2059         ...
2060
2061         :param data: Data to merge.
2062         :type data: pandas.Series
2063         :returns: Merged data.
2064         :rtype: pandas.Series
2065         """
2066
2067         logging.info(u"    Merging data ...")
2068
2069         merged_data = pd.Series()
2070         for builds in data.values:
2071             for item in builds.values:
2072                 for item_id, item_data in item.items():
2073                     merged_data[item_id] = item_data
2074         return merged_data
2075
2076     def print_all_oper_data(self):
        """Print all operational data to the console."""
2079
2080         for job in self._input_data.values:
2081             for build in job.values:
2082                 for test_id, test_data in build[u"tests"].items():
2083                     print(f"{test_id}")
2084                     if test_data.get(u"show-run", None) is None:
2085                         continue
2086                     for dut_name, data in test_data[u"show-run"].items():
2087                         if data.get(u"runtime", None) is None:
2088                             continue
2089                         runtime = loads(data[u"runtime"])
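                        # runtime is a list of per-node records, each holding
                        # per-thread lists of "calls", "vectors", "suspends"
                        # and "clocks" counters.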
2090                         try:
2091                             threads_nr = len(runtime[0][u"clocks"])
2092                         except (IndexError, KeyError):
2093                             continue
2094                         threads = OrderedDict(
2095                             {idx: list() for idx in range(threads_nr)})
2096                         for item in runtime:
2097                             for idx in range(threads_nr):
2098                                 if item[u"vectors"][idx] > 0:
2099                                     clocks = item[u"clocks"][idx] / \
2100                                              item[u"vectors"][idx]
2101                                 elif item[u"calls"][idx] > 0:
2102                                     clocks = item[u"clocks"][idx] / \
2103                                              item[u"calls"][idx]
2104                                 elif item[u"suspends"][idx] > 0:
2105                                     clocks = item[u"clocks"][idx] / \
2106                                              item[u"suspends"][idx]
2107                                 else:
2108                                     clocks = 0.0
2109
2110                                 if item[u"calls"][idx] > 0:
2111                                     vectors_call = item[u"vectors"][idx] / \
2112                                                    item[u"calls"][idx]
2113                                 else:
2114                                     vectors_call = 0.0
2115
2116                                 if int(item[u"calls"][idx]) + int(
2117                                         item[u"vectors"][idx]) + \
2118                                         int(item[u"suspends"][idx]):
2119                                     threads[idx].append([
2120                                         item[u"name"],
2121                                         item[u"calls"][idx],
2122                                         item[u"vectors"][idx],
2123                                         item[u"suspends"][idx],
2124                                         clocks,
2125                                         vectors_call
2126                                     ])
2127
2128                         print(f"Host IP: {data.get(u'host', '')}, "
2129                               f"Socket: {data.get(u'socket', '')}")
2130                         for thread_nr, thread in threads.items():
2131                             txt_table = prettytable.PrettyTable(
2132                                 (
2133                                     u"Name",
2134                                     u"Nr of Vectors",
2135                                     u"Nr of Packets",
2136                                     u"Suspends",
2137                                     u"Cycles per Packet",
2138                                     u"Average Vector Size"
2139                                 )
2140                             )
2141                             avg = 0.0
2142                             for row in thread:
2143                                 txt_table.add_row(row)
2144                                 avg += row[-1]
2145                             if len(thread) == 0:
2146                                 avg = u""
2147                             else:
                                avg = u", Average Vector Size per Node: " \
                                      f"{(avg / len(thread)):.2f}"
2150                             th_name = u"main" if thread_nr == 0 \
2151                                 else f"worker_{thread_nr}"
2152                             print(f"{dut_name}, {th_name}{avg}")
2153                             txt_table.float_format = u".2"
2154                             txt_table.align = u"r"
2155                             txt_table.align[u"Name"] = u"l"
2156                             print(f"{txt_table.get_string()}\n")