PAL: Process sh run from telemetry
resources/tools/presentation/input_data_parser.py (csit.git)
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data,
19 - filter the data using tags.
20 """
21
22 import re
23 import copy
24 import resource
25 import logging
26
27 from collections import OrderedDict
28 from os import remove, walk, listdir
29 from os.path import isfile, isdir, join
30 from datetime import datetime as dt
31 from datetime import timedelta
32 from json import loads
33 from json.decoder import JSONDecodeError
34
35 import hdrh.histogram
36 import hdrh.codec
37 import prettytable
38 import pandas as pd
39
40 from robot.api import ExecutionResult, ResultVisitor
41 from robot import errors
42
43 from resources.libraries.python import jumpavg
44 from input_data_files import download_and_unzip_data_file
45 from pal_errors import PresentationError
46
47
48 # Separator used in file names
49 SEPARATOR = u"__"
50
51
52 class ExecutionChecker(ResultVisitor):
53     """Class to traverse through the test suite structure.
54
55     The functionality implemented in this class generates a JSON structure:
56
57     Performance tests:
58
59     {
60         "metadata": {
61             "generated": "Timestamp",
62             "version": "SUT version",
63             "job": "Jenkins job name",
64             "build": "Information about the build"
65         },
66         "suites": {
67             "Suite long name 1": {
68                 "name": "Suite name",
69                 "doc": "Suite 1 documentation",
70                 "parent": "Suite 1 parent",
71                 "level": "Level of the suite in the suite hierarchy"
72             },
73             "Suite long name N": {
74                 "name": "Suite name",
75                 "doc": "Suite N documentation",
76                 "parent": "Suite N parent",
77                 "level": "Level of the suite in the suite hierarchy"
78             }
79         },
80         "tests": {
81             # NDRPDR tests:
82             "ID": {
83                 "name": "Test name",
84                 "parent": "Name of the parent of the test",
85                 "doc": "Test documentation",
86                 "msg": "Test message",
87                 "conf-history": "DUT1 and DUT2 VAT History",
88                 "show-run": "Show Run",
89                 "tags": ["tag 1", "tag 2", "tag n"],
90                 "type": "NDRPDR",
91                 "status": "PASS" | "FAIL",
92                 "throughput": {
93                     "NDR": {
94                         "LOWER": float,
95                         "UPPER": float
96                     },
97                     "PDR": {
98                         "LOWER": float,
99                         "UPPER": float
100                     }
101                 },
102                 "latency": {
103                     "NDR": {
104                         "direction1": {
105                             "min": float,
106                             "avg": float,
107                             "max": float,
108                             "hdrh": str
109                         },
110                         "direction2": {
111                             "min": float,
112                             "avg": float,
113                             "max": float,
114                             "hdrh": str
115                         }
116                     },
117                     "PDR": {
118                         "direction1": {
119                             "min": float,
120                             "avg": float,
121                             "max": float,
122                             "hdrh": str
123                         },
124                         "direction2": {
125                             "min": float,
126                             "avg": float,
127                             "max": float,
128                             "hdrh": str
129                         }
130                     }
131                 }
132             }
133
134             # TCP tests:
135             "ID": {
136                 "name": "Test name",
137                 "parent": "Name of the parent of the test",
138                 "doc": "Test documentation",
139                 "msg": "Test message",
140                 "tags": ["tag 1", "tag 2", "tag n"],
141                 "type": "TCP",
142                 "status": "PASS" | "FAIL",
143                 "result": int
144             }
145
146             # MRR, BMRR tests:
147             "ID": {
148                 "name": "Test name",
149                 "parent": "Name of the parent of the test",
150                 "doc": "Test documentation",
151                 "msg": "Test message",
152                 "tags": ["tag 1", "tag 2", "tag n"],
153                 "type": "MRR" | "BMRR",
154                 "status": "PASS" | "FAIL",
155                 "result": {
156                     "receive-rate": float,
157                     # Average of a list, computed using AvgStdevStats.
158                     # In CSIT-1180, replace with List[float].
159                 }
160             }
161
162             "ID": {
163                 # next test
164             }
165         }
166     }
167
168
169     Functional tests:
170
171     {
172         "metadata": {  # Optional
173             "version": "VPP version",
174             "job": "Jenkins job name",
175             "build": "Information about the build"
176         },
177         "suites": {
178             "Suite name 1": {
179                 "doc": "Suite 1 documentation",
180                 "parent": "Suite 1 parent",
181                 "level": "Level of the suite in the suite hierarchy"
182             },
183             "Suite name N": {
184                 "doc": "Suite N documentation",
185                 "parent": "Suite N parent",
186                 "level": "Level of the suite in the suite hierarchy"
187             }
188         },
189         "tests": {
190             "ID": {
191                 "name": "Test name",
192                 "parent": "Name of the parent of the test",
193                 "doc": "Test documentation",
194                 "msg": "Test message",
195                 "tags": ["tag 1", "tag 2", "tag n"],
196                 "conf-history": "DUT1 and DUT2 VAT History",
197                 "show-run": "Show Run",
198                 "status": "PASS" | "FAIL"
199             },
200             "ID": {
201                 # next test
202             }
203         }
204     }
205
206     .. note:: ID is the lowercase full path to the test.
207     """
208
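    # Illustrative usage (a sketch; mirrors InputData._parse_tests below,
    # the file name and metadata values are assumptions):
    #
    #     from robot.api import ExecutionResult
    #
    #     result = ExecutionResult(u"output.xml")
    #     checker = ExecutionChecker(
    #         metadata={u"job": u"job-name", u"build": u"1"},
    #         mapping=dict(), ignore=list(), for_output=u"report"
    #     )
    #     result.visit(checker)
    #     tests = checker.data[u"tests"]  # keyed by lowercase test ID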
209     REGEX_PLR_RATE = re.compile(
210         r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
211         r'PLRsearch upper bound::?\s(\d+.\d+)'
212     )
213     REGEX_NDRPDR_RATE = re.compile(
214         r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
215         r'NDR_UPPER:\s(\d+.\d+).*\n'
216         r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
217         r'PDR_UPPER:\s(\d+.\d+)'
218     )
219     REGEX_NDRPDR_GBPS = re.compile(
220         r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
221         r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
222         r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
223         r'PDR_UPPER:.*,\s(\d+.\d+)'
224     )
225     REGEX_PERF_MSG_INFO = re.compile(
226         r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
227         r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
228         r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
229         r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
230         r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
231     )
232     REGEX_CPS_MSG_INFO = re.compile(
233         r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
234         r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
235     )
236     REGEX_PPS_MSG_INFO = re.compile(
237         r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
238         r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
239     )
240     REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
241
242     REGEX_VSAP_MSG_INFO = re.compile(
243         r'Transfer Rate: (\d*.\d*).*\n'
244         r'Latency: (\d*.\d*).*\n'
245         r'Completed requests: (\d*).*\n'
246         r'Failed requests: (\d*).*\n'
247         r'Total data transferred: (\d*).*\n'
248         r'Connection [cr]ps rate:\s*(\d*.\d*)'
249     )
250
251     # Needed for CPS and PPS tests
252     REGEX_NDRPDR_LAT_BASE = re.compile(
253         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
254         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
255     )
256     REGEX_NDRPDR_LAT = re.compile(
257         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
258         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
259         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
260         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
261         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
262         r'Latency.*\[\'(.*)\', \'(.*)\'\]'
263     )
264
265     REGEX_VERSION_VPP = re.compile(
266         r"(return STDOUT Version:\s*|"
267         r"VPP Version:\s*|VPP version:\s*)(.*)"
268     )
269     REGEX_VERSION_DPDK = re.compile(
270         r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
271     )
272     REGEX_TCP = re.compile(
273         r'Total\s(rps|cps|throughput):\s(\d*).*$'
274     )
275     REGEX_MRR = re.compile(
276         r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
277         r'tx\s(\d*),\srx\s(\d*)'
278     )
279     REGEX_BMRR = re.compile(
280         r'.*trial results.*: \[(.*)\]'
281     )
282     REGEX_RECONF_LOSS = re.compile(
283         r'Packets lost due to reconfig: (\d*)'
284     )
285     REGEX_RECONF_TIME = re.compile(
286         r'Implied time lost: (\d*.[\de-]*)'
287     )
288     REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
289
290     REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
291
292     REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
293
294     REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
295
296     REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
297
298     REGEX_SH_RUN_HOST = re.compile(
299         r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
300     )
301
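    # Example of what REGEX_NDRPDR_RATE extracts; the message fragment below
    # is illustrative, not verbatim robot output:
    #
    #     msg = (
    #         u"NDR_LOWER: 12.5 ...\n...\n"
    #         u"NDR_UPPER: 12.7 ...\n"
    #         u"PDR_LOWER: 14.1 ...\n...\n"
    #         u"PDR_UPPER: 14.3 ..."
    #     )
    #     ExecutionChecker.REGEX_NDRPDR_RATE.search(msg).groups()
    #     # -> ('12.5', '12.7', '14.1', '14.3')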
302     def __init__(self, metadata, mapping, ignore, for_output):
303         """Initialisation.
304
305         :param metadata: Key-value pairs to be included in "metadata" part of
306             JSON structure.
307         :param mapping: Mapping of the old names of test cases to the new
308             (actual) one.
309         :param ignore: List of TCs to be ignored.
310         :param for_output: Output to be generated from downloaded data.
311         :type metadata: dict
312         :type mapping: dict
313         :type ignore: list
314         :type for_output: str
315         """
316
317         # Type of message to parse out from the test messages
318         self._msg_type = None
319
320         # VPP version
321         self._version = None
322
323         # Timestamp
324         self._timestamp = None
325
326         # Testbed. The testbed is identified by TG node IP address.
327         self._testbed = None
328
329         # Mapping of TCs long names
330         self._mapping = mapping
331
332         # Ignore list
333         self._ignore = ignore
334
335         self._for_output = for_output
336
337         # Number of PAPI History messages found:
338         # 0 - no message
339         # 1 - PAPI History of DUT1
340         # 2 - PAPI History of DUT2
341         self._conf_history_lookup_nr = 0
342
343         self._sh_run_counter = 0
344         self._telemetry_kw_counter = 0
345         self._telemetry_msg_counter = 0
346
347         # Test ID of the currently processed test, i.e. the lowercase
348         # full path to the test
349         self._test_id = None
350
351         # The main data structure
352         self._data = {
353             u"metadata": OrderedDict(),
354             u"suites": OrderedDict(),
355             u"tests": OrderedDict()
356         }
357
358         # Save the provided metadata
359         for key, val in metadata.items():
360             self._data[u"metadata"][key] = val
361
362         # Dictionary defining the methods used to parse different types of
363         # messages
364         self.parse_msg = {
365             u"timestamp": self._get_timestamp,
366             u"vpp-version": self._get_vpp_version,
367             u"dpdk-version": self._get_dpdk_version,
368             u"teardown-papi-history": self._get_papi_history,
369             u"test-show-runtime": self._get_show_run,
370             u"testbed": self._get_testbed,
371             u"test-telemetry": self._get_telemetry
372         }
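        # The dispatch works in two steps: start_setup_kw, start_test_kw and
        # start_teardown_kw set self._msg_type from the keyword name, then
        # start_message routes every Message object to the matching parser
        # via self.parse_msg[self._msg_type](msg).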
373
374     @property
375     def data(self):
376         """Getter - Data parsed from the XML file.
377
378         :returns: Data parsed from the XML file.
379         :rtype: dict
380         """
381         return self._data
382
383     def _get_data_from_mrr_test_msg(self, msg):
384         """Get info from message of MRR performance tests.
385
386         :param msg: Message to be processed.
387         :type msg: str
388         :returns: Processed message or original message if a problem occurs.
389         :rtype: str
390         """
391
392         groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
393         if not groups or groups.lastindex != 1:
394             return u"Test Failed."
395
396         try:
397             data = groups.group(1).split(u", ")
398         except (AttributeError, IndexError, ValueError, KeyError):
399             return u"Test Failed."
400
401         out_str = u"["
402         try:
403             for item in data:
404                 out_str += f"{(float(item) / 1e6):.2f}, "
405             return out_str[:-2] + u"]"
406         except (AttributeError, IndexError, ValueError, KeyError):
407             return u"Test Failed."
408
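    # Illustrative input/output of _get_data_from_mrr_test_msg; the sample
    # message is an assumption (rates in packets per second):
    #
    #     _get_data_from_mrr_test_msg(u"... [27374072.0, 27453930.0]")
    #     # -> u"[27.37, 27.45]"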
409     def _get_data_from_cps_test_msg(self, msg):
410         """Get info from message of NDRPDR CPS tests.
411
412         :param msg: Message to be processed.
413         :type msg: str
414         :returns: Processed message or "Test Failed." if a problem occurs.
415         :rtype: str
416         """
417
418         groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
419         if not groups or groups.lastindex != 2:
420             return u"Test Failed."
421
422         try:
423             return (
424                 f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
425                 f"2. {(float(groups.group(2)) / 1e6):5.2f}"
426             )
427         except (AttributeError, IndexError, ValueError, KeyError):
428             return u"Test Failed."
429
430     def _get_data_from_pps_test_msg(self, msg):
431         """Get info from message of NDRPDR PPS tests.
432
433         :param msg: Message to be processed.
434         :type msg: str
435         :returns: Processed message or "Test Failed." if a problem occurs.
436         :rtype: str
437         """
438
439         groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
440         if not groups or groups.lastindex != 4:
441             return u"Test Failed."
442
443         try:
444             return (
445                 f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
446                 f"{float(groups.group(2)):5.2f}\n"
447                 f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
448                 f"{float(groups.group(4)):5.2f}"
449             )
450         except (AttributeError, IndexError, ValueError, KeyError):
451             return u"Test Failed."
452
453     def _get_data_from_perf_test_msg(self, msg):
454         """Get info from message of NDRPDR performance tests.
455
456         :param msg: Message to be processed.
457         :type msg: str
458         :returns: Processed message or "Test Failed." if a problem occurs.
459         :rtype: str
460         """
461
462         groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
463         if not groups or groups.lastindex != 10:
464             return u"Test Failed."
465
466         try:
467             data = {
468                 u"ndr_low": float(groups.group(1)),
469                 u"ndr_low_b": float(groups.group(2)),
470                 u"pdr_low": float(groups.group(3)),
471                 u"pdr_low_b": float(groups.group(4)),
472                 u"pdr_lat_90_1": groups.group(5),
473                 u"pdr_lat_90_2": groups.group(6),
474                 u"pdr_lat_50_1": groups.group(7),
475                 u"pdr_lat_50_2": groups.group(8),
476                 u"pdr_lat_10_1": groups.group(9),
477                 u"pdr_lat_10_2": groups.group(10),
478             }
479         except (AttributeError, IndexError, ValueError, KeyError):
480             return u"Test Failed."
481
482         def _process_lat(in_str_1, in_str_2):
483             """Extract min, avg, max values from latency string.
484
485             :param in_str_1: Latency string for one direction produced by robot
486                 framework.
487             :param in_str_2: Latency string for second direction produced by
488                 robot framework.
489             :type in_str_1: str
490             :type in_str_2: str
491             :returns: Processed latency string or None if a problem occurs.
492             :rtype: tuple
493             """
494             in_list_1 = in_str_1.split('/', 3)
495             in_list_2 = in_str_2.split('/', 3)
496
497             if len(in_list_1) != 4 or len(in_list_2) != 4:
498                 return None
499
500             in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
501             try:
502                 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
503             except hdrh.codec.HdrLengthException:
504                 return None
505
506             in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
507             try:
508                 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
509             except hdrh.codec.HdrLengthException:
510                 return None
511
512             if hdr_lat_1 and hdr_lat_2:
513                 hdr_lat = (
514                     hdr_lat_1.get_value_at_percentile(50.0),
515                     hdr_lat_1.get_value_at_percentile(90.0),
516                     hdr_lat_1.get_value_at_percentile(99.0),
517                     hdr_lat_2.get_value_at_percentile(50.0),
518                     hdr_lat_2.get_value_at_percentile(90.0),
519                     hdr_lat_2.get_value_at_percentile(99.0)
520                 )
521
522                 if all(hdr_lat):
523                     return hdr_lat
524
525             return None
526
527         try:
528             out_msg = (
529                 f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
530                 f"{data[u'ndr_low_b']:5.2f}"
531                 f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
532                 f"{data[u'pdr_low_b']:5.2f}"
533             )
534             latency = (
535                 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
536                 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
537                 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
538             )
539             if all(latency):
540                 max_len = len(str(max((max(item) for item in latency))))
541                 max_len = 4 if max_len < 4 else max_len
542
543                 for idx, lat in enumerate(latency):
544                     if not idx:
545                         out_msg += u"\n"
546                     out_msg += (
547                         f"\n{idx + 3}. "
548                         f"{lat[0]:{max_len}d} "
549                         f"{lat[1]:{max_len}d} "
550                         f"{lat[2]:{max_len}d}      "
551                         f"{lat[3]:{max_len}d} "
552                         f"{lat[4]:{max_len}d} "
553                         f"{lat[5]:{max_len}d} "
554                     )
555
556             return out_msg
557
558         except (AttributeError, IndexError, ValueError, KeyError):
559             return u"Test Failed."
560
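    # _process_lat above decodes the base64-encoded hdrh histogram carried as
    # the fourth field of each "min/avg/max/hdrh" latency string and reports
    # the P50/P90/P99 values per direction. A minimal sketch of the same
    # decoding ("encoded" is a placeholder for a real payload):
    #
    #     import hdrh.histogram
    #
    #     hist = hdrh.histogram.HdrHistogram.decode(encoded)
    #     p50 = hist.get_value_at_percentile(50.0)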
561     def _get_testbed(self, msg):
562         """Called when extraction of testbed IP is required.
563         The testbed is identified by TG node IP address.
564
565         :param msg: Message to process.
566         :type msg: Message
567         :returns: Nothing.
568         """
569
570         if msg.message.count(u"Setup of TG node") or \
571                 msg.message.count(u"Setup of node TG host"):
572             reg_tg_ip = re.compile(
573                 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
574             try:
575                 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
576             except (KeyError, ValueError, IndexError, AttributeError):
577                 pass
578             finally:
579                 self._data[u"metadata"][u"testbed"] = self._testbed
580                 self._msg_type = None
581
582     def _get_vpp_version(self, msg):
583         """Called when extraction of VPP version is required.
584
585         :param msg: Message to process.
586         :type msg: Message
587         :returns: Nothing.
588         """
589
590         if msg.message.count(u"return STDOUT Version:") or \
591                 msg.message.count(u"VPP Version:") or \
592                 msg.message.count(u"VPP version:"):
593             self._version = str(
594                 re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
595             )
596             self._data[u"metadata"][u"version"] = self._version
597             self._msg_type = None
598
599     def _get_dpdk_version(self, msg):
600         """Called when extraction of DPDK version is required.
601
602         :param msg: Message to process.
603         :type msg: Message
604         :returns: Nothing.
605         """
606
607         if msg.message.count(u"DPDK Version:"):
608             try:
609                 self._version = str(re.search(
610                     self.REGEX_VERSION_DPDK, msg.message).group(2))
611                 self._data[u"metadata"][u"version"] = self._version
612             except IndexError:
613                 pass
614             finally:
615                 self._msg_type = None
616
617     def _get_timestamp(self, msg):
618         """Called when extraction of timestamp is required.
619
620         :param msg: Message to process.
621         :type msg: Message
622         :returns: Nothing.
623         """
624
625         self._timestamp = msg.timestamp[:14]
626         self._data[u"metadata"][u"generated"] = self._timestamp
627         self._msg_type = None
628
629     def _get_papi_history(self, msg):
630         """Called when extraction of PAPI command history is required.
631
632         :param msg: Message to process.
633         :type msg: Message
634         :returns: Nothing.
635         """
636         if msg.message.count(u"PAPI command history:"):
637             self._conf_history_lookup_nr += 1
638             if self._conf_history_lookup_nr == 1:
639                 self._data[u"tests"][self._test_id][u"conf-history"] = str()
640             else:
641                 self._msg_type = None
642             text = re.sub(
643                 r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
644                 u"",
645                 msg.message,
646                 count=1
647             ).replace(u'"', u"'")
648             self._data[u"tests"][self._test_id][u"conf-history"] += (
649                 f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
650             )
651
652     def _get_show_run(self, msg):
653         """Called when extraction of VPP operational data (output of CLI command
654         Show Runtime) is required.
655
656         :param msg: Message to process.
657         :type msg: Message
658         :returns: Nothing.
659         """
660
661         if not msg.message.count(u"stats runtime"):
662             return
663
664         # Temporary solution
665         if self._sh_run_counter > 1:
666             return
667
668         if u"show-run" not in self._data[u"tests"][self._test_id].keys():
669             self._data[u"tests"][self._test_id][u"show-run"] = dict()
670
671         groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
672         if not groups:
673             return
674         try:
675             host = groups.group(1)
676         except (AttributeError, IndexError):
677             host = u""
678         try:
679             sock = groups.group(2)
680         except (AttributeError, IndexError):
681             sock = u""
682
683         dut = u"dut{nr}".format(
684             nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
685
686         self._data[u'tests'][self._test_id][u'show-run'][dut] = \
687             copy.copy(
688                 {
689                     u"host": host,
690                     u"socket": sock,
691                     u"runtime": str(msg.message).replace(u' ', u'').
692                                 replace(u'\n', u'').replace(u"'", u'"').
693                                 replace(u'b"', u'"').replace(u'u"', u'"').
694                                 split(u":", 1)[1]
695                 }
696             )
697
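    # The stored "runtime" value is the text after the first colon of the
    # "stats runtime" message, with whitespace stripped and quotes normalised
    # so it can later be loaded as JSON. Illustrative shape of one stored
    # entry (host and socket values are assumptions):
    #
    #     {u"host": u"10.0.0.1", u"socket": u"/run/vpp/api.sock",
    #      u"runtime": u'[{"name":"ip4-lookup",...}]'}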
698     def _get_telemetry(self, msg):
699         """Called when extraction of VPP telemetry data is required.
700
701         :param msg: Message to process.
702         :type msg: Message
703         :returns: Nothing.
704         """
705
706         if self._telemetry_kw_counter > 1:
707             return
708         if not msg.message.count(u"# TYPE vpp_runtime_calls"):
709             return
710
711         if u"telemetry-show-run" not in \
712                 self._data[u"tests"][self._test_id].keys():
713             self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()
714
715         self._telemetry_msg_counter += 1
716         groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
717         if not groups:
718             return
719         try:
720             host = groups.group(1)
721         except (AttributeError, IndexError):
722             host = u""
723         try:
724             sock = groups.group(2)
725         except (AttributeError, IndexError):
726             sock = u""
727         runtime = {
728             u"source_type": u"node",
729             u"source_id": host,
730             u"msg_type": u"metric",
731             u"log_level": u"INFO",
732             u"timestamp": msg.timestamp,
733             u"msg": u"show_runtime",
734             u"host": host,
735             u"socket": sock,
736             u"data": list()
737         }
738         for line in msg.message.splitlines():
739             if not line.startswith(u"vpp_runtime_"):
740                 continue
741             try:
742                 params, value, timestamp = line.rsplit(u" ", maxsplit=2)
743                 cut = params.index(u"{")
744                 name = params[:cut].split(u"_", maxsplit=2)[-1]
745                 labels = eval(
746                     u"dict" + params[cut:].replace('{', '(').replace('}', ')')
747                 )
748                 labels[u"graph_node"] = labels.pop(u"name")
749                 runtime[u"data"].append(
750                     {
751                         u"name": name,
752                         u"value": value,
753                         u"timestamp": timestamp,
754                         u"labels": labels
755                     }
756                 )
757             except (TypeError, ValueError, IndexError):
758                 continue
759         self._data[u'tests'][self._test_id][u'telemetry-show-run']\
760             [f"dut{self._telemetry_msg_counter}"] = copy.copy(
761                 {
762                     u"host": host,
763                     u"socket": sock,
764                     u"runtime": runtime
765                 }
766             )
767
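    # Illustrative telemetry line and the record _get_telemetry builds from
    # it (label names and values are assumptions):
    #
    #     vpp_runtime_calls{name="ip4-lookup",thread_id="1"} 105 1624000000
    #
    # parses to:
    #
    #     {u"name": u"calls", u"value": u"105", u"timestamp": u"1624000000",
    #      u"labels": {u"thread_id": u"1", u"graph_node": u"ip4-lookup"}}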
768     def _get_ndrpdr_throughput(self, msg):
769         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
770         message.
771
772         :param msg: The test message to be parsed.
773         :type msg: str
774         :returns: Parsed data as a dict and the status (PASS/FAIL).
775         :rtype: tuple(dict, str)
776         """
777
778         throughput = {
779             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
780             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
781         }
782         status = u"FAIL"
783         groups = re.search(self.REGEX_NDRPDR_RATE, msg)
784
785         if groups is not None:
786             try:
787                 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
788                 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
789                 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
790                 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
791                 status = u"PASS"
792             except (IndexError, ValueError):
793                 pass
794
795         return throughput, status
796
797     def _get_ndrpdr_throughput_gbps(self, msg):
798         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
799         test message.
800
801         :param msg: The test message to be parsed.
802         :type msg: str
803         :returns: Parsed data as a dict and the status (PASS/FAIL).
804         :rtype: tuple(dict, str)
805         """
806
807         gbps = {
808             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
809             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
810         }
811         status = u"FAIL"
812         groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
813
814         if groups is not None:
815             try:
816                 gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
817                 gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
818                 gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
819                 gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
820                 status = u"PASS"
821             except (IndexError, ValueError):
822                 pass
823
824         return gbps, status
825
826     def _get_plr_throughput(self, msg):
827         """Get PLRsearch lower bound and PLRsearch upper bound from the test
828         message.
829
830         :param msg: The test message to be parsed.
831         :type msg: str
832         :returns: Parsed data as a dict and the status (PASS/FAIL).
833         :rtype: tuple(dict, str)
834         """
835
836         throughput = {
837             u"LOWER": -1.0,
838             u"UPPER": -1.0
839         }
840         status = u"FAIL"
841         groups = re.search(self.REGEX_PLR_RATE, msg)
842
843         if groups is not None:
844             try:
845                 throughput[u"LOWER"] = float(groups.group(1))
846                 throughput[u"UPPER"] = float(groups.group(2))
847                 status = u"PASS"
848             except (IndexError, ValueError):
849                 pass
850
851         return throughput, status
852
853     def _get_ndrpdr_latency(self, msg):
854         """Get LATENCY from the test message.
855
856         :param msg: The test message to be parsed.
857         :type msg: str
858         :returns: Parsed data as a dict and the status (PASS/FAIL).
859         :rtype: tuple(dict, str)
860         """
861         latency_default = {
862             u"min": -1.0,
863             u"avg": -1.0,
864             u"max": -1.0,
865             u"hdrh": u""
866         }
867         latency = {
868             u"NDR": {
869                 u"direction1": copy.copy(latency_default),
870                 u"direction2": copy.copy(latency_default)
871             },
872             u"PDR": {
873                 u"direction1": copy.copy(latency_default),
874                 u"direction2": copy.copy(latency_default)
875             },
876             u"LAT0": {
877                 u"direction1": copy.copy(latency_default),
878                 u"direction2": copy.copy(latency_default)
879             },
880             u"PDR10": {
881                 u"direction1": copy.copy(latency_default),
882                 u"direction2": copy.copy(latency_default)
883             },
884             u"PDR50": {
885                 u"direction1": copy.copy(latency_default),
886                 u"direction2": copy.copy(latency_default)
887             },
888             u"PDR90": {
889                 u"direction1": copy.copy(latency_default),
890                 u"direction2": copy.copy(latency_default)
891             },
892         }
893
894         groups = re.search(self.REGEX_NDRPDR_LAT, msg)
895         if groups is None:
896             groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
897         if groups is None:
898             return latency, u"FAIL"
899
900         def process_latency(in_str):
901             """Return object with parsed latency values.
902
903             TODO: Define class for the return type.
904
905             :param in_str: Input string, min/avg/max/hdrh format.
906             :type in_str: str
907             :returns: Dict with corresponding keys, except hdrh float values.
908             :rtype: dict
909             :throws IndexError: If in_str does not have enough substrings.
910             :throws ValueError: If a substring does not convert to float.
911             """
912             in_list = in_str.split('/', 3)
913
914             rval = {
915                 u"min": float(in_list[0]),
916                 u"avg": float(in_list[1]),
917                 u"max": float(in_list[2]),
918                 u"hdrh": u""
919             }
920
921             if len(in_list) == 4:
922                 rval[u"hdrh"] = str(in_list[3])
923
924             return rval
925
926         try:
927             latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
928             latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
929             latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
930             latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
931             if groups.lastindex == 4:
932                 return latency, u"PASS"
933         except (IndexError, ValueError):
934             pass
935
936         try:
937             latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
938             latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
939             latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
940             latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
941             latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
942             latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
943             latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
944             latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
945             if groups.lastindex == 12:
946                 return latency, u"PASS"
947         except (IndexError, ValueError):
948             pass
949
950         return latency, u"FAIL"
951
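    # Illustrative input/output of process_latency; "<base64 hdrh>" stands in
    # for a real encoded histogram:
    #
    #     process_latency(u"1/2/4/<base64 hdrh>")
    #     # -> {u"min": 1.0, u"avg": 2.0, u"max": 4.0,
    #     #     u"hdrh": u"<base64 hdrh>"}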
952     @staticmethod
953     def _get_hoststack_data(msg, tags):
954         """Get data from the hoststack test message.
955
956         :param msg: The test message to be parsed.
957         :param tags: Test tags.
958         :type msg: str
959         :type tags: list
960         :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
961         :rtype: tuple(dict, str)
962         """
963         result = dict()
964         status = u"FAIL"
965
966         msg = msg.replace(u"'", u'"').replace(u" ", u"")
967         if u"LDPRELOAD" in tags:
968             try:
969                 result = loads(msg)
970                 status = u"PASS"
971             except JSONDecodeError:
972                 pass
973         elif u"VPPECHO" in tags:
974             try:
975                 msg_lst = msg.replace(u"}{", u"} {").split(u" ")
976                 result = dict(
977                     client=loads(msg_lst[0]),
978                     server=loads(msg_lst[1])
979                 )
980                 status = u"PASS"
981             except (JSONDecodeError, IndexError):
982                 pass
983
984         return result, status
985
986     def _get_vsap_data(self, msg, tags):
987         """Get data from the vsap test message.
988
989         :param msg: The test message to be parsed.
990         :param tags: Test tags.
991         :type msg: str
992         :type tags: list
993         :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
994         :rtype: tuple(dict, str)
995         """
996         result = dict()
997         status = u"FAIL"
998
999         groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
1000         if groups is not None:
1001             try:
1002                 result[u"transfer-rate"] = float(groups.group(1)) * 1e3
1003                 result[u"latency"] = float(groups.group(2))
1004                 result[u"completed-requests"] = int(groups.group(3))
1005                 result[u"failed-requests"] = int(groups.group(4))
1006                 result[u"bytes-transferred"] = int(groups.group(5))
1007                 if u"TCP_CPS" in tags:
1008                     result[u"cps"] = float(groups.group(6))
1009                 elif u"TCP_RPS" in tags:
1010                     result[u"rps"] = float(groups.group(6))
1011                 else:
1012                     return result, status
1013                 status = u"PASS"
1014             except (IndexError, ValueError):
1015                 pass
1016
1017         return result, status
1018
1019     def visit_suite(self, suite):
1020         """Implements traversing through the suite and its direct children.
1021
1022         :param suite: Suite to process.
1023         :type suite: Suite
1024         :returns: Nothing.
1025         """
1026         if self.start_suite(suite) is not False:
1027             suite.suites.visit(self)
1028             suite.tests.visit(self)
1029             self.end_suite(suite)
1030
1031     def start_suite(self, suite):
1032         """Called when suite starts.
1033
1034         :param suite: Suite to process.
1035         :type suite: Suite
1036         :returns: Nothing.
1037         """
1038
1039         try:
1040             parent_name = suite.parent.name
1041         except AttributeError:
1042             return
1043
1044         self._data[u"suites"][suite.longname.lower().
1045                               replace(u'"', u"'").
1046                               replace(u" ", u"_")] = {
1047                                   u"name": suite.name.lower(),
1048                                   u"doc": suite.doc,
1049                                   u"parent": parent_name,
1050                                   u"level": len(suite.longname.split(u"."))
1051                               }
1052
1053         suite.keywords.visit(self)
1054
1055     def end_suite(self, suite):
1056         """Called when suite ends.
1057
1058         :param suite: Suite to process.
1059         :type suite: Suite
1060         :returns: Nothing.
1061         """
1062
1063     def visit_test(self, test):
1064         """Implements traversing through the test.
1065
1066         :param test: Test to process.
1067         :type test: Test
1068         :returns: Nothing.
1069         """
1070         if self.start_test(test) is not False:
1071             test.keywords.visit(self)
1072             self.end_test(test)
1073
1074     def start_test(self, test):
1075         """Called when test starts.
1076
1077         :param test: Test to process.
1078         :type test: Test
1079         :returns: Nothing.
1080         """
1081
1082         self._sh_run_counter = 0
1083         self._telemetry_kw_counter = 0
1084         self._telemetry_msg_counter = 0
1085
1086         longname_orig = test.longname.lower()
1087
1088         # Check the ignore list
1089         if longname_orig in self._ignore:
1090             return
1091
1092         tags = [str(tag) for tag in test.tags]
1093         test_result = dict()
1094
1095         # Change the TC long name and name if defined in the mapping table
1096         longname = self._mapping.get(longname_orig, None)
1097         if longname is not None:
1098             name = longname.split(u'.')[-1]
1099             logging.debug(
1100                 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1101                 f"{name}"
1102             )
1103         else:
1104             longname = longname_orig
1105             name = test.name.lower()
1106
1107         # Remove TC number from the TC long name (backward compatibility):
1108         self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
1109         # Remove TC number from the TC name (not needed):
1110         test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
1111
1112         test_result[u"parent"] = test.parent.name.lower()
1113         test_result[u"tags"] = tags
1114         test_result["doc"] = test.doc
1115         test_result[u"type"] = u""
1116         test_result[u"status"] = test.status
1117         test_result[u"starttime"] = test.starttime
1118         test_result[u"endtime"] = test.endtime
1119
1120         if test.status == u"PASS":
1121             if u"NDRPDR" in tags:
1122                 if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
1123                     test_result[u"msg"] = self._get_data_from_pps_test_msg(
1124                         test.message)
1125                 elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1126                     test_result[u"msg"] = self._get_data_from_cps_test_msg(
1127                         test.message)
1128                 else:
1129                     test_result[u"msg"] = self._get_data_from_perf_test_msg(
1130                         test.message)
1131             elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1132                 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1133                     test.message)
1134             else:
1135                 test_result[u"msg"] = test.message
1136         else:
1137             test_result[u"msg"] = test.message
1138
1139         if u"PERFTEST" in tags:
1140             # Replace info about cores (e.g. -1c-) with the info about threads
1141             # and cores (e.g. -1t1c-) in the long test case names and in the
1142             # test case names if necessary.
1143             groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
1144             if not groups:
1145                 tag_count = 0
1146                 tag_tc = str()
1147                 for tag in test_result[u"tags"]:
1148                     groups = re.search(self.REGEX_TC_TAG, tag)
1149                     if groups:
1150                         tag_count += 1
1151                         tag_tc = tag
1152
1153                 if tag_count == 1:
1154                     self._test_id = re.sub(
1155                         self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1156                         self._test_id, count=1
1157                     )
1158                     test_result[u"name"] = re.sub(
1159                         self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1160                         test_result["name"], count=1
1161                     )
1162                 else:
1163                     test_result[u"status"] = u"FAIL"
1164                     self._data[u"tests"][self._test_id] = test_result
1165                     logging.debug(
1166                         f"The test {self._test_id} has none or more than "
1167                         f"one multi-threading tag.\n"
1168                         f"Tags: {test_result[u'tags']}"
1169                     )
1170                     return
1171
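        # Illustrative effect of the rewrite above (names are assumptions):
        # a test ID containing u"-2c-" whose tags include u"4T2C" is renamed
        # to contain u"-4t2c-", restoring the thread count missing from
        # new-style test names.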
1172         if u"DEVICETEST" in tags:
1173             test_result[u"type"] = u"DEVICETEST"
1174         elif u"NDRPDR" in tags:
1175             if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1176                 test_result[u"type"] = u"CPS"
1177             else:
1178                 test_result[u"type"] = u"NDRPDR"
1179             if test.status == u"PASS":
1180                 test_result[u"throughput"], test_result[u"status"] = \
1181                     self._get_ndrpdr_throughput(test.message)
1182                 test_result[u"gbps"], test_result[u"status"] = \
1183                     self._get_ndrpdr_throughput_gbps(test.message)
1184                 test_result[u"latency"], test_result[u"status"] = \
1185                     self._get_ndrpdr_latency(test.message)
1186         elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1187             if u"MRR" in tags:
1188                 test_result[u"type"] = u"MRR"
1189             else:
1190                 test_result[u"type"] = u"BMRR"
1191             if test.status == u"PASS":
1192                 test_result[u"result"] = dict()
1193                 groups = re.search(self.REGEX_BMRR, test.message)
1194                 if groups is not None:
1195                     items_str = groups.group(1)
1196                     items_float = [
1197                         float(item.strip().replace(u"'", u""))
1198                         for item in items_str.split(",")
1199                     ]
1200                     # Use whole list in CSIT-1180.
1201                     stats = jumpavg.AvgStdevStats.for_runs(items_float)
1202                     test_result[u"result"][u"samples"] = items_float
1203                     test_result[u"result"][u"receive-rate"] = stats.avg
1204                     test_result[u"result"][u"receive-stdev"] = stats.stdev
1205                 else:
1206                     groups = re.search(self.REGEX_MRR, test.message)
1207                     test_result[u"result"][u"receive-rate"] = \
1208                         float(groups.group(3)) / float(groups.group(1))
1209         elif u"SOAK" in tags:
1210             test_result[u"type"] = u"SOAK"
1211             if test.status == u"PASS":
1212                 test_result[u"throughput"], test_result[u"status"] = \
1213                     self._get_plr_throughput(test.message)
1214         elif u"HOSTSTACK" in tags:
1215             test_result[u"type"] = u"HOSTSTACK"
1216             if test.status == u"PASS":
1217                 test_result[u"result"], test_result[u"status"] = \
1218                     self._get_hoststack_data(test.message, tags)
1219         elif u"LDP_NGINX" in tags:
1220             test_result[u"type"] = u"LDP_NGINX"
1221             test_result[u"result"], test_result[u"status"] = \
1222                 self._get_vsap_data(test.message, tags)
1223         # elif u"TCP" in tags:  # This might not be used
1224         #     test_result[u"type"] = u"TCP"
1225         #     if test.status == u"PASS":
1226         #         groups = re.search(self.REGEX_TCP, test.message)
1227         #         test_result[u"result"] = int(groups.group(2))
1228         elif u"RECONF" in tags:
1229             test_result[u"type"] = u"RECONF"
1230             if test.status == u"PASS":
1231                 test_result[u"result"] = None
1232                 try:
1233                     grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1234                     grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1235                     test_result[u"result"] = {
1236                         u"loss": int(grps_loss.group(1)),
1237                         u"time": float(grps_time.group(1))
1238                     }
1239                 except (AttributeError, IndexError, ValueError, TypeError):
1240                     test_result[u"status"] = u"FAIL"
1241         else:
1242             test_result[u"status"] = u"FAIL"
1243
1244         self._data[u"tests"][self._test_id] = test_result
1245
1246     def end_test(self, test):
1247         """Called when test ends.
1248
1249         :param test: Test to process.
1250         :type test: Test
1251         :returns: Nothing.
1252         """
1253
1254     def visit_keyword(self, keyword):
1255         """Implements traversing through the keyword and its child keywords.
1256
1257         :param keyword: Keyword to process.
1258         :type keyword: Keyword
1259         :returns: Nothing.
1260         """
1261         if self.start_keyword(keyword) is not False:
1262             self.end_keyword(keyword)
1263
1264     def start_keyword(self, keyword):
1265         """Called when keyword starts. Default implementation does nothing.
1266
1267         :param keyword: Keyword to process.
1268         :type keyword: Keyword
1269         :returns: Nothing.
1270         """
1271         try:
1272             if keyword.type == u"setup":
1273                 self.visit_setup_kw(keyword)
1274             elif keyword.type == u"teardown":
1275                 self.visit_teardown_kw(keyword)
1276             else:
1277                 self.visit_test_kw(keyword)
1278         except AttributeError:
1279             pass
1280
1281     def end_keyword(self, keyword):
1282         """Called when keyword ends. Default implementation does nothing.
1283
1284         :param keyword: Keyword to process.
1285         :type keyword: Keyword
1286         :returns: Nothing.
1287         """
1288
1289     def visit_test_kw(self, test_kw):
1290         """Implements traversing through the test keyword and its child
1291         keywords.
1292
1293         :param test_kw: Keyword to process.
1294         :type test_kw: Keyword
1295         :returns: Nothing.
1296         """
1297         for keyword in test_kw.keywords:
1298             if self.start_test_kw(keyword) is not False:
1299                 self.visit_test_kw(keyword)
1300                 self.end_test_kw(keyword)
1301
1302     def start_test_kw(self, test_kw):
1303         """Called when test keyword starts. Default implementation does
1304         nothing.
1305
1306         :param test_kw: Keyword to process.
1307         :type test_kw: Keyword
1308         :returns: Nothing.
1309         """
1310         if self._for_output == u"trending":
1311             return
1312
1313         if test_kw.name.count(u"Run Telemetry On All Duts"):
1314             self._msg_type = u"test-telemetry"
1315             self._telemetry_kw_counter += 1
1316         elif test_kw.name.count(u"Show Runtime On All Duts"):
1317             self._msg_type = u"test-show-runtime"
1318             self._sh_run_counter += 1
1319         else:
1320             return
1321         test_kw.messages.visit(self)
1322
1323     def end_test_kw(self, test_kw):
1324         """Called when keyword ends. Default implementation does nothing.
1325
1326         :param test_kw: Keyword to process.
1327         :type test_kw: Keyword
1328         :returns: Nothing.
1329         """
1330
1331     def visit_setup_kw(self, setup_kw):
1332         """Implements traversing through the setup keyword and its child
1333         keywords.
1334
1335         :param setup_kw: Keyword to process.
1336         :type setup_kw: Keyword
1337         :returns: Nothing.
1338         """
1339         for keyword in setup_kw.keywords:
1340             if self.start_setup_kw(keyword) is not False:
1341                 self.visit_setup_kw(keyword)
1342                 self.end_setup_kw(keyword)
1343
1344     def start_setup_kw(self, setup_kw):
1345         """Called when setup keyword starts. Default implementation does
1346         nothing.
1347
1348         :param setup_kw: Keyword to process.
1349         :type setup_kw: Keyword
1350         :returns: Nothing.
1351         """
1352         if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1353                 and not self._version:
1354             self._msg_type = u"vpp-version"
1355         elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
1356                 not self._version:
1357             self._msg_type = u"dpdk-version"
1358         elif setup_kw.name.count(u"Set Global Variable") \
1359                 and not self._timestamp:
1360             self._msg_type = u"timestamp"
1361         elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1362             self._msg_type = u"testbed"
1363         else:
1364             return
1365         setup_kw.messages.visit(self)
1366
1367     def end_setup_kw(self, setup_kw):
1368         """Called when keyword ends. Default implementation does nothing.
1369
1370         :param setup_kw: Keyword to process.
1371         :type setup_kw: Keyword
1372         :returns: Nothing.
1373         """
1374
1375     def visit_teardown_kw(self, teardown_kw):
1376         """Implements traversing through the teardown keyword and its child
1377         keywords.
1378
1379         :param teardown_kw: Keyword to process.
1380         :type teardown_kw: Keyword
1381         :returns: Nothing.
1382         """
1383         for keyword in teardown_kw.keywords:
1384             if self.start_teardown_kw(keyword) is not False:
1385                 self.visit_teardown_kw(keyword)
1386                 self.end_teardown_kw(keyword)
1387
1388     def start_teardown_kw(self, teardown_kw):
1389         """Called when teardown keyword starts.
1390
1391         :param teardown_kw: Keyword to process.
1392         :type teardown_kw: Keyword
1393         :returns: Nothing.
1394         """
1395         if teardown_kw.name.count(u"Show Papi History On All Duts"):
1396             self._conf_history_lookup_nr = 0
1397             self._msg_type = u"teardown-papi-history"
1398             teardown_kw.messages.visit(self)
1399
1400     def end_teardown_kw(self, teardown_kw):
1401         """Called when keyword ends. Default implementation does nothing.
1402
1403         :param teardown_kw: Keyword to process.
1404         :type teardown_kw: Keyword
1405         :returns: Nothing.
1406         """
1407
1408     def visit_message(self, msg):
1409         """Implements visiting the message.
1410
1411         :param msg: Message to process.
1412         :type msg: Message
1413         :returns: Nothing.
1414         """
1415         if self.start_message(msg) is not False:
1416             self.end_message(msg)
1417
1418     def start_message(self, msg):
1419         """Called when message starts. Get required information from messages:
1420         - VPP version.
1421
1422         :param msg: Message to process.
1423         :type msg: Message
1424         :returns: Nothing.
1425         """
1426         if self._msg_type:
1427             self.parse_msg[self._msg_type](msg)
1428
1429     def end_message(self, msg):
1430         """Called when message ends. Default implementation does nothing.
1431
1432         :param msg: Message to process.
1433         :type msg: Message
1434         :returns: Nothing.
1435         """
1436
1437
1438 class InputData:
1439     """Input data
1440
1441     The data is extracted from output.xml files generated by Jenkins jobs and
1442     stored in pandas' DataFrames.
1443
1444     The data structure:
1445     - job name
1446       - build number
1447         - metadata
1448           (as described in ExecutionChecker documentation)
1449         - suites
1450           (as described in ExecutionChecker documentation)
1451         - tests
1452           (as described in ExecutionChecker documentation)
1453     """
1454
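    # Illustrative access pattern once builds are downloaded and parsed
    # (job and build names are assumptions):
    #
    #     input_data = InputData(spec, u"report")
    #     tests = input_data.tests(u"csit-job-name", u"1")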
1455     def __init__(self, spec, for_output):
1456         """Initialization.
1457
1458         :param spec: Specification.
1459         :param for_output: Output to be generated from downloaded data.
1460         :type spec: Specification
1461         :type for_output: str
1462         """
1463
1464         # Specification:
1465         self._cfg = spec
1466
1467         self._for_output = for_output
1468
1469         # Data store:
1470         self._input_data = pd.Series()
1471
1472     @property
1473     def data(self):
1474         """Getter - Input data.
1475
1476         :returns: Input data
1477         :rtype: pandas.Series
1478         """
1479         return self._input_data
1480
1481     def metadata(self, job, build):
1482         """Getter - metadata
1483
1484         :param job: Job whose metadata we want.
1485         :param build: Build whose metadata we want.
1486         :type job: str
1487         :type build: str
1488         :returns: Metadata
1489         :rtype: pandas.Series
1490         """
1491         return self.data[job][build][u"metadata"]
1492
1493     def suites(self, job, build):
1494         """Getter - suites
1495
1496         :param job: Job whose suites we want.
1497         :param build: Build whose suites we want.
1498         :type job: str
1499         :type build: str
1500         :returns: Suites.
1501         :rtype: pandas.Series
1502         """
1503         return self.data[job][str(build)][u"suites"]
1504
1505     def tests(self, job, build):
1506         """Getter - tests
1507
1508         :param job: Job whose tests we want.
1509         :param build: Build whose tests we want.
1510         :type job: str
1511         :type build: str
1512         :returns: Tests.
1513         :rtype: pandas.Series
1514         """
1515         return self.data[job][build][u"tests"]
1516
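    # Getter sketch (values illustrative): builds are keyed by their string
    # form, so both lines below address the same Series:
    #
    #     in_data.tests(u"csit-vpp-perf-mrr-daily-master", u"123")
    #     in_data.data[u"csit-vpp-perf-mrr-daily-master"][u"123"][u"tests"]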
1517     def _parse_tests(self, job, build):
1518         """Process data from robot output.xml file and return JSON structured
1519         data.
1520
1521         :param job: The name of job which build output data will be processed.
1522         :param build: The build which output data will be processed.
1523         :type job: str
1524         :type build: dict
1525         :returns: JSON data structure.
1526         :rtype: dict
1527         """
1528
1529         metadata = {
1530             u"job": job,
1531             u"build": build
1532         }
1533
1534         with open(build[u"file-name"], u'r') as data_file:
1535             try:
1536                 result = ExecutionResult(data_file)
1537             except errors.DataError as err:
1538                 logging.error(
1539                     f"Error occurred while parsing output.xml: {repr(err)}"
1540                 )
1541                 return None
1542         checker = ExecutionChecker(
1543             metadata, self._cfg.mapping, self._cfg.ignore, self._for_output
1544         )
1545         result.visit(checker)
1546
1547         return checker.data
1548
1549     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1550         """Download and parse the input data file.
1551
1552         :param job: Name of the Jenkins job which generated the processed input
1553             file.
1554         :param build: Information about the Jenkins build which generated the
1555             processed input file.
1556         :param repeat: Repeat the download the specified number of times if it
1557             is not successful.
1558         :param pid: PID of the process executing this method.
1559         :type job: str
1560         :type build: dict
1561         :type repeat: int
1562         :type pid: int
1563         """
1564
1565         logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1566
1567         state = u"failed"
1568         success = False
1569         data = None
1570         do_repeat = repeat
1571         while do_repeat:
1572             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1573             if success:
1574                 break
1575             do_repeat -= 1
1576         if not success:
1577             logging.error(
1578                 f"It is not possible to download the input data file from the "
1579                 f"job {job}, build {build[u'build']}, or it is damaged. "
1580                 f"Skipped."
1581             )
1582         if success:
1583             logging.info(f"  Processing data from build {build[u'build']}")
1584             data = self._parse_tests(job, build)
1585             if data is None:
1586                 logging.error(
1587                     f"Input data file from the job {job}, build "
1588                     f"{build[u'build']} is damaged. Skipped."
1589                 )
1590             else:
1591                 state = u"processed"
1592
1593             try:
1594                 remove(build[u"file-name"])
1595             except OSError as err:
1596                 logging.error(
1597                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1598                 )
1599
1600         # If the time-period is defined in the specification file, remove all
1601         # files which are outside the time period.
1602         is_last = False
1603         timeperiod = self._cfg.environment.get(u"time-period", None)
1604         if timeperiod and data:
1605             now = dt.utcnow()
1606             timeperiod = timedelta(int(timeperiod))
1607             metadata = data.get(u"metadata", None)
1608             if metadata:
1609                 generated = metadata.get(u"generated", None)
1610                 if generated:
1611                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1612                     if (now - generated) > timeperiod:
1613                         # Remove the data and the file:
1614                         state = u"removed"
1615                         data = None
1616                         is_last = True
1617                         logging.info(
1618                             f"  The build {job}/{build[u'build']} is "
1619                             f"outdated, will be removed."
1620                         )
1621         return {
1622             u"data": data,
1623             u"state": state,
1624             u"job": job,
1625             u"build": build,
1626             u"last": is_last
1627         }
1628
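    # Time-period pruning sketch: with e.g. u"time-period": 90 in the
    # environment specification, builds whose metadata u"generated" stamp is
    # older than timedelta(90) days are marked u"removed" and u"last" is set,
    # so download_and_parse_data() stops iterating the remaining (assumed
    # even older) builds of that job.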
1629     def download_and_parse_data(self, repeat=1):
1630         """Download the input data files, parse input data from input files and
1631         store in pandas' Series.
1632
1633         :param repeat: Repeat the download the specified number of times if it
1634             is not successful.
1635         :type repeat: int
1636         """
1637
1638         logging.info(u"Downloading and parsing input files ...")
1639
1640         for job, builds in self._cfg.input.items():
1641             for build in builds:
1642
1643                 result = self._download_and_parse_build(job, build, repeat)
1644                 if result[u"last"]:
1645                     break
1646                 build_nr = result[u"build"][u"build"]
1647
1648                 if result[u"data"]:
1649                     data = result[u"data"]
1650                     build_data = pd.Series({
1651                         u"metadata": pd.Series(
1652                             list(data[u"metadata"].values()),
1653                             index=list(data[u"metadata"].keys())
1654                         ),
1655                         u"suites": pd.Series(
1656                             list(data[u"suites"].values()),
1657                             index=list(data[u"suites"].keys())
1658                         ),
1659                         u"tests": pd.Series(
1660                             list(data[u"tests"].values()),
1661                             index=list(data[u"tests"].keys())
1662                         )
1663                     })
1664
1665                     if self._input_data.get(job, None) is None:
1666                         self._input_data[job] = pd.Series()
1667                     self._input_data[job][str(build_nr)] = build_data
1668                     self._cfg.set_input_file_name(
1669                         job, build_nr, result[u"build"][u"file-name"]
1670                     )
1671                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1672
1673                 mem_alloc = \
1674                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1675                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1676
1677         logging.info(u"Done.")
1678
1679         msg = f"Successful downloads from the sources:\n"
1680         for source in self._cfg.environment[u"data-sources"]:
1681             if source[u"successful-downloads"]:
1682                 msg += (
1683                     f"{source[u'url']}/{source[u'path']}/"
1684                     f"{source[u'file-name']}: "
1685                     f"{source[u'successful-downloads']}\n"
1686                 )
1687         logging.info(msg)
1688
1689     def process_local_file(self, local_file, job=u"local", build_nr=1,
1690                            replace=True):
1691         """Process local XML file given as a command-line parameter.
1692
1693         :param local_file: The file to process.
1694         :param job: Job name.
1695         :param build_nr: Build number, overridden by a numeric file name stem.
1696         :param replace: If True, the existing information about jobs and builds
1697             is replaced by the new one; otherwise the new jobs and builds are
1698             added.
1699         :type local_file: str
1700         :type job: str
1701         :type build_nr: int
1702         :type replace: bool
1703         :raises: PresentationError if an error occurs.
1704         """
1705         if not isfile(local_file):
1706             raise PresentationError(f"The file {local_file} does not exist.")
1707
1708         try:
1709             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1710         except (IndexError, ValueError):
1711             pass
1712
1713         build = {
1714             u"build": build_nr,
1715             u"status": u"failed",
1716             u"file-name": local_file
1717         }
1718         if replace:
1719             self._cfg.input = dict()
1720         self._cfg.add_build(job, build)
1721
1722         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1723         data = self._parse_tests(job, build)
1724         if data is None:
1725             raise PresentationError(
1726                 f"Error occurred while parsing the file {local_file}"
1727             )
1728
1729         build_data = pd.Series({
1730             u"metadata": pd.Series(
1731                 list(data[u"metadata"].values()),
1732                 index=list(data[u"metadata"].keys())
1733             ),
1734             u"suites": pd.Series(
1735                 list(data[u"suites"].values()),
1736                 index=list(data[u"suites"].keys())
1737             ),
1738             u"tests": pd.Series(
1739                 list(data[u"tests"].values()),
1740                 index=list(data[u"tests"].keys())
1741             )
1742         })
1743
1744         if self._input_data.get(job, None) is None:
1745             self._input_data[job] = pd.Series()
1746         self._input_data[job][str(build_nr)] = build_data
1747
1748         self._cfg.set_input_state(job, build_nr, u"processed")
1749
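    # File-name sketch: processing u"archive/17.xml" overrides build_nr with
    # 17 (the numeric stem); a non-numeric stem such as u"output.xml" keeps
    # the build_nr passed in.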
1750     def process_local_directory(self, local_dir, replace=True):
1751         """Process local directory with XML file(s). The directory is processed
1752         as a 'job' and the XML files in it as builds.
1753         If the given directory contains only sub-directories, these
1754         sub-directories are processed as jobs and the corresponding XML files
1755         as builds of their job.
1756
1757         :param local_dir: Local directory to process.
1758         :param replace: If True, the existing information about jobs and builds
1759             is replaced by the new one; otherwise the new jobs and builds are
1760             added.
1761         :type local_dir: str
1762         :type replace: bool
1763         """
1764         if not isdir(local_dir):
1765             raise PresentationError(
1766                 f"The directory {local_dir} does not exist."
1767             )
1768
1769         # Check if the given directory includes only files, or only directories
1770         _, dirnames, filenames = next(walk(local_dir))
1771
1772         if filenames and not dirnames:
1773             filenames.sort()
1774             # local_builds:
1775             # key: dir (job) name, value: list of file names (builds)
1776             local_builds = {
1777                 local_dir: [join(local_dir, name) for name in filenames]
1778             }
1779
1780         elif dirnames and not filenames:
1781             dirnames.sort()
1782             # local_builds:
1783             # key: dir (job) name, value: list of file names (builds)
1784             local_builds = dict()
1785             for dirname in dirnames:
1786                 builds = [
1787                     join(local_dir, dirname, name)
1788                     for name in listdir(join(local_dir, dirname))
1789                     if isfile(join(local_dir, dirname, name))
1790                 ]
1791                 if builds:
1792                     local_builds[dirname] = sorted(builds)
1793
1794         elif not filenames and not dirnames:
1795             raise PresentationError(f"The directory {local_dir} is empty.")
1796         else:
1797             raise PresentationError(
1798                 f"The directory {local_dir} can include only files or only "
1799                 f"directories, not both.\nThe directory {local_dir} includes "
1800                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1801             )
1802
1803         if replace:
1804             self._cfg.input = dict()
1805
1806         for job, files in local_builds.items():
1807             for idx, local_file in enumerate(files):
1808                 self.process_local_file(local_file, job, idx + 1, replace=False)
1809
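    # Accepted directory layouts (sketch):
    #
    #     local_dir/1.xml, local_dir/2.xml             -> one job, two builds
    #     local_dir/jobA/1.xml, local_dir/jobB/2.xml   -> jobs jobA and jobB
    #
    # A mix of files and sub-directories in local_dir raises
    # PresentationError.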
1810     @staticmethod
1811     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1812         """Return the index of character in the string which is the end of tag.
1813
1814         :param tag_filter: The string where the end of tag is being searched.
1815         :param start: The index where the searching is stated.
1816         :param closer: The character which is the tag closer.
1817         :type tag_filter: str
1818         :type start: int
1819         :type closer: str
1820         :returns: The index of the tag closer.
1821         :rtype: int
1822         """
1823         try:
1824             idx_opener = tag_filter.index(closer, start)
1825             return tag_filter.index(closer, idx_opener + 1)
1826         except ValueError:
1827             return None
1828
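    # Worked example (editorial sketch):
    #
    #     InputData._end_of_tag("'64B' and '1T1C'")     # -> 4
    #     InputData._end_of_tag("'64B' and '1T1C'", 5)  # -> 15
    #     InputData._end_of_tag("no tags here")         # -> None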
1829     @staticmethod
1830     def _condition(tag_filter):
1831         """Create a conditional statement from the given tag filter.
1832
1833         :param tag_filter: Filter based on tags from the element specification.
1834         :type tag_filter: str
1835         :returns: Conditional statement which can be evaluated.
1836         :rtype: str
1837         """
1838         index = 0
1839         while True:
1840             index = InputData._end_of_tag(tag_filter, index)
1841             if index is None:
1842                 return tag_filter
1843             index += 1
1844             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1845
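    # Transformation sketch: _condition() turns the tag filter
    #
    #     "'64B' and '1T1C' and not 'VHOST'"
    #
    # into the evaluable expression
    #
    #     "'64B' in tags and '1T1C' in tags and not 'VHOST' in tags"
    #
    # which filter_data() below passes to eval() together with the tags of
    # each test.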
1846     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1847                     continue_on_error=False):
1848         """Filter required data from the given jobs and builds.
1849
1850         The output data structure is:
1851         - job 1
1852           - build 1
1853             - test (or suite) 1 ID:
1854               - param 1
1855               - param 2
1856               ...
1857               - param n
1858             ...
1859             - test (or suite) n ID:
1860             ...
1861           ...
1862           - build n
1863         ...
1864         - job n
1865
1866         :param element: Element which will use the filtered data.
1867         :param params: Parameters which will be included in the output. If None,
1868             all parameters are included.
1869         :param data: If not None, this data is used instead of data specified
1870             in the element.
1871         :param data_set: The set of data to be filtered: tests, suites,
1872             metadata.
1873         :param continue_on_error: Continue if there is an error while reading
1874             the data. The item will be empty then.
1875         :type element: pandas.Series
1876         :type params: list
1877         :type data: dict
1878         :type data_set: str
1879         :type continue_on_error: bool
1880         :returns: Filtered data.
1881         :rtype: pandas.Series
1882         """
1883
1884         try:
1885             if data_set == "suites":
1886                 cond = u"True"
1887             elif element[u"filter"] in (u"all", u"template"):
1888                 cond = u"True"
1889             else:
1890                 cond = InputData._condition(element[u"filter"])
1891             logging.debug(f"   Filter: {cond}")
1892         except KeyError:
1893             logging.error(u"  No filter defined.")
1894             return None
1895
1896         if params is None:
1897             params = element.get(u"parameters", None)
1898             if params:
1899                 params.extend((u"type", u"status"))
1900
1901         data_to_filter = data if data else element[u"data"]
1902         data = pd.Series()
1903         try:
1904             for job, builds in data_to_filter.items():
1905                 data[job] = pd.Series()
1906                 for build in builds:
1907                     data[job][str(build)] = pd.Series()
1908                     try:
1909                         data_dict = dict(
1910                             self.data[job][str(build)][data_set].items())
1911                     except KeyError:
1912                         if continue_on_error:
1913                             continue
1914                         return None
1915
1916                     for test_id, test_data in data_dict.items():
1917                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1918                             data[job][str(build)][test_id] = pd.Series()
1919                             if params is None:
1920                                 for param, val in test_data.items():
1921                                     data[job][str(build)][test_id][param] = val
1922                             else:
1923                                 for param in params:
1924                                     try:
1925                                         data[job][str(build)][test_id][param] =\
1926                                             test_data[param]
1927                                     except KeyError:
1928                                         data[job][str(build)][test_id][param] =\
1929                                             u"No Data"
1930             return data
1931
1932         except (KeyError, IndexError, ValueError) as err:
1933             logging.error(
1934                 f"Missing mandatory parameter in the element specification: "
1935                 f"{repr(err)}"
1936             )
1937             return None
1938         except AttributeError as err:
1939             logging.error(repr(err))
1940             return None
1941         except SyntaxError as err:
1942             logging.error(
1943                 f"The filter {cond} is not correct. Check if all tags are "
1944                 f"enclosed by apostrophes.\n{repr(err)}"
1945             )
1946             return None
1947
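    # Minimal usage sketch (all names and values are illustrative, not from
    # the original):
    #
    #     element = pd.Series({
    #         u"filter": u"'NDRPDR' and '64B'",
    #         u"parameters": [u"name", u"throughput"],
    #         u"data": {u"csit-vpp-perf-report-iterative": [1, 2]}
    #     })
    #     filtered = in_data.filter_data(element)
    #     # -> filtered[job][str(build)][test_id][u"throughput"]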
1948     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1949                              continue_on_error=False):
1950         """Filter required data from the given jobs and builds.
1951
1952         The output data structure is:
1953         - job 1
1954           - build 1
1955             - test (or suite) 1 ID:
1956               - param 1
1957               - param 2
1958               ...
1959               - param n
1960             ...
1961             - test (or suite) n ID:
1962             ...
1963           ...
1964           - build n
1965         ...
1966         - job n
1967
1968         :param element: Element which will use the filtered data.
1969         :param params: Parameters which will be included in the output. If None,
1970             all parameters are included.
1971         :param data_set: The set of data to be filtered: tests, suites,
1972             metadata.
1973         :param continue_on_error: Continue if there is an error while reading
1974             the data. The item will be empty then.
1975         :type element: pandas.Series
1976         :type params: list
1977         :type data_set: str
1978         :type continue_on_error: bool
1979         :returns: Filtered data.
1980         :rtype: pandas.Series
1981         """
1982
1983         include = element.get(u"include", None)
1984         if not include:
1985             logging.warning(u"No tests to include, skipping the element.")
1986             return None
1987
1988         if params is None:
1989             params = element.get(u"parameters", None)
1990             if params and u"type" not in params:
1991                 params.append(u"type")
1992
1993         cores = element.get(u"core", None)
1994         if cores:
1995             tests = list()
1996             for core in cores:
1997                 for test in include:
1998                     tests.append(test.format(core=core))
1999         else:
2000             tests = include
2001
2002         data = pd.Series()
2003         try:
2004             for job, builds in element[u"data"].items():
2005                 data[job] = pd.Series()
2006                 for build in builds:
2007                     data[job][str(build)] = pd.Series()
2008                     for test in tests:
2009                         try:
2010                             reg_ex = re.compile(str(test).lower())
2011                             for test_id in self.data[job][
2012                                     str(build)][data_set].keys():
2013                                 if re.match(reg_ex, str(test_id).lower()):
2014                                     test_data = self.data[job][
2015                                         str(build)][data_set][test_id]
2016                                     data[job][str(build)][test_id] = pd.Series()
2017                                     if params is None:
2018                                         for param, val in test_data.items():
2019                                             data[job][str(build)][test_id]\
2020                                                 [param] = val
2021                                     else:
2022                                         for param in params:
2023                                             try:
2024                                                 data[job][str(build)][
2025                                                     test_id][param] = \
2026                                                     test_data[param]
2027                                             except KeyError:
2028                                                 data[job][str(build)][
2029                                                     test_id][param] = u"No Data"
2030                         except KeyError as err:
2031                             if continue_on_error:
2032                                 logging.debug(repr(err))
2033                                 continue
2034                             logging.error(repr(err))
2035                             return None
2036             return data
2037
2038         except (KeyError, IndexError, ValueError) as err:
2039             logging.error(
2040                 f"Missing mandatory parameter in the element "
2041                 f"specification: {repr(err)}"
2042             )
2043             return None
2044         except AttributeError as err:
2045             logging.error(repr(err))
2046             return None
2047
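    # Core expansion sketch (illustrative values): with
    #
    #     element[u"include"] = [u".*-{core}-ethip4-ip4base-.*"]
    #     element[u"core"] = [u"1c", u"2c"]
    #
    # the regular expressions matched against the test IDs become
    # u".*-1c-ethip4-ip4base-.*" and u".*-2c-ethip4-ip4base-.*".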
2048     @staticmethod
2049     def merge_data(data):
2050         """Merge data from more jobs and builds to a simple data structure.
2051
2052         The output data structure is:
2053
2054         - test (suite) 1 ID:
2055           - param 1
2056           - param 2
2057           ...
2058           - param n
2059         ...
2060         - test (suite) n ID:
2061         ...
2062
2063         :param data: Data to merge.
2064         :type data: pandas.Series
2065         :returns: Merged data.
2066         :rtype: pandas.Series
2067         """
2068
2069         logging.info(u"    Merging data ...")
2070
2071         merged_data = pd.Series()
2072         for builds in data.values:
2073             for item in builds.values:
2074                 for item_id, item_data in item.items():
2075                     merged_data[item_id] = item_data
2076         return merged_data
2077
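    # Merging sketch: the per-job/per-build nesting disappears, so
    #
    #     merged = InputData.merge_data(filtered)
    #     merged[test_id][u"throughput"]
    #
    # works directly; if the same test ID occurs in several builds, the item
    # visited last wins.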
2078     def print_all_oper_data(self):
2079         """Print all operational data to console.
2080         """
2081
2082         for job in self._input_data.values:
2083             for build in job.values:
2084                 for test_id, test_data in build[u"tests"].items():
2085                     print(f"{test_id}")
2086                     if test_data.get(u"show-run", None) is None:
2087                         continue
2088                     for dut_name, data in test_data[u"show-run"].items():
2089                         if data.get(u"runtime", None) is None:
2090                             continue
2091                         runtime = loads(data[u"runtime"])
2092                         try:
2093                             threads_nr = len(runtime[0][u"clocks"])
2094                         except (IndexError, KeyError):
2095                             continue
2096                         threads = OrderedDict(
2097                             {idx: list() for idx in range(threads_nr)})
2098                         for item in runtime:
2099                             for idx in range(threads_nr):
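                                # In VPP runtime counters, a call processes
                                # one frame (vector) and the u"vectors"
                                # counter counts packets, which the table
                                # headers below reflect. Clocks are
                                # normalised per unit of work: vectors, else
                                # calls, else suspends.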
2100                                 if item[u"vectors"][idx] > 0:
2101                                     clocks = item[u"clocks"][idx] / \
2102                                              item[u"vectors"][idx]
2103                                 elif item[u"calls"][idx] > 0:
2104                                     clocks = item[u"clocks"][idx] / \
2105                                              item[u"calls"][idx]
2106                                 elif item[u"suspends"][idx] > 0:
2107                                     clocks = item[u"clocks"][idx] / \
2108                                              item[u"suspends"][idx]
2109                                 else:
2110                                     clocks = 0.0
2111
2112                                 if item[u"calls"][idx] > 0:
2113                                     vectors_call = item[u"vectors"][idx] / \
2114                                                    item[u"calls"][idx]
2115                                 else:
2116                                     vectors_call = 0.0
2117
2118                                 if int(item[u"calls"][idx]) + int(
2119                                         item[u"vectors"][idx]) + \
2120                                         int(item[u"suspends"][idx]):
2121                                     threads[idx].append([
2122                                         item[u"name"],
2123                                         item[u"calls"][idx],
2124                                         item[u"vectors"][idx],
2125                                         item[u"suspends"][idx],
2126                                         clocks,
2127                                         vectors_call
2128                                     ])
2129
2130                         print(f"Host IP: {data.get(u'host', '')}, "
2131                               f"Socket: {data.get(u'socket', '')}")
2132                         for thread_nr, thread in threads.items():
2133                             txt_table = prettytable.PrettyTable(
2134                                 (
2135                                     u"Name",
2136                                     u"Nr of Vectors",
2137                                     u"Nr of Packets",
2138                                     u"Suspends",
2139                                     u"Cycles per Packet",
2140                                     u"Average Vector Size"
2141                                 )
2142                             )
2143                             avg = 0.0
2144                             for row in thread:
2145                                 txt_table.add_row(row)
2146                                 avg += row[-1]
2147                             if len(thread) == 0:
2148                                 avg = u""
2149                             else:
2150                                 avg = f", Average Vector Size per Node: " \
2151                                       f"{(avg / len(thread)):.2f}"
2152                             th_name = u"main" if thread_nr == 0 \
2153                                 else f"worker_{thread_nr}"
2154                             print(f"{dut_name}, {th_name}{avg}")
2155                             txt_table.float_format = u".2"
2156                             txt_table.align = u"r"
2157                             txt_table.align[u"Name"] = u"l"
2158                             print(f"{txt_table.get_string()}\n")