Trending: Add graph with statistics
csit.git: resources/tools/presentation/input_data_parser.py
# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """

import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove, walk, listdir
from os.path import isfile, isdir, join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from json.decoder import JSONDecodeError

import hdrh.histogram
import hdrh.codec
import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file
from pal_errors import PresentationError


# Separator used in file names
SEPARATOR = u"__"


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            }
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        }
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    }
                }
            }

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            }

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            }

            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            }
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        }
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
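
    A minimal usage sketch (the file path and metadata values below are
    illustrative assumptions, not taken from a real job)::

        from robot.api import ExecutionResult

        result = ExecutionResult(u"output.xml")
        checker = ExecutionChecker(
            metadata={u"job": u"example-job", u"build": u"1"},
            mapping=dict(),
            ignore=list(),
            for_output=u"report"
        )
        result.visit(checker)
        # checker.data now holds the structure described above.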
    """

    REGEX_PLR_RATE = re.compile(
        r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
        r'PLRsearch upper bound::?\s(\d+.\d+)'
    )
    REGEX_NDRPDR_RATE = re.compile(
        r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:\s(\d+.\d+).*\n'
        r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:\s(\d+.\d+)'
    )
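    # Illustrative shape of a message REGEX_NDRPDR_RATE is built to match
    # (the numbers and surrounding text are invented):
    #
    #     NDR_LOWER: 12345678.0 pps <text>
    #     <one intermediate line>
    #     NDR_UPPER: 12345680.0 pps <text>
    #     PDR_LOWER: 23456789.0 pps <text>
    #     <one intermediate line>
    #     PDR_UPPER: 23456791.0 pps <text>
    #
    # re.search() then exposes the four bounds as groups 1 to 4.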
    REGEX_NDRPDR_GBPS = re.compile(
        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:.*,\s(\d+.\d+)'
    )
    REGEX_PERF_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    )
    REGEX_CPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
    )
    REGEX_PPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
    )
    REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')

    REGEX_VSAP_MSG_INFO = re.compile(
        r'Transfer Rate: (\d*.\d*).*\n'
        r'Latency: (\d*.\d*).*\n'
        r'Completed requests: (\d*).*\n'
        r'Failed requests: (\d*).*\n'
        r'Total data transferred: (\d*).*\n'
        r'Connection [cr]ps rate:\s*(\d*.\d*)'
    )

    # Needed for CPS and PPS tests
    REGEX_NDRPDR_LAT_BASE = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
    )
    REGEX_NDRPDR_LAT = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
    )

    REGEX_VERSION_VPP = re.compile(
        r"(return STDOUT Version:\s*|"
        r"VPP Version:\s*|VPP version:\s*)(.*)"
    )
    REGEX_VERSION_DPDK = re.compile(
        r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
    )
    REGEX_TCP = re.compile(
        r'Total\s(rps|cps|throughput):\s(\d*).*$'
    )
    REGEX_MRR = re.compile(
        r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
        r'tx\s(\d*),\srx\s(\d*)'
    )
    REGEX_BMRR = re.compile(
        r'.*trial results.*: \[(.*)\]'
    )
    REGEX_RECONF_LOSS = re.compile(
        r'Packets lost due to reconfig: (\d*)'
    )
    REGEX_RECONF_TIME = re.compile(
        r'Implied time lost: (\d*.[\de-]*)'
    )
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')

    REGEX_SH_RUN_HOST = re.compile(
        r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
    )
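    # REGEX_SH_RUN_HOST picks the host and hook out of telemetry text shaped
    # roughly like this invented example:
    #     hostname="10.0.0.1",hook="/run/vpp/api.sock"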

    def __init__(self, metadata, mapping, ignore, for_output):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :param for_output: Output to be generated from downloaded data.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        :type for_output: str
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        self._for_output = for_output

        # Number of PAPI History messages found:
        # 0 - no message
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._conf_history_lookup_nr = 0

        self._sh_run_counter = 0
        self._telemetry_kw_counter = 0
        self._telemetry_msg_counter = 0

        # Test ID of the currently processed test: the lowercase full path to
        # the test.
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed,
            u"test-telemetry": self._get_telemetry
        }
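        # Dispatch sketch (illustrative): start_message() below looks the
        # handler up by the current message type, e.g.
        #     self._msg_type = u"vpp-version"
        #     self.parse_msg[self._msg_type](msg)  # calls _get_vpp_version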

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_data_from_mrr_test_msg(self, msg):
        """Get info from message of MRR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
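
        Example (synthetic message; trial rates are converted from pps to
        Mpps)::

            u"... trial results ...: [10000000.0, 12000000.0]"
            -> u"[10.00, 12.00]"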
        """

        groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
        if not groups or groups.lastindex != 1:
            return u"Test Failed."

        try:
            data = groups.group(1).split(u", ")
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        out_str = u"["
        try:
            for item in data:
                out_str += f"{(float(item) / 1e6):.2f}, "
            return out_str[:-2] + u"]"
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_cps_test_msg(self, msg):
        """Get info from message of NDRPDR CPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 2:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
                f"2. {(float(groups.group(2)) / 1e6):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_pps_test_msg(self, msg):
        """Get info from message of NDRPDR PPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """

        groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 4:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
                f"{float(groups.group(2)):5.2f}\n"
                f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
                f"{float(groups.group(4)):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_perf_test_msg(self, msg):
        """Get info from message of NDRPDR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
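
        The summary is shaped like the sketch below (all numbers invented):
        rows 1 and 2 hold the NDR and PDR lower bounds (converted to Mpps,
        plus the second captured value), rows 3 to 5 hold P50/P90/P99 latency
        percentiles at 10%, 50% and 90% PDR load, both directions::

            1.  4.50       9.30
            2.  4.80       9.90

            3.    11    14    21        12    15    22
            4.    12    16    24        13    17    25
            5.    14    19    28        15    20    30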
        """

        groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
        if not groups or groups.lastindex != 10:
            return u"Test Failed."

        try:
            data = {
                u"ndr_low": float(groups.group(1)),
                u"ndr_low_b": float(groups.group(2)),
                u"pdr_low": float(groups.group(3)),
                u"pdr_low_b": float(groups.group(4)),
                u"pdr_lat_90_1": groups.group(5),
                u"pdr_lat_90_2": groups.group(6),
                u"pdr_lat_50_1": groups.group(7),
                u"pdr_lat_50_2": groups.group(8),
                u"pdr_lat_10_1": groups.group(9),
                u"pdr_lat_10_2": groups.group(10),
            }
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        def _process_lat(in_str_1, in_str_2):
            """Extract P50, P90 and P99 latency values from the hdrh parts
            of the latency strings.

            :param in_str_1: Latency string for one direction produced by
                Robot Framework.
            :param in_str_2: Latency string for second direction produced by
                Robot Framework.
            :type in_str_1: str
            :type in_str_2: str
            :returns: Tuple of P50/P90/P99 latency values for both directions,
                or None if a problem occurs.
            :rtype: tuple
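
            A self-contained hdrh round-trip sketch (synthetic values, not
            CSIT data)::

                import hdrh.histogram

                hist = hdrh.histogram.HdrHistogram(1, 10000, 3)
                for value in (10, 20, 30):
                    hist.record_value(value)
                encoded = hist.encode()  # base64 payload, like in_list_1[3]
                decoded = hdrh.histogram.HdrHistogram.decode(encoded)
                p50 = decoded.get_value_at_percentile(50.0)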
            """
            in_list_1 = in_str_1.split('/', 3)
            in_list_2 = in_str_2.split('/', 3)

            if len(in_list_1) != 4 or len(in_list_2) != 4:
                return None

            in_list_1[3] += u"=" * ((4 - len(in_list_1[3]) % 4) % 4)
            try:
                hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
            except hdrh.codec.HdrLengthException:
                return None

            in_list_2[3] += u"=" * ((4 - len(in_list_2[3]) % 4) % 4)
            try:
                hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
            except hdrh.codec.HdrLengthException:
                return None

            if hdr_lat_1 and hdr_lat_2:
                hdr_lat = (
                    hdr_lat_1.get_value_at_percentile(50.0),
                    hdr_lat_1.get_value_at_percentile(90.0),
                    hdr_lat_1.get_value_at_percentile(99.0),
                    hdr_lat_2.get_value_at_percentile(50.0),
                    hdr_lat_2.get_value_at_percentile(90.0),
                    hdr_lat_2.get_value_at_percentile(99.0)
                )

                if all(hdr_lat):
                    return hdr_lat

            return None

        try:
            out_msg = (
                f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
                f"{data[u'ndr_low_b']:5.2f}"
                f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
                f"{data[u'pdr_low_b']:5.2f}"
            )
            latency = (
                _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
                _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
                _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
            )
            if all(latency):
                max_len = len(str(max((max(item) for item in latency))))
                max_len = 4 if max_len < 4 else max_len

                for idx, lat in enumerate(latency):
                    if not idx:
                        out_msg += u"\n"
                    out_msg += (
                        f"\n{idx + 3}. "
                        f"{lat[0]:{max_len}d} "
                        f"{lat[1]:{max_len}d} "
                        f"{lat[2]:{max_len}d}      "
                        f"{lat[3]:{max_len}d} "
                        f"{lat[4]:{max_len}d} "
                        f"{lat[5]:{max_len}d} "
                    )

            return out_msg

        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"return STDOUT Version:") or \
                msg.message.count(u"VPP Version:") or \
                msg.message.count(u"VPP version:"):
            self._version = str(
                re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
            )
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(
                r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
                u"",
                msg.message,
                count=1
            ).replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if not msg.message.count(u"stats runtime"):
            return

        # Temporary solution
        if self._sh_run_counter > 1:
            return

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = dict()

        groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""

        dut = u"dut{nr}".format(
            nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)

        self._data[u'tests'][self._test_id][u'show-run'][dut] = \
            copy.copy(
                {
                    u"host": host,
                    u"socket": sock,
                    u"runtime": str(msg.message).replace(u' ', u'').
                                replace(u'\n', u'').replace(u"'", u'"').
                                replace(u'b"', u'"').replace(u'u"', u'"').
                                split(u":", 1)[1]
                }
            )

    def _get_telemetry(self, msg):
        """Called when extraction of VPP telemetry data is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._telemetry_kw_counter > 1:
            return
        if not msg.message.count(u"# TYPE vpp_runtime_calls"):
            return

        if u"telemetry-show-run" not in \
                self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()

        self._telemetry_msg_counter += 1
        groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
        if not groups:
            return
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""
        runtime = {
            u"source_type": u"node",
            u"source_id": host,
            u"msg_type": u"metric",
            u"log_level": u"INFO",
            u"timestamp": msg.timestamp,
            u"msg": u"show_runtime",
            u"host": host,
            u"socket": sock,
            u"data": list()
        }
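        # Each metric line handled below is expected to follow the Prometheus
        # exposition format, roughly (values invented for illustration):
        #     vpp_runtime_calls{name="ip4-lookup",thread_id="0"} 1234 1638262936
        # rsplit() peels off the value and timestamp, and the {...} label block
        # is rewritten into a dict(...) call and evaluated.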
        for line in msg.message.splitlines():
            if not line.startswith(u"vpp_runtime_"):
                continue
            try:
                params, value, timestamp = line.rsplit(u" ", maxsplit=2)
                cut = params.index(u"{")
                name = params[:cut].split(u"_", maxsplit=2)[-1]
                labels = eval(
                    u"dict" + params[cut:].replace('{', '(').replace('}', ')')
                )
                labels[u"graph_node"] = labels.pop(u"name")
                runtime[u"data"].append(
                    {
                        u"name": name,
                        u"value": value,
                        u"timestamp": timestamp,
                        u"labels": labels
                    }
                )
            except (TypeError, ValueError, IndexError):
                continue
        self._data[u'tests'][self._test_id][u'telemetry-show-run']\
            [f"dut{self._telemetry_msg_counter}"] = copy.copy(
                {
                    u"host": host,
                    u"socket": sock,
                    u"runtime": runtime
                }
            )

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_throughput_gbps(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
        test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        gbps = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_GBPS, msg)

        if groups is not None:
            try:
                gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
                gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
                gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
                gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return gbps, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0,
            u"avg": -1.0,
            u"max": -1.0,
            u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"LAT0": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR10": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR50": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR90": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
        }

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)
        if groups is None:
            groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
        if groups is None:
            return latency, u"FAIL"

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with corresponding keys; values are floats, except
                hdrh, which stays a string.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
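
            Example (synthetic input, "<hdrh>" stands for an opaque base64
            payload)::

                process_latency(u"1/2/3/<hdrh>") returns
                {u"min": 1.0, u"avg": 2.0, u"max": 3.0, u"hdrh": u"<hdrh>"}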
            """
            in_list = in_str.split('/', 3)

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

        try:
            latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
            latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
            latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
            latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
            if groups.lastindex == 4:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        try:
            latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
            latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
            latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
            latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
            latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
            latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
            latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
            latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
            if groups.lastindex == 12:
                return latency, u"PASS"
        except (IndexError, ValueError):
            pass

        return latency, u"FAIL"

    @staticmethod
    def _get_hoststack_data(msg, tags):
        """Get data from the hoststack test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
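
        Sketch for the VPPECHO branch (synthetic message, quotes and spaces
        already normalised)::

            u'{"time":"3.6"}{"time":"3.4"}'
            -> {u"client": {u"time": u"3.6"}, u"server": {u"time": u"3.4"}}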
        """
        result = dict()
        status = u"FAIL"

        msg = msg.replace(u"'", u'"').replace(u" ", u"")
        if u"LDPRELOAD" in tags:
            try:
                result = loads(msg)
                status = u"PASS"
            except JSONDecodeError:
                pass
        elif u"VPPECHO" in tags:
            try:
                msg_lst = msg.replace(u"}{", u"} {").split(u" ")
                result = dict(
                    client=loads(msg_lst[0]),
                    server=loads(msg_lst[1])
                )
                status = u"PASS"
            except (JSONDecodeError, IndexError):
                pass

        return result, status

    def _get_vsap_data(self, msg, tags):
        """Get data from the vsap test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        result = dict()
        status = u"FAIL"

        groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
        if groups is not None:
            try:
                result[u"transfer-rate"] = float(groups.group(1)) * 1e3
                result[u"latency"] = float(groups.group(2))
                result[u"completed-requests"] = int(groups.group(3))
                result[u"failed-requests"] = int(groups.group(4))
                result[u"bytes-transferred"] = int(groups.group(5))
                if u"TCP_CPS" in tags:
                    result[u"cps"] = float(groups.group(6))
                elif u"TCP_RPS" in tags:
                    result[u"rps"] = float(groups.group(6))
                else:
                    return result, status
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return result, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        self._data[u"suites"][suite.longname.lower().
                              replace(u'"', u"'").
                              replace(u" ", u"_")] = {
                                  u"name": suite.name.lower(),
                                  u"doc": suite.doc,
                                  u"parent": parent_name,
                                  u"level": len(suite.longname.split(u"."))
                              }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        self._sh_run_counter = 0
        self._telemetry_kw_counter = 0
        self._telemetry_msg_counter = 0

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            logging.debug(
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
                f"{name}"
            )
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, u"", name)

        test_result[u"parent"] = test.parent.name.lower()
        test_result[u"tags"] = tags
        test_result[u"doc"] = test.doc
        test_result[u"type"] = u""
        test_result[u"status"] = test.status
        test_result[u"starttime"] = test.starttime
        test_result[u"endtime"] = test.endtime

        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
                    test_result[u"msg"] = self._get_data_from_pps_test_msg(
                        test.message)
                elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                    test_result[u"msg"] = self._get_data_from_cps_test_msg(
                        test.message)
                else:
                    test_result[u"msg"] = self._get_data_from_perf_test_msg(
                        test.message)
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                test_result[u"msg"] = self._get_data_from_mrr_test_msg(
                    test.message)
            else:
                test_result[u"msg"] = test.message
        else:
            test_result[u"msg"] = test.message

        if u"PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
            if not groups:
                tag_count = 0
                tag_tc = str()
                for tag in test_result[u"tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_id = re.sub(
                        self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                        self._test_id, count=1
                    )
                    test_result[u"name"] = re.sub(
                        self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                        test_result[u"name"], count=1
                    )
                else:
                    test_result[u"status"] = u"FAIL"
                    self._data[u"tests"][self._test_id] = test_result
                    logging.debug(
                        f"The test {self._test_id} has no multi-threading tag "
                        f"or more than one.\n"
                        f"Tags: {test_result[u'tags']}"
                    )
                    return

        if u"DEVICETEST" in tags:
            test_result[u"type"] = u"DEVICETEST"
        elif u"NDRPDR" in tags:
            if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                test_result[u"type"] = u"CPS"
            else:
                test_result[u"type"] = u"NDRPDR"
            if test.status == u"PASS":
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"gbps"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput_gbps(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
        elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
            if u"MRR" in tags:
                test_result[u"type"] = u"MRR"
            else:
                test_result[u"type"] = u"BMRR"
            if test.status == u"PASS":
                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [
                        float(item.strip().replace(u"'", u""))
                        for item in items_str.split(",")
                    ]
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"samples"] = items_float
                    test_result[u"result"][u"receive-rate"] = stats.avg
                    test_result[u"result"][u"receive-stdev"] = stats.stdev
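                    # Sketch (values invented): for items_float == [1.0, 3.0],
                    # stats.avg is 2.0 and stats.stdev is the corresponding
                    # standard deviation computed by jumpavg's AvgStdevStats.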
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
        elif u"SOAK" in tags:
            test_result[u"type"] = u"SOAK"
            if test.status == u"PASS":
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
        elif u"HOSTSTACK" in tags:
            test_result[u"type"] = u"HOSTSTACK"
            if test.status == u"PASS":
                test_result[u"result"], test_result[u"status"] = \
                    self._get_hoststack_data(test.message, tags)
        elif u"LDP_NGINX" in tags:
            test_result[u"type"] = u"LDP_NGINX"
            test_result[u"result"], test_result[u"status"] = \
                self._get_vsap_data(test.message, tags)
        # elif u"TCP" in tags:  # This might be not used
        #     test_result[u"type"] = u"TCP"
        #     if test.status == u"PASS":
        #         groups = re.search(self.REGEX_TCP, test.message)
        #         test_result[u"result"] = int(groups.group(2))
        elif u"RECONF" in tags:
            test_result[u"type"] = u"RECONF"
            if test.status == u"PASS":
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
        else:
            test_result[u"status"] = u"FAIL"

        self._data[u"tests"][self._test_id] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == u"setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == u"teardown":
                self.visit_teardown_kw(keyword)
            else:
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if self._for_output == u"trending":
            return

        if test_kw.name.count(u"Run Telemetry On All Duts"):
            self._msg_type = u"test-telemetry"
            self._telemetry_kw_counter += 1
        elif test_kw.name.count(u"Show Runtime On All Duts"):
            self._msg_type = u"test-show-runtime"
            self._sh_run_counter += 1
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
                not self._version:
            self._msg_type = u"dpdk-version"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        if teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Get the required information from the
        message, e.g. the VPP version.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """


class InputData:
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
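
    Example of addressing one build once the data has been downloaded and
    parsed (the job name and build number below are illustrative only)::

        tests = input_data.tests(u"csit-vpp-perf-mrr-daily-master", u"100")
        meta = input_data.metadata(u"csit-vpp-perf-mrr-daily-master", u"100")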
1437     """
1438
1439     def __init__(self, spec, for_output):
1440         """Initialization.
1441
1442         :param spec: Specification.
1443         :param for_output: Output to be generated from downloaded data.
1444         :type spec: Specification
1445         :type for_output: str
1446         """
1447
1448         # Specification:
1449         self._cfg = spec
1450
1451         self._for_output = for_output
1452
1453         # Data store:
1454         self._input_data = pd.Series()
1455
1456     @property
1457     def data(self):
1458         """Getter - Input data.
1459
1460         :returns: Input data
1461         :rtype: pandas.Series
1462         """
1463         return self._input_data
1464
1465     def metadata(self, job, build):
1466         """Getter - metadata
1467
1468         :param job: Job which metadata we want.
1469         :param build: Build which metadata we want.
1470         :type job: str
1471         :type build: str
1472         :returns: Metadata
1473         :rtype: pandas.Series
1474         """
1475         return self.data[job][build][u"metadata"]
1476
1477     def suites(self, job, build):
1478         """Getter - suites
1479
1480         :param job: Job which suites we want.
1481         :param build: Build which suites we want.
1482         :type job: str
1483         :type build: str
1484         :returns: Suites.
1485         :rtype: pandas.Series
1486         """
1487         return self.data[job][str(build)][u"suites"]
1488
1489     def tests(self, job, build):
1490         """Getter - tests.
1491
1492         :param job: Job whose tests we want.
1493         :param build: Build whose tests we want.
1494         :type job: str
1495         :type build: str
1496         :returns: Tests.
1497         :rtype: pandas.Series
1498         """
1499         return self.data[job][str(build)][u"tests"]
1500
1501     def _parse_tests(self, job, build):
1502         """Process data from a robot output.xml file and return JSON
1503         structured data.
1504
1505         :param job: Name of the job whose build output data is processed.
1506         :param build: Information about the build whose output data is processed.
1507         :type job: str
1508         :type build: dict
1509         :returns: JSON data structure.
1510         :rtype: dict
1511         """
1512
1513         metadata = {
1514             u"job": job,
1515             u"build": build
1516         }
1517
1518         with open(build[u"file-name"], u'r') as data_file:
1519             try:
1520                 result = ExecutionResult(data_file)
1521             except errors.DataError as err:
1522                 logging.error(
1523                     f"Error occurred while parsing output.xml: {repr(err)}"
1524                 )
1525                 return None
1526         checker = ExecutionChecker(
1527             metadata, self._cfg.mapping, self._cfg.ignore, self._for_output
1528         )
1529         result.visit(checker)
1530
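        # Enrich the metadata with overall statistics from the parsed robot
        # results: total/passed/failed test counts, the elapsed time, and the
        # suite end time truncated to "YYYYMMDD HH:MM" (the "generated"
        # timestamp parsed later by _download_and_parse_build).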
1531         checker.data[u"metadata"][u"tests_total"] = \
1532             result.statistics.total.all.total
1533         checker.data[u"metadata"][u"tests_passed"] = \
1534             result.statistics.total.all.passed
1535         checker.data[u"metadata"][u"tests_failed"] = \
1536             result.statistics.total.all.failed
1537         checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
1538         checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]
1539
1540         return checker.data
1541
1542     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1543         """Download and parse the input data file.
1544
1545         :param job: Name of the Jenkins job which generated the processed
1546             input file.
1547         :param build: Information about the Jenkins build which generated
1548             the processed input file.
1549         :param repeat: Number of times to retry the download if it is not
1550             successful.
1551         :param pid: PID of the process executing this method.
1552         :type job: str
1553         :type build: dict
1554         :type repeat: int
1555         :type pid: int
1556         """
1557
1558         logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1559
1560         state = u"failed"
1561         success = False
1562         data = None
1563         do_repeat = repeat
1564         while do_repeat:
1565             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1566             if success:
1567                 break
1568             do_repeat -= 1
1569         if not success:
1570             logging.error(
1571                 f"Cannot download the input data file from job {job}, "
1572                 f"build {build[u'build']}, or the file is damaged. "
1573                 f"Skipped."
1574             )
1575         if success:
1576             logging.info(f"  Processing data from build {build[u'build']}")
1577             data = self._parse_tests(job, build)
1578             if data is None:
1579                 logging.error(
1580                     f"Input data file from the job {job}, build "
1581                     f"{build[u'build']} is damaged. Skipped."
1582                 )
1583             else:
1584                 state = u"processed"
1585
1586             try:
1587                 remove(build[u"file-name"])
1588             except OSError as err:
1589                 logging.error(
1590                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1591                 )
1592
1593         # If the time-period is defined in the specification file, discard
1594         # the data of all builds which are outside the time period.
1595         is_last = False
1596         timeperiod = self._cfg.environment.get(u"time-period", None)
1597         if timeperiod and data:
1598             now = dt.utcnow()
1599             timeperiod = timedelta(int(timeperiod))
1600             metadata = data.get(u"metadata", None)
1601             if metadata:
1602                 generated = metadata.get(u"generated", None)
1603                 if generated:
1604                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1605                     if (now - generated) > timeperiod:
1606                         # Remove the data and the file:
1607                         state = u"removed"
1608                         data = None
1609                         is_last = True
1610                         logging.info(
1611                             f"  The build {job}/{build[u'build']} is "
1612                             f"outdated and will be removed."
1613                         )
1614         return {
1615             u"data": data,
1616             u"state": state,
1617             u"job": job,
1618             u"build": build,
1619             u"last": is_last
1620         }
1621
1622     def download_and_parse_data(self, repeat=1):
1623         """Download the input data files, parse input data from input files and
1624         store in pandas' Series.
1625
1626         :param repeat: Number of times to retry the download if it is not
1627             successful.
1628         :type repeat: int
1629         """
1630
1631         logging.info(u"Downloading and parsing input files ...")
1632
1633         for job, builds in self._cfg.input.items():
1634             for build in builds:
1635
1636                 result = self._download_and_parse_build(job, build, repeat)
1637                 if result[u"last"]:
1638                     break
1639                 build_nr = result[u"build"][u"build"]
1640
1641                 if result[u"data"]:
1642                     data = result[u"data"]
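                    # Convert each part of the parsed data (metadata, suites,
                    # tests) from a dict into a pandas Series indexed by the
                    # original keys, so builds can be addressed uniformly.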
1643                     build_data = pd.Series({
1644                         u"metadata": pd.Series(
1645                             list(data[u"metadata"].values()),
1646                             index=list(data[u"metadata"].keys())
1647                         ),
1648                         u"suites": pd.Series(
1649                             list(data[u"suites"].values()),
1650                             index=list(data[u"suites"].keys())
1651                         ),
1652                         u"tests": pd.Series(
1653                             list(data[u"tests"].values()),
1654                             index=list(data[u"tests"].keys())
1655                         )
1656                     })
1657
1658                     if self._input_data.get(job, None) is None:
1659                         self._input_data[job] = pd.Series()
1660                     self._input_data[job][str(build_nr)] = build_data
1661                     self._cfg.set_input_file_name(
1662                         job, build_nr, result[u"build"][u"file-name"]
1663                     )
1664                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1665
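                # On Linux, ru_maxrss is reported in kilobytes, so dividing
                # by 1000 gives the peak memory usage in (approximate) MB.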
1666                 mem_alloc = \
1667                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1668                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1669
1670         logging.info(u"Done.")
1671
1672         msg = u"Successful downloads from the sources:\n"
1673         for source in self._cfg.environment[u"data-sources"]:
1674             if source[u"successful-downloads"]:
1675                 msg += (
1676                     f"{source[u'url']}/{source[u'path']}/"
1677                     f"{source[u'file-name']}: "
1678                     f"{source[u'successful-downloads']}\n"
1679                 )
1680         logging.info(msg)
1681
1682     def process_local_file(self, local_file, job=u"local", build_nr=1,
1683                            replace=True):
1684         """Process local XML file given as a command-line parameter.
1685
1686         :param local_file: The file to process.
1687         :param job: Job name.
1688         :param build_nr: Build number.
1689         :param replace: If True, the existing information about jobs and
1690             builds is replaced by the new one; otherwise the new jobs and
1691             builds are added.
1692         :type local_file: str
1693         :type job: str
1694         :type build_nr: int
1695         :type replace: bool
1696         :raises PresentationError: If the file does not exist or cannot be parsed.
1697         """
1698         if not isfile(local_file):
1699             raise PresentationError(f"The file {local_file} does not exist.")
1700
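        # If the file name (without extension) is a number, e.g. 21.xml, use
        # it as the build number; otherwise keep the build_nr parameter.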
1701         try:
1702             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1703         except (IndexError, ValueError):
1704             pass
1705
1706         build = {
1707             u"build": build_nr,
1708             u"status": u"failed",
1709             u"file-name": local_file
1710         }
1711         if replace:
1712             self._cfg.input = dict()
1713         self._cfg.add_build(job, build)
1714
1715         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1716         data = self._parse_tests(job, build)
1717         if data is None:
1718             raise PresentationError(
1719                 f"Error occurred while parsing the file {local_file}"
1720             )
1721
1722         build_data = pd.Series({
1723             u"metadata": pd.Series(
1724                 list(data[u"metadata"].values()),
1725                 index=list(data[u"metadata"].keys())
1726             ),
1727             u"suites": pd.Series(
1728                 list(data[u"suites"].values()),
1729                 index=list(data[u"suites"].keys())
1730             ),
1731             u"tests": pd.Series(
1732                 list(data[u"tests"].values()),
1733                 index=list(data[u"tests"].keys())
1734             )
1735         })
1736
1737         if self._input_data.get(job, None) is None:
1738             self._input_data[job] = pd.Series()
1739         self._input_data[job][str(build_nr)] = build_data
1740
1741         self._cfg.set_input_state(job, build_nr, u"processed")
1742
1743     def process_local_directory(self, local_dir, replace=True):
1744         """Process a local directory with XML file(s). The directory is
1745         processed as a 'job' and the XML files in it as builds.
1746         If the given directory contains only sub-directories, these
1747         sub-directories are processed as jobs and the corresponding XML
1748         files as builds of their jobs.
1749
1750         :param local_dir: Local directory to process.
1751         :param replace: If True, the information about jobs and builds is
1752             replaced by the new one, otherwise the new jobs and builds are
1753             added.
1754         :type local_dir: str
1755         :type replace: bool
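        :raises PresentationError: If the directory does not exist, is empty,
            or contains both files and sub-directories.

        Example layouts (illustrative):
        - only files:           local_dir/1.xml, local_dir/2.xml
        - only sub-directories: local_dir/job1/1.xml, local_dir/job2/1.xml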
1756         """
1757         if not isdir(local_dir):
1758             raise PresentationError(
1759                 f"The directory {local_dir} does not exist."
1760             )
1761
1762         # Check if the given directory includes only files or only directories.
1763         _, dirnames, filenames = next(walk(local_dir))
1764
1765         if filenames and not dirnames:
1766             filenames.sort()
1767             # local_builds:
1768             # key: dir (job) name, value: list of file names (builds)
1769             local_builds = {
1770                 local_dir: [join(local_dir, name) for name in filenames]
1771             }
1772
1773         elif dirnames and not filenames:
1774             dirnames.sort()
1775             # local_builds:
1776             # key: dir (job) name, value: list of file names (builds)
1777             local_builds = dict()
1778             for dirname in dirnames:
1779                 builds = [
1780                     join(local_dir, dirname, name)
1781                     for name in listdir(join(local_dir, dirname))
1782                     if isfile(join(local_dir, dirname, name))
1783                 ]
1784                 if builds:
1785                     local_builds[dirname] = sorted(builds)
1786
1787         elif not filenames and not dirnames:
1788             raise PresentationError(f"The directory {local_dir} is empty.")
1789         else:
1790             raise PresentationError(
1791                 f"The directory {local_dir} can include only files or only "
1792                 f"directories, not both.\nThe directory {local_dir} includes "
1793                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1794             )
1795
1796         if replace:
1797             self._cfg.input = dict()
1798
1799         for job, files in local_builds.items():
1800             for idx, local_file in enumerate(files):
1801                 self.process_local_file(local_file, job, idx + 1, replace=False)
1802
1803     @staticmethod
1804     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1805         """Return the index of the character which closes the tag.
1806
1807         :param tag_filter: The string in which the end of tag is searched.
1808         :param start: The index where the search is started.
1809         :param closer: The character which closes the tag.
1810         :type tag_filter: str
1811         :type start: int
1812         :type closer: str
1813         :returns: The index of the tag closer, or None if not found.
1814         :rtype: int
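
        Example (illustrative, derived from the implementation): for the
        string u"'DOT1Q' and 'IP4FWD'", _end_of_tag(tag_filter) returns 6
        (the apostrophe closing the first tag) and _end_of_tag(tag_filter, 7)
        returns 19 (the apostrophe closing the second tag).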
1815         """
1816         try:
1817             idx_opener = tag_filter.index(closer, start)
1818             return tag_filter.index(closer, idx_opener + 1)
1819         except ValueError:
1820             return None
1821
1822     @staticmethod
1823     def _condition(tag_filter):
1824         """Create a conditional statement from the given tag filter.
1825
1826         :param tag_filter: Filter based on tags from the element specification.
1827         :type tag_filter: str
1828         :returns: Conditional statement which can be evaluated.
1829         :rtype: str
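
        Example (illustrative): the filter u"'NDRPDR' and '1C'" becomes
        u"'NDRPDR' in tags and '1C' in tags", which filter_data() evaluates
        per test via eval(cond, {u"tags": test_data.get(u"tags", u"")}).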
1830         """
1831         index = 0
1832         while True:
1833             index = InputData._end_of_tag(tag_filter, index)
1834             if index is None:
1835                 return tag_filter
1836             index += 1
1837             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1838
1839     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1840                     continue_on_error=False):
1841         """Filter required data from the given jobs and builds.
1842
1843         The output data structure is:
1844         - job 1
1845           - build 1
1846             - test (or suite) 1 ID:
1847               - param 1
1848               - param 2
1849               ...
1850               - param n
1851             ...
1852             - test (or suite) n ID:
1853             ...
1854           ...
1855           - build n
1856         ...
1857         - job n
1858
1859         :param element: Element which will use the filtered data.
1860         :param params: Parameters which will be included in the output. If None,
1861             all parameters are included.
1862         :param data: If not None, this data is used instead of data specified
1863             in the element.
1864         :param data_set: The set of data to be filtered: tests, suites,
1865             metadata.
1866         :param continue_on_error: Continue if an error occurs while reading
1867             the data. The item will then be empty.
1868         :type element: pandas.Series
1869         :type params: list
1870         :type data: dict
1871         :type data_set: str
1872         :type continue_on_error: bool
1873         :returns: Filtered data.
1874         :rtype: pandas.Series
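
        Example (illustrative): with element[u"filter"] set to
        u"'NDRPDR' and '1C'", only tests whose tag list contains both tags
        are kept; the result is then accessed as
        data[job][build][test_id][param].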
1875         """
1876
1877         try:
1878             if data_set == u"suites":
1879                 cond = u"True"
1880             elif element[u"filter"] in (u"all", u"template"):
1881                 cond = u"True"
1882             else:
1883                 cond = InputData._condition(element[u"filter"])
1884             logging.debug(f"   Filter: {cond}")
1885         except KeyError:
1886             logging.error(u"  No filter defined.")
1887             return None
1888
1889         if params is None:
1890             params = element.get(u"parameters", None)
1891             if params:
1892                 params.extend((u"type", u"status"))
1893
1894         data_to_filter = data if data else element[u"data"]
1895         data = pd.Series()
1896         try:
1897             for job, builds in data_to_filter.items():
1898                 data[job] = pd.Series()
1899                 for build in builds:
1900                     data[job][str(build)] = pd.Series()
1901                     try:
1902                         data_dict = dict(
1903                             self.data[job][str(build)][data_set].items())
1904                     except KeyError:
1905                         if continue_on_error:
1906                             continue
1907                         return None
1908
1909                     for test_id, test_data in data_dict.items():
1910                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1911                             data[job][str(build)][test_id] = pd.Series()
1912                             if params is None:
1913                                 for param, val in test_data.items():
1914                                     data[job][str(build)][test_id][param] = val
1915                             else:
1916                                 for param in params:
1917                                     try:
1918                                         data[job][str(build)][test_id][param] =\
1919                                             test_data[param]
1920                                     except KeyError:
1921                                         data[job][str(build)][test_id][param] =\
1922                                             u"No Data"
1923             return data
1924
1925         except (KeyError, IndexError, ValueError) as err:
1926             logging.error(
1927                 f"Missing mandatory parameter in the element specification: "
1928                 f"{repr(err)}"
1929             )
1930             return None
1931         except AttributeError as err:
1932             logging.error(repr(err))
1933             return None
1934         except SyntaxError as err:
1935             logging.error(
1936                 f"The filter {cond} is not correct. Check if all tags are "
1937                 f"enclosed by apostrophes.\n{repr(err)}"
1938             )
1939             return None
1940
1941     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1942                              continue_on_error=False):
1943         """Filter required data from the given jobs and builds.
1944
1945         The output data structure is:
1946         - job 1
1947           - build 1
1948             - test (or suite) 1 ID:
1949               - param 1
1950               - param 2
1951               ...
1952               - param n
1953             ...
1954             - test (or suite) n ID:
1955             ...
1956           ...
1957           - build n
1958         ...
1959         - job n
1960
1961         :param element: Element which will use the filtered data.
1962         :param params: Parameters which will be included in the output. If
1963             None, all parameters are included.
1964         :param data_set: The set of data to be filtered: tests, suites,
1965             metadata.
1966         :param continue_on_error: Continue if an error occurs while reading
1967             the data. The item will then be empty.
1968         :type element: pandas.Series
1969         :type params: list
1970         :type data_set: str
1971         :type continue_on_error: bool
1972         :returns: Filtered data.
1973         :rtype: pandas.Series
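
        Example (illustrative): an include pattern such as
        u".*-{core}-ethip4-ip4base-ndrpdr" together with u"core": [u"1c", u"2c"]
        expands to two regular expressions which are matched against the
        lower-cased test IDs.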
1974         """
1975
1976         include = element.get(u"include", None)
1977         if not include:
1978             logging.warning(u"No tests to include, skipping the element.")
1979             return None
1980
1981         if params is None:
1982             params = element.get(u"parameters", None)
1983             if params and u"type" not in params:
1984                 params.append(u"type")
1985
1986         cores = element.get(u"core", None)
1987         if cores:
1988             tests = list()
1989             for core in cores:
1990                 for test in include:
1991                     tests.append(test.format(core=core))
1992         else:
1993             tests = include
1994
1995         data = pd.Series()
1996         try:
1997             for job, builds in element[u"data"].items():
1998                 data[job] = pd.Series()
1999                 for build in builds:
2000                     data[job][str(build)] = pd.Series()
2001                     for test in tests:
2002                         try:
2003                             reg_ex = re.compile(str(test).lower())
2004                             for test_id in self.data[job][
2005                                     str(build)][data_set].keys():
2006                                 if re.match(reg_ex, str(test_id).lower()):
2007                                     test_data = self.data[job][
2008                                         str(build)][data_set][test_id]
2009                                     data[job][str(build)][test_id] = pd.Series()
2010                                     if params is None:
2011                                         for param, val in test_data.items():
2012                                             data[job][str(build)][test_id]\
2013                                                 [param] = val
2014                                     else:
2015                                         for param in params:
2016                                             try:
2017                                                 data[job][str(build)][
2018                                                     test_id][param] = \
2019                                                     test_data[param]
2020                                             except KeyError:
2021                                                 data[job][str(build)][
2022                                                     test_id][param] = u"No Data"
2023                         except KeyError as err:
2024                             if continue_on_error:
2025                                 logging.debug(repr(err))
2026                                 continue
2027                             logging.error(repr(err))
2028                             return None
2029             return data
2030
2031         except (KeyError, IndexError, ValueError) as err:
2032             logging.error(
2033                 f"Missing mandatory parameter in the element "
2034                 f"specification: {repr(err)}"
2035             )
2036             return None
2037         except AttributeError as err:
2038             logging.error(repr(err))
2039             return None
2040
2041     @staticmethod
2042     def merge_data(data):
2043         """Merge data from multiple jobs and builds into a simple data structure.
2044
2045         The output data structure is:
2046
2047         - test (suite) 1 ID:
2048           - param 1
2049           - param 2
2050           ...
2051           - param n
2052         ...
2053         - test (suite) n ID:
2054         ...
2055
2056         :param data: Data to merge.
2057         :type data: pandas.Series
2058         :returns: Merged data.
2059         :rtype: pandas.Series
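
        Note (derived from the implementation): if the same item ID occurs in
        more than one build, the occurrence processed last overwrites the
        previous ones.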
2060         """
2061
2062         logging.info(u"    Merging data ...")
2063
2064         merged_data = pd.Series()
2065         for builds in data.values:
2066             for item in builds.values:
2067                 for item_id, item_data in item.items():
2068                     merged_data[item_id] = item_data
2069         return merged_data
2070
2071     def print_all_oper_data(self):
2072         """Print all operational data to the console.
2073         """
2074
2075         for job in self._input_data.values:
2076             for build in job.values:
2077                 for test_id, test_data in build[u"tests"].items():
2078                     print(f"{test_id}")
2079                     if test_data.get(u"show-run", None) is None:
2080                         continue
2081                     for dut_name, data in test_data[u"show-run"].items():
2082                         if data.get(u"runtime", None) is None:
2083                             continue
2084                         runtime = loads(data[u"runtime"])
2085                         try:
2086                             threads_nr = len(runtime[0][u"clocks"])
2087                         except (IndexError, KeyError):
2088                             continue
2089                         threads = OrderedDict(
2090                             {idx: list() for idx in range(threads_nr)})
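                        # For each node in the "show runtime" output, normalise
                        # the clock count: per vector (packet) if vectors were
                        # processed, otherwise per call, otherwise per suspend.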
2091                         for item in runtime:
2092                             for idx in range(threads_nr):
2093                                 if item[u"vectors"][idx] > 0:
2094                                     clocks = item[u"clocks"][idx] / \
2095                                              item[u"vectors"][idx]
2096                                 elif item[u"calls"][idx] > 0:
2097                                     clocks = item[u"clocks"][idx] / \
2098                                              item[u"calls"][idx]
2099                                 elif item[u"suspends"][idx] > 0:
2100                                     clocks = item[u"clocks"][idx] / \
2101                                              item[u"suspends"][idx]
2102                                 else:
2103                                     clocks = 0.0
2104
2105                                 if item[u"calls"][idx] > 0:
2106                                     vectors_call = item[u"vectors"][idx] / \
2107                                                    item[u"calls"][idx]
2108                                 else:
2109                                     vectors_call = 0.0
2110
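                                # Include only nodes which did some work, i.e.
                                # registered at least one call, vector or
                                # suspend.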
2111                                 if int(item[u"calls"][idx]) + int(
2112                                         item[u"vectors"][idx]) + \
2113                                         int(item[u"suspends"][idx]):
2114                                     threads[idx].append([
2115                                         item[u"name"],
2116                                         item[u"calls"][idx],
2117                                         item[u"vectors"][idx],
2118                                         item[u"suspends"][idx],
2119                                         clocks,
2120                                         vectors_call
2121                                     ])
2122
2123                         print(f"Host IP: {data.get(u'host', '')}, "
2124                               f"Socket: {data.get(u'socket', '')}")
2125                         for thread_nr, thread in threads.items():
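                            # Each VPP call processes one vector (burst) of
                            # packets, so the calls counter maps to "Nr of
                            # Vectors" and the vectors (packets) counter maps
                            # to "Nr of Packets" in the table below.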
2126                             txt_table = prettytable.PrettyTable(
2127                                 (
2128                                     u"Name",
2129                                     u"Nr of Vectors",
2130                                     u"Nr of Packets",
2131                                     u"Suspends",
2132                                     u"Cycles per Packet",
2133                                     u"Average Vector Size"
2134                                 )
2135                             )
2136                             avg = 0.0
2137                             for row in thread:
2138                                 txt_table.add_row(row)
2139                                 avg += row[-1]
2140                             if len(thread) == 0:
2141                                 avg = u""
2142                             else:
2143                                 avg = f", Average Vector Size per Node: " \
2144                                       f"{(avg / len(thread)):.2f}"
2145                             th_name = u"main" if thread_nr == 0 \
2146                                 else f"worker_{thread_nr}"
2147                             print(f"{dut_name}, {th_name}{avg}")
2148                             txt_table.float_format = u".2"
2149                             txt_table.align = u"r"
2150                             txt_table.align[u"Name"] = u"l"
2151                             print(f"{txt_table.get_string()}\n")