PAL: Process sh-run from telemetry
[csit.git] / resources/tools/presentation/input_data_parser.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store it
17   in pandas' Series,
18 - filter the data using tags,
19 - provide access to the data.
20 """
21
22 import re
23 import copy
24 import resource
25 import logging
26
27 from collections import OrderedDict
28 from os import remove, walk, listdir
29 from os.path import isfile, isdir, join
30 from datetime import datetime as dt
31 from datetime import timedelta
32 from json import loads
33 from json.decoder import JSONDecodeError
34
35 import hdrh.histogram
36 import hdrh.codec
37 import prettytable
38 import pandas as pd
39
40 from robot.api import ExecutionResult, ResultVisitor
41 from robot import errors
42
43 from resources.libraries.python import jumpavg
44 from input_data_files import download_and_unzip_data_file
45 from pal_errors import PresentationError
46
47
48 # Separator used in file names
49 SEPARATOR = u"__"
50
51
52 class ExecutionChecker(ResultVisitor):
53     """Class to traverse through the test suite structure.
54
55     The functionality implemented in this class generates a JSON structure:
56
57     Performance tests:
58
59     {
60         "metadata": {
61             "generated": "Timestamp",
62             "version": "SUT version",
63             "job": "Jenkins job name",
64             "build": "Information about the build"
65         },
66         "suites": {
67             "Suite long name 1": {
68                 "name": "Suite name",
69                 "doc": "Suite 1 documentation",
70                 "parent": "Suite 1 parent",
71                 "level": "Level of the suite in the suite hierarchy"
72             },
73             "Suite long name N": {
74                 "name": "Suite name",
75                 "doc": "Suite N documentation",
76                 "parent": "Suite N parent",
77                 "level": "Level of the suite in the suite hierarchy"
78             }
79         },
80         "tests": {
81             # NDRPDR tests:
82             "ID": {
83                 "name": "Test name",
84                 "parent": "Name of the parent of the test",
85                 "doc": "Test documentation",
86                 "msg": "Test message",
87                 "conf-history": "DUT1 and DUT2 PAPI History",
88                 "show-run": "Show Run",
89                 "tags": ["tag 1", "tag 2", "tag n"],
90                 "type": "NDRPDR",
91                 "status": "PASS" | "FAIL",
92                 "throughput": {
93                     "NDR": {
94                         "LOWER": float,
95                         "UPPER": float
96                     },
97                     "PDR": {
98                         "LOWER": float,
99                         "UPPER": float
100                     }
101                 },
102                 "latency": {
103                     "NDR": {
104                         "direction1": {
105                             "min": float,
106                             "avg": float,
107                             "max": float,
108                             "hdrh": str
109                         },
110                         "direction2": {
111                             "min": float,
112                             "avg": float,
113                             "max": float,
114                             "hdrh": str
115                         }
116                     },
117                     "PDR": {
118                         "direction1": {
119                             "min": float,
120                             "avg": float,
121                             "max": float,
122                             "hdrh": str
123                         },
124                         "direction2": {
125                             "min": float,
126                             "avg": float,
127                             "max": float,
128                             "hdrh": str
129                         }
130                     }
131                 }
132             },
133
134             # TCP tests:
135             "ID": {
136                 "name": "Test name",
137                 "parent": "Name of the parent of the test",
138                 "doc": "Test documentation",
139                 "msg": "Test message",
140                 "tags": ["tag 1", "tag 2", "tag n"],
141                 "type": "TCP",
142                 "status": "PASS" | "FAIL",
143                 "result": int
144             },
145
146             # MRR, BMRR tests:
147             "ID": {
148                 "name": "Test name",
149                 "parent": "Name of the parent of the test",
150                 "doc": "Test documentation",
151                 "msg": "Test message",
152                 "tags": ["tag 1", "tag 2", "tag n"],
153                 "type": "MRR" | "BMRR",
154                 "status": "PASS" | "FAIL",
155                 "result": {
156                     "receive-rate": float,
157                     # Average of a list, computed using AvgStdevStats.
158                     # In CSIT-1180, replace with List[float].
159                 }
160             },
161
162             "ID": {
163                 # next test
164             }
165         }
166     }
167
168
169     Functional tests:
170
171     {
172         "metadata": {  # Optional
173             "version": "VPP version",
174             "job": "Jenkins job name",
175             "build": "Information about the build"
176         },
177         "suites": {
178             "Suite name 1": {
179                 "doc": "Suite 1 documentation",
180                 "parent": "Suite 1 parent",
181                 "level": "Level of the suite in the suite hierarchy"
182             },
183             "Suite name N": {
184                 "doc": "Suite N documentation",
185                 "parent": "Suite N parent",
186                 "level": "Level of the suite in the suite hierarchy"
187             }
188         },
189         "tests": {
190             "ID": {
191                 "name": "Test name",
192                 "parent": "Name of the parent of the test",
193                 "doc": "Test documentation",
194                 "msg": "Test message",
195                 "tags": ["tag 1", "tag 2", "tag n"],
196                 "conf-history": "DUT1 and DUT2 PAPI History",
197                 "show-run": "Show Run",
198                 "status": "PASS" | "FAIL"
199             },
200             "ID": {
201                 # next test
202             }
203         }
204     }
205
206     .. note:: ID is the lowercase full path to the test.
207     """
208
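    # For example, after an output.xml has been visited, the NDR lower bound
    # of an NDRPDR test is read back from the structure above as (test_id is
    # illustrative, the lowercase full path to the test):
    #
    #     checker.data[u"tests"][test_id][u"throughput"][u"NDR"][u"LOWER"]
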
209     REGEX_PLR_RATE = re.compile(
210         r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
211         r'PLRsearch upper bound::?\s(\d+.\d+)'
212     )
213     REGEX_NDRPDR_RATE = re.compile(
214         r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
215         r'NDR_UPPER:\s(\d+.\d+).*\n'
216         r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
217         r'PDR_UPPER:\s(\d+.\d+)'
218     )
219     REGEX_NDRPDR_GBPS = re.compile(
220         r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
221         r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
222         r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
223         r'PDR_UPPER:.*,\s(\d+.\d+)'
224     )
225     REGEX_PERF_MSG_INFO = re.compile(
226         r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
227         r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
228         r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
229         r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
230         r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
231     )
232     REGEX_CPS_MSG_INFO = re.compile(
233         r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
234         r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
235     )
236     REGEX_PPS_MSG_INFO = re.compile(
237         r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
238         r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
239     )
240     REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
241
242     REGEX_VSAP_MSG_INFO = re.compile(
243         r'Transfer Rate: (\d*.\d*).*\n'
244         r'Latency: (\d*.\d*).*\n'
245         r'Completed requests: (\d*).*\n'
246         r'Failed requests: (\d*).*\n'
247         r'Total data transferred: (\d*).*\n'
248         r'Connection [cr]ps rate:\s*(\d*.\d*)'
249     )
250
251     # Needed for CPS and PPS tests
252     REGEX_NDRPDR_LAT_BASE = re.compile(
253         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
254         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
255     )
256     REGEX_NDRPDR_LAT = re.compile(
257         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
258         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
259         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
260         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
261         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
262         r'Latency.*\[\'(.*)\', \'(.*)\'\]'
263     )
264
265     REGEX_VERSION_VPP = re.compile(
266         r"(return STDOUT Version:\s*|"
267         r"VPP Version:\s*|VPP version:\s*)(.*)"
268     )
269     REGEX_VERSION_DPDK = re.compile(
270         r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
271     )
272     REGEX_TCP = re.compile(
273         r'Total\s(rps|cps|throughput):\s(\d*).*$'
274     )
275     REGEX_MRR = re.compile(
276         r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
277         r'tx\s(\d*),\srx\s(\d*)'
278     )
279     REGEX_BMRR = re.compile(
280         r'.*trial results.*: \[(.*)\]'
281     )
282     REGEX_RECONF_LOSS = re.compile(
283         r'Packets lost due to reconfig: (\d*)'
284     )
285     REGEX_RECONF_TIME = re.compile(
286         r'Implied time lost: (\d*.[\de-]*)'
287     )
288     REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
289
290     REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
291
292     REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
293
294     REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
295
296     REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
297
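    # A fabricated sample showing what REGEX_NDRPDR_RATE (defined above)
    # captures; real input comes from Robot Framework test messages:
    #
    #     >>> sample = (
    #     ...     u"NDR_LOWER: 1.0 pps\nfiller\n"
    #     ...     u"NDR_UPPER: 2.0 pps\n"
    #     ...     u"PDR_LOWER: 3.0 pps\nfiller\n"
    #     ...     u"PDR_UPPER: 4.0 pps"
    #     ... )
    #     >>> ExecutionChecker.REGEX_NDRPDR_RATE.search(sample).groups()
    #     ('1.0', '2.0', '3.0', '4.0')
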
298     def __init__(self, metadata, mapping, ignore, for_output):
299         """Initialisation.
300
301         :param metadata: Key-value pairs to be included in "metadata" part of
302             JSON structure.
303         :param mapping: Mapping of the old names of test cases to the new
304             (actual) ones.
305         :param ignore: List of TCs to be ignored.
306         :param for_output: Output to be generated from downloaded data.
307         :type metadata: dict
308         :type mapping: dict
309         :type ignore: list
310         :type for_output: str
311         """
312
313         # Type of message to parse out from the test messages
314         self._msg_type = None
315
316         # VPP version
317         self._version = None
318
319         # Timestamp
320         self._timestamp = None
321
322         # Testbed. The testbed is identified by TG node IP address.
323         self._testbed = None
324
325         # Mapping of TC long names
326         self._mapping = mapping
327
328         # Ignore list
329         self._ignore = ignore
330
331         self._for_output = for_output
332
333         # Number of PAPI History messages found:
334         # 0 - no message
335         # 1 - PAPI History of DUT1
336         # 2 - PAPI History of DUT2
337         self._conf_history_lookup_nr = 0
338
339         self._sh_run_counter = 0
340         self._telemetry_kw_counter = 0
341         self._telemetry_msg_counter = 0
342
343         # Test ID of the currently processed test: the lowercase full path
344         # to the test.
345         self._test_id = None
346
347         # The main data structure
348         self._data = {
349             u"metadata": OrderedDict(),
350             u"suites": OrderedDict(),
351             u"tests": OrderedDict()
352         }
353
354         # Save the provided metadata
355         for key, val in metadata.items():
356             self._data[u"metadata"][key] = val
357
358         # Dictionary defining the methods used to parse different types of
359         # messages
360         self.parse_msg = {
361             u"timestamp": self._get_timestamp,
362             u"vpp-version": self._get_vpp_version,
363             u"dpdk-version": self._get_dpdk_version,
364             u"teardown-papi-history": self._get_papi_history,
365             u"test-show-runtime": self._get_show_run,
366             u"testbed": self._get_testbed,
367             u"test-telemetry": self._get_telemetry
368         }
369
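    # The parse_msg table above reduces message handling to one dictionary
    # lookup: start_message() further below calls
    # self.parse_msg[self._msg_type](msg) whenever a keyword visitor has set
    # self._msg_type. A standalone sketch of the same pattern (names here are
    # illustrative only, not part of this module):
    #
    #     handlers = {u"vpp-version": lambda m: print(m)}
    #     msg_type = u"vpp-version"
    #     if msg_type:
    #         handlers[msg_type](u"some message")
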
370     @property
371     def data(self):
372         """Getter - Data parsed from the XML file.
373
374         :returns: Data parsed from the XML file.
375         :rtype: dict
376         """
377         return self._data
378
379     def _get_data_from_mrr_test_msg(self, msg):
380         """Get info from message of MRR performance tests.
381
382         :param msg: Message to be processed.
383         :type msg: str
384         :returns: Processed message or "Test Failed." if a problem occurs.
385         :rtype: str
386         """
387
388         groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
389         if not groups or groups.lastindex != 1:
390             return u"Test Failed."
391
392         try:
393             data = groups.group(1).split(u", ")
394         except (AttributeError, IndexError, ValueError, KeyError):
395             return u"Test Failed."
396
397         out_str = u"["
398         try:
399             for item in data:
400                 out_str += f"{(float(item) / 1e6):.2f}, "
401             return out_str[:-2] + u"]"
402         except (AttributeError, IndexError, ValueError, KeyError):
403             return u"Test Failed."
404
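    # For illustration, given a populated checker instance,
    # _get_data_from_mrr_test_msg() condenses a fabricated trial-results
    # message into per-trial rates in Mpps:
    #
    #     >>> checker._get_data_from_mrr_test_msg(
    #     ...     u"Maximum Receive Rate trial results [1000000.0, 2500000.0]"
    #     ... )
    #     '[1.00, 2.50]'
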
405     def _get_data_from_cps_test_msg(self, msg):
406         """Get info from message of NDRPDR CPS tests.
407
408         :param msg: Message to be processed.
409         :type msg: str
410         :returns: Processed message or "Test Failed." if a problem occurs.
411         :rtype: str
412         """
413
414         groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
415         if not groups or groups.lastindex != 2:
416             return u"Test Failed."
417
418         try:
419             return (
420                 f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
421                 f"2. {(float(groups.group(2)) / 1e6):5.2f}"
422             )
423         except (AttributeError, IndexError, ValueError, KeyError):
424             return u"Test Failed."
425
426     def _get_data_from_pps_test_msg(self, msg):
427         """Get info from message of NDRPDR PPS tests.
428
429         :param msg: Message to be processed.
430         :type msg: str
431         :returns: Processed message or "Test Failed." if a problem occurs.
432         :rtype: str
433         """
434
435         groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
436         if not groups or groups.lastindex != 4:
437             return u"Test Failed."
438
439         try:
440             return (
441                 f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
442                 f"{float(groups.group(2)):5.2f}\n"
443                 f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
444                 f"{float(groups.group(4)):5.2f}"
445             )
446         except (AttributeError, IndexError, ValueError, KeyError):
447             return u"Test Failed."
448
449     def _get_data_from_perf_test_msg(self, msg):
450         """Get info from message of NDRPDR performance tests.
451
452         :param msg: Message to be processed.
453         :type msg: str
454         :returns: Processed message or "Test Failed." if a problem occurs.
455         :rtype: str
456         """
457
458         groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
459         if not groups or groups.lastindex != 10:
460             return u"Test Failed."
461
462         try:
463             data = {
464                 u"ndr_low": float(groups.group(1)),
465                 u"ndr_low_b": float(groups.group(2)),
466                 u"pdr_low": float(groups.group(3)),
467                 u"pdr_low_b": float(groups.group(4)),
468                 u"pdr_lat_90_1": groups.group(5),
469                 u"pdr_lat_90_2": groups.group(6),
470                 u"pdr_lat_50_1": groups.group(7),
471                 u"pdr_lat_50_2": groups.group(8),
472                 u"pdr_lat_10_1": groups.group(9),
473                 u"pdr_lat_10_2": groups.group(10),
474             }
475         except (AttributeError, IndexError, ValueError, KeyError):
476             return u"Test Failed."
477
478         def _process_lat(in_str_1, in_str_2):
479             """Extract min, avg, max values from latency string.
480
481             :param in_str_1: Latency string for one direction produced by
482                 Robot Framework.
483             :param in_str_2: Latency string for second direction produced by
484                 Robot Framework.
485             :type in_str_1: str
486             :type in_str_2: str
487             :returns: Latency percentile values or None if a problem occurs.
488             :rtype: tuple
489             """
490             in_list_1 = in_str_1.split('/', 3)
491             in_list_2 = in_str_2.split('/', 3)
492
493             if len(in_list_1) != 4 or len(in_list_2) != 4:
494                 return None
495
496             in_list_1[3] += u"=" * ((4 - len(in_list_1[3]) % 4) % 4)
497             try:
498                 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
499             except hdrh.codec.HdrLengthException:
500                 return None
501
502             in_list_2[3] += u"=" * ((4 - len(in_list_2[3]) % 4) % 4)
503             try:
504                 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
505             except hdrh.codec.HdrLengthException:
506                 return None
507
508             if hdr_lat_1 and hdr_lat_2:
509                 hdr_lat = (
510                     hdr_lat_1.get_value_at_percentile(50.0),
511                     hdr_lat_1.get_value_at_percentile(90.0),
512                     hdr_lat_1.get_value_at_percentile(99.0),
513                     hdr_lat_2.get_value_at_percentile(50.0),
514                     hdr_lat_2.get_value_at_percentile(90.0),
515                     hdr_lat_2.get_value_at_percentile(99.0)
516                 )
517
518                 if all(hdr_lat):
519                     return hdr_lat
520
521             return None
522
523         try:
524             out_msg = (
525                 f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
526                 f"{data[u'ndr_low_b']:5.2f}"
527                 f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
528                 f"{data[u'pdr_low_b']:5.2f}"
529             )
530             latency = (
531                 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
532                 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
533                 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
534             )
535             if all(latency):
536                 max_len = len(str(max((max(item) for item in latency))))
537                 max_len = 4 if max_len < 4 else max_len
538
539                 for idx, lat in enumerate(latency):
540                     if not idx:
541                         out_msg += u"\n"
542                     out_msg += (
543                         f"\n{idx + 3}. "
544                         f"{lat[0]:{max_len}d} "
545                         f"{lat[1]:{max_len}d} "
546                         f"{lat[2]:{max_len}d}      "
547                         f"{lat[3]:{max_len}d} "
548                         f"{lat[4]:{max_len}d} "
549                         f"{lat[5]:{max_len}d} "
550                     )
551
552             return out_msg
553
554         except (AttributeError, IndexError, ValueError, KeyError):
555             return u"Test Failed."
556
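    # A minimal sketch of the hdrh round trip used in _process_lat() above
    # (values are fabricated; real histograms come from latency measurements
    # carried in the test message):
    #
    #     import hdrh.histogram
    #
    #     hist = hdrh.histogram.HdrHistogram(1, 10000000, 3)
    #     hist.record_value(42)
    #     encoded = hist.encode()  # base64 blob, as carried in test messages
    #     decoded = hdrh.histogram.HdrHistogram.decode(encoded)
    #     print(decoded.get_value_at_percentile(50.0))  # ~42
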
557     def _get_testbed(self, msg):
558         """Called when extraction of testbed IP is required.
559         The testbed is identified by TG node IP address.
560
561         :param msg: Message to process.
562         :type msg: Message
563         :returns: Nothing.
564         """
565
566         if msg.message.count(u"Setup of TG node") or \
567                 msg.message.count(u"Setup of node TG host"):
568             reg_tg_ip = re.compile(
569                 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
570             try:
571                 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
572             except (KeyError, ValueError, IndexError, AttributeError):
573                 pass
574             finally:
575                 self._data[u"metadata"][u"testbed"] = self._testbed
576                 self._msg_type = None
577
578     def _get_vpp_version(self, msg):
579         """Called when extraction of VPP version is required.
580
581         :param msg: Message to process.
582         :type msg: Message
583         :returns: Nothing.
584         """
585
586         if msg.message.count(u"return STDOUT Version:") or \
587                 msg.message.count(u"VPP Version:") or \
588                 msg.message.count(u"VPP version:"):
589             self._version = str(
590                 re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
591             )
592             self._data[u"metadata"][u"version"] = self._version
593             self._msg_type = None
594
595     def _get_dpdk_version(self, msg):
596         """Called when extraction of DPDK version is required.
597
598         :param msg: Message to process.
599         :type msg: Message
600         :returns: Nothing.
601         """
602
603         if msg.message.count(u"DPDK Version:"):
604             try:
605                 self._version = str(re.search(
606                     self.REGEX_VERSION_DPDK, msg.message).group(2))
607                 self._data[u"metadata"][u"version"] = self._version
608             except IndexError:
609                 pass
610             finally:
611                 self._msg_type = None
612
613     def _get_timestamp(self, msg):
614         """Called when extraction of timestamp is required.
615
616         :param msg: Message to process.
617         :type msg: Message
618         :returns: Nothing.
619         """
620
621         self._timestamp = msg.timestamp[:14]
622         self._data[u"metadata"][u"generated"] = self._timestamp
623         self._msg_type = None
624
625     def _get_papi_history(self, msg):
626         """Called when extraction of PAPI command history is required.
627
628         :param msg: Message to process.
629         :type msg: Message
630         :returns: Nothing.
631         """
632         if msg.message.count(u"PAPI command history:"):
633             self._conf_history_lookup_nr += 1
634             if self._conf_history_lookup_nr == 1:
635                 self._data[u"tests"][self._test_id][u"conf-history"] = str()
636             else:
637                 self._msg_type = None
638             text = re.sub(
639                 r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
640                 u"",
641                 msg.message,
642                 count=1
643             ).replace(u'"', u"'")
644             self._data[u"tests"][self._test_id][u"conf-history"] += (
645                 f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
646             )
647
648     def _get_show_run(self, msg):
649         """Called when extraction of VPP operational data (output of CLI command
650         Show Runtime) is required.
651
652         :param msg: Message to process.
653         :type msg: Message
654         :returns: Nothing.
655         """
656
657         if not msg.message.count(u"stats runtime"):
658             return
659
660         # Temporary solution
661         if self._sh_run_counter > 1:
662             return
663
664         if u"show-run" not in self._data[u"tests"][self._test_id].keys():
665             self._data[u"tests"][self._test_id][u"show-run"] = dict()
666
667         groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
668         if not groups:
669             return
670         try:
671             host = groups.group(1)
672         except (AttributeError, IndexError):
673             host = u""
674         try:
675             sock = groups.group(2)
676         except (AttributeError, IndexError):
677             sock = u""
678
679         dut = u"dut{nr}".format(
680             nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
681
682         self._data[u'tests'][self._test_id][u'show-run'][dut] = \
683             copy.copy(
684                 {
685                     u"host": host,
686                     u"socket": sock,
687                     u"runtime": str(msg.message).replace(u' ', u'').
688                                 replace(u'\n', u'').replace(u"'", u'"').
689                                 replace(u'b"', u'"').replace(u'u"', u'"').
690                                 split(u":", 1)[1]
691                 }
692             )
693
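    # An abridged illustration of the normalisation chain above, on a
    # fabricated message (real payloads come from "show runtime" via PAPI):
    #
    #     >>> m = u"DUT1 stats runtime: [{'name': 'ip4-lookup', 'calls': 7}]"
    #     >>> m.replace(u' ', u'').replace(u"'", u'"').split(u":", 1)[1]
    #     '[{"name":"ip4-lookup","calls":7}]'
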
694     def _get_telemetry(self, msg):
695         """Called when extraction of VPP telemetry data is required.
696
697         :param msg: Message to process.
698         :type msg: Message
699         :returns: Nothing.
700         """
701
702         if self._telemetry_kw_counter > 1:
703             return
704         if not msg.message.count(u"vpp_runtime_calls"):
705             return
706
707         if u"telemetry-show-run" not in \
708                 self._data[u"tests"][self._test_id].keys():
709             self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()
710
711         self._telemetry_msg_counter += 1
712         dut = f"dut{self._telemetry_msg_counter}"
713         runtime = {
714             u"source_type": u"node",
715             u"source_id": dut,
716             u"msg_type": u"metric",
717             u"log_level": u"INFO",
718             u"timestamp": msg.timestamp,
719             u"msg": u"show_runtime",
720             u"host": dut,  # No info, should be host IP
721             u"socket": u"",  # No info
722             u"data": list()
723         }
724         for line in msg.message.splitlines():
725             if not line.startswith(u"vpp_runtime_"):
726                 continue
727             try:
728                 params, value = line.rsplit(u" ", maxsplit=2)[:-1]
729                 cut = params.index(u"{")
730                 name = params[:cut].split(u"_", maxsplit=2)[-1]
731                 labels = eval(
732                     u"dict" + params[cut:].replace('{', '(').replace('}', ')')
733                 )
734                 labels[u"graph_node"] = labels.pop(u"name")
735                 runtime[u"data"].append(
736                     {
737                         u"name": name,
738                         u"value": value,
739                         u"labels": labels
740                     }
741                 )
742             except (TypeError, ValueError, IndexError):
743                 continue
744
745         self._data[u'tests'][self._test_id][u'telemetry-show-run'][dut] = \
746             copy.copy(
747                 {
748                     u"host": dut,
749                     u"socket": u"",
750                     u"runtime": runtime
751                 }
752             )
753
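    # One line of the telemetry payload parsed by the loop in _get_telemetry()
    # above, with fabricated values (Prometheus-style "metric{labels} value
    # timestamp"):
    #
    #   vpp_runtime_calls{name="ip4-lookup",state="active",thread_id="1"} 7.0 1613123456
    #
    # rsplit() keeps the "metric{labels}" part and the value, the "{...}"
    # part is evaluated as keyword arguments to dict(), and the "name" label
    # is renamed to "graph_node", yielding:
    #
    #   {u"name": u"calls", u"value": u"7.0", u"labels": {
    #       u"graph_node": u"ip4-lookup", u"state": u"active",
    #       u"thread_id": u"1"}}
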
754     def _get_ndrpdr_throughput(self, msg):
755         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
756         message.
757
758         :param msg: The test message to be parsed.
759         :type msg: str
760         :returns: Parsed data as a dict and the status (PASS/FAIL).
761         :rtype: tuple(dict, str)
762         """
763
764         throughput = {
765             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
766             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
767         }
768         status = u"FAIL"
769         groups = re.search(self.REGEX_NDRPDR_RATE, msg)
770
771         if groups is not None:
772             try:
773                 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
774                 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
775                 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
776                 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
777                 status = u"PASS"
778             except (IndexError, ValueError):
779                 pass
780
781         return throughput, status
782
783     def _get_ndrpdr_throughput_gbps(self, msg):
784         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
785         test message.
786
787         :param msg: The test message to be parsed.
788         :type msg: str
789         :returns: Parsed data as a dict and the status (PASS/FAIL).
790         :rtype: tuple(dict, str)
791         """
792
793         gbps = {
794             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
795             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
796         }
797         status = u"FAIL"
798         groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
799
800         if groups is not None:
801             try:
802                 gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
803                 gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
804                 gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
805                 gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
806                 status = u"PASS"
807             except (IndexError, ValueError):
808                 pass
809
810         return gbps, status
811
812     def _get_plr_throughput(self, msg):
813         """Get PLRsearch lower bound and PLRsearch upper bound from the test
814         message.
815
816         :param msg: The test message to be parsed.
817         :type msg: str
818         :returns: Parsed data as a dict and the status (PASS/FAIL).
819         :rtype: tuple(dict, str)
820         """
821
822         throughput = {
823             u"LOWER": -1.0,
824             u"UPPER": -1.0
825         }
826         status = u"FAIL"
827         groups = re.search(self.REGEX_PLR_RATE, msg)
828
829         if groups is not None:
830             try:
831                 throughput[u"LOWER"] = float(groups.group(1))
832                 throughput[u"UPPER"] = float(groups.group(2))
833                 status = u"PASS"
834             except (IndexError, ValueError):
835                 pass
836
837         return throughput, status
838
839     def _get_ndrpdr_latency(self, msg):
840         """Get LATENCY from the test message.
841
842         :param msg: The test message to be parsed.
843         :type msg: str
844         :returns: Parsed data as a dict and the status (PASS/FAIL).
845         :rtype: tuple(dict, str)
846         """
847         latency_default = {
848             u"min": -1.0,
849             u"avg": -1.0,
850             u"max": -1.0,
851             u"hdrh": u""
852         }
853         latency = {
854             u"NDR": {
855                 u"direction1": copy.copy(latency_default),
856                 u"direction2": copy.copy(latency_default)
857             },
858             u"PDR": {
859                 u"direction1": copy.copy(latency_default),
860                 u"direction2": copy.copy(latency_default)
861             },
862             u"LAT0": {
863                 u"direction1": copy.copy(latency_default),
864                 u"direction2": copy.copy(latency_default)
865             },
866             u"PDR10": {
867                 u"direction1": copy.copy(latency_default),
868                 u"direction2": copy.copy(latency_default)
869             },
870             u"PDR50": {
871                 u"direction1": copy.copy(latency_default),
872                 u"direction2": copy.copy(latency_default)
873             },
874             u"PDR90": {
875                 u"direction1": copy.copy(latency_default),
876                 u"direction2": copy.copy(latency_default)
877             },
878         }
879
880         groups = re.search(self.REGEX_NDRPDR_LAT, msg)
881         if groups is None:
882             groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
883         if groups is None:
884             return latency, u"FAIL"
885
886         def process_latency(in_str):
887             """Return object with parsed latency values.
888
889             TODO: Define class for the return type.
890
891             :param in_str: Input string, min/avg/max/hdrh format.
892             :type in_str: str
893             :returns: Dict with corresponding keys; float values except hdrh.
894             :rtype: dict
895             :throws IndexError: If in_str does not have enough substrings.
896             :throws ValueError: If a substring does not convert to float.
897             """
898             in_list = in_str.split('/', 3)
899
900             rval = {
901                 u"min": float(in_list[0]),
902                 u"avg": float(in_list[1]),
903                 u"max": float(in_list[2]),
904                 u"hdrh": u""
905             }
906
907             if len(in_list) == 4:
908                 rval[u"hdrh"] = str(in_list[3])
909
910             return rval
911
912         try:
913             latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
914             latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
915             latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
916             latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
917             if groups.lastindex == 4:
918                 return latency, u"PASS"
919         except (IndexError, ValueError):
920             pass
921
922         try:
923             latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
924             latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
925             latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
926             latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
927             latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
928             latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
929             latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
930             latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
931             if groups.lastindex == 12:
932                 return latency, u"PASS"
933         except (IndexError, ValueError):
934             pass
935
936         return latency, u"FAIL"
937
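    # The inner process_latency() helper above, applied to a fabricated
    # "min/avg/max/hdrh" string (the hdrh substring is a placeholder, not a
    # decodable histogram):
    #
    #     >>> process_latency(u"10/20/30/HISTFAAAB")
    #     {'min': 10.0, 'avg': 20.0, 'max': 30.0, 'hdrh': 'HISTFAAAB'}
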
938     @staticmethod
939     def _get_hoststack_data(msg, tags):
940         """Get data from the hoststack test message.
941
942         :param msg: The test message to be parsed.
943         :param tags: Test tags.
944         :type msg: str
945         :type tags: list
946         :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
947         :rtype: tuple(dict, str)
948         """
949         result = dict()
950         status = u"FAIL"
951
952         msg = msg.replace(u"'", u'"').replace(u" ", u"")
953         if u"LDPRELOAD" in tags:
954             try:
955                 result = loads(msg)
956                 status = u"PASS"
957             except JSONDecodeError:
958                 pass
959         elif u"VPPECHO" in tags:
960             try:
961                 msg_lst = msg.replace(u"}{", u"} {").split(u" ")
962                 result = dict(
963                     client=loads(msg_lst[0]),
964                     server=loads(msg_lst[1])
965                 )
966                 status = u"PASS"
967             except (JSONDecodeError, IndexError):
968                 pass
969
970         return result, status
971
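    # The quote swap above exists because Robot Framework renders Python
    # dicts with single quotes, which json.loads() rejects. A fabricated
    # example:
    #
    #     >>> from json import loads
    #     >>> loads(u"{'rx_bytes': 10}".replace(u"'", u'"'))
    #     {'rx_bytes': 10}
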
972     def _get_vsap_data(self, msg, tags):
973         """Get data from the vsap test message.
974
975         :param msg: The test message to be parsed.
976         :param tags: Test tags.
977         :type msg: str
978         :type tags: list
979         :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
980         :rtype: tuple(dict, str)
981         """
982         result = dict()
983         status = u"FAIL"
984
985         groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
986         if groups is not None:
987             try:
988                 result[u"transfer-rate"] = float(groups.group(1)) * 1e3
989                 result[u"latency"] = float(groups.group(2))
990                 result[u"completed-requests"] = int(groups.group(3))
991                 result[u"failed-requests"] = int(groups.group(4))
992                 result[u"bytes-transferred"] = int(groups.group(5))
993                 if u"TCP_CPS" in tags:
994                     result[u"cps"] = float(groups.group(6))
995                 elif u"TCP_RPS" in tags:
996                     result[u"rps"] = float(groups.group(6))
997                 else:
998                     return result, status
999                 status = u"PASS"
1000             except (IndexError, ValueError):
1001                 pass
1002
1003         return result, status
1004
1005     def visit_suite(self, suite):
1006         """Implements traversing through the suite and its direct children.
1007
1008         :param suite: Suite to process.
1009         :type suite: Suite
1010         :returns: Nothing.
1011         """
1012         if self.start_suite(suite) is not False:
1013             suite.suites.visit(self)
1014             suite.tests.visit(self)
1015             self.end_suite(suite)
1016
1017     def start_suite(self, suite):
1018         """Called when suite starts.
1019
1020         :param suite: Suite to process.
1021         :type suite: Suite
1022         :returns: Nothing.
1023         """
1024
1025         try:
1026             parent_name = suite.parent.name
1027         except AttributeError:
1028             return
1029
1030         self._data[u"suites"][suite.longname.lower().
1031                               replace(u'"', u"'").
1032                               replace(u" ", u"_")] = {
1033                                   u"name": suite.name.lower(),
1034                                   u"doc": suite.doc,
1035                                   u"parent": parent_name,
1036                                   u"level": len(suite.longname.split(u"."))
1037                               }
1038
1039         suite.keywords.visit(self)
1040
1041     def end_suite(self, suite):
1042         """Called when suite ends.
1043
1044         :param suite: Suite to process.
1045         :type suite: Suite
1046         :returns: Nothing.
1047         """
1048
1049     def visit_test(self, test):
1050         """Implements traversing through the test.
1051
1052         :param test: Test to process.
1053         :type test: Test
1054         :returns: Nothing.
1055         """
1056         if self.start_test(test) is not False:
1057             test.keywords.visit(self)
1058             self.end_test(test)
1059
1060     def start_test(self, test):
1061         """Called when test starts.
1062
1063         :param test: Test to process.
1064         :type test: Test
1065         :returns: Nothing.
1066         """
1067
1068         self._sh_run_counter = 0
1069         self._telemetry_kw_counter = 0
1070         self._telemetry_msg_counter = 0
1071
1072         longname_orig = test.longname.lower()
1073
1074         # Check the ignore list
1075         if longname_orig in self._ignore:
1076             return
1077
1078         tags = [str(tag) for tag in test.tags]
1079         test_result = dict()
1080
1081         # Change the TC long name and name if defined in the mapping table
1082         longname = self._mapping.get(longname_orig, None)
1083         if longname is not None:
1084             name = longname.split(u'.')[-1]
1085             logging.debug(
1086                 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1087                 f"{name}"
1088             )
1089         else:
1090             longname = longname_orig
1091             name = test.name.lower()
1092
1093         # Remove TC number from the TC long name (backward compatibility):
1094         self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
1095         # Remove TC number from the TC name (not needed):
1096         test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
1097
1098         test_result[u"parent"] = test.parent.name.lower()
1099         test_result[u"tags"] = tags
1100         test_result[u"doc"] = test.doc
1101         test_result[u"type"] = u""
1102         test_result[u"status"] = test.status
1103         test_result[u"starttime"] = test.starttime
1104         test_result[u"endtime"] = test.endtime
1105
1106         if test.status == u"PASS":
1107             if u"NDRPDR" in tags:
1108                 if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
1109                     test_result[u"msg"] = self._get_data_from_pps_test_msg(
1110                         test.message)
1111                 elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1112                     test_result[u"msg"] = self._get_data_from_cps_test_msg(
1113                         test.message)
1114                 else:
1115                     test_result[u"msg"] = self._get_data_from_perf_test_msg(
1116                         test.message)
1117             elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1118                 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1119                     test.message)
1120             else:
1121                 test_result[u"msg"] = test.message
1122         else:
1123             test_result[u"msg"] = test.message
1124
1125         if u"PERFTEST" in tags:
1126             # Replace info about cores (e.g. -1c-) with the info about threads
1127             # and cores (e.g. -1t1c-) in the long test case names and in the
1128             # test case names if necessary.
1129             groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
1130             if not groups:
1131                 tag_count = 0
1132                 tag_tc = str()
1133                 for tag in test_result[u"tags"]:
1134                     groups = re.search(self.REGEX_TC_TAG, tag)
1135                     if groups:
1136                         tag_count += 1
1137                         tag_tc = tag
1138
1139                 if tag_count == 1:
1140                     self._test_id = re.sub(
1141                         self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1142                         self._test_id, count=1
1143                     )
1144                     test_result[u"name"] = re.sub(
1145                         self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1146                         test_result["name"], count=1
1147                     )
1148                 else:
1149                     test_result[u"status"] = u"FAIL"
1150                     self._data[u"tests"][self._test_id] = test_result
1151                     logging.debug(
1152                         f"The test {self._test_id} has no multi-threading "
1153                         f"tag or more than one.\n"
1154                         f"Tags: {test_result[u'tags']}"
1155                     )
1156                     return
1157
1158         if u"DEVICETEST" in tags:
1159             test_result[u"type"] = u"DEVICETEST"
1160         elif u"NDRPDR" in tags:
1161             if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1162                 test_result[u"type"] = u"CPS"
1163             else:
1164                 test_result[u"type"] = u"NDRPDR"
1165             if test.status == u"PASS":
1166                 test_result[u"throughput"], test_result[u"status"] = \
1167                     self._get_ndrpdr_throughput(test.message)
1168                 test_result[u"gbps"], test_result[u"status"] = \
1169                     self._get_ndrpdr_throughput_gbps(test.message)
1170                 test_result[u"latency"], test_result[u"status"] = \
1171                     self._get_ndrpdr_latency(test.message)
1172         elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1173             if u"MRR" in tags:
1174                 test_result[u"type"] = u"MRR"
1175             else:
1176                 test_result[u"type"] = u"BMRR"
1177             if test.status == u"PASS":
1178                 test_result[u"result"] = dict()
1179                 groups = re.search(self.REGEX_BMRR, test.message)
1180                 if groups is not None:
1181                     items_str = groups.group(1)
1182                     items_float = [
1183                         float(item.strip().replace(u"'", u""))
1184                         for item in items_str.split(",")
1185                     ]
1186                     # Use whole list in CSIT-1180.
1187                     stats = jumpavg.AvgStdevStats.for_runs(items_float)
1188                     test_result[u"result"][u"samples"] = items_float
1189                     test_result[u"result"][u"receive-rate"] = stats.avg
1190                     test_result[u"result"][u"receive-stdev"] = stats.stdev
1191                 else:
1192                     groups = re.search(self.REGEX_MRR, test.message)
1193                     test_result[u"result"][u"receive-rate"] = \
1194                         float(groups.group(3)) / float(groups.group(1))
1195         elif u"SOAK" in tags:
1196             test_result[u"type"] = u"SOAK"
1197             if test.status == u"PASS":
1198                 test_result[u"throughput"], test_result[u"status"] = \
1199                     self._get_plr_throughput(test.message)
1200         elif u"HOSTSTACK" in tags:
1201             test_result[u"type"] = u"HOSTSTACK"
1202             if test.status == u"PASS":
1203                 test_result[u"result"], test_result[u"status"] = \
1204                     self._get_hoststack_data(test.message, tags)
1205         elif u"LDP_NGINX" in tags:
1206             test_result[u"type"] = u"LDP_NGINX"
1207             test_result[u"result"], test_result[u"status"] = \
1208                 self._get_vsap_data(test.message, tags)
1209         # elif u"TCP" in tags:  # This might not be used
1210         #     test_result[u"type"] = u"TCP"
1211         #     if test.status == u"PASS":
1212         #         groups = re.search(self.REGEX_TCP, test.message)
1213         #         test_result[u"result"] = int(groups.group(2))
1214         elif u"RECONF" in tags:
1215             test_result[u"type"] = u"RECONF"
1216             if test.status == u"PASS":
1217                 test_result[u"result"] = None
1218                 try:
1219                     grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1220                     grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1221                     test_result[u"result"] = {
1222                         u"loss": int(grps_loss.group(1)),
1223                         u"time": float(grps_time.group(1))
1224                     }
1225                 except (AttributeError, IndexError, ValueError, TypeError):
1226                     test_result[u"status"] = u"FAIL"
1227         else:
1228             test_result[u"status"] = u"FAIL"
1229
1230         self._data[u"tests"][self._test_id] = test_result
1231
1232     def end_test(self, test):
1233         """Called when test ends.
1234
1235         :param test: Test to process.
1236         :type test: Test
1237         :returns: Nothing.
1238         """
1239
1240     def visit_keyword(self, keyword):
1241         """Implements traversing through the keyword and its child keywords.
1242
1243         :param keyword: Keyword to process.
1244         :type keyword: Keyword
1245         :returns: Nothing.
1246         """
1247         if self.start_keyword(keyword) is not False:
1248             self.end_keyword(keyword)
1249
1250     def start_keyword(self, keyword):
1251         """Called when keyword starts. Default implementation does nothing.
1252
1253         :param keyword: Keyword to process.
1254         :type keyword: Keyword
1255         :returns: Nothing.
1256         """
1257         try:
1258             if keyword.type == u"setup":
1259                 self.visit_setup_kw(keyword)
1260             elif keyword.type == u"teardown":
1261                 self.visit_teardown_kw(keyword)
1262             else:
1263                 self.visit_test_kw(keyword)
1264         except AttributeError:
1265             pass
1266
1267     def end_keyword(self, keyword):
1268         """Called when keyword ends. Default implementation does nothing.
1269
1270         :param keyword: Keyword to process.
1271         :type keyword: Keyword
1272         :returns: Nothing.
1273         """
1274
1275     def visit_test_kw(self, test_kw):
1276         """Implements traversing through the test keyword and its child
1277         keywords.
1278
1279         :param test_kw: Keyword to process.
1280         :type test_kw: Keyword
1281         :returns: Nothing.
1282         """
1283         for keyword in test_kw.keywords:
1284             if self.start_test_kw(keyword) is not False:
1285                 self.visit_test_kw(keyword)
1286                 self.end_test_kw(keyword)
1287
1288     def start_test_kw(self, test_kw):
1289         """Called when test keyword starts. Default implementation does
1290         nothing.
1291
1292         :param test_kw: Keyword to process.
1293         :type test_kw: Keyword
1294         :returns: Nothing.
1295         """
1296         if self._for_output == u"trending":
1297             return
1298
1299         if test_kw.name.count(u"Run Telemetry On All Duts"):
1300             self._msg_type = u"test-telemetry"
1301             self._telemetry_kw_counter += 1
1302         elif test_kw.name.count(u"Show Runtime On All Duts"):
1303             self._msg_type = u"test-show-runtime"
1304             self._sh_run_counter += 1
1305         else:
1306             return
1307         test_kw.messages.visit(self)
1308
1309     def end_test_kw(self, test_kw):
1310         """Called when keyword ends. Default implementation does nothing.
1311
1312         :param test_kw: Keyword to process.
1313         :type test_kw: Keyword
1314         :returns: Nothing.
1315         """
1316
1317     def visit_setup_kw(self, setup_kw):
1318         """Implements traversing through the setup keyword and its child
1319         keywords.
1320
1321         :param setup_kw: Keyword to process.
1322         :type setup_kw: Keyword
1323         :returns: Nothing.
1324         """
1325         for keyword in setup_kw.keywords:
1326             if self.start_setup_kw(keyword) is not False:
1327                 self.visit_setup_kw(keyword)
1328                 self.end_setup_kw(keyword)
1329
1330     def start_setup_kw(self, setup_kw):
1331         """Called when setup keyword starts. Default implementation does
1332         nothing.
1333
1334         :param setup_kw: Keyword to process.
1335         :type setup_kw: Keyword
1336         :returns: Nothing.
1337         """
1338         if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1339                 and not self._version:
1340             self._msg_type = u"vpp-version"
1341         elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
1342                 not self._version:
1343             self._msg_type = u"dpdk-version"
1344         elif setup_kw.name.count(u"Set Global Variable") \
1345                 and not self._timestamp:
1346             self._msg_type = u"timestamp"
1347         elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1348             self._msg_type = u"testbed"
1349         else:
1350             return
1351         setup_kw.messages.visit(self)
1352
1353     def end_setup_kw(self, setup_kw):
1354         """Called when keyword ends. Default implementation does nothing.
1355
1356         :param setup_kw: Keyword to process.
1357         :type setup_kw: Keyword
1358         :returns: Nothing.
1359         """
1360
1361     def visit_teardown_kw(self, teardown_kw):
1362         """Implements traversing through the teardown keyword and its child
1363         keywords.
1364
1365         :param teardown_kw: Keyword to process.
1366         :type teardown_kw: Keyword
1367         :returns: Nothing.
1368         """
1369         for keyword in teardown_kw.keywords:
1370             if self.start_teardown_kw(keyword) is not False:
1371                 self.visit_teardown_kw(keyword)
1372                 self.end_teardown_kw(keyword)
1373
1374     def start_teardown_kw(self, teardown_kw):
1375         """Called when teardown keyword starts.
1376
1377         :param teardown_kw: Keyword to process.
1378         :type teardown_kw: Keyword
1379         :returns: Nothing.
1380         """
1381         if teardown_kw.name.count(u"Show Papi History On All Duts"):
1382             self._conf_history_lookup_nr = 0
1383             self._msg_type = u"teardown-papi-history"
1384             teardown_kw.messages.visit(self)
1385
1386     def end_teardown_kw(self, teardown_kw):
1387         """Called when keyword ends. Default implementation does nothing.
1388
1389         :param teardown_kw: Keyword to process.
1390         :type teardown_kw: Keyword
1391         :returns: Nothing.
1392         """
1393
1394     def visit_message(self, msg):
1395         """Implements visiting the message.
1396
1397         :param msg: Message to process.
1398         :type msg: Message
1399         :returns: Nothing.
1400         """
1401         if self.start_message(msg) is not False:
1402             self.end_message(msg)
1403
1404     def start_message(self, msg):
1405         """Called when message starts. Dispatches the message to the
1406         parser selected by the current message type (see self.parse_msg).
1407
1408         :param msg: Message to process.
1409         :type msg: Message
1410         :returns: Nothing.
1411         """
1412         if self._msg_type:
1413             self.parse_msg[self._msg_type](msg)
1414
1415     def end_message(self, msg):
1416         """Called when message ends. Default implementation does nothing.
1417
1418         :param msg: Message to process.
1419         :type msg: Message
1420         :returns: Nothing.
1421         """
1422
1423
1424 class InputData:
1425     """Input data
1426
1427     The data is extracted from output.xml files generated by Jenkins jobs and
1428     stored in pandas' DataFrames.
1429
1430     The data structure:
1431     - job name
1432       - build number
1433         - metadata
1434           (as described in ExecutionChecker documentation)
1435         - suites
1436           (as described in ExecutionChecker documentation)
1437         - tests
1438           (as described in ExecutionChecker documentation)
1439     """
1440
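    # Typical read access, assuming the instance has already been populated
    # from downloaded output.xml files (the job and build values below are
    # illustrative only):
    #
    #     input_data = InputData(spec, u"report")
    #     # ... after the builds have been downloaded and parsed ...
    #     tests = input_data.tests(u"csit-vpp-perf-report-iterative", u"42")
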
1441     def __init__(self, spec, for_output):
1442         """Initialization.
1443
1444         :param spec: Specification.
1445         :param for_output: Output to be generated from downloaded data.
1446         :type spec: Specification
1447         :type for_output: str
1448         """
1449
1450         # Specification:
1451         self._cfg = spec
1452
1453         self._for_output = for_output
1454
1455         # Data store:
1456         self._input_data = pd.Series()
1457
1458     @property
1459     def data(self):
1460         """Getter - Input data.
1461
1462         :returns: Input data
1463         :rtype: pandas.Series
1464         """
1465         return self._input_data
1466
1467     def metadata(self, job, build):
1468         """Getter - metadata
1469
1470         :param job: Job which metadata we want.
1471         :param build: Build which metadata we want.
1472         :type job: str
1473         :type build: str
1474         :returns: Metadata
1475         :rtype: pandas.Series
1476         """
1477         return self.data[job][build][u"metadata"]
1478
1479     def suites(self, job, build):
1480         """Getter - suites
1481
1482         :param job: Job which suites we want.
1483         :param build: Build which suites we want.
1484         :type job: str
1485         :type build: str
1486         :returns: Suites.
1487         :rtype: pandas.Series
1488         """
1489         return self.data[job][str(build)][u"suites"]
1490
1491     def tests(self, job, build):
1492         """Getter - tests
1493
1494         :param job: Job whose tests we want.
1495         :param build: Build whose tests we want.
1496         :type job: str
1497         :type build: str
1498         :returns: Tests.
1499         :rtype: pandas.Series
1500         """
1501         return self.data[job][str(build)][u"tests"]
1502
1503     def _parse_tests(self, job, build):
1504         """Process data from robot output.xml file and return JSON structured
1505         data.
1506
1507         :param job: The name of the job whose build output data will be processed.
1508         :param build: The build whose output data will be processed.
1509         :type job: str
1510         :type build: dict
1511         :returns: JSON data structure.
1512         :rtype: dict
1513         """
1514
1515         metadata = {
1516             u"job": job,
1517             u"build": build
1518         }
1519
1520         with open(build[u"file-name"], u"r") as data_file:
1521             try:
1522                 result = ExecutionResult(data_file)
1523             except errors.DataError as err:
1524                 logging.error(
1525                     f"Error occurred while parsing output.xml: {repr(err)}"
1526                 )
1527                 return None
1528         checker = ExecutionChecker(
1529             metadata, self._cfg.mapping, self._cfg.ignore, self._for_output
1530         )
1531         result.visit(checker)
1532
1533         return checker.data
1534
1535     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1536         """Download and parse the input data file.
1537
1538         :param job: Name of the Jenkins job which generated the processed
1539             input file.
1540         :param build: Information about the Jenkins build which generated
1541             the processed input file.
1542         :param repeat: Repeat the download the specified number of times if
1543             not successful.
1544         :param pid: PID of the process executing this method.
1545         :type job: str
1546         :type build: dict
1547         :type repeat: int
1548         :type pid: int
1549         """
1550
1551         logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1552
1553         state = u"failed"
1554         success = False
1555         data = None
1556         do_repeat = repeat
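        # Retry the download up to 'repeat' times.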
1557         while do_repeat:
1558             success = download_and_unzip_data_file(self._cfg, job, build, pid)
1559             if success:
1560                 break
1561             do_repeat -= 1
1562         if not success:
1563             logging.error(
1564                 f"Failed to download the input data file for the "
1565                 f"job {job}, build {build[u'build']}, or the file is "
1566                 f"damaged. Skipped."
1567             )
1568         if success:
1569             logging.info(f"  Processing data from build {build[u'build']}")
1570             data = self._parse_tests(job, build)
1571             if data is None:
1572                 logging.error(
1573                     f"Input data file from the job {job}, build "
1574                     f"{build[u'build']} is damaged. Skipped."
1575                 )
1576             else:
1577                 state = u"processed"
1578
1579             try:
1580                 remove(build[u"file-name"])
1581             except OSError as err:
1582                 logging.error(
1583                     f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1584                 )
1585
1586         # If the time period is defined in the specification file, drop the
1587         # data from builds which are outside of it.
1588         is_last = False
1589         timeperiod = self._cfg.environment.get(u"time-period", None)
1590         if timeperiod and data:
1591             now = dt.utcnow()
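            # timedelta() takes days as its first argument, so u"time-period"
            # is interpreted as a number of days.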
1592             timeperiod = timedelta(int(timeperiod))
1593             metadata = data.get(u"metadata", None)
1594             if metadata:
1595                 generated = metadata.get(u"generated", None)
1596                 if generated:
1597                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1598                     if (now - generated) > timeperiod:
1599                         # Remove the data and the file:
1600                         state = u"removed"
1601                         data = None
1602                         is_last = True
1603                         logging.info(
1604                             f"  The build {job}/{build[u'build']} is "
1605                             f"outdated and will be removed."
1606                         )
1607         return {
1608             u"data": data,
1609             u"state": state,
1610             u"job": job,
1611             u"build": build,
1612             u"last": is_last
1613         }
1614
1615     def download_and_parse_data(self, repeat=1):
1616         """Download the input data files, parse them and store the data in
1617         pandas' Series.
1618
1619         :param repeat: Repeat the download specified number of times if not
1620             successful.
1621         :type repeat: int
1622         """
1623
1624         logging.info(u"Downloading and parsing input files ...")
1625
1626         for job, builds in self._cfg.input.items():
1627             for build in builds:
1628
1629                 result = self._download_and_parse_build(job, build, repeat)
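                # u"last" is set when the build fell outside the configured
                # time period; the remaining builds of this job are skipped.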
1630                 if result[u"last"]:
1631                     break
1632                 build_nr = result[u"build"][u"build"]
1633
1634                 if result[u"data"]:
1635                     data = result[u"data"]
1636                     build_data = pd.Series({
1637                         u"metadata": pd.Series(
1638                             list(data[u"metadata"].values()),
1639                             index=list(data[u"metadata"].keys())
1640                         ),
1641                         u"suites": pd.Series(
1642                             list(data[u"suites"].values()),
1643                             index=list(data[u"suites"].keys())
1644                         ),
1645                         u"tests": pd.Series(
1646                             list(data[u"tests"].values()),
1647                             index=list(data[u"tests"].keys())
1648                         )
1649                     })
1650
1651                     if self._input_data.get(job, None) is None:
1652                         self._input_data[job] = pd.Series()
1653                     self._input_data[job][str(build_nr)] = build_data
1654                     self._cfg.set_input_file_name(
1655                         job, build_nr, result[u"build"][u"file-name"]
1656                     )
1657                 self._cfg.set_input_state(job, build_nr, result[u"state"])
1658
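                # On Linux, ru_maxrss is reported in kilobytes, so dividing
                # by 1000 gives (approximately) megabytes.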
1659                 mem_alloc = \
1660                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1661                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1662
1663         logging.info(u"Done.")
1664
1665         msg = u"Successful downloads from the sources:\n"
1666         for source in self._cfg.environment[u"data-sources"]:
1667             if source[u"successful-downloads"]:
1668                 msg += (
1669                     f"{source[u'url']}/{source[u'path']}/"
1670                     f"{source[u'file-name']}: "
1671                     f"{source[u'successful-downloads']}\n"
1672                 )
1673         logging.info(msg)
1674
1675     def process_local_file(self, local_file, job=u"local", build_nr=1,
1676                            replace=True):
1677         """Process local XML file given as a command-line parameter.
1678
1679         :param local_file: The file to process.
1680         :param job: Job name.
1681         :param build_nr: Build number.
1682         :param replace: If True, the information about jobs and builds is
1683             replaced by the new one, otherwise the new jobs and builds are
1684             added.
1685         :type local_file: str
1686         :type job: str
1687         :type build_nr: int
1688         :type replace: bool
1689         :raises: PresentationError if an error occurs.
1690         """
1691         if not isfile(local_file):
1692             raise PresentationError(f"The file {local_file} does not exist.")
1693
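        # Try to derive the build number from the file name (e.g. u"1234.xml");
        # fall back to the given build_nr if that fails.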
1694         try:
1695             build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1696         except (IndexError, ValueError):
1697             pass
1698
1699         build = {
1700             u"build": build_nr,
1701             u"status": u"failed",
1702             u"file-name": local_file
1703         }
1704         if replace:
1705             self._cfg.input = dict()
1706         self._cfg.add_build(job, build)
1707
1708         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1709         data = self._parse_tests(job, build)
1710         if data is None:
1711             raise PresentationError(
1712                 f"Error occurred while parsing the file {local_file}"
1713             )
1714
1715         build_data = pd.Series({
1716             u"metadata": pd.Series(
1717                 list(data[u"metadata"].values()),
1718                 index=list(data[u"metadata"].keys())
1719             ),
1720             u"suites": pd.Series(
1721                 list(data[u"suites"].values()),
1722                 index=list(data[u"suites"].keys())
1723             ),
1724             u"tests": pd.Series(
1725                 list(data[u"tests"].values()),
1726                 index=list(data[u"tests"].keys())
1727             )
1728         })
1729
1730         if self._input_data.get(job, None) is None:
1731             self._input_data[job] = pd.Series()
1732         self._input_data[job][str(build_nr)] = build_data
1733
1734         self._cfg.set_input_state(job, build_nr, u"processed")
1735
1736     def process_local_directory(self, local_dir, replace=True):
1737         """Process a local directory with XML file(s). The directory is
1738         processed as a 'job' and the XML files in it as its builds.
1739         If the given directory contains only sub-directories, these
1740         sub-directories are processed as jobs and the corresponding XML files
1741         as builds of their respective jobs.
1742
1743         :param local_dir: Local directory to process.
1744         :param replace: If True, the information about jobs and builds is
1745             replaced by the new one, otherwise the new jobs and builds are
1746             added.
1747         :type local_dir: str
1748         :type replace: bool
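        :raises: PresentationError if the directory does not exist, is empty,
            or contains both files and sub-directories.

        Example (illustrative) directory layouts:

        - local_dir/1.xml, local_dir/2.xml
          ... one job (local_dir) with two builds,
        - local_dir/job_a/1.xml, local_dir/job_b/1.xml
          ... two jobs (job_a, job_b) with one build each.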
1749         """
1750         if not isdir(local_dir):
1751             raise PresentationError(
1752                 f"The directory {local_dir} does not exist."
1753             )
1754
1755         # Check whether the given directory contains only files or only directories.
1756         _, dirnames, filenames = next(walk(local_dir))
1757
1758         if filenames and not dirnames:
1759             filenames.sort()
1760             # local_builds:
1761             # key: dir (job) name, value: list of file names (builds)
1762             local_builds = {
1763                 local_dir: [join(local_dir, name) for name in filenames]
1764             }
1765
1766         elif dirnames and not filenames:
1767             dirnames.sort()
1768             # local_builds:
1769             # key: dir (job) name, value: list of file names (builds)
1770             local_builds = dict()
1771             for dirname in dirnames:
1772                 builds = [
1773                     join(local_dir, dirname, name)
1774                     for name in listdir(join(local_dir, dirname))
1775                     if isfile(join(local_dir, dirname, name))
1776                 ]
1777                 if builds:
1778                     local_builds[dirname] = sorted(builds)
1779
1780         elif not filenames and not dirnames:
1781             raise PresentationError(f"The directory {local_dir} is empty.")
1782         else:
1783             raise PresentationError(
1784                 f"The directory {local_dir} can include only files or only "
1785                 f"directories, not both.\nThe directory {local_dir} includes "
1786                 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1787             )
1788
1789         if replace:
1790             self._cfg.input = dict()
1791
1792         for job, files in local_builds.items():
1793             for idx, local_file in enumerate(files):
1794                 self.process_local_file(local_file, job, idx + 1, replace=False)
1795
1796     @staticmethod
1797     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1798         """Return the index of the character which ends the tag in the string.
1799
1800         :param tag_filter: The string in which the end of the tag is searched.
1801         :param start: The index at which the search starts.
1802         :param closer: The character which closes the tag.
1803         :type tag_filter: str
1804         :type start: int
1805         :type closer: str
1806         :returns: The index of the tag closer, or None if no closer is found.
1807         :rtype: int
1808         """
1809         try:
1810             idx_opener = tag_filter.index(closer, start)
1811             return tag_filter.index(closer, idx_opener + 1)
1812         except ValueError:
1813             return None
1814
1815     @staticmethod
1816     def _condition(tag_filter):
1817         """Create a conditional statement from the given tag filter.
1818
1819         :param tag_filter: Filter based on tags from the element specification.
1820         :type tag_filter: str
1821         :returns: Conditional statement which can be evaluated.
1822         :rtype: str
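
        Example (traced from the implementation): the filter
        u"'NDRPDR' and '1C'" is rewritten to
        u"'NDRPDR' in tags and '1C' in tags", which filter_data() then
        evaluates against each test's tags.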
1823         """
1824         index = 0
1825         while True:
1826             index = InputData._end_of_tag(tag_filter, index)
1827             if index is None:
1828                 return tag_filter
1829             index += 1
1830             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1831
1832     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1833                     continue_on_error=False):
1834         """Filter required data from the given jobs and builds.
1835
1836         The output data structure is:
1837         - job 1
1838           - build 1
1839             - test (or suite) 1 ID:
1840               - param 1
1841               - param 2
1842               ...
1843               - param n
1844             ...
1845             - test (or suite) n ID:
1846             ...
1847           ...
1848           - build n
1849         ...
1850         - job n
1851
1852         :param element: Element which will use the filtered data.
1853         :param params: Parameters which will be included in the output. If None,
1854             all parameters are included.
1855         :param data: If not None, this data is used instead of data specified
1856             in the element.
1857         :param data_set: The set of data to be filtered: tests, suites,
1858             metadata.
1859         :param continue_on_error: Continue if an error occurs while reading
1860             the data; the item will be empty then.
1861         :type element: pandas.Series
1862         :type params: list
1863         :type data: dict
1864         :type data_set: str
1865         :type continue_on_error: bool
1866         :returns: Filtered data.
1867         :rtype: pandas.Series
1868         """
1869
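        # Suites and the catch-all filters (u"all", u"template") match
        # everything; any other filter is rewritten into an eval()-able
        # condition over the test's tags.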
1870         try:
1871             if data_set == u"suites":
1872                 cond = u"True"
1873             elif element[u"filter"] in (u"all", u"template"):
1874                 cond = u"True"
1875             else:
1876                 cond = InputData._condition(element[u"filter"])
1877             logging.debug(f"   Filter: {cond}")
1878         except KeyError:
1879             logging.error(u"  No filter defined.")
1880             return None
1881
1882         if params is None:
1883             params = element.get(u"parameters", None)
1884             if params:
1885                 params.extend((u"type", u"status"))
1886
1887         data_to_filter = data if data else element[u"data"]
1888         data = pd.Series()
1889         try:
1890             for job, builds in data_to_filter.items():
1891                 data[job] = pd.Series()
1892                 for build in builds:
1893                     data[job][str(build)] = pd.Series()
1894                     try:
1895                         data_dict = dict(
1896                             self.data[job][str(build)][data_set].items())
1897                     except KeyError:
1898                         if continue_on_error:
1899                             continue
1900                         return None
1901
1902                     for test_id, test_data in data_dict.items():
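                        # 'cond' is either u"True" or an expression over the
                        # name u"tags" (e.g. u"'NDRPDR' in tags"), evaluated
                        # with the test's tags as the only global.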
1903                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1904                             data[job][str(build)][test_id] = pd.Series()
1905                             if params is None:
1906                                 for param, val in test_data.items():
1907                                     data[job][str(build)][test_id][param] = val
1908                             else:
1909                                 for param in params:
1910                                     try:
1911                                         data[job][str(build)][test_id][param] =\
1912                                             test_data[param]
1913                                     except KeyError:
1914                                         data[job][str(build)][test_id][param] =\
1915                                             u"No Data"
1916             return data
1917
1918         except (KeyError, IndexError, ValueError) as err:
1919             logging.error(
1920                 f"Missing mandatory parameter in the element specification: "
1921                 f"{repr(err)}"
1922             )
1923             return None
1924         except AttributeError as err:
1925             logging.error(repr(err))
1926             return None
1927         except SyntaxError as err:
1928             logging.error(
1929                 f"The filter {cond} is not correct. Check if all tags are "
1930                 f"enclosed by apostrophes.\n{repr(err)}"
1931             )
1932             return None
1933
1934     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1935                              continue_on_error=False):
1936         """Filter required data from the given jobs and builds.
1937
1938         The output data structure is:
1939         - job 1
1940           - build 1
1941             - test (or suite) 1 ID:
1942               - param 1
1943               - param 2
1944               ...
1945               - param n
1946             ...
1947             - test (or suite) n ID:
1948             ...
1949           ...
1950           - build n
1951         ...
1952         - job n
1953
1954         :param element: Element which will use the filtered data.
1955         :param params: Parameters which will be included in the output. If
1956             None, all parameters are included.
1957         :param data_set: The set of data to be filtered: tests, suites,
1958             metadata.
1959         :param continue_on_error: Continue if an error occurs while reading
1960             the data; the item will be empty then.
1961         :type element: pandas.Series
1962         :type params: list
1963         :type data_set: str
1964         :type continue_on_error: bool
1965         :returns: Filtered data.
1966         :rtype: pandas.Series
1967         """
1968
1969         include = element.get(u"include", None)
1970         if not include:
1971             logging.warning(u"No tests to include, skipping the element.")
1972             return None
1973
1974         if params is None:
1975             params = element.get(u"parameters", None)
1976             if params and u"type" not in params:
1977                 params.append(u"type")
1978
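        # Entries in u"include" may contain a u"{core}" placeholder which is
        # expanded for every requested core count, yielding one test name
        # pattern per (test, core) pair.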
1979         cores = element.get(u"core", None)
1980         if cores:
1981             tests = list()
1982             for core in cores:
1983                 for test in include:
1984                     tests.append(test.format(core=core))
1985         else:
1986             tests = include
1987
1988         data = pd.Series()
1989         try:
1990             for job, builds in element[u"data"].items():
1991                 data[job] = pd.Series()
1992                 for build in builds:
1993                     data[job][str(build)] = pd.Series()
1994                     for test in tests:
1995                         try:
1996                             reg_ex = re.compile(str(test).lower())
1997                             for test_id in self.data[job][
1998                                     str(build)][data_set].keys():
1999                                 if reg_ex.match(str(test_id).lower()):
2000                                     test_data = self.data[job][
2001                                         str(build)][data_set][test_id]
2002                                     data[job][str(build)][test_id] = pd.Series()
2003                                     if params is None:
2004                                         for param, val in test_data.items():
2005                                             data[job][str(build)][test_id]\
2006                                                 [param] = val
2007                                     else:
2008                                         for param in params:
2009                                             try:
2010                                                 data[job][str(build)][
2011                                                     test_id][param] = \
2012                                                     test_data[param]
2013                                             except KeyError:
2014                                                 data[job][str(build)][
2015                                                     test_id][param] = u"No Data"
2016                         except KeyError as err:
2017                             if continue_on_error:
2018                                 logging.debug(repr(err))
2019                                 continue
2020                             logging.error(repr(err))
2021                             return None
2022             return data
2023
2024         except (KeyError, IndexError, ValueError) as err:
2025             logging.error(
2026                 f"Missing mandatory parameter in the element "
2027                 f"specification: {repr(err)}"
2028             )
2029             return None
2030         except AttributeError as err:
2031             logging.error(repr(err))
2032             return None
2033
2034     @staticmethod
2035     def merge_data(data):
2036         """Merge data from multiple jobs and builds into a simple data structure.
2037
2038         The output data structure is:
2039
2040         - test (suite) 1 ID:
2041           - param 1
2042           - param 2
2043           ...
2044           - param n
2045         ...
2046         - test (suite) n ID:
2047         ...
2048
2049         :param data: Data to merge.
2050         :type data: pandas.Series
2051         :returns: Merged data.
2052         :rtype: pandas.Series
2053         """
2054
2055         logging.info(u"    Merging data ...")
2056
2057         merged_data = pd.Series()
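        # Later builds overwrite items with the same ID from earlier ones.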
2058         for builds in data.values:
2059             for item in builds.values:
2060                 for item_id, item_data in item.items():
2061                     merged_data[item_id] = item_data
2062         return merged_data
2063
2064     def print_all_oper_data(self):
2065         """Print all operational data to the console.
2066         """
2067
2068         for job in self._input_data.values:
2069             for build in job.values:
2070                 for test_id, test_data in build[u"tests"].items():
2071                     print(f"{test_id}")
2072                     if test_data.get(u"show-run", None) is None:
2073                         continue
2074                     for dut_name, data in test_data[u"show-run"].items():
2075                         if data.get(u"runtime", None) is None:
2076                             continue
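                        # u"runtime" is a JSON-encoded list of per-node
                        # records; each record holds per-thread lists of
                        # clocks, vectors, calls and suspends counters.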
2077                         runtime = loads(data[u"runtime"])
2078                         try:
2079                             threads_nr = len(runtime[0][u"clocks"])
2080                         except (IndexError, KeyError):
2081                             continue
2082                         threads = OrderedDict(
2083                             {idx: list() for idx in range(threads_nr)})
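                        # Compute cycles per packet, preferring vectors, then
                        # calls, then suspends as the divisor.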
2084                         for item in runtime:
2085                             for idx in range(threads_nr):
2086                                 if item[u"vectors"][idx] > 0:
2087                                     clocks = item[u"clocks"][idx] / \
2088                                              item[u"vectors"][idx]
2089                                 elif item[u"calls"][idx] > 0:
2090                                     clocks = item[u"clocks"][idx] / \
2091                                              item[u"calls"][idx]
2092                                 elif item[u"suspends"][idx] > 0:
2093                                     clocks = item[u"clocks"][idx] / \
2094                                              item[u"suspends"][idx]
2095                                 else:
2096                                     clocks = 0.0
2097
2098                                 if item[u"calls"][idx] > 0:
2099                                     vectors_call = item[u"vectors"][idx] / \
2100                                                    item[u"calls"][idx]
2101                                 else:
2102                                     vectors_call = 0.0
2103
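                                # Record the node only if it saw any calls,
                                # vectors or suspends on this thread.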
2104                                 if int(item[u"calls"][idx]) + int(
2105                                         item[u"vectors"][idx]) + \
2106                                         int(item[u"suspends"][idx]):
2107                                     threads[idx].append([
2108                                         item[u"name"],
2109                                         item[u"calls"][idx],
2110                                         item[u"vectors"][idx],
2111                                         item[u"suspends"][idx],
2112                                         clocks,
2113                                         vectors_call
2114                                     ])
2115
2116                         print(f"Host IP: {data.get(u'host', '')}, "
2117                               f"Socket: {data.get(u'socket', '')}")
2118                         for thread_nr, thread in threads.items():
2119                             txt_table = prettytable.PrettyTable(
2120                                 (
2121                                     u"Name",
2122                                     u"Nr of Vectors",
2123                                     u"Nr of Packets",
2124                                     u"Suspends",
2125                                     u"Cycles per Packet",
2126                                     u"Average Vector Size"
2127                                 )
2128                             )
2129                             avg = 0.0
2130                             for row in thread:
2131                                 txt_table.add_row(row)
2132                                 avg += row[-1]
2133                             if len(thread) == 0:
2134                                 avg = u""
2135                             else:
2136                                 avg = f", Average Vector Size per Node: " \
2137                                       f"{(avg / len(thread)):.2f}"
2138                             th_name = u"main" if thread_nr == 0 \
2139                                 else f"worker_{thread_nr}"
2140                             print(f"{dut_name}, {th_name}{avg}")
2141                             txt_table.float_format = u".2"
2142                             txt_table.align = u"r"
2143                             txt_table.align[u"Name"] = u"l"
2144                             print(f"{txt_table.get_string()}\n")