# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """
21
import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove
from datetime import datetime as dt
from datetime import timedelta
from json import loads

import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file


# Separator used in file names
SEPARATOR = u"__"


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    }
                }
            },

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            },

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            },

            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """

    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+\.\d+).*\n'
                                r'PLRsearch upper bound::?\s(\d+\.\d+)')

    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+\.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+\.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+\.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+\.\d+)')

    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
    REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*\.[\de-]*)')

    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+\.\d+\.\d+\.\d+.) - (.*)\)')

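    # Illustrative shape of a test message fragment matched by
    # REGEX_NDRPDR_RATE (the units and the content of the skipped lines come
    # from the Robot test message and may differ):
    #
    #   NDR_LOWER: 12345678.5 pps ...
    #   <one arbitrary line>
    #   NDR_UPPER: 12345680.5 pps ...
    #   PDR_LOWER: 12345682.5 pps ...
    #   <one arbitrary line>
    #   PDR_UPPER: 12345684.5 pps ...
    #
    # _get_ndrpdr_throughput() converts groups 1-4 of this match to floats.
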
    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) one.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        # Number of PAPI History messages found:
        # 0 - no message
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._lookup_kw_nr = 0
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 0 - no message
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of the currently processed test - the lowercase full path to
        # the test
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"timestamp": self._get_timestamp,
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            # TODO: Remove when not needed:
            u"teardown-vat-history": self._get_vat_history,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_testbed(self, msg):
        """Called when extraction of the testbed IP is required.
        The testbed is identified by the TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"return STDOUT Version:") or \
            msg.message.count(u"VPP Version:") or \
            msg.message.count(u"VPP version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

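        # Robot framework message timestamps look like u"20191023 09:34:56.123";
        # the first 14 characters (u"20191023 09:34") match the u"%Y%m%d %H:%M"
        # format parsed later in _download_and_parse_build().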
        self._timestamp = msg.timestamp[:14]
        self._data[u"metadata"][u"generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        TODO: Remove when not needed.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} "
                          r"VAT command history:", u"", msg.message, count=1). \
                replace(u"\n\n", u"\n").replace(u'\n', u' |br| ').\
                replace(u'\r', u'').replace(u'"', u"'")

            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{self._conf_history_lookup_nr}:** {text}"
            )

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} "
                          r"PAPI command history:", u"",
                          msg.message, count=1). \
                replace(u"\n\n", u"\n").replace(u'\n', u' |br| ').\
                replace(u'\r', u'').replace(u'"', u"'")

            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{self._conf_history_lookup_nr}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
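        # The message payload (everything after the u"stats runtime" header,
        # once normalised below) is expected to be a JSON list of per-node
        # dicts, e.g. (illustrative):
        #
        #   [{"name": "ip4-lookup", "calls": [...], "vectors": [...],
        #     "suspends": [...], "clocks": [...]}, ...]
        #
        # where each per-key list holds one value per VPP thread.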
        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = str()

        if msg.message.count(u"stats runtime"):
            host = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
                       group(1))
            socket = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
                         group(2))
            runtime = loads(
                str(msg.message).
                replace(u' ', u'').
                replace(u'\n', u'').
                replace(u"'", u'"').
                replace(u'b"', u'"').
                replace(u'u"', u'"').
                split(u":", 1)[1]
            )
            try:
                threads_nr = len(runtime[0][u"clocks"])
            except (IndexError, KeyError):
                return
            tbl_hdr = [
                u"Name",
                u"Calls",
                u"Vectors",
                u"Suspends",
                u"Clocks",
                u"Vectors/Calls"
            ]
            table = [[tbl_hdr, ] for _ in range(threads_nr)]
            for item in runtime:
                for idx in range(threads_nr):
                    name = format(item[u"name"])
                    calls = format(item[u"calls"][idx])
                    vectors = format(item[u"vectors"][idx])
                    suspends = format(item[u"suspends"][idx])
                    if item[u"vectors"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"vectors"][idx], u".2e")
                    elif item[u"calls"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"calls"][idx], u".2e")
                    elif item[u"suspends"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"suspends"][idx], u".2e")
                    else:
                        clocks = 0
                    if item[u"calls"][idx] > 0:
                        vectors_call = format(
                            item[u"vectors"][idx]/item[u"calls"][idx], u".2f")
                    else:
                        vectors_call = format(0, u".2f")
                    if int(calls) + int(vectors) + int(suspends):
                        table[idx].append([
                            name, calls, vectors, suspends, clocks, vectors_call
                        ])
            text = u""
            for idx in range(threads_nr):
                text += f"Thread {idx} "
                text += u"vpp_main\n" if idx == 0 else f"vpp_wk_{idx-1}\n"
                txt_table = None
                for row in table[idx]:
                    if txt_table is None:
                        txt_table = prettytable.PrettyTable(row)
                    else:
                        if any(row[1:]):
                            txt_table.add_row(row)
                txt_table.set_style(prettytable.MSWORD_FRIENDLY)
                txt_table.align[u"Name"] = u"l"
                txt_table.align[u"Calls"] = u"r"
                txt_table.align[u"Vectors"] = u"r"
                txt_table.align[u"Suspends"] = u"r"
                txt_table.align[u"Clocks"] = u"r"
                txt_table.align[u"Vectors/Calls"] = u"r"

                text += txt_table.get_string(sortby=u"Name") + u'\n'
            text = f" \n **DUT: {host}/{socket}** \n {text}".\
                replace(u'\n', u' |br| ').\
                replace(u'\r', u'').\
                replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"show-run"] += text

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0,
            u"avg": -1.0,
            u"max": -1.0,
            u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            }
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define a class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with min/avg/max as floats and hdrh as a string.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
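
            Example (illustrative; derived from the parsing below):

                process_latency(u"1/2/9/hdrh_payload") returns
                {u"min": 1.0, u"avg": 2.0, u"max": 9.0, u"hdrh": u"hdrh_payload"}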
            """
            in_list = in_str.split('/')

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

        if groups is not None:
            try:
                latency[u"NDR"][u"direction1"] = \
                    process_latency(groups.group(1))
                latency[u"NDR"][u"direction2"] = \
                    process_latency(groups.group(2))
                latency[u"PDR"][u"direction1"] = \
                    process_latency(groups.group(3))
                latency[u"PDR"][u"direction2"] = \
                    process_latency(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return latency, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'*[', u' |br| *[').\
            replace(u"*", u"**").\
            replace(u' |br| *[', u'*[', 1)

        self._data[u"suites"][suite.longname.lower().
                              replace(u'"', u"'").
                              replace(u" ", u"_")] = {
                                  u"name": suite.name.lower(),
                                  u"doc": doc_str,
                                  u"parent": parent_name,
                                  u"level": len(suite.longname.split(u"."))
                              }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            logging.debug(
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
                f"{name}"
            )
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove the TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
        # Remove the TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, u"", name)

        test_result[u"parent"] = test.parent.name.lower()
        test_result[u"tags"] = tags
        test_result[u"doc"] = test.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'[', u' |br| [').\
            replace(u' |br| [', u'[', 1)
        test_result[u"msg"] = test.message.\
            replace(u'\n', u' |br| ').\
            replace(u'\r', u'').\
            replace(u'"', u"'")
        test_result[u"type"] = u"FUNC"
        test_result[u"status"] = test.status

        if u"PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
            if not groups:
                tag_count = 0
                tag_tc = str()
                for tag in test_result[u"tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
                                           f"-{tag_tc.lower()}-",
                                           self._test_id,
                                           count=1)
                    test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                  f"-{tag_tc.lower()}-",
                                                  test_result[u"name"],
                                                  count=1)
                else:
                    test_result[u"status"] = u"FAIL"
                    self._data[u"tests"][self._test_id] = test_result
                    logging.debug(
                        f"The test {self._test_id} has no or more than one "
                        f"multi-threading tag.\n"
                        f"Tags: {test_result[u'tags']}"
                    )
                    return

        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                test_result[u"type"] = u"NDRPDR"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
            elif u"SOAK" in tags:
                test_result[u"type"] = u"SOAK"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
            elif u"TCP" in tags:
                test_result[u"type"] = u"TCP"
                groups = re.search(self.REGEX_TCP, test.message)
                test_result[u"result"] = int(groups.group(2))
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                if u"MRR" in tags:
                    test_result[u"type"] = u"MRR"
                else:
                    test_result[u"type"] = u"BMRR"

                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    # Use the whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"receive-rate"] = stats.avg
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
            elif u"RECONF" in tags:
                test_result[u"type"] = u"RECONF"
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
            else:
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                return

        self._data[u"tests"][self._test_id] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Dispatches to the setup, teardown or
        test keyword visitor.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == u"setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == u"teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
            else:
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Sets the message type for the
        keywords of interest and visits their messages.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count(u"Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = u"test-show-runtime"
        elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
            self._msg_type = u"dpdk-version"
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Sets the message type for the
        keywords of interest and visits their messages.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Set Global Variable") \
                and not self._timestamp:
            self._msg_type = u"timestamp"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count(u"Show Vat History On All Duts"):
            # TODO: Remove when not needed:
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Parses the message using the handler
        selected by the current message type (e.g. VPP version, timestamp,
        show runtime).

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

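# ExecutionChecker is normally driven by InputData below; a minimal direct
# use (illustrative sketch mirroring InputData._parse_tests) would be:
#
#     result = ExecutionResult(u"output.xml")
#     checker = ExecutionChecker(
#         metadata={u"job": u"job-name", u"build": u"1"},
#         mapping=dict(), ignore=list())
#     result.visit(checker)
#     parsed = checker.data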

class InputData:
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' Series.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """

    def __init__(self, spec):
        """Initialization.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data
        :rtype: pandas.Series
        """
        return self._input_data

    def metadata(self, job, build):
        """Getter - metadata

        :param job: Job which metadata we want.
        :param build: Build which metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata
        :rtype: pandas.Series
        """

        return self.data[job][build][u"metadata"]

    def suites(self, job, build):
        """Getter - suites

        :param job: Job which suites we want.
        :param build: Build which suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)][u"suites"]

    def tests(self, job, build):
        """Getter - tests

        :param job: Job which tests we want.
        :param build: Build which tests we want.
        :type job: str
        :type build: str
        :returns: Tests.
        :rtype: pandas.Series
        """

        return self.data[job][build][u"tests"]

    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of job which build output data will be processed.
        :param build: The build which output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """

        metadata = {
            u"job": job,
            u"build": build
        }

        with open(build[u"file-name"], u'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(
                    (u"ERROR", f"Error occurred while parsing output.xml: "
                               f"{repr(err)}")
                )
                return None
        checker = ExecutionChecker(metadata, self._cfg.mapping,
                                   self._cfg.ignore)
        result.visit(checker)

        return checker.data

    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :param pid: PID of the process executing this method.
        :type job: str
        :type build: dict
        :type repeat: int
        :type pid: int
        """

        logs = list()

        logs.append(
            (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
        )

        state = u"failed"
        success = False
        data = None
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                                   logs)
            if success:
                break
            do_repeat -= 1
        if not success:
            logs.append(
                (u"ERROR",
                 f"It is not possible to download the input data file from the "
                 f"job {job}, build {build[u'build']}, or it is damaged. "
                 f"Skipped.")
            )
        if success:
            logs.append(
                (u"INFO",
                 f"    Processing data from the build {build[u'build']} ...")
            )
            data = self._parse_tests(job, build, logs)
            if data is None:
                logs.append(
                    (u"ERROR",
                     f"Input data file from the job {job}, build "
                     f"{build[u'build']} is damaged. Skipped.")
                )
            else:
                state = u"processed"

            try:
                remove(build[u"file-name"])
            except OSError as err:
                logs.append(
                    (u"ERROR", f"Cannot remove the file {build[u'file-name']}: "
                               f"{repr(err)}")
                )

        # If the time period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get(u"time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get(u"metadata", None)
            if metadata:
                generated = metadata.get(u"generated", None)
                if generated:
                    generated = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = u"removed"
                        data = None
                        logs.append(
                            (u"INFO",
                             f"    The build {job}/{build[u'build']} is "
                             f"outdated, will be removed.")
                        )
        logs.append((u"INFO", u"  Done."))

        for level, line in logs:
            if level == u"INFO":
                logging.info(line)
            elif level == u"ERROR":
                logging.error(line)
            elif level == u"DEBUG":
                logging.debug(line)
            elif level == u"CRITICAL":
                logging.critical(line)
            elif level == u"WARNING":
                logging.warning(line)

        return {u"data": data, u"state": state, u"job": job, u"build": build}

    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """

        logging.info(u"Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result[u"build"][u"build"]

                if result[u"data"]:
                    data = result[u"data"]
                    build_data = pd.Series({
                        u"metadata": pd.Series(
                            list(data[u"metadata"].values()),
                            index=list(data[u"metadata"].keys())
                        ),
                        u"suites": pd.Series(
                            list(data[u"suites"].values()),
                            index=list(data[u"suites"].keys())
                        ),
                        u"tests": pd.Series(
                            list(data[u"tests"].values()),
                            index=list(data[u"tests"].keys())
                        )
                    })

                    if self._input_data.get(job, None) is None:
                        self._input_data[job] = pd.Series()
                    self._input_data[job][str(build_nr)] = build_data

                    self._cfg.set_input_file_name(
                        job, build_nr, result[u"build"][u"file-name"])

                self._cfg.set_input_state(job, build_nr, result[u"state"])

                mem_alloc = \
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
                logging.info(f"Memory allocation: {mem_alloc:.0f}MB")

        logging.info(u"Done.")

    @staticmethod
    def _end_of_tag(tag_filter, start=0, closer=u"'"):
        """Return the index of the character in the string which is the end of
        the tag.

        :param tag_filter: The string where the end of the tag is searched.
        :param start: The index where the searching is started.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer.
        :rtype: int
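
        Example (illustrative):

            _end_of_tag(u"'NDRPDR' and '1T1C'") returns 7, the index of the
            apostrophe closing u"'NDRPDR'".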
        """

        try:
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None

    @staticmethod
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
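
        Example (illustrative):

            _condition(u"'NDRPDR' and not 'VHOST'") returns
            u"'NDRPDR' in tags and not 'VHOST' in tags"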
        """

        index = 0
        while True:
            index = InputData._end_of_tag(tag_filter, index)
            if index is None:
                return tag_filter
            index += 1
            tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]

    def filter_data(self, element, params=None, data=None, data_set=u"tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data: If not None, this data is used instead of the data
            specified in the element.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
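
        Example of a tag filter (illustrative; each tag must be enclosed in
        apostrophes, combined with Python boolean operators):

            element[u"filter"] = u"'64B' and '1T1C' and ('IP4FWD' or 'L2XCFWD')"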
        """

        try:
            if element[u"filter"] in (u"all", u"template"):
                cond = u"True"
            else:
                cond = InputData._condition(element[u"filter"])
            logging.debug(f"   Filter: {cond}")
        except KeyError:
            logging.error(u"  No filter defined.")
            return None

        if params is None:
            params = element.get(u"parameters", None)
            if params:
                params.append(u"type")

        data_to_filter = data if data else element[u"data"]
        data = pd.Series()
        try:
            for job, builds in data_to_filter.items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_dict = dict(
                            self.data[job][str(build)][data_set].items())
                    except KeyError:
                        if continue_on_error:
                            continue
                        return None

                    for test_id, test_data in data_dict.items():
                        if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
                            data[job][str(build)][test_id] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_id][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_id][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_id][param] =\
                                            u"No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(
                f"Missing mandatory parameter in the element specification: "
                f"{repr(err)}"
            )
            return None
        except AttributeError as err:
            logging.error(repr(err))
            return None
        except SyntaxError as err:
            logging.error(
                f"The filter {cond} is not correct. Check if all tags are "
                f"enclosed by apostrophes.\n{repr(err)}"
            )
            return None

    def filter_tests_by_name(self, element, params=None, data_set=u"tests",
                             continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """

        include = element.get(u"include", None)
        if not include:
            logging.warning(u"No tests to include, skipping the element.")
            return None

        if params is None:
            params = element.get(u"parameters", None)
            if params:
                params.append(u"type")

        data = pd.Series()
        try:
            for job, builds in element[u"data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    for test in include:
                        try:
                            reg_ex = re.compile(str(test).lower())
                            for test_id in self.data[job][
                                    str(build)][data_set].keys():
                                if re.match(reg_ex, str(test_id).lower()):
                                    test_data = self.data[job][
                                        str(build)][data_set][test_id]
                                    data[job][str(build)][test_id] = pd.Series()
                                    if params is None:
                                        for param, val in test_data.items():
                                            data[job][str(build)][test_id]\
                                                [param] = val
                                    else:
                                        for param in params:
                                            try:
                                                data[job][str(build)][
                                                    test_id][param] = \
                                                    test_data[param]
                                            except KeyError:
                                                data[job][str(build)][
                                                    test_id][param] = u"No Data"
                        except KeyError as err:
                            logging.error(repr(err))
                            if continue_on_error:
                                continue
                            return None
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(
                f"Missing mandatory parameter in the element "
                f"specification: {repr(err)}"
            )
            return None
        except AttributeError as err:
            logging.error(repr(err))
            return None

    @staticmethod
    def merge_data(data):
        """Merge data from multiple jobs and builds into a simple data
        structure.

        The output data structure is:

        - test (suite) 1 ID:
          - param 1
          - param 2
          ...
          - param n
        ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
        """

        logging.info(u"    Merging data ...")

        merged_data = pd.Series()
        for builds in data.values:
            for item in builds.values:
                for item_id, item_data in item.items():
                    merged_data[item_id] = item_data

        return merged_data
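

# A minimal usage sketch of this module (illustrative; `spec` stands for the
# parsed specification object handed over by the PAL entry point, `element`
# for one element of that specification):
#
#     input_data = InputData(spec)
#     input_data.download_and_parse_data(repeat=2)
#     filtered = input_data.filter_data(element)
#     merged = InputData.merge_data(filtered)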