Add support for HDRhistogram
[csit.git] / resources / tools / presentation / input_data_parser.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store it
17   in pandas' Series,
18 - filter the data using tags,
19 - provide access to the data.
20 """
21
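# Typical use of this module (a minimal sketch; "spec" is the parsed
# specification object and "element" one of its elements, both produced
# elsewhere in the presentation tool):
#
#     in_data = InputData(spec)
#     in_data.download_and_parse_data(repeat=2)
#     filtered = in_data.filter_data(element)
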
22 import copy
23 import re
24 import resource
25 import pandas as pd
26 import logging
27 import prettytable
28
29 from robot.api import ExecutionResult, ResultVisitor
30 from robot import errors
31 from collections import OrderedDict
32 from string import replace
33 from os import remove
34 from datetime import datetime as dt
35 from datetime import timedelta
36 from json import loads
37 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
38
39 from input_data_files import download_and_unzip_data_file
40
41
42 # Separator used in file names
43 SEPARATOR = "__"
44
45
46 class ExecutionChecker(ResultVisitor):
47     """Class to traverse through the test suite structure.
48
49     The functionality implemented in this class generates a JSON structure:
50
51     Performance tests:
52
53     {
54         "metadata": {
55             "generated": "Timestamp",
56             "version": "SUT version",
57             "job": "Jenkins job name",
58             "build": "Information about the build"
59         },
60         "suites": {
61             "Suite long name 1": {
62                 "name": Suite name,
63                 "doc": "Suite 1 documentation",
64                 "parent": "Suite 1 parent",
65                 "level": "Level of the suite in the suite hierarchy"
66             }
67             "Suite long name N": {
68                 "name": Suite name,
69                 "doc": "Suite N documentation",
70                 "parent": "Suite 2 parent",
71                 "level": "Level of the suite in the suite hierarchy"
72             }
73         }
74         "tests": {
75             # NDRPDR tests:
76             "ID": {
77                 "name": "Test name",
78                 "parent": "Name of the parent of the test",
79                 "doc": "Test documentation",
80                 "msg": "Test message",
81                 "conf-history": "DUT1 and DUT2 VAT History",
82                 "show-run": "Show Run",
83                 "tags": ["tag 1", "tag 2", "tag n"],
84                 "type": "NDRPDR",
85                 "status": "PASS" | "FAIL",
86                 "throughput": {
87                     "NDR": {
88                         "LOWER": float,
89                         "UPPER": float
90                     },
91                     "PDR": {
92                         "LOWER": float,
93                         "UPPER": float
94                     }
95                 },
96                 "latency": {
97                     "NDR": {
98                         "direction1": {
99                             "min": float,
100                             "avg": float,
101                             "max": float,
102                             "hdrh": str
103                         },
104                         "direction2": {
105                             "min": float,
106                             "avg": float,
107                             "max": float,
108                             "hdrh": str
109                         }
110                     },
111                     "PDR": {
112                         "direction1": {
113                             "min": float,
114                             "avg": float,
115                             "max": float,
116                             "hdrh": str
117                         },
118                         "direction2": {
119                             "min": float,
120                             "avg": float,
121                             "max": float,
122                             "hdrh": str
123                         }
124                     }
125                 }
126             }
127
128             # TCP tests:
129             "ID": {
130                 "name": "Test name",
131                 "parent": "Name of the parent of the test",
132                 "doc": "Test documentation",
133                 "msg": "Test message",
134                 "tags": ["tag 1", "tag 2", "tag n"],
135                 "type": "TCP",
136                 "status": "PASS" | "FAIL",
137                 "result": int
138             }
139
140             # MRR, BMRR tests:
141             "ID": {
142                 "name": "Test name",
143                 "parent": "Name of the parent of the test",
144                 "doc": "Test documentation",
145                 "msg": "Test message",
146                 "tags": ["tag 1", "tag 2", "tag n"],
147                 "type": "MRR" | "BMRR",
148                 "status": "PASS" | "FAIL",
149                 "result": {
150                     "receive-rate": AvgStdevMetadata,
151                 }
152             }
153
154             "ID" {
155                 # next test
156             }
157         }
158     }
159
160
161     Functional tests:
162
163     {
164         "metadata": {  # Optional
165             "version": "VPP version",
166             "job": "Jenkins job name",
167             "build": "Information about the build"
168         },
169         "suites": {
170             "Suite name 1": {
171                 "doc": "Suite 1 documentation",
172                 "parent": "Suite 1 parent",
173                 "level": "Level of the suite in the suite hierarchy"
174             }
175             "Suite name N": {
176                 "doc": "Suite N documentation",
177                 "parent": "Suite 2 parent",
178                 "level": "Level of the suite in the suite hierarchy"
179             }
180         }
181         "tests": {
182             "ID": {
183                 "name": "Test name",
184                 "parent": "Name of the parent of the test",
185                 "doc": "Test documentation"
186                 "msg": "Test message"
187                 "tags": ["tag 1", "tag 2", "tag n"],
188                 "conf-history": "DUT1 and DUT2 VAT History"
189                 "show-run": "Show Run"
190                 "status": "PASS" | "FAIL"
191             },
192             "ID" {
193                 # next test
194             }
195         }
196     }
197
198     .. note:: ID is the lowercase full path to the test.
199     """
200
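    # Driving this visitor (a minimal sketch; the file name and the metadata
    # values are illustrative only):
    #
    #     result = ExecutionResult("output.xml")
    #     checker = ExecutionChecker(
    #         metadata={"job": "job-name", "build": "1"},
    #         mapping=dict(), ignore=list())
    #     result.visit(checker)
    #     parsed = checker.data  # the structure documented above
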
201     # TODO: Remove when definitely no NDRPDRDISC tests are used:
202     REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
203
204     REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+\.\d+).*\n'
205                                 r'PLRsearch upper bound::?\s(\d+\.\d+)')
206
207     REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+\.\d+).*\n.*\n'
208                                    r'NDR_UPPER:\s(\d+\.\d+).*\n'
209                                    r'PDR_LOWER:\s(\d+\.\d+).*\n.*\n'
210                                    r'PDR_UPPER:\s(\d+\.\d+)')
211
212     REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
213                                   r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
214
215     REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
216                                  r'[\D\d]*')
217
218     REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
219                                    r"VPP Version:\s*|VPP version:\s*)(.*)")
220
221     REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")
222
223     REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
224
225     REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
226                            r'tx\s(\d*),\srx\s(\d*)')
227
228     REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
229                             r' in packets per second: \[(.*)\]')
230
231     REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
232     REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*\.\d*)')
233
234     REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
235
236     REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
237
238     REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
239
240     REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
241
242     def __init__(self, metadata, mapping, ignore):
243         """Initialisation.
244
245         :param metadata: Key-value pairs to be included in "metadata" part of
246             JSON structure.
247         :param mapping: Mapping of the old names of test cases to the new
248             (actual) one.
249         :param ignore: List of TCs to be ignored.
250         :type metadata: dict
251         :type mapping: dict
252         :type ignore: list
253         """
254
255         # Type of message to parse out from the test messages
256         self._msg_type = None
257
258         # VPP version
259         self._version = None
260
261         # Timestamp
262         self._timestamp = None
263
264         # Testbed. The testbed is identified by TG node IP address.
265         self._testbed = None
266
267         # Mapping of TCs long names
268         self._mapping = mapping
269
270         # Ignore list
271         self._ignore = ignore
272
273         # Number of "Show Runtime" keywords found so far:
274         self._lookup_kw_nr = 0
275
276         # Number of configuration (VAT/PAPI) history messages found:
277         # 0 - no message, 1 - history of DUT1, 2 - history of DUT2
278         self._conf_history_lookup_nr = 0
279
280         # Number of Show Running messages found
281         # 0 - no message
282         # 1 - Show run message found
283         self._show_run_lookup_nr = 0
284
285         # Test ID of the currently processed test - the lowercase full path
286         # to the test.
287         self._test_ID = None
288
289         # The main data structure
290         self._data = {
291             "metadata": OrderedDict(),
292             "suites": OrderedDict(),
293             "tests": OrderedDict()
294         }
295
296         # Save the provided metadata
297         for key, val in metadata.items():
298             self._data["metadata"][key] = val
299
300         # Dictionary defining the methods used to parse different types of
301         # messages
302         self.parse_msg = {
303             "timestamp": self._get_timestamp,
304             "vpp-version": self._get_vpp_version,
305             "dpdk-version": self._get_dpdk_version,
306             "teardown-vat-history": self._get_vat_history,
307             "teardown-papi-history": self._get_papi_history,
308             "test-show-runtime": self._get_show_run,
309             "testbed": self._get_testbed
310         }
311
312     @property
313     def data(self):
314         """Getter - Data parsed from the XML file.
315
316         :returns: Data parsed from the XML file.
317         :rtype: dict
318         """
319         return self._data
320
321     def _get_testbed(self, msg):
322         """Called when extraction of testbed IP is required.
323         The testbed is identified by TG node IP address.
324
325         :param msg: Message to process.
326         :type msg: Message
327         :returns: Nothing.
328         """
329
330         if msg.message.count("Setup of TG node"):
331             reg_tg_ip = re.compile(
332                 r'Setup of TG node (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) done')
333             try:
334                 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
335             except (KeyError, ValueError, IndexError, AttributeError):
336                 pass
337             finally:
338                 self._data["metadata"]["testbed"] = self._testbed
339                 self._msg_type = None
340
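    # Example of a message handled by _get_testbed above (the IP address is
    # illustrative):
    #
    #     Setup of TG node 192.168.0.1 done
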
341     def _get_vpp_version(self, msg):
342         """Called when extraction of VPP version is required.
343
344         :param msg: Message to process.
345         :type msg: Message
346         :returns: Nothing.
347         """
348
349         if msg.message.count("return STDOUT Version:") or \
350             msg.message.count("VPP Version:") or \
351             msg.message.count("VPP version:"):
352             self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
353                                 group(2))
354             self._data["metadata"]["version"] = self._version
355             self._msg_type = None
356
357     def _get_dpdk_version(self, msg):
358         """Called when extraction of DPDK version is required.
359
360         :param msg: Message to process.
361         :type msg: Message
362         :returns: Nothing.
363         """
364
365         if msg.message.count("DPDK Version:"):
366             try:
367                 self._version = str(re.search(
368                     self.REGEX_VERSION_DPDK, msg.message).group(2))
369                 self._data["metadata"]["version"] = self._version
370             except IndexError:
371                 pass
372             finally:
373                 self._msg_type = None
374
375     def _get_timestamp(self, msg):
376         """Called when extraction of timestamp is required.
377
378         :param msg: Message to process.
379         :type msg: Message
380         :returns: Nothing.
381         """
382
383         self._timestamp = msg.timestamp[:14]
384         self._data["metadata"]["generated"] = self._timestamp
385         self._msg_type = None
386
387     def _get_vat_history(self, msg):
388         """Called when extraction of VAT command history is required.
389
390         :param msg: Message to process.
391         :type msg: Message
392         :returns: Nothing.
393         """
394         if msg.message.count("VAT command history:"):
395             self._conf_history_lookup_nr += 1
396             if self._conf_history_lookup_nr == 1:
397                 self._data["tests"][self._test_ID]["conf-history"] = str()
398             else:
399                 self._msg_type = None
400             text = re.sub(r"[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} "
401                           "VAT command history:", "", msg.message, count=1). \
402                 replace("\n\n", "\n").replace('\n', ' |br| ').\
403                 replace('\r', '').replace('"', "'")
404
405             self._data["tests"][self._test_ID]["conf-history"] += " |br| "
406             self._data["tests"][self._test_ID]["conf-history"] += \
407                 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
408
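    # Example opening of a message handled by _get_vat_history above (the IP
    # address and the command are illustrative):
    #
    #     10.0.0.1 VAT command history:
    #     sw_interface_dump
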
409     def _get_papi_history(self, msg):
410         """Called when extraction of PAPI command history is required.
411
412         :param msg: Message to process.
413         :type msg: Message
414         :returns: Nothing.
415         """
416         if msg.message.count("PAPI command history:"):
417             self._conf_history_lookup_nr += 1
418             if self._conf_history_lookup_nr == 1:
419                 self._data["tests"][self._test_ID]["conf-history"] = str()
420             else:
421                 self._msg_type = None
422             text = re.sub(r"[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} "
423                           "PAPI command history:", "", msg.message, count=1). \
424                 replace("\n\n", "\n").replace('\n', ' |br| ').\
425                 replace('\r', '').replace('"', "'")
426
427             self._data["tests"][self._test_ID]["conf-history"] += " |br| "
428             self._data["tests"][self._test_ID]["conf-history"] += \
429                 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
430
431     def _get_show_run(self, msg):
432         """Called when extraction of VPP operational data (output of CLI command
433         Show Runtime) is required.
434
435         :param msg: Message to process.
436         :type msg: Message
437         :returns: Nothing.
438         """
439         if msg.message.count("Runtime:"):
440             self._show_run_lookup_nr += 1
441             if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
442                 self._data["tests"][self._test_ID]["show-run"] = str()
443             if self._lookup_kw_nr > 1:
444                 self._msg_type = None
445             if self._show_run_lookup_nr > 0:
446                 message = str(msg.message).replace(' ', '').replace('\n', '').\
447                     replace("'", '"').replace('b"', '"').replace('u"', '"')[8:]
448                 runtime = loads(message)
449                 try:
450                     threads_nr = len(runtime[0]["clocks"])
451                 except (IndexError, KeyError):
452                     return
453                 tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks"]
454                 table = [[tbl_hdr, ] for _ in range(threads_nr)]
455                 for item in runtime:
456                     for idx in range(threads_nr):
457                         table[idx].append([
458                             item["name"],
459                             item["calls"][idx],
460                             item["vectors"][idx],
461                             item["suspends"][idx],
462                             item["clocks"][idx]
463                         ])
464                 text = ""
465                 for idx in range(threads_nr):
466                     text += "Thread {idx} ".format(idx=idx)
467                     text += "vpp_main\n" if idx == 0 else \
468                         "vpp_wk_{idx}\n".format(idx=idx-1)
469                     txt_table = None
470                     for row in table[idx]:
471                         if txt_table is None:
472                             txt_table = prettytable.PrettyTable(row)
473                         else:
474                             if any(row[1:]):
475                                 txt_table.add_row(row)
476                     txt_table.set_style(prettytable.MSWORD_FRIENDLY)
477                     txt_table.align["Name"] = "l"
478                     txt_table.align["Calls"] = "r"
479                     txt_table.align["Vectors"] = "r"
480                     txt_table.align["Suspends"] = "r"
481                     txt_table.align["Clocks"] = "r"
482
483                     text += txt_table.get_string(sortby="Name") + '\n'
484
485                 text = text.replace('\n', ' |br| ').replace('\r', '').\
486                     replace('"', "'")
487                 try:
488                     self._data["tests"][self._test_ID]["show-run"] += " |br| "
489                     self._data["tests"][self._test_ID]["show-run"] += \
490                         "**DUT" + str(self._show_run_lookup_nr) + ":** |br| " \
491                         + text
492                 except KeyError:
493                     pass
494
495     def _get_ndrpdr_throughput(self, msg):
496         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
497         message.
498
499         :param msg: The test message to be parsed.
500         :type msg: str
501         :returns: Parsed data as a dict and the status (PASS/FAIL).
502         :rtype: tuple(dict, str)
503         """
504
505         throughput = {
506             "NDR": {"LOWER": -1.0, "UPPER": -1.0},
507             "PDR": {"LOWER": -1.0, "UPPER": -1.0}
508         }
509         status = "FAIL"
510         groups = re.search(self.REGEX_NDRPDR_RATE, msg)
511
512         if groups is not None:
513             try:
514                 throughput["NDR"]["LOWER"] = float(groups.group(1))
515                 throughput["NDR"]["UPPER"] = float(groups.group(2))
516                 throughput["PDR"]["LOWER"] = float(groups.group(3))
517                 throughput["PDR"]["UPPER"] = float(groups.group(4))
518                 status = "PASS"
519             except (IndexError, ValueError):
520                 pass
521
522         return throughput, status
523
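    # Example of a test message fragment matched by REGEX_NDRPDR_RATE above
    # (the rates and the elided lines are illustrative only):
    #
    #     NDR_LOWER: 12345678.0 pps (...)
    #     ...
    #     NDR_UPPER: 12345680.0 pps (...)
    #     PDR_LOWER: 12345682.0 pps (...)
    #     ...
    #     PDR_UPPER: 12345684.0 pps (...)
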
524     def _get_plr_throughput(self, msg):
525         """Get PLRsearch lower bound and PLRsearch upper bound from the test
526         message.
527
528         :param msg: The test message to be parsed.
529         :type msg: str
530         :returns: Parsed data as a dict and the status (PASS/FAIL).
531         :rtype: tuple(dict, str)
532         """
533
534         throughput = {
535             "LOWER": -1.0,
536             "UPPER": -1.0
537         }
538         status = "FAIL"
539         groups = re.search(self.REGEX_PLR_RATE, msg)
540
541         if groups is not None:
542             try:
543                 throughput["LOWER"] = float(groups.group(1))
544                 throughput["UPPER"] = float(groups.group(2))
545                 status = "PASS"
546             except (IndexError, ValueError):
547                 pass
548
549         return throughput, status
550
551     def _get_ndrpdr_latency(self, msg):
552         """Get LATENCY from the test message.
553
554         :param msg: The test message to be parsed.
555         :type msg: str
556         :returns: Parsed data as a dict and the status (PASS/FAIL).
557         :rtype: tuple(dict, str)
558         """
559         latency_default = {"min": -1.0, "avg": -1.0, "max": -1.0, "hdrh": ""}
560         latency = {
561             "NDR": {
562                 "direction1": copy.copy(latency_default),
563                 "direction2": copy.copy(latency_default)
564             },
565             "PDR": {
566                 "direction1": copy.copy(latency_default),
567                 "direction2": copy.copy(latency_default)
568             }
569         }
570         status = "FAIL"
571         groups = re.search(self.REGEX_NDRPDR_LAT, msg)
572
573         def process_latency(in_str):
574             """Return object with parsed latency values.
575
576             TODO: Define class for the return type.
577
578             :param in_str: Input string, min/avg/max/hdrh format.
579             :type in_str: str
580             :returns: Dict with min/avg/max floats and "hdrh" string.
581             :rtype: dict
582             :raises IndexError: If in_str does not have enough substrings.
583             :raises ValueError: If a substring does not convert to float.
584             """
585             in_list = in_str.split('/')
586
587             rval = {
588                 "min": float(in_list[0]),
589                 "avg": float(in_list[1]),
590                 "max": float(in_list[2]),
591                 "hdrh": ""
592             }
593
594             if len(in_list) == 4:
595                 rval["hdrh"] = str(in_list[3])
596
597             return rval
598
599         if groups is not None:
600             try:
601                 latency["NDR"]["direction1"] = process_latency(groups.group(1))
602                 latency["NDR"]["direction2"] = process_latency(groups.group(2))
603                 latency["PDR"]["direction1"] = process_latency(groups.group(3))
604                 latency["PDR"]["direction2"] = process_latency(groups.group(4))
605                 status = "PASS"
606             except (IndexError, ValueError):
607                 pass
608
609         return latency, status
610
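    # Example of a latency fragment matched by REGEX_NDRPDR_LAT above; the
    # values and the elided lines are illustrative, and the optional 4th
    # field of each min/avg/max/hdrh group is a serialised HDRHistogram log
    # (represented by a placeholder here):
    #
    #     LATENCY ... ['1/2/5/<hdrh data>', '1/3/7/<hdrh data>']
    #     ...
    #     LATENCY ... ['2/4/9/<hdrh data>', '2/5/9/<hdrh data>']
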
611     def visit_suite(self, suite):
612         """Implements traversing through the suite and its direct children.
613
614         :param suite: Suite to process.
615         :type suite: Suite
616         :returns: Nothing.
617         """
618         if self.start_suite(suite) is not False:
619             suite.suites.visit(self)
620             suite.tests.visit(self)
621             self.end_suite(suite)
622
623     def start_suite(self, suite):
624         """Called when suite starts.
625
626         :param suite: Suite to process.
627         :type suite: Suite
628         :returns: Nothing.
629         """
630
631         try:
632             parent_name = suite.parent.name
633         except AttributeError:
634             return
635
636         doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
637             replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
638         doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
639
640         self._data["suites"][suite.longname.lower().replace('"', "'").
641             replace(" ", "_")] = {
642                 "name": suite.name.lower(),
643                 "doc": doc_str,
644                 "parent": parent_name,
645                 "level": len(suite.longname.split("."))
646             }
647
648         suite.keywords.visit(self)
649
650     def end_suite(self, suite):
651         """Called when suite ends.
652
653         :param suite: Suite to process.
654         :type suite: Suite
655         :returns: Nothing.
656         """
657         pass
658
659     def visit_test(self, test):
660         """Implements traversing through the test.
661
662         :param test: Test to process.
663         :type test: Test
664         :returns: Nothing.
665         """
666         if self.start_test(test) is not False:
667             test.keywords.visit(self)
668             self.end_test(test)
669
670     def start_test(self, test):
671         """Called when test starts.
672
673         :param test: Test to process.
674         :type test: Test
675         :returns: Nothing.
676         """
677
678         longname_orig = test.longname.lower()
679
680         # Check the ignore list
681         if longname_orig in self._ignore:
682             return
683
684         tags = [str(tag) for tag in test.tags]
685         test_result = dict()
686
687         # Change the TC long name and name if defined in the mapping table
688         longname = self._mapping.get(longname_orig, None)
689         if longname is not None:
690             name = longname.split('.')[-1]
691             logging.debug("{0}\n{1}\n{2}\n{3}".format(
692                 self._data["metadata"], longname_orig, longname, name))
693         else:
694             longname = longname_orig
695             name = test.name.lower()
696
697         # Remove TC number from the TC long name (backward compatibility):
698         self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
699         # Remove TC number from the TC name (not needed):
700         test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
701
702         test_result["parent"] = test.parent.name.lower()
703         test_result["tags"] = tags
704         doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
705             replace('\r', '').replace('[', ' |br| [')
706         test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
707         test_result["msg"] = test.message.replace('\n', ' |br| '). \
708             replace('\r', '').replace('"', "'")
709         test_result["type"] = "FUNC"
710         test_result["status"] = test.status
711
712         if "PERFTEST" in tags:
713             # Replace info about cores (e.g. -1c-) with the info about threads
714             # and cores (e.g. -1t1c-) in the long test case names and in the
715             # test case names if necessary.
716             groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
717             if not groups:
718                 tag_count = 0
719                 tag_tc = str()
720                 for tag in test_result["tags"]:
721                     groups = re.search(self.REGEX_TC_TAG, tag)
722                     if groups:
723                         tag_count += 1
724                         tag_tc = tag
725
726                 if tag_count == 1:
727                     self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
728                                            "-{0}-".format(tag_tc.lower()),
729                                            self._test_ID,
730                                            count=1)
731                     test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
732                                                  "-{0}-".format(tag_tc.lower()),
733                                                  test_result["name"],
734                                                  count=1)
735                 else:
736                     test_result["status"] = "FAIL"
737                     self._data["tests"][self._test_ID] = test_result
738                     logging.debug("The test '{0}' has no or more than one "
739                                   "multi-threading tags.".format(self._test_ID))
740                     logging.debug("Tags: {0}".format(test_result["tags"]))
741                     return
742
743         if test.status == "PASS" and ("NDRPDRDISC" in tags or
744                                       "NDRPDR" in tags or
745                                       "SOAK" in tags or
746                                       "TCP" in tags or
747                                       "MRR" in tags or
748                                       "BMRR" in tags or
749                                       "RECONF" in tags):
750             # TODO: Remove when definitely no NDRPDRDISC tests are used:
751             if "NDRDISC" in tags:
752                 test_result["type"] = "NDR"
753             # TODO: Remove when definitely no NDRPDRDISC tests are used:
754             elif "PDRDISC" in tags:
755                 test_result["type"] = "PDR"
756             elif "NDRPDR" in tags:
757                 test_result["type"] = "NDRPDR"
758             elif "SOAK" in tags:
759                 test_result["type"] = "SOAK"
760             elif "TCP" in tags:
761                 test_result["type"] = "TCP"
762             elif "MRR" in tags:
763                 test_result["type"] = "MRR"
764             elif "FRMOBL" in tags or "BMRR" in tags:
765                 test_result["type"] = "BMRR"
766             elif "RECONF" in tags:
767                 test_result["type"] = "RECONF"
768             else:
769                 test_result["status"] = "FAIL"
770                 self._data["tests"][self._test_ID] = test_result
771                 return
772
773             # TODO: Remove when definitely no NDRPDRDISC tests are used:
774             if test_result["type"] in ("NDR", "PDR"):
775                 try:
776                     rate_value = str(re.search(
777                         self.REGEX_RATE, test.message).group(1))
778                 except AttributeError:
779                     rate_value = "-1"
780                 try:
781                     rate_unit = str(re.search(
782                         self.REGEX_RATE, test.message).group(2))
783                 except AttributeError:
784                     rate_unit = "-1"
785
786                 test_result["throughput"] = dict()
787                 test_result["throughput"]["value"] = \
788                     int(rate_value.split('.')[0])
789                 test_result["throughput"]["unit"] = rate_unit
790                 test_result["latency"] = \
791                     self._get_latency(test.message, test_result["type"])
792                 if test_result["type"] == "PDR":
793                     test_result["lossTolerance"] = str(re.search(
794                         self.REGEX_TOLERANCE, test.message).group(1))
795
796             elif test_result["type"] in ("NDRPDR", ):
797                 test_result["throughput"], test_result["status"] = \
798                     self._get_ndrpdr_throughput(test.message)
799                 test_result["latency"], test_result["status"] = \
800                     self._get_ndrpdr_latency(test.message)
801
802             elif test_result["type"] in ("SOAK", ):
803                 test_result["throughput"], test_result["status"] = \
804                     self._get_plr_throughput(test.message)
805
806             elif test_result["type"] in ("TCP", ):
807                 groups = re.search(self.REGEX_TCP, test.message)
808                 test_result["result"] = int(groups.group(2))
809
810             elif test_result["type"] in ("MRR", "BMRR"):
811                 test_result["result"] = dict()
812                 groups = re.search(self.REGEX_BMRR, test.message)
813                 if groups is not None:
814                     items_str = groups.group(1)
815                     items_float = [float(item.strip()) for item
816                                    in items_str.split(",")]
817                     metadata = AvgStdevMetadataFactory.from_data(items_float)
818                     # Next two lines have been introduced in CSIT-1179,
819                     # to be removed in CSIT-1180.
820                     metadata.size = 1
821                     metadata.stdev = 0.0
822                     test_result["result"]["receive-rate"] = metadata
823                 else:
824                     groups = re.search(self.REGEX_MRR, test.message)
825                     test_result["result"]["receive-rate"] = \
826                         AvgStdevMetadataFactory.from_data([
827                             float(groups.group(3)) / float(groups.group(1)), ])
828
829             elif test_result["type"] == "RECONF":
830                 test_result["result"] = None
831                 try:
832                     grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
833                     grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
834                     test_result["result"] = {
835                         "loss": int(grps_loss.group(1)),
836                         "time": float(grps_time.group(1))
837                     }
838                 except (AttributeError, IndexError, ValueError, TypeError):
839                     test_result["status"] = "FAIL"
840
841         self._data["tests"][self._test_ID] = test_result
842
843     def end_test(self, test):
844         """Called when test ends.
845
846         :param test: Test to process.
847         :type test: Test
848         :returns: Nothing.
849         """
850         pass
851
852     def visit_keyword(self, keyword):
853         """Implements traversing through the keyword and its child keywords.
854
855         :param keyword: Keyword to process.
856         :type keyword: Keyword
857         :returns: Nothing.
858         """
859         if self.start_keyword(keyword) is not False:
860             self.end_keyword(keyword)
861
862     def start_keyword(self, keyword):
863         """Called when keyword starts. Default implementation does nothing.
864
865         :param keyword: Keyword to process.
866         :type keyword: Keyword
867         :returns: Nothing.
868         """
869         try:
870             if keyword.type == "setup":
871                 self.visit_setup_kw(keyword)
872             elif keyword.type == "teardown":
873                 self._lookup_kw_nr = 0
874                 self.visit_teardown_kw(keyword)
875             else:
876                 self._lookup_kw_nr = 0
877                 self.visit_test_kw(keyword)
878         except AttributeError:
879             pass
880
881     def end_keyword(self, keyword):
882         """Called when keyword ends. Default implementation does nothing.
883
884         :param keyword: Keyword to process.
885         :type keyword: Keyword
886         :returns: Nothing.
887         """
888         pass
889
890     def visit_test_kw(self, test_kw):
891         """Implements traversing through the test keyword and its child
892         keywords.
893
894         :param test_kw: Keyword to process.
895         :type test_kw: Keyword
896         :returns: Nothing.
897         """
898         for keyword in test_kw.keywords:
899             if self.start_test_kw(keyword) is not False:
900                 self.visit_test_kw(keyword)
901                 self.end_test_kw(keyword)
902
903     def start_test_kw(self, test_kw):
904         """Called when test keyword starts. Default implementation does
905         nothing.
906
907         :param test_kw: Keyword to process.
908         :type test_kw: Keyword
909         :returns: Nothing.
910         """
911         if test_kw.name.count("Show Runtime Counters On All Duts"):
912             self._lookup_kw_nr += 1
913             self._show_run_lookup_nr = 0
914             self._msg_type = "test-show-runtime"
915         elif test_kw.name.count("Install Dpdk Test") and not self._version:
916             self._msg_type = "dpdk-version"
917         else:
918             return
919         test_kw.messages.visit(self)
920
921     def end_test_kw(self, test_kw):
922         """Called when keyword ends. Default implementation does nothing.
923
924         :param test_kw: Keyword to process.
925         :type test_kw: Keyword
926         :returns: Nothing.
927         """
928         pass
929
930     def visit_setup_kw(self, setup_kw):
931         """Implements traversing through the teardown keyword and its child
932         keywords.
933
934         :param setup_kw: Keyword to process.
935         :type setup_kw: Keyword
936         :returns: Nothing.
937         """
938         for keyword in setup_kw.keywords:
939             if self.start_setup_kw(keyword) is not False:
940                 self.visit_setup_kw(keyword)
941                 self.end_setup_kw(keyword)
942
943     def start_setup_kw(self, setup_kw):
944         """Called when teardown keyword starts. Default implementation does
945         nothing.
946
947         :param setup_kw: Keyword to process.
948         :type setup_kw: Keyword
949         :returns: Nothing.
950         """
951         if setup_kw.name.count("Show Vpp Version On All Duts") \
952                 and not self._version:
953             self._msg_type = "vpp-version"
954         elif setup_kw.name.count("Set Global Variable") \
955                 and not self._timestamp:
956             self._msg_type = "timestamp"
957         elif setup_kw.name.count("Setup Framework") and not self._testbed:
958             self._msg_type = "testbed"
959         else:
960             return
961         setup_kw.messages.visit(self)
962
963     def end_setup_kw(self, setup_kw):
964         """Called when keyword ends. Default implementation does nothing.
965
966         :param setup_kw: Keyword to process.
967         :type setup_kw: Keyword
968         :returns: Nothing.
969         """
970         pass
971
972     def visit_teardown_kw(self, teardown_kw):
973         """Implements traversing through the teardown keyword and its child
974         keywords.
975
976         :param teardown_kw: Keyword to process.
977         :type teardown_kw: Keyword
978         :returns: Nothing.
979         """
980         for keyword in teardown_kw.keywords:
981             if self.start_teardown_kw(keyword) is not False:
982                 self.visit_teardown_kw(keyword)
983                 self.end_teardown_kw(keyword)
984
985     def start_teardown_kw(self, teardown_kw):
986         """Called when teardown keyword starts. Default implementation does
987         nothing.
988
989         :param teardown_kw: Keyword to process.
990         :type teardown_kw: Keyword
991         :returns: Nothing.
992         """
993
994         if teardown_kw.name.count("Show Vat History On All Duts"):
995             self._conf_history_lookup_nr = 0
996             self._msg_type = "teardown-vat-history"
997             teardown_kw.messages.visit(self)
998         elif teardown_kw.name.count("Show Papi History On All Duts"):
999             self._conf_history_lookup_nr = 0
1000             self._msg_type = "teardown-papi-history"
1001             teardown_kw.messages.visit(self)
1002
1003     def end_teardown_kw(self, teardown_kw):
1004         """Called when keyword ends. Default implementation does nothing.
1005
1006         :param teardown_kw: Keyword to process.
1007         :type teardown_kw: Keyword
1008         :returns: Nothing.
1009         """
1010         pass
1011
1012     def visit_message(self, msg):
1013         """Implements visiting the message.
1014
1015         :param msg: Message to process.
1016         :type msg: Message
1017         :returns: Nothing.
1018         """
1019         if self.start_message(msg) is not False:
1020             self.end_message(msg)
1021
1022     def start_message(self, msg):
1023         """Called when message starts. Get required information from messages:
1024         - VPP version.
1025
1026         :param msg: Message to process.
1027         :type msg: Message
1028         :returns: Nothing.
1029         """
1030
1031         if self._msg_type:
1032             self.parse_msg[self._msg_type](msg)
1033
1034     def end_message(self, msg):
1035         """Called when message ends. Default implementation does nothing.
1036
1037         :param msg: Message to process.
1038         :type msg: Message
1039         :returns: Nothing.
1040         """
1041         pass
1042
1043
1044 class InputData(object):
1045     """Input data
1046
1047     The data is extracted from output.xml files generated by Jenkins jobs and
1048     stored in pandas' DataFrames.
1049
1050     The data structure:
1051     - job name
1052       - build number
1053         - metadata
1054           (as described in ExecutionChecker documentation)
1055         - suites
1056           (as described in ExecutionChecker documentation)
1057         - tests
1058           (as described in ExecutionChecker documentation)
1059     """
1060
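    # Accessing the parsed data once download_and_parse_data() has run (a
    # sketch; the job name and build number are placeholders):
    #
    #     in_data = InputData(spec)
    #     in_data.download_and_parse_data()
    #     meta = in_data.metadata("job-name", "123")
    #     tests = in_data.tests("job-name", "123")
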
1061     def __init__(self, spec):
1062         """Initialization.
1063
1064         :param spec: Specification.
1065         :type spec: Specification
1066         """
1067
1068         # Specification:
1069         self._cfg = spec
1070
1071         # Data store:
1072         self._input_data = pd.Series()
1073
1074     @property
1075     def data(self):
1076         """Getter - Input data.
1077
1078         :returns: Input data
1079         :rtype: pandas.Series
1080         """
1081         return self._input_data
1082
1083     def metadata(self, job, build):
1084         """Getter - metadata
1085
1086         :param job: Job which metadata we want.
1087         :param build: Build which metadata we want.
1088         :type job: str
1089         :type build: str
1090         :returns: Metadata
1091         :rtype: pandas.Series
1092         """
1093
1094         return self.data[job][build]["metadata"]
1095
1096     def suites(self, job, build):
1097         """Getter - suites
1098
1099         :param job: Job which suites we want.
1100         :param build: Build which suites we want.
1101         :type job: str
1102         :type build: str
1103         :returns: Suites.
1104         :rtype: pandas.Series
1105         """
1106
1107         return self.data[job][str(build)]["suites"]
1108
1109     def tests(self, job, build):
1110         """Getter - tests
1111
1112         :param job: Job which tests we want.
1113         :param build: Build which tests we want.
1114         :type job: str
1115         :type build: str
1116         :returns: Tests.
1117         :rtype: pandas.Series
1118         """
1119
1120         return self.data[job][build]["tests"]
1121
1122     def _parse_tests(self, job, build, log):
1123         """Process data from robot output.xml file and return JSON structured
1124         data.
1125
1126         :param job: Name of the job whose build output data will be processed.
1127         :param build: The build whose output data will be processed.
1128         :param log: List of log messages.
1129         :type job: str
1130         :type build: dict
1131         :type log: list of tuples (severity, msg)
1132         :returns: JSON data structure.
1133         :rtype: dict
1134         """
1135
1136         metadata = {
1137             "job": job,
1138             "build": build
1139         }
1140
1141         with open(build["file-name"], 'r') as data_file:
1142             try:
1143                 result = ExecutionResult(data_file)
1144             except errors.DataError as err:
1145                 log.append(("ERROR", "Error occurred while parsing output.xml: "
1146                                      "{0}".format(err)))
1147                 return None
1148         checker = ExecutionChecker(metadata, self._cfg.mapping,
1149                                    self._cfg.ignore)
1150         result.visit(checker)
1151
1152         return checker.data
1153
1154     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1155         """Download and parse the input data file.
1156
1157         :param job: Name of the Jenkins job which generated the processed
1158             input file.
1159         :param build: Information about the Jenkins build which generated the
1160             processed input file.
1161         :param repeat: Repeat the download specified number of times if not
1162             successful.
1163         :param pid: PID of the process executing this method.
1164         :type job: str
1165         :type build: dict
1166         :type repeat: int
1167         :type pid: int
1168         """
1169
1170         logs = list()
1171
1172         logs.append(("INFO", "  Processing the job/build: {0}: {1}".
1173                      format(job, build["build"])))
1174
1175         state = "failed"
1176         success = False
1177         data = None
1178         do_repeat = repeat
1179         while do_repeat:
1180             success = download_and_unzip_data_file(self._cfg, job, build, pid,
1181                                                    logs)
1182             if success:
1183                 break
1184             do_repeat -= 1
1185         if not success:
1186             logs.append(("ERROR", "It is not possible to download the input "
1187                                   "data file from the job '{job}', build "
1188                                   "'{build}', or it is damaged. Skipped.".
1189                          format(job=job, build=build["build"])))
1190         if success:
1191             logs.append(("INFO", "    Processing data from the build '{0}' ...".
1192                          format(build["build"])))
1193             data = self._parse_tests(job, build, logs)
1194             if data is None:
1195                 logs.append(("ERROR", "Input data file from the job '{job}', "
1196                                       "build '{build}' is damaged. Skipped.".
1197                              format(job=job, build=build["build"])))
1198             else:
1199                 state = "processed"
1200
1201             try:
1202                 remove(build["file-name"])
1203             except OSError as err:
1204                 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
1205                              format(build["file-name"], repr(err))))
1206
1207         # If the time-period is defined in the specification file, discard
1208         # all builds which are outside the time period.
1209         timeperiod = self._cfg.input.get("time-period", None)
1210         if timeperiod and data:
1211             now = dt.utcnow()
1212             timeperiod = timedelta(int(timeperiod))
1213             metadata = data.get("metadata", None)
1214             if metadata:
1215                 generated = metadata.get("generated", None)
1216                 if generated:
1217                     generated = dt.strptime(generated, "%Y%m%d %H:%M")
1218                     if (now - generated) > timeperiod:
1219                         # Remove the data and the file:
1220                         state = "removed"
1221                         data = None
1222                         logs.append(
1223                             ("INFO",
1224                              "    The build {job}/{build} is outdated, will be "
1225                              "removed".format(job=job, build=build["build"])))
1226         logs.append(("INFO", "  Done."))
1227
1228         for level, line in logs:
1229             if level == "INFO":
1230                 logging.info(line)
1231             elif level == "ERROR":
1232                 logging.error(line)
1233             elif level == "DEBUG":
1234                 logging.debug(line)
1235             elif level == "CRITICAL":
1236                 logging.critical(line)
1237             elif level == "WARNING":
1238                 logging.warning(line)
1239
1240         return {"data": data, "state": state, "job": job, "build": build}
1241
1242     def download_and_parse_data(self, repeat=1):
1243         """Download the input data files, parse input data from input files and
1244         store in pandas' Series.
1245
1246         :param repeat: Repeat the download specified number of times if not
1247             successful.
1248         :type repeat: int
1249         """
1250
1251         logging.info("Downloading and parsing input files ...")
1252
1253         for job, builds in self._cfg.builds.items():
1254             for build in builds:
1255
1256                 result = self._download_and_parse_build(job, build, repeat)
1257                 build_nr = result["build"]["build"]
1258
1259                 if result["data"]:
1260                     data = result["data"]
1261                     build_data = pd.Series({
1262                         "metadata": pd.Series(
1263                             data["metadata"].values(),
1264                             index=data["metadata"].keys()),
1265                         "suites": pd.Series(data["suites"].values(),
1266                                             index=data["suites"].keys()),
1267                         "tests": pd.Series(data["tests"].values(),
1268                                            index=data["tests"].keys())})
1269
1270                     if self._input_data.get(job, None) is None:
1271                         self._input_data[job] = pd.Series()
1272                     self._input_data[job][str(build_nr)] = build_data
1273
1274                     self._cfg.set_input_file_name(
1275                         job, build_nr, result["build"]["file-name"])
1276
1277                 self._cfg.set_input_state(job, build_nr, result["state"])
1278
1279                 logging.info("Memory allocation: {0:,d}MB".format(
1280                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
1281
1282         logging.info("Done.")
1283
1284     @staticmethod
1285     def _end_of_tag(tag_filter, start=0, closer="'"):
1286         """Return the index of character in the string which is the end of tag.
1287
1288         :param tag_filter: The string where the end of tag is being searched.
1289         :param start: The index where the searching is stated.
1290         :param closer: The character which is the tag closer.
1291         :type tag_filter: str
1292         :type start: int
1293         :type closer: str
1294         :returns: The index of the tag closer.
1295         :rtype: int
1296         """
1297
1298         try:
1299             idx_opener = tag_filter.index(closer, start)
1300             return tag_filter.index(closer, idx_opener + 1)
1301         except ValueError:
1302             return None
1303
1304     @staticmethod
1305     def _condition(tag_filter):
1306         """Create a conditional statement from the given tag filter.
1307
1308         :param tag_filter: Filter based on tags from the element specification.
1309         :type tag_filter: str
1310         :returns: Conditional statement which can be evaluated.
1311         :rtype: str
1312         """
1313
1314         index = 0
1315         while True:
1316             index = InputData._end_of_tag(tag_filter, index)
1317             if index is None:
1318                 return tag_filter
1319             index += 1
1320             tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
1321
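    # Example of the transformation performed by _condition (illustrative):
    #
    #     "'NDRPDR' and not '2C'"  ->  "'NDRPDR' in tags and not '2C' in tags"
    #
    # filter_data() then evaluates the resulting expression with "tags" bound
    # to the tags of each test.
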
1322     def filter_data(self, element, params=None, data=None, data_set="tests",
1323                     continue_on_error=False):
1324         """Filter required data from the given jobs and builds.
1325
1326         The output data structure is:
1327
1328         - job 1
1329           - build 1
1330             - test (or suite) 1 ID:
1331               - param 1
1332               - param 2
1333               ...
1334               - param n
1335             ...
1336             - test (or suite) n ID:
1337             ...
1338           ...
1339           - build n
1340         ...
1341         - job n
1342
1343         :param element: Element which will use the filtered data.
1344         :param params: Parameters which will be included in the output. If None,
1345             all parameters are included.
1346         :param data: If not None, this data is used instead of data specified
1347             in the element.
1348         :param data_set: The set of data to be filtered: tests, suites,
1349             metadata.
1350         :param continue_on_error: Continue if there is an error while reading
1351             the data. The item will be empty then.
1352         :type element: pandas.Series
1353         :type params: list
1354         :type data: dict
1355         :type data_set: str
1356         :type continue_on_error: bool
1357         :returns: Filtered data.
1358         :rtype: pandas.Series
1359         """
1360
1361         try:
1362             if element["filter"] in ("all", "template"):
1363                 cond = "True"
1364             else:
1365                 cond = InputData._condition(element["filter"])
1366             logging.debug("   Filter: {0}".format(cond))
1367         except KeyError:
1368             logging.error("  No filter defined.")
1369             return None
1370
1371         if params is None:
1372             params = element.get("parameters", None)
1373             if params:
1374                 params.append("type")
1375
1376         data_to_filter = data if data else element["data"]
1377         data = pd.Series()
1378         try:
1379             for job, builds in data_to_filter.items():
1380                 data[job] = pd.Series()
1381                 for build in builds:
1382                     data[job][str(build)] = pd.Series()
1383                     try:
1384                         data_iter = self.data[job][str(build)][data_set].\
1385                             iteritems()
1386                     except KeyError:
1387                         if continue_on_error:
1388                             continue
1389                         else:
1390                             return None
1391                     for test_ID, test_data in data_iter:
1392                         if eval(cond, {"tags": test_data.get("tags", "")}):
1393                             data[job][str(build)][test_ID] = pd.Series()
1394                             if params is None:
1395                                 for param, val in test_data.items():
1396                                     data[job][str(build)][test_ID][param] = val
1397                             else:
1398                                 for param in params:
1399                                     try:
1400                                         data[job][str(build)][test_ID][param] =\
1401                                             test_data[param]
1402                                     except KeyError:
1403                                         data[job][str(build)][test_ID][param] =\
1404                                             "No Data"
1405             return data
1406
1407         except (KeyError, IndexError, ValueError) as err:
1408             logging.error("   Missing mandatory parameter in the element "
1409                           "specification: {0}".format(err))
1410             return None
1411         except AttributeError:
1412             return None
1413         except SyntaxError:
1414             logging.error("   The filter '{0}' is not correct. Check if all "
1415                           "tags are enclosed by apostrophes.".format(cond))
1416             return None
1417
1418     def filter_tests_by_name(self, element, params=None, data_set="tests",
1419                              continue_on_error=False):
1420         """Filter required data from the given jobs and builds.
1421
1422         The output data structure is:
1423
1424         - job 1
1425           - build 1
1426             - test (or suite) 1 ID:
1427               - param 1
1428               - param 2
1429               ...
1430               - param n
1431             ...
1432             - test (or suite) n ID:
1433             ...
1434           ...
1435           - build n
1436         ...
1437         - job n
1438
1439         :param element: Element which will use the filtered data.
1440         :param params: Parameters which will be included in the output. If None,
1441             all parameters are included.
1442         :param data_set: The set of data to be filtered: tests, suites,
1443             metadata.
1444         :param continue_on_error: Continue if there is an error while reading
1445             the data. The item will be empty then.
1446         :type element: pandas.Series
1447         :type params: list
1448         :type data_set: str
1449         :type continue_on_error: bool
1450         :returns: Filtered data.
1451         :rtype: pandas.Series
1452         """
1453
1454         include = element.get("include", None)
1455         if not include:
1456             logging.warning("No tests to include, skipping the element.")
1457             return None
1458
1459         if params is None:
1460             params = element.get("parameters", None)
1461             if params:
1462                 params.append("type")
1463
1464         data = pd.Series()
1465         try:
1466             for job, builds in element["data"].items():
1467                 data[job] = pd.Series()
1468                 for build in builds:
1469                     data[job][str(build)] = pd.Series()
1470                     for test in include:
1471                         try:
1472                             reg_ex = re.compile(str(test).lower())
1473                             for test_ID in self.data[job][str(build)]\
1474                                     [data_set].keys():
1475                                 if re.match(reg_ex, str(test_ID).lower()):
1476                                     test_data = self.data[job][str(build)]\
1477                                         [data_set][test_ID]
1478                                     data[job][str(build)][test_ID] = pd.Series()
1479                                     if params is None:
1480                                         for param, val in test_data.items():
1481                                             data[job][str(build)][test_ID]\
1482                                                 [param] = val
1483                                     else:
1484                                         for param in params:
1485                                             try:
1486                                                 data[job][str(build)][test_ID]\
1487                                                     [param] = test_data[param]
1488                                             except KeyError:
1489                                                 data[job][str(build)][test_ID]\
1490                                                     [param] = "No Data"
1491                         except KeyError as err:
1492                             logging.error("{err!r}".format(err=err))
1493                             if continue_on_error:
1494                                 continue
1495                             else:
1496                                 return None
1497             return data
1498
1499         except (KeyError, IndexError, ValueError) as err:
1500             logging.error("Missing mandatory parameter in the element "
1501                           "specification: {err!r}".format(err=err))
1502             return None
1503         except AttributeError as err:
1504             logging.error("{err!r}".format(err=err))
1505             return None
1506
1507
1508     @staticmethod
1509     def merge_data(data):
1510         """Merge data from more jobs and builds to a simple data structure.
1511
1512         The output data structure is:
1513
1514         - test (suite) 1 ID:
1515           - param 1
1516           - param 2
1517           ...
1518           - param n
1519         ...
1520         - test (suite) n ID:
1521         ...
1522
1523         :param data: Data to merge.
1524         :type data: pandas.Series
1525         :returns: Merged data.
1526         :rtype: pandas.Series
1527         """
1528
1529         logging.info("    Merging data ...")
1530
1531         merged_data = pd.Series()
1532         for _, builds in data.iteritems():
1533             for _, item in builds.iteritems():
1534                 for ID, item_data in item.iteritems():
1535                     merged_data[ID] = item_data
1536
1537         return merged_data