Use Jumpavg 0.2.0 in PAL
[csit.git] / resources / tools / presentation / input_data_parser.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """
21
22 import copy
23 import re
24 import resource
25 import pandas as pd
26 import logging
27 import prettytable
28
29 from robot.api import ExecutionResult, ResultVisitor
30 from robot import errors
31 from collections import OrderedDict
32 from string import replace
33 from os import remove
34 from datetime import datetime as dt
35 from datetime import timedelta
36 from json import loads
37
38 from resources.libraries.python import jumpavg
39 from input_data_files import download_and_unzip_data_file
40
41
42 # Separator used in file names
43 SEPARATOR = "__"
44
45
46 class ExecutionChecker(ResultVisitor):
47     """Class to traverse through the test suite structure.
48
49     The functionality implemented in this class generates a json structure:
50
51     Performance tests:
52
53     {
54         "metadata": {
55             "generated": "Timestamp",
56             "version": "SUT version",
57             "job": "Jenkins job name",
58             "build": "Information about the build"
59         },
60         "suites": {
61             "Suite long name 1": {
62                 "name": Suite name,
63                 "doc": "Suite 1 documentation",
64                 "parent": "Suite 1 parent",
65                 "level": "Level of the suite in the suite hierarchy"
66             }
67             "Suite long name N": {
68                 "name": Suite name,
69                 "doc": "Suite N documentation",
70                 "parent": "Suite N parent",
71                 "level": "Level of the suite in the suite hierarchy"
72             }
73         }
74         "tests": {
75             # NDRPDR tests:
76             "ID": {
77                 "name": "Test name",
78                 "parent": "Name of the parent of the test",
79                 "doc": "Test documentation",
80                 "msg": "Test message",
81                 "conf-history": "DUT1 and DUT2 VAT History",
82                 "show-run": "Show Run",
83                 "tags": ["tag 1", "tag 2", "tag n"],
84                 "type": "NDRPDR",
85                 "status": "PASS" | "FAIL",
86                 "throughput": {
87                     "NDR": {
88                         "LOWER": float,
89                         "UPPER": float
90                     },
91                     "PDR": {
92                         "LOWER": float,
93                         "UPPER": float
94                     }
95                 },
96                 "latency": {
97                     "NDR": {
98                         "direction1": {
99                             "min": float,
100                             "avg": float,
101                             "max": float,
102                             "hdrh": str
103                         },
104                         "direction2": {
105                             "min": float,
106                             "avg": float,
107                             "max": float,
108                             "hdrh": str
109                         }
110                     },
111                     "PDR": {
112                         "direction1": {
113                             "min": float,
114                             "avg": float,
115                             "max": float,
116                             "hdrh": str
117                         },
118                         "direction2": {
119                             "min": float,
120                             "avg": float,
121                             "max": float,
122                             "hdrh": str
123                         }
124                     }
125                 }
126             }
127
128             # TCP tests:
129             "ID": {
130                 "name": "Test name",
131                 "parent": "Name of the parent of the test",
132                 "doc": "Test documentation",
133                 "msg": "Test message",
134                 "tags": ["tag 1", "tag 2", "tag n"],
135                 "type": "TCP",
136                 "status": "PASS" | "FAIL",
137                 "result": int
138             }
139
140             # MRR, BMRR tests:
141             "ID": {
142                 "name": "Test name",
143                 "parent": "Name of the parent of the test",
144                 "doc": "Test documentation",
145                 "msg": "Test message",
146                 "tags": ["tag 1", "tag 2", "tag n"],
147                 "type": "MRR" | "BMRR",
148                 "status": "PASS" | "FAIL",
149                 "result": {
150                     "receive-rate": float,
151                     # Average of a list, computed using AvgStdevStats.
152                     # In CSIT-1180, replace with List[float].
153                 }
154             }
155
156             "ID" {
157                 # next test
158             }
159         }
160     }
161
162
163     Functional tests:
164
165     {
166         "metadata": {  # Optional
167             "version": "VPP version",
168             "job": "Jenkins job name",
169             "build": "Information about the build"
170         },
171         "suites": {
172             "Suite name 1": {
173                 "doc": "Suite 1 documentation",
174                 "parent": "Suite 1 parent",
175                 "level": "Level of the suite in the suite hierarchy"
176             }
177             "Suite name N": {
178                 "doc": "Suite N documentation",
179                 "parent": "Suite N parent",
180                 "level": "Level of the suite in the suite hierarchy"
181             }
182         }
183         "tests": {
184             "ID": {
185                 "name": "Test name",
186                 "parent": "Name of the parent of the test",
187                 "doc": "Test documentation"
188                 "msg": "Test message"
189                 "tags": ["tag 1", "tag 2", "tag n"],
190                 "conf-history": "DUT1 and DUT2 VAT History"
191                 "show-run": "Show Run"
192                 "status": "PASS" | "FAIL"
193             },
194             "ID" {
195                 # next test
196             }
197         }
198     }
199
200     .. note:: ID is the lowercase full path to the test.
201     """
202
203     # TODO: Remove when definitely no NDRPDRDISC tests are used:
204     REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
205
206     REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
207                                 r'PLRsearch upper bound::?\s(\d+.\d+)')
208
209     REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
210                                    r'NDR_UPPER:\s(\d+.\d+).*\n'
211                                    r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
212                                    r'PDR_UPPER:\s(\d+.\d+)')
213
214     REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
215                                   r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
216
217     REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
218                                  r'[\D\d]*')
219
220     REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
221                                    r"VPP Version:\s*|VPP version:\s*)(.*)")
222
223     REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")
224
225     REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')
226
227     REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
228                            r'tx\s(\d*),\srx\s(\d*)')
229
230     REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
231                             r' in packets per second: \[(.*)\]')
232
233     REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
234     REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')
235
236     REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
237
238     REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
239
240     REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
241
242     REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
243
244     REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
245
246     def __init__(self, metadata, mapping, ignore):
247         """Initialisation.
248
249         :param metadata: Key-value pairs to be included in "metadata" part of
250             JSON structure.
251         :param mapping: Mapping of the old names of test cases to the new
252             (actual) one.
253         :param ignore: List of TCs to be ignored.
254         :type metadata: dict
255         :type mapping: dict
256         :type ignore: list
257         """
258
259         # Type of message to parse out from the test messages
260         self._msg_type = None
261
262         # VPP version
263         self._version = None
264
265         # Timestamp
266         self._timestamp = None
267
268         # Testbed. The testbed is identified by TG node IP address.
269         self._testbed = None
270
271         # Mapping of TCs long names
272         self._mapping = mapping
273
274         # Ignore list
275         self._ignore = ignore
276
277         # Number of VAT History messages found:
278         # 0 - no message
279         # 1 - VAT History of DUT1
280         # 2 - VAT History of DUT2
281         self._lookup_kw_nr = 0
282         self._conf_history_lookup_nr = 0
283
284         # Number of Show Running messages found
285         # 0 - no message
286         # 1 - Show run message found
287         self._show_run_lookup_nr = 0
288
289         # Test ID of currently processed test- the lowercase full path to the
290         # test
291         self._test_ID = None
292
293         # The main data structure
294         self._data = {
295             "metadata": OrderedDict(),
296             "suites": OrderedDict(),
297             "tests": OrderedDict()
298         }
299
300         # Save the provided metadata
301         for key, val in metadata.items():
302             self._data["metadata"][key] = val
303
304         # Dictionary defining the methods used to parse different types of
305         # messages
306         self.parse_msg = {
307             "timestamp": self._get_timestamp,
308             "vpp-version": self._get_vpp_version,
309             "dpdk-version": self._get_dpdk_version,
310             "teardown-vat-history": self._get_vat_history,
311             "teardown-papi-history": self._get_papi_history,
312             "test-show-runtime": self._get_show_run,
313             "testbed": self._get_testbed
314         }
315
316     @property
317     def data(self):
318         """Getter - Data parsed from the XML file.
319
320         :returns: Data parsed from the XML file.
321         :rtype: dict
322         """
323         return self._data
324
325     def _get_testbed(self, msg):
326         """Called when extraction of testbed IP is required.
327         The testbed is identified by TG node IP address.
328
329         :param msg: Message to process.
330         :type msg: Message
331         :returns: Nothing.
332         """
333
334         if msg.message.count("Setup of TG node"):
335             reg_tg_ip = re.compile(
336                 r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
337             try:
338                 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
339             except (KeyError, ValueError, IndexError, AttributeError):
340                 pass
341             finally:
342                 self._data["metadata"]["testbed"] = self._testbed
343                 self._msg_type = None
344
345     def _get_vpp_version(self, msg):
346         """Called when extraction of VPP version is required.
347
348         :param msg: Message to process.
349         :type msg: Message
350         :returns: Nothing.
351         """
352
353         if msg.message.count("return STDOUT Version:") or \
354             msg.message.count("VPP Version:") or \
355             msg.message.count("VPP version:"):
356             self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
357                                 group(2))
358             self._data["metadata"]["version"] = self._version
359             self._msg_type = None
360
361     def _get_dpdk_version(self, msg):
362         """Called when extraction of DPDK version is required.
363
364         :param msg: Message to process.
365         :type msg: Message
366         :returns: Nothing.
367         """
368
369         if msg.message.count("DPDK Version:"):
370             try:
371                 self._version = str(re.search(
372                     self.REGEX_VERSION_DPDK, msg.message). group(2))
373                 self._data["metadata"]["version"] = self._version
374             except IndexError:
375                 pass
376             finally:
377                 self._msg_type = None
378
379     def _get_timestamp(self, msg):
380         """Called when extraction of timestamp is required.
381
382         :param msg: Message to process.
383         :type msg: Message
384         :returns: Nothing.
385         """
386
387         self._timestamp = msg.timestamp[:14]
388         self._data["metadata"]["generated"] = self._timestamp
389         self._msg_type = None
390
391     def _get_vat_history(self, msg):
392         """Called when extraction of VAT command history is required.
393
394         :param msg: Message to process.
395         :type msg: Message
396         :returns: Nothing.
397         """
398         if msg.message.count("VAT command history:"):
399             self._conf_history_lookup_nr += 1
400             if self._conf_history_lookup_nr == 1:
401                 self._data["tests"][self._test_ID]["conf-history"] = str()
402             else:
403                 self._msg_type = None
404             text = re.sub("\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
405                           "VAT command history:", "", msg.message, count=1). \
406                 replace("\n\n", "\n").replace('\n', ' |br| ').\
407                 replace('\r', '').replace('"', "'")
408
409             self._data["tests"][self._test_ID]["conf-history"] += " |br| "
410             self._data["tests"][self._test_ID]["conf-history"] += \
411                 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
412
413     def _get_papi_history(self, msg):
414         """Called when extraction of PAPI command history is required.
415
416         :param msg: Message to process.
417         :type msg: Message
418         :returns: Nothing.
419         """
420         if msg.message.count("PAPI command history:"):
421             self._conf_history_lookup_nr += 1
422             if self._conf_history_lookup_nr == 1:
423                 self._data["tests"][self._test_ID]["conf-history"] = str()
424             else:
425                 self._msg_type = None
426             text = re.sub("\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
427                           "PAPI command history:", "", msg.message, count=1). \
428                 replace("\n\n", "\n").replace('\n', ' |br| ').\
429                 replace('\r', '').replace('"', "'")
430
431             self._data["tests"][self._test_ID]["conf-history"] += " |br| "
432             self._data["tests"][self._test_ID]["conf-history"] += \
433                 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
434
    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        The message carries a JSON payload (a list of per-node runtime
        counters) prefixed by "(<host> - <socket>)" information. The data is
        rendered as one table per VPP thread and appended to the "show-run"
        field of the currently processed test.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # Create the "show-run" entry on the first message of this test.
        if not "show-run" in self._data["tests"][self._test_ID].keys():
            self._data["tests"][self._test_ID]["show-run"] = str()

        if msg.message.count("stats runtime"):
            # Host IP and socket parsed from the "(<ip> - <socket>)" prefix,
            # see REGEX_TC_PAPI_CLI.
            host = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).\
                       group(1))
            socket = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).\
                         group(2))
            # Strip whitespace and normalise quotes (and Python b"/u" string
            # prefixes) so the payload after the first ":" is valid JSON.
            message = str(msg.message).replace(' ', '').replace('\n', '').\
                replace("'", '"').replace('b"', '"').replace('u"', '"').\
                split(":",1)[1]
            runtime = loads(message)
            try:
                # Number of threads is taken from the first item's per-thread
                # "clocks" list.
                threads_nr = len(runtime[0]["clocks"])
            except (IndexError, KeyError):
                # Malformed or empty runtime data - nothing to report.
                return
            tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks",
                       "Vectors/Calls"]
            # One table per thread, each starting with the header row.
            table = [[tbl_hdr, ] for _ in range(threads_nr)]
            for item in runtime:
                for idx in range(threads_nr):
                    name = format(item["name"])
                    calls = format(item["calls"][idx])
                    vectors = format(item["vectors"][idx])
                    suspends = format(item["suspends"][idx])
                    # Clocks are normalised per vector, per call or per
                    # suspend - whichever counter is non-zero, in that order.
                    if item["vectors"][idx] > 0:
                        clocks = format(
                            item["clocks"][idx]/item["vectors"][idx], ".2e")
                    elif item["calls"][idx] > 0:
                        clocks = format(
                            item["clocks"][idx]/item["calls"][idx], ".2e")
                    elif item["suspends"][idx] > 0:
                        clocks = format(
                            item["clocks"][idx]/item["suspends"][idx], ".2e")
                    else:
                        clocks = 0
                    if item["calls"][idx] > 0:
                        vectors_call = format(
                            item["vectors"][idx]/item["calls"][idx], ".2f")
                    else:
                        vectors_call = format(0, ".2f")
                    # Skip rows where all counters are zero.
                    if int(calls) + int(vectors) + int(suspends):
                        table[idx].append([
                            name, calls, vectors, suspends, clocks, vectors_call
                        ])
            text = ""
            for idx in range(threads_nr):
                # Thread 0 is the VPP main thread, the others are workers.
                text += "Thread {idx} ".format(idx=idx)
                text += "vpp_main\n" if idx == 0 else \
                    "vpp_wk_{idx}\n".format(idx=idx-1)
                txt_table = None
                for row in table[idx]:
                    if txt_table is None:
                        # The first row of each table is the header.
                        txt_table = prettytable.PrettyTable(row)
                    else:
                        if any(row[1:]):
                            txt_table.add_row(row)
                txt_table.set_style(prettytable.MSWORD_FRIENDLY)
                txt_table.align["Name"] = "l"
                txt_table.align["Calls"] = "r"
                txt_table.align["Vectors"] = "r"
                txt_table.align["Suspends"] = "r"
                txt_table.align["Clocks"] = "r"
                txt_table.align["Vectors/Calls"] = "r"

                text += txt_table.get_string(sortby="Name") + '\n'
            # Convert newlines to the |br| markup used in the report.
            text = (" \n **DUT: {host}/{socket}** \n {text}".
                    format(host=host, socket=socket, text=text))
            text = text.replace('\n', ' |br| ').replace('\r', '').\
                replace('"', "'")
            self._data["tests"][self._test_ID]["show-run"] += text
514
515     def _get_ndrpdr_throughput(self, msg):
516         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
517         message.
518
519         :param msg: The test message to be parsed.
520         :type msg: str
521         :returns: Parsed data as a dict and the status (PASS/FAIL).
522         :rtype: tuple(dict, str)
523         """
524
525         throughput = {
526             "NDR": {"LOWER": -1.0, "UPPER": -1.0},
527             "PDR": {"LOWER": -1.0, "UPPER": -1.0}
528         }
529         status = "FAIL"
530         groups = re.search(self.REGEX_NDRPDR_RATE, msg)
531
532         if groups is not None:
533             try:
534                 throughput["NDR"]["LOWER"] = float(groups.group(1))
535                 throughput["NDR"]["UPPER"] = float(groups.group(2))
536                 throughput["PDR"]["LOWER"] = float(groups.group(3))
537                 throughput["PDR"]["UPPER"] = float(groups.group(4))
538                 status = "PASS"
539             except (IndexError, ValueError):
540                 pass
541
542         return throughput, status
543
544     def _get_plr_throughput(self, msg):
545         """Get PLRsearch lower bound and PLRsearch upper bound from the test
546         message.
547
548         :param msg: The test message to be parsed.
549         :type msg: str
550         :returns: Parsed data as a dict and the status (PASS/FAIL).
551         :rtype: tuple(dict, str)
552         """
553
554         throughput = {
555             "LOWER": -1.0,
556             "UPPER": -1.0
557         }
558         status = "FAIL"
559         groups = re.search(self.REGEX_PLR_RATE, msg)
560
561         if groups is not None:
562             try:
563                 throughput["LOWER"] = float(groups.group(1))
564                 throughput["UPPER"] = float(groups.group(2))
565                 status = "PASS"
566             except (IndexError, ValueError):
567                 pass
568
569         return throughput, status
570
571     def _get_ndrpdr_latency(self, msg):
572         """Get LATENCY from the test message.
573
574         :param msg: The test message to be parsed.
575         :type msg: str
576         :returns: Parsed data as a dict and the status (PASS/FAIL).
577         :rtype: tuple(dict, str)
578         """
579         latency_default = {"min": -1.0, "avg": -1.0, "max": -1.0, "hdrh": ""}
580         latency = {
581             "NDR": {
582                 "direction1": copy.copy(latency_default),
583                 "direction2": copy.copy(latency_default)
584             },
585             "PDR": {
586                 "direction1": copy.copy(latency_default),
587                 "direction2": copy.copy(latency_default)
588             }
589         }
590         status = "FAIL"
591         groups = re.search(self.REGEX_NDRPDR_LAT, msg)
592
593         def process_latency(in_str):
594             """Return object with parsed latency values.
595
596             TODO: Define class for the return type.
597
598             :param in_str: Input string, min/avg/max/hdrh format.
599             :type in_str: str
600             :returns: Dict with corresponding keys, except hdrh float values.
601             :rtype dict:
602             :throws IndexError: If in_str does not have enough substrings.
603             :throws ValueError: If a substring does not convert to float.
604             """
605             in_list = in_str.split('/')
606
607             rval = {
608                 "min": float(in_list[0]),
609                 "avg": float(in_list[1]),
610                 "max": float(in_list[2]),
611                 "hdrh": ""
612             }
613
614             if len(in_list) == 4:
615                 rval["hdrh"] = str(in_list[3])
616
617             return rval
618
619         if groups is not None:
620             try:
621                 latency["NDR"]["direction1"] = process_latency(groups.group(1))
622                 latency["NDR"]["direction2"] = process_latency(groups.group(2))
623                 latency["PDR"]["direction1"] = process_latency(groups.group(3))
624                 latency["PDR"]["direction2"] = process_latency(groups.group(4))
625                 status = "PASS"
626             except (IndexError, ValueError):
627                 pass
628
629         return latency, status
630
631     def visit_suite(self, suite):
632         """Implements traversing through the suite and its direct children.
633
634         :param suite: Suite to process.
635         :type suite: Suite
636         :returns: Nothing.
637         """
638         if self.start_suite(suite) is not False:
639             suite.suites.visit(self)
640             suite.tests.visit(self)
641             self.end_suite(suite)
642
643     def start_suite(self, suite):
644         """Called when suite starts.
645
646         :param suite: Suite to process.
647         :type suite: Suite
648         :returns: Nothing.
649         """
650
651         try:
652             parent_name = suite.parent.name
653         except AttributeError:
654             return
655
656         doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
657             replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
658         doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
659
660         self._data["suites"][suite.longname.lower().replace('"', "'").
661             replace(" ", "_")] = {
662                 "name": suite.name.lower(),
663                 "doc": doc_str,
664                 "parent": parent_name,
665                 "level": len(suite.longname.split("."))
666             }
667
668         suite.keywords.visit(self)
669
670     def end_suite(self, suite):
671         """Called when suite ends.
672
673         :param suite: Suite to process.
674         :type suite: Suite
675         :returns: Nothing.
676         """
677         pass
678
679     def visit_test(self, test):
680         """Implements traversing through the test.
681
682         :param test: Test to process.
683         :type test: Test
684         :returns: Nothing.
685         """
686         if self.start_test(test) is not False:
687             test.keywords.visit(self)
688             self.end_test(test)
689
690     def start_test(self, test):
691         """Called when test starts.
692
693         :param test: Test to process.
694         :type test: Test
695         :returns: Nothing.
696         """
697
698         longname_orig = test.longname.lower()
699
700         # Check the ignore list
701         if longname_orig in self._ignore:
702             return
703
704         tags = [str(tag) for tag in test.tags]
705         test_result = dict()
706
707         # Change the TC long name and name if defined in the mapping table
708         longname = self._mapping.get(longname_orig, None)
709         if longname is not None:
710             name = longname.split('.')[-1]
711             logging.debug("{0}\n{1}\n{2}\n{3}".format(
712                 self._data["metadata"], longname_orig, longname, name))
713         else:
714             longname = longname_orig
715             name = test.name.lower()
716
717         # Remove TC number from the TC long name (backward compatibility):
718         self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
719         # Remove TC number from the TC name (not needed):
720         test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
721
722         test_result["parent"] = test.parent.name.lower()
723         test_result["tags"] = tags
724         doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
725             replace('\r', '').replace('[', ' |br| [')
726         test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
727         test_result["msg"] = test.message.replace('\n', ' |br| '). \
728             replace('\r', '').replace('"', "'")
729         test_result["type"] = "FUNC"
730         test_result["status"] = test.status
731
732         if "PERFTEST" in tags:
733             # Replace info about cores (e.g. -1c-) with the info about threads
734             # and cores (e.g. -1t1c-) in the long test case names and in the
735             # test case names if necessary.
736             groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
737             if not groups:
738                 tag_count = 0
739                 tag_tc = str()
740                 for tag in test_result["tags"]:
741                     groups = re.search(self.REGEX_TC_TAG, tag)
742                     if groups:
743                         tag_count += 1
744                         tag_tc = tag
745
746                 if tag_count == 1:
747                     self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
748                                            "-{0}-".format(tag_tc.lower()),
749                                            self._test_ID,
750                                            count=1)
751                     test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
752                                                  "-{0}-".format(tag_tc.lower()),
753                                                  test_result["name"],
754                                                  count=1)
755                 else:
756                     test_result["status"] = "FAIL"
757                     self._data["tests"][self._test_ID] = test_result
758                     logging.debug("The test '{0}' has no or more than one "
759                                   "multi-threading tags.".format(self._test_ID))
760                     logging.debug("Tags: {0}".format(test_result["tags"]))
761                     return
762
763         if test.status == "PASS" and ("NDRPDRDISC" in tags or
764                                       "NDRPDR" in tags or
765                                       "SOAK" in tags or
766                                       "TCP" in tags or
767                                       "MRR" in tags or
768                                       "BMRR" in tags or
769                                       "RECONF" in tags):
770             # TODO: Remove when definitely no NDRPDRDISC tests are used:
771             if "NDRDISC" in tags:
772                 test_result["type"] = "NDR"
773             # TODO: Remove when definitely no NDRPDRDISC tests are used:
774             elif "PDRDISC" in tags:
775                 test_result["type"] = "PDR"
776             elif "NDRPDR" in tags:
777                 test_result["type"] = "NDRPDR"
778             elif "SOAK" in tags:
779                 test_result["type"] = "SOAK"
780             elif "TCP" in tags:
781                 test_result["type"] = "TCP"
782             elif "MRR" in tags:
783                 test_result["type"] = "MRR"
784             elif "FRMOBL" in tags or "BMRR" in tags:
785                 test_result["type"] = "BMRR"
786             elif "RECONF" in tags:
787                 test_result["type"] = "RECONF"
788             else:
789                 test_result["status"] = "FAIL"
790                 self._data["tests"][self._test_ID] = test_result
791                 return
792
793             # TODO: Remove when definitely no NDRPDRDISC tests are used:
794             if test_result["type"] in ("NDR", "PDR"):
795                 try:
796                     rate_value = str(re.search(
797                         self.REGEX_RATE, test.message).group(1))
798                 except AttributeError:
799                     rate_value = "-1"
800                 try:
801                     rate_unit = str(re.search(
802                         self.REGEX_RATE, test.message).group(2))
803                 except AttributeError:
804                     rate_unit = "-1"
805
806                 test_result["throughput"] = dict()
807                 test_result["throughput"]["value"] = \
808                     int(rate_value.split('.')[0])
809                 test_result["throughput"]["unit"] = rate_unit
810                 test_result["latency"] = \
811                     self._get_latency(test.message, test_result["type"])
812                 if test_result["type"] == "PDR":
813                     test_result["lossTolerance"] = str(re.search(
814                         self.REGEX_TOLERANCE, test.message).group(1))
815
816             elif test_result["type"] in ("NDRPDR", ):
817                 test_result["throughput"], test_result["status"] = \
818                     self._get_ndrpdr_throughput(test.message)
819                 test_result["latency"], test_result["status"] = \
820                     self._get_ndrpdr_latency(test.message)
821
822             elif test_result["type"] in ("SOAK", ):
823                 test_result["throughput"], test_result["status"] = \
824                     self._get_plr_throughput(test.message)
825
826             elif test_result["type"] in ("TCP", ):
827                 groups = re.search(self.REGEX_TCP, test.message)
828                 test_result["result"] = int(groups.group(2))
829
830             elif test_result["type"] in ("MRR", "BMRR"):
831                 test_result["result"] = dict()
832                 groups = re.search(self.REGEX_BMRR, test.message)
833                 if groups is not None:
834                     items_str = groups.group(1)
835                     items_float = [float(item.strip()) for item
836                                    in items_str.split(",")]
837                     # Use whole list in CSIT-1180.
838                     stats = jumpavg.AvgStdevStats.for_runs(items_float)
839                     test_result["result"]["receive-rate"] = stats.avg
840                 else:
841                     groups = re.search(self.REGEX_MRR, test.message)
842                     test_result["result"]["receive-rate"] = \
843                         float(groups.group(3)) / float(groups.group(1))
844
845             elif test_result["type"] == "RECONF":
846                 test_result["result"] = None
847                 try:
848                     grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
849                     grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
850                     test_result["result"] = {
851                         "loss": int(grps_loss.group(1)),
852                         "time": float(grps_time.group(1))
853                     }
854                 except (AttributeError, IndexError, ValueError, TypeError):
855                     test_result["status"] = "FAIL"
856
857         self._data["tests"][self._test_ID] = test_result
858
859     def end_test(self, test):
860         """Called when test ends.
861
862         :param test: Test to process.
863         :type test: Test
864         :returns: Nothing.
865         """
866         pass
867
868     def visit_keyword(self, keyword):
869         """Implements traversing through the keyword and its child keywords.
870
871         :param keyword: Keyword to process.
872         :type keyword: Keyword
873         :returns: Nothing.
874         """
875         if self.start_keyword(keyword) is not False:
876             self.end_keyword(keyword)
877
878     def start_keyword(self, keyword):
879         """Called when keyword starts. Default implementation does nothing.
880
881         :param keyword: Keyword to process.
882         :type keyword: Keyword
883         :returns: Nothing.
884         """
885         try:
886             if keyword.type == "setup":
887                 self.visit_setup_kw(keyword)
888             elif keyword.type == "teardown":
889                 self._lookup_kw_nr = 0
890                 self.visit_teardown_kw(keyword)
891             else:
892                 self._lookup_kw_nr = 0
893                 self.visit_test_kw(keyword)
894         except AttributeError:
895             pass
896
897     def end_keyword(self, keyword):
898         """Called when keyword ends. Default implementation does nothing.
899
900         :param keyword: Keyword to process.
901         :type keyword: Keyword
902         :returns: Nothing.
903         """
904         pass
905
906     def visit_test_kw(self, test_kw):
907         """Implements traversing through the test keyword and its child
908         keywords.
909
910         :param test_kw: Keyword to process.
911         :type test_kw: Keyword
912         :returns: Nothing.
913         """
914         for keyword in test_kw.keywords:
915             if self.start_test_kw(keyword) is not False:
916                 self.visit_test_kw(keyword)
917                 self.end_test_kw(keyword)
918
919     def start_test_kw(self, test_kw):
920         """Called when test keyword starts. Default implementation does
921         nothing.
922
923         :param test_kw: Keyword to process.
924         :type test_kw: Keyword
925         :returns: Nothing.
926         """
927         if test_kw.name.count("Show Runtime Counters On All Duts"):
928             self._lookup_kw_nr += 1
929             self._show_run_lookup_nr = 0
930             self._msg_type = "test-show-runtime"
931         elif test_kw.name.count("Install Dpdk Test") and not self._version:
932             self._msg_type = "dpdk-version"
933         else:
934             return
935         test_kw.messages.visit(self)
936
937     def end_test_kw(self, test_kw):
938         """Called when keyword ends. Default implementation does nothing.
939
940         :param test_kw: Keyword to process.
941         :type test_kw: Keyword
942         :returns: Nothing.
943         """
944         pass
945
946     def visit_setup_kw(self, setup_kw):
947         """Implements traversing through the teardown keyword and its child
948         keywords.
949
950         :param setup_kw: Keyword to process.
951         :type setup_kw: Keyword
952         :returns: Nothing.
953         """
954         for keyword in setup_kw.keywords:
955             if self.start_setup_kw(keyword) is not False:
956                 self.visit_setup_kw(keyword)
957                 self.end_setup_kw(keyword)
958
959     def start_setup_kw(self, setup_kw):
960         """Called when teardown keyword starts. Default implementation does
961         nothing.
962
963         :param setup_kw: Keyword to process.
964         :type setup_kw: Keyword
965         :returns: Nothing.
966         """
967         if setup_kw.name.count("Show Vpp Version On All Duts") \
968                 and not self._version:
969             self._msg_type = "vpp-version"
970         elif setup_kw.name.count("Set Global Variable") \
971                 and not self._timestamp:
972             self._msg_type = "timestamp"
973         elif setup_kw.name.count("Setup Framework") and not self._testbed:
974             self._msg_type = "testbed"
975         else:
976             return
977         setup_kw.messages.visit(self)
978
979     def end_setup_kw(self, setup_kw):
980         """Called when keyword ends. Default implementation does nothing.
981
982         :param setup_kw: Keyword to process.
983         :type setup_kw: Keyword
984         :returns: Nothing.
985         """
986         pass
987
988     def visit_teardown_kw(self, teardown_kw):
989         """Implements traversing through the teardown keyword and its child
990         keywords.
991
992         :param teardown_kw: Keyword to process.
993         :type teardown_kw: Keyword
994         :returns: Nothing.
995         """
996         for keyword in teardown_kw.keywords:
997             if self.start_teardown_kw(keyword) is not False:
998                 self.visit_teardown_kw(keyword)
999                 self.end_teardown_kw(keyword)
1000
1001     def start_teardown_kw(self, teardown_kw):
1002         """Called when teardown keyword starts. Default implementation does
1003         nothing.
1004
1005         :param teardown_kw: Keyword to process.
1006         :type teardown_kw: Keyword
1007         :returns: Nothing.
1008         """
1009
1010         if teardown_kw.name.count("Show Vat History On All Duts"):
1011             self._conf_history_lookup_nr = 0
1012             self._msg_type = "teardown-vat-history"
1013             teardown_kw.messages.visit(self)
1014         elif teardown_kw.name.count("Show Papi History On All Duts"):
1015             self._conf_history_lookup_nr = 0
1016             self._msg_type = "teardown-papi-history"
1017             teardown_kw.messages.visit(self)
1018
1019     def end_teardown_kw(self, teardown_kw):
1020         """Called when keyword ends. Default implementation does nothing.
1021
1022         :param teardown_kw: Keyword to process.
1023         :type teardown_kw: Keyword
1024         :returns: Nothing.
1025         """
1026         pass
1027
1028     def visit_message(self, msg):
1029         """Implements visiting the message.
1030
1031         :param msg: Message to process.
1032         :type msg: Message
1033         :returns: Nothing.
1034         """
1035         if self.start_message(msg) is not False:
1036             self.end_message(msg)
1037
1038     def start_message(self, msg):
1039         """Called when message starts. Get required information from messages:
1040         - VPP version.
1041
1042         :param msg: Message to process.
1043         :type msg: Message
1044         :returns: Nothing.
1045         """
1046
1047         if self._msg_type:
1048             self.parse_msg[self._msg_type](msg)
1049
1050     def end_message(self, msg):
1051         """Called when message ends. Default implementation does nothing.
1052
1053         :param msg: Message to process.
1054         :type msg: Message
1055         :returns: Nothing.
1056         """
1057         pass
1058
1059
class InputData:
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """

    def __init__(self, spec):
        """Initialization.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification (parsed specification file):
        self._cfg = spec

        # Data store: nested pandas.Series keyed by job name, then by
        # str(build number) -- see download_and_parse_data().
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data
        :rtype: pandas.Series
        """
        return self._input_data
1098
1099     def metadata(self, job, build):
1100         """Getter - metadata
1101
1102         :param job: Job which metadata we want.
1103         :param build: Build which metadata we want.
1104         :type job: str
1105         :type build: str
1106         :returns: Metadata
1107         :rtype: pandas.Series
1108         """
1109
1110         return self.data[job][build]["metadata"]
1111
1112     def suites(self, job, build):
1113         """Getter - suites
1114
1115         :param job: Job which suites we want.
1116         :param build: Build which suites we want.
1117         :type job: str
1118         :type build: str
1119         :returns: Suites.
1120         :rtype: pandas.Series
1121         """
1122
1123         return self.data[job][str(build)]["suites"]
1124
1125     def tests(self, job, build):
1126         """Getter - tests
1127
1128         :param job: Job which tests we want.
1129         :param build: Build which tests we want.
1130         :type job: str
1131         :type build: str
1132         :returns: Tests.
1133         :rtype: pandas.Series
1134         """
1135
1136         return self.data[job][build]["tests"]
1137
1138     def _parse_tests(self, job, build, log):
1139         """Process data from robot output.xml file and return JSON structured
1140         data.
1141
1142         :param job: The name of job which build output data will be processed.
1143         :param build: The build which output data will be processed.
1144         :param log: List of log messages.
1145         :type job: str
1146         :type build: dict
1147         :type log: list of tuples (severity, msg)
1148         :returns: JSON data structure.
1149         :rtype: dict
1150         """
1151
1152         metadata = {
1153             "job": job,
1154             "build": build
1155         }
1156
1157         with open(build["file-name"], 'r') as data_file:
1158             try:
1159                 result = ExecutionResult(data_file)
1160             except errors.DataError as err:
1161                 log.append(("ERROR", "Error occurred while parsing output.xml: "
1162                                      "{0}".format(err)))
1163                 return None
1164         checker = ExecutionChecker(metadata, self._cfg.mapping,
1165                                    self._cfg.ignore)
1166         result.visit(checker)
1167
1168         return checker.data
1169
    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :param pid: PID of the process executing this method.
        :type job: str
        :type build: dict
        :type repeat: int
        :type pid: int
        :returns: Dictionary with the parsed data ("data", None on failure),
            the processing state ("state": "failed" / "processed" / "removed")
            and the processed job/build ("job", "build").
        :rtype: dict
        """

        # Log messages are buffered and flushed at the end, presumably so
        # that output stays contiguous when builds are processed in parallel
        # (the pid parameter hints at multi-process use) -- TODO confirm.
        logs = list()

        logs.append(("INFO", "  Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        state = "failed"
        success = False
        data = None
        # Retry the download up to `repeat` times.
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                                   logs)
            if success:
                break
            do_repeat -= 1
        if not success:
            logs.append(("ERROR", "It is not possible to download the input "
                                  "data file from the job '{job}', build "
                                  "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))
        if success:
            logs.append(("INFO", "    Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = self._parse_tests(job, build, logs)
            if data is None:
                logs.append(("ERROR", "Input data file from the job '{job}', "
                                      "build '{build}' is damaged. Skipped.".
                             format(job=job, build=build["build"])))
            else:
                state = "processed"

            # The downloaded file is removed whether or not parsing succeeded.
            try:
                remove(build["file-name"])
            except OSError as err:
                logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                             format(build["file-name"], repr(err))))

        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get("time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get("metadata", None)
            if metadata:
                generated = metadata.get("generated", None)
                if generated:
                    generated = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = "removed"
                        data = None
                        logs.append(
                            ("INFO",
                             "    The build {job}/{build} is outdated, will be "
                             "removed".format(job=job, build=build["build"])))
        logs.append(("INFO", "  Done."))

        # Flush the buffered log messages at their recorded severity.
        for level, line in logs:
            if level == "INFO":
                logging.info(line)
            elif level == "ERROR":
                logging.error(line)
            elif level == "DEBUG":
                logging.debug(line)
            elif level == "CRITICAL":
                logging.critical(line)
            elif level == "WARNING":
                logging.warning(line)

        return {"data": data, "state": state, "job": job, "build": build}
1257
    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """

        logging.info("Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result["build"]["build"]

                if result["data"]:
                    data = result["data"]
                    build_data = pd.Series({
                        "metadata": pd.Series(
                            data["metadata"].values(),
                            index=data["metadata"].keys()),
                        "suites": pd.Series(data["suites"].values(),
                                            index=data["suites"].keys()),
                        "tests": pd.Series(data["tests"].values(),
                                           index=data["tests"].keys())})

                    # Builds are stored under str(build_nr); the getters
                    # (metadata/suites/tests) must use the same key form.
                    if self._input_data.get(job, None) is None:
                        self._input_data[job] = pd.Series()
                    self._input_data[job][str(build_nr)] = build_data

                    self._cfg.set_input_file_name(
                        job, build_nr, result["build"]["file-name"])

                self._cfg.set_input_state(job, build_nr, result["state"])

                # NOTE(review): ru_maxrss is reported in kilobytes on Linux,
                # so /1000 approximates megabytes -- confirm for this platform.
                logging.info("Memory allocation: {0:,d}MB".format(
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))

        logging.info("Done.")
1299
1300     @staticmethod
1301     def _end_of_tag(tag_filter, start=0, closer="'"):
1302         """Return the index of character in the string which is the end of tag.
1303
1304         :param tag_filter: The string where the end of tag is being searched.
1305         :param start: The index where the searching is stated.
1306         :param closer: The character which is the tag closer.
1307         :type tag_filter: str
1308         :type start: int
1309         :type closer: str
1310         :returns: The index of the tag closer.
1311         :rtype: int
1312         """
1313
1314         try:
1315             idx_opener = tag_filter.index(closer, start)
1316             return tag_filter.index(closer, idx_opener + 1)
1317         except ValueError:
1318             return None
1319
1320     @staticmethod
1321     def _condition(tag_filter):
1322         """Create a conditional statement from the given tag filter.
1323
1324         :param tag_filter: Filter based on tags from the element specification.
1325         :type tag_filter: str
1326         :returns: Conditional statement which can be evaluated.
1327         :rtype: str
1328         """
1329
1330         index = 0
1331         while True:
1332             index = InputData._end_of_tag(tag_filter, index)
1333             if index is None:
1334                 return tag_filter
1335             index += 1
1336             tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
1337
    def filter_data(self, element, params=None, data=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data: If not None, this data is used instead of data specified
            in the element.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """

        # Build the eval()-able condition from the element's tag filter;
        # "all" and "template" filters accept every item.
        try:
            if element["filter"] in ("all", "template"):
                cond = "True"
            else:
                cond = InputData._condition(element["filter"])
            logging.debug("   Filter: {0}".format(cond))
        except KeyError:
            logging.error("  No filter defined.")
            return None

        # When the caller gives no explicit parameter list, take it from the
        # element; "type" is always added because later processing needs it.
        if params is None:
            params = element.get("parameters", None)
            if params:
                params.append("type")

        data_to_filter = data if data else element["data"]
        data = pd.Series()
        try:
            for job, builds in data_to_filter.items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_iter = self.data[job][str(build)][data_set].\
                            iteritems()
                    except KeyError:
                        # Missing job/build: either skip it or abort the whole
                        # filtering, depending on continue_on_error.
                        if continue_on_error:
                            continue
                        else:
                            return None
                    for test_ID, test_data in data_iter:
                        # NOTE(review): cond is eval()'d; it comes from the
                        # specification file, presumably trusted input --
                        # do not feed it user-supplied filters.
                        if eval(cond, {"tags": test_data.get("tags", "")}):
                            data[job][str(build)][test_ID] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_ID][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_ID][param] =\
                                            test_data[param]
                                    except KeyError:
                                        # Keep the key with a placeholder so
                                        # consumers see a uniform structure.
                                        data[job][str(build)][test_ID][param] =\
                                            "No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error("   Missing mandatory parameter in the element "
                          "specification: {0}".format(err))
            return None
        except AttributeError:
            return None
        except SyntaxError:
            # Raised by eval() when the generated condition is malformed.
            logging.error("   The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
            return None
1433
    def filter_tests_by_name(self, element, params=None, data_set="tests",
                             continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The tests are selected by matching each item ID against the regular
        expressions listed in the element's "include" list (case-insensitive).

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
        all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
        metadata.
        :param continue_on_error: Continue if there is error while reading the
        data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """

        include = element.get("include", None)
        if not include:
            logging.warning("No tests to include, skipping the element.")
            return None

        # "type" is always added because later processing needs it.
        if params is None:
            params = element.get("parameters", None)
            if params:
                params.append("type")

        data = pd.Series()
        try:
            for job, builds in element["data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    for test in include:
                        try:
                            # Each "include" entry is treated as a regex
                            # matched against the lower-cased item ID.
                            reg_ex = re.compile(str(test).lower())
                            for test_ID in self.data[job][str(build)]\
                                    [data_set].keys():
                                if re.match(reg_ex, str(test_ID).lower()):
                                    test_data = self.data[job][str(build)]\
                                        [data_set][test_ID]
                                    data[job][str(build)][test_ID] = pd.Series()
                                    if params is None:
                                        for param, val in test_data.items():
                                            data[job][str(build)][test_ID]\
                                                [param] = val
                                    else:
                                        for param in params:
                                            try:
                                                data[job][str(build)][test_ID]\
                                                    [param] = test_data[param]
                                            except KeyError:
                                                # Placeholder keeps a uniform
                                                # structure for consumers.
                                                data[job][str(build)][test_ID]\
                                                    [param] = "No Data"
                        except KeyError as err:
                            logging.error("{err!r}".format(err=err))
                            if continue_on_error:
                                continue
                            else:
                                return None
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error("Missing mandatory parameter in the element "
                          "specification: {err!r}".format(err=err))
            return None
        except AttributeError as err:
            logging.error("{err!r}".format(err=err))
            return None
1522
1523
1524     @staticmethod
1525     def merge_data(data):
1526         """Merge data from more jobs and builds to a simple data structure.
1527
1528         The output data structure is:
1529
1530         - test (suite) 1 ID:
1531           - param 1
1532           - param 2
1533           ...
1534           - param n
1535         ...
1536         - test (suite) n ID:
1537         ...
1538
1539         :param data: Data to merge.
1540         :type data: pandas.Series
1541         :returns: Merged data.
1542         :rtype: pandas.Series
1543         """
1544
1545         logging.info("    Merging data ...")
1546
1547         merged_data = pd.Series()
1548         for _, builds in data.iteritems():
1549             for _, item in builds.iteritems():
1550                 for ID, item_data in item.iteritems():
1551                     merged_data[ID] = item_data
1552
1553         return merged_data