# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Data pre-processing

- extract data from output.xml files generated by Jenkins jobs and store it
  in pandas' Series,
- filter the data using tags,
- provide access to the data.
"""

import re
import resource
import pandas as pd
import logging
import prettytable

from robot.api import ExecutionResult, ResultVisitor
from robot import errors
from collections import OrderedDict
from string import replace
from os import remove
from os.path import join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory

from input_data_files import download_and_unzip_data_file


# Separator used in file names
SEPARATOR = "__"
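# For illustration, a downloaded input file is stored under a name of the
# shape "{job}{SEPARATOR}{build}{SEPARATOR}{file-name}" (see
# _download_and_parse_build() below); the concrete job/build/file names
# come from the specification, not from this module.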


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float
                        }
                    }
                }
            },

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            },

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": AvgStdevMetadata,
                }
            },

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            # NDRPDRDISC tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "PDR" | "NDR",
                "status": "PASS" | "FAIL",
                "throughput": {  # Only type: "PDR" | "NDR"
                    "value": int,
                    "unit": "pps" | "bps" | "percentage"
                },
                "latency": {  # Only type: "PDR" | "NDR"
                    "direction1": {
                        "100": {
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "50": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "10": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        }
                    },
                    "direction2": {
                        "100": {
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "50": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "10": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        }
                    }
                },
                "lossTolerance": "lossTolerance",  # Only type: "PDR"
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run"
            },
            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
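
    For illustration, an ID may look like
    "tests.vpp.perf.ip4.ethip4-ip4base-ndrpdr" (the path components here
    are made up, not taken from a real run).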
    """

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::\s(\d+.\d+)')

    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')
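    # A hedged sketch of a message matched by REGEX_NDRPDR_RATE (the
    # numbers and any trailing text are illustrative only):
    #   NDR_LOWER: 12345678.5 pps
    #   <one arbitrary line>
    #   NDR_UPPER: 12345900.5 pps
    #   PDR_LOWER: 13345678.5 pps
    #   <one arbitrary line>
    #   PDR_UPPER: 13345900.5 pps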

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')

    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')

    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"DPDK Version: (\d*.\d*)")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')
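    # REGEX_BMRR captures the comma-separated list of trial results from a
    # message such as (values are illustrative only):
    #   Maximum Receive Rate trial results in packets per second:
    #   [10000000.0, 10100000.0, 9950000.0]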

    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')

    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TC long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        # Number of "Show Runtime" keywords found:
        # 0 - no keyword
        # 1 - "Show Runtime" of DUT1
        # 2 - "Show Runtime" of DUT2
        self._lookup_kw_nr = 0
        # Number of VAT/PAPI History messages found:
        # 0 - no message
        # 1 - history of DUT1
        # 2 - history of DUT2
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 0 - no message
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of currently processed test - the lowercase full path to
        # the test
        self._test_ID = None

        # The main data structure
        self._data = {
            "metadata": OrderedDict(),
            "suites": OrderedDict(),
            "tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data["metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            "timestamp": self._get_timestamp,
            "vpp-version": self._get_vpp_version,
            "dpdk-version": self._get_dpdk_version,
            "teardown-vat-history": self._get_vat_history,
            "teardown-papi-history": self._get_papi_history,
            "test-show-runtime": self._get_show_run,
            "testbed": self._get_testbed
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("Setup of TG node"):
            reg_tg_ip = re.compile(
                r'Setup of TG node (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) done')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data["metadata"]["testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT Version:") or \
            msg.message.count("VPP Version:") or \
            msg.message.count("VPP version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data["metadata"]["version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(1))
                self._data["metadata"]["version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        self._timestamp = msg.timestamp[:14]
        self._data["metadata"]["generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} "
                          r"VAT command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} "
                          r"PAPI command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
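        # A hedged sketch of the expected message (the key set is taken from
        # the code below, values are illustrative): a "Runtime:" prefix
        # followed by a JSON list with one item per graph node, where each
        # per-thread metric is a list indexed by thread number, e.g.
        #   Runtime: [{"name": "ip4-input", "calls": [...], "vectors": [...],
        #              "suspends": [...], "clocks": [...]}, ...]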
        if msg.message.count("Runtime:"):
            self._show_run_lookup_nr += 1
            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
                self._data["tests"][self._test_ID]["show-run"] = str()
            if self._lookup_kw_nr > 1:
                self._msg_type = None
            if self._show_run_lookup_nr == 1:
                message = str(msg.message).replace(' ', '').replace('\n', '').\
                    replace("'", '"').replace('b"', '"').replace('u"', '"')[8:]
                runtime = loads(message)
                try:
                    threads_nr = len(runtime[0]["clocks"])
                except (IndexError, KeyError):
                    return
                tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks"]
                table = [[tbl_hdr, ] for _ in range(threads_nr)]
                for item in runtime:
                    for idx in range(threads_nr):
                        table[idx].append([
                            item["name"],
                            item["calls"][idx],
                            item["vectors"][idx],
                            item["suspends"][idx],
                            item["clocks"][idx]
                        ])
                text = ""
                for idx in range(threads_nr):
                    text += "Thread {idx} ".format(idx=idx)
                    text += "vpp_main\n" if idx == 0 else \
                        "vpp_wk_{idx}\n".format(idx=idx-1)
                    txt_table = None
                    for row in table[idx]:
                        if txt_table is None:
                            txt_table = prettytable.PrettyTable(row)
                        else:
                            if any(row[1:]):
                                txt_table.add_row(row)
                    txt_table.align["Name"] = "l"
                    txt_table.align["Calls"] = "r"
                    txt_table.align["Vectors"] = "r"
                    txt_table.align["Suspends"] = "r"
                    txt_table.align["Clocks"] = "r"

                    text += txt_table.get_html_string(sortby="Name") + '\n'

                text = text.replace('\n', '').replace('\r', '')
                try:
                    self._data["tests"][self._test_ID]["show-run"] += " |br| "
                    self._data["tests"][self._test_ID]["show-run"] += \
                        "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
                except KeyError:
                    pass

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    def _get_latency(self, msg, test_type):
        """Get the latency data from the test message.

        :param msg: Message to be parsed.
        :param test_type: Type of the test - NDR or PDR.
        :type msg: str
        :type test_type: str
        :returns: Latencies parsed from the message.
        :rtype: dict
        """

        if test_type == "NDR":
            groups = re.search(self.REGEX_LAT_NDR, msg)
            groups_range = range(1, 7)
        elif test_type == "PDR":
            groups = re.search(self.REGEX_LAT_PDR, msg)
            groups_range = range(1, 3)
        else:
            return {}

        latencies = list()
        for idx in groups_range:
            try:
                lat = [int(item) for item in str(groups.group(idx)).split('/')]
            except (AttributeError, ValueError):
                lat = [-1, -1, -1]
            latencies.append(lat)

        keys = ("min", "avg", "max")
        latency = {
            "direction1": {
            },
            "direction2": {
            }
        }

        latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
        latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
        if test_type == "NDR":
            latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
            latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
            latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
            latency["direction2"]["10"] = dict(zip(keys, latencies[5]))

        return latency

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            "NDR": {"LOWER": -1.0, "UPPER": -1.0},
            "PDR": {"LOWER": -1.0, "UPPER": -1.0}
        }
        status = "FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput["NDR"]["LOWER"] = float(groups.group(1))
                throughput["NDR"]["UPPER"] = float(groups.group(2))
                throughput["PDR"]["LOWER"] = float(groups.group(3))
                throughput["PDR"]["UPPER"] = float(groups.group(4))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            "LOWER": -1.0,
            "UPPER": -1.0
        }
        status = "FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput["LOWER"] = float(groups.group(1))
                throughput["UPPER"] = float(groups.group(2))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        latency = {
            "NDR": {
                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            },
            "PDR": {
                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            }
        }
        status = "FAIL"
        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        if groups is not None:
            keys = ("min", "avg", "max")
            try:
                latency["NDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(1).split('/')]))
                latency["NDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(2).split('/')]))
                latency["PDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(3).split('/')]))
                latency["PDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(4).split('/')]))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return latency, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
            replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
        doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)

        self._data["suites"][suite.longname.lower().replace('"', "'").
            replace(" ", "_")] = {
                "name": suite.name.lower(),
                "doc": doc_str,
                "parent": parent_name,
                "level": len(suite.longname.split("."))
            }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        pass

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split('.')[-1]
            logging.debug("{0}\n{1}\n{2}\n{3}".format(
                self._data["metadata"], longname_orig, longname, name))
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
        # Remove TC number from the TC name (not needed):
        test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result["parent"] = test.parent.name.lower()
        test_result["tags"] = tags
        doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
            replace('\r', '').replace('[', ' |br| [')
        test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
        test_result["msg"] = test.message.replace('\n', ' |br| '). \
            replace('\r', '').replace('"', "'")
        test_result["type"] = "FUNC"
        test_result["status"] = test.status

        if "PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
            if not groups:
                tag_count = 0
                for tag in test_result["tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
                                           "-{0}-".format(tag_tc.lower()),
                                           self._test_ID,
                                           count=1)
                    test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                 "-{0}-".format(tag_tc.lower()),
                                                 test_result["name"],
                                                 count=1)
                else:
                    test_result["status"] = "FAIL"
                    self._data["tests"][self._test_ID] = test_result
                    logging.debug("The test '{0}' has none or more than one "
                                  "multi-threading tag.".format(self._test_ID))
                    logging.debug("Tags: {0}".format(test_result["tags"]))
                    return

        if test.status == "PASS" and ("NDRPDRDISC" in tags or
                                      "NDRPDR" in tags or
                                      "SOAK" in tags or
                                      "TCP" in tags or
                                      "MRR" in tags or
                                      "BMRR" in tags):
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if "NDRDISC" in tags:
                test_result["type"] = "NDR"
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            elif "PDRDISC" in tags:
                test_result["type"] = "PDR"
            elif "NDRPDR" in tags:
                test_result["type"] = "NDRPDR"
            elif "SOAK" in tags:
                test_result["type"] = "SOAK"
            elif "TCP" in tags:
                test_result["type"] = "TCP"
            elif "MRR" in tags:
                test_result["type"] = "MRR"
            elif "FRMOBL" in tags or "BMRR" in tags:
                test_result["type"] = "BMRR"
            else:
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result
                return

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if test_result["type"] in ("NDR", "PDR"):
                try:
                    rate_value = str(re.search(
                        self.REGEX_RATE, test.message).group(1))
                except AttributeError:
                    rate_value = "-1"
                try:
                    rate_unit = str(re.search(
                        self.REGEX_RATE, test.message).group(2))
                except AttributeError:
                    rate_unit = "-1"

                test_result["throughput"] = dict()
                test_result["throughput"]["value"] = \
                    int(rate_value.split('.')[0])
                test_result["throughput"]["unit"] = rate_unit
                test_result["latency"] = \
                    self._get_latency(test.message, test_result["type"])
                if test_result["type"] == "PDR":
                    test_result["lossTolerance"] = str(re.search(
                        self.REGEX_TOLERANCE, test.message).group(1))

            elif test_result["type"] in ("NDRPDR", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result["latency"], test_result["status"] = \
                    self._get_ndrpdr_latency(test.message)

            elif test_result["type"] in ("SOAK", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_plr_throughput(test.message)

            elif test_result["type"] in ("TCP", ):
                groups = re.search(self.REGEX_TCP, test.message)
                test_result["result"] = int(groups.group(2))

            elif test_result["type"] in ("MRR", "BMRR"):
                test_result["result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    metadata = AvgStdevMetadataFactory.from_data(items_float)
                    # Next two lines have been introduced in CSIT-1179,
                    # to be removed in CSIT-1180.
                    metadata.size = 1
                    metadata.stdev = 0.0
                    test_result["result"]["receive-rate"] = metadata
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result["result"]["receive-rate"] = \
                        AvgStdevMetadataFactory.from_data([
                            float(groups.group(3)) / float(groups.group(1)), ])

        self._data["tests"][self._test_ID] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        pass

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == "setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == "teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
            else:
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        pass

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count("Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = "test-show-runtime"
        elif test_kw.name.count("Install Dpdk Test") and not self._version:
            self._msg_type = "dpdk-version"
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count("Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = "vpp-version"
        elif setup_kw.name.count("Set Global Variable") \
                and not self._timestamp:
            self._msg_type = "timestamp"
        elif setup_kw.name.count("Setup Framework") and not self._testbed:
            self._msg_type = "testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts. Default implementation does
        nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count("Show Vat History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count("Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Parses the message using the method
        registered in self.parse_msg for the current message type (e.g. to
        get the VPP version).

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        pass


class InputData(object):
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """

    def __init__(self, spec):
        """Initialization.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data.
        :rtype: pandas.Series
        """
        return self._input_data

    def metadata(self, job, build):
        """Getter - metadata.

        :param job: Job which metadata we want.
        :param build: Build which metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata.
        :rtype: pandas.Series
        """

        return self.data[job][build]["metadata"]

    def suites(self, job, build):
        """Getter - suites.

        :param job: Job which suites we want.
        :param build: Build which suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)]["suites"]

    def tests(self, job, build):
        """Getter - tests.

        :param job: Job which tests we want.
        :param build: Build which tests we want.
        :type job: str
        :type build: str
        :returns: Tests.
        :rtype: pandas.Series
        """

        return self.data[job][build]["tests"]

    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of the job whose build output data will be
            processed.
        :param build: The build whose output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure, or None if the output.xml file cannot
            be parsed.
        :rtype: dict
        """

        metadata = {
            "job": job,
            "build": build
        }

        with open(build["file-name"], 'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(("ERROR", "Error occurred while parsing output.xml: "
                                     "{0}".format(err)))
                return None
        checker = ExecutionChecker(metadata, self._cfg.mapping,
                                   self._cfg.ignore)
        result.visit(checker)

        return checker.data

    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :param pid: PID of the process executing this method.
        :type job: str
        :type build: dict
        :type repeat: int
        :type pid: int
        """

        logs = list()

        logs.append(("INFO", "  Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        state = "failed"
        success = False
        data = None
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                                   logs)
            if success:
                break
            do_repeat -= 1
        if not success:
            logs.append(("ERROR", "It is not possible to download the input "
                                  "data file from the job '{job}', build "
                                  "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))
        if success:
            logs.append(("INFO", "    Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = self._parse_tests(job, build, logs)
            if data is None:
                logs.append(("ERROR", "Input data file from the job '{job}', "
                                      "build '{build}' is damaged. Skipped.".
                             format(job=job, build=build["build"])))
            else:
                state = "processed"

            try:
                remove(build["file-name"])
            except OSError as err:
                logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                             format(build["file-name"], repr(err))))

        # If the time period is defined in the specification file, remove the
        # data and the file of any build which is outside of it.
        timeperiod = self._cfg.input.get("time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get("metadata", None)
            if metadata:
                generated = metadata.get("generated", None)
                if generated:
                    generated = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = "removed"
                        data = None
                        logs.append(
                            ("INFO",
                             "    The build {job}/{build} is outdated, will be "
                             "removed".format(job=job, build=build["build"])))
                        file_name = self._cfg.input["file-name"]
                        full_name = join(
                            self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
                            "{job}{sep}{build}{sep}{name}".format(
                                job=job,
                                sep=SEPARATOR,
                                build=build["build"],
                                name=file_name))
                        try:
                            remove(full_name)
                            logs.append(("INFO",
                                         "    The file {name} has been removed".
                                         format(name=full_name)))
                        except OSError as err:
                            logs.append(("ERROR",
                                         "Cannot remove the file '{0}': {1}".
                                         format(full_name, repr(err))))
        logs.append(("INFO", "  Done."))

        for level, line in logs:
            if level == "INFO":
                logging.info(line)
            elif level == "ERROR":
                logging.error(line)
            elif level == "DEBUG":
                logging.debug(line)
            elif level == "CRITICAL":
                logging.critical(line)
            elif level == "WARNING":
                logging.warning(line)

        return {"data": data, "state": state, "job": job, "build": build}

    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """

        logging.info("Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result["build"]["build"]

                if result["data"]:
                    data = result["data"]
                    build_data = pd.Series({
                        "metadata": pd.Series(
                            data["metadata"].values(),
                            index=data["metadata"].keys()),
                        "suites": pd.Series(data["suites"].values(),
                                            index=data["suites"].keys()),
                        "tests": pd.Series(data["tests"].values(),
                                           index=data["tests"].keys())})

                    if self._input_data.get(job, None) is None:
                        self._input_data[job] = pd.Series()
                    self._input_data[job][str(build_nr)] = build_data

                    self._cfg.set_input_file_name(
                        job, build_nr, result["build"]["file-name"])

                self._cfg.set_input_state(job, build_nr, result["state"])

                logging.info("Memory allocation: {0:,d}MB".format(
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))

        logging.info("Done.")

    @staticmethod
    def _end_of_tag(tag_filter, start=0, closer="'"):
        """Return the index of the character which closes the first tag found
        in the string.

        :param tag_filter: The string where the end of tag is searched for.
        :param start: The index where the search is started.
        :param closer: The character which closes the tag.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer, or None if no tag is found.
        :rtype: int
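
        Example (illustrative)::

            _end_of_tag("'ABC' and 'DEF'")  # -> 4, the index of the
                                            # apostrophe closing 'ABC'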
        """

        try:
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None

    @staticmethod
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
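
        Example (illustrative; the tags are assumptions)::

            _condition("'NDRPDR' and '64B'")
            # -> "'NDRPDR' in tags and '64B' in tags"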
        """

        index = 0
        while True:
            index = InputData._end_of_tag(tag_filter, index)
            if index is None:
                return tag_filter
            index += 1
            tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]

    def filter_data(self, element, params=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
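
        An illustrative filter (the tag names are assumptions) as written in
        the element specification::

            "'NDRPDR' and '64B' and not 'VHOST'"

        is turned by _condition() into::

            "'NDRPDR' in tags and '64B' in tags and not 'VHOST' in tags"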
        """

        try:
            if element["filter"] in ("all", "template"):
                cond = "True"
            else:
                cond = InputData._condition(element["filter"])
            logging.debug("   Filter: {0}".format(cond))
        except KeyError:
            logging.error("  No filter defined.")
            return None

        if params is None:
            params = element.get("parameters", None)
            if params:
                params.append("type")

        data = pd.Series()
        try:
            for job, builds in element["data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_iter = self.data[job][str(build)][data_set].\
                            iteritems()
                    except KeyError:
                        if continue_on_error:
                            continue
                        else:
                            return None
                    for test_ID, test_data in data_iter:
                        if eval(cond, {"tags": test_data.get("tags", "")}):
                            data[job][str(build)][test_ID] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_ID][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_ID][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_ID][param] =\
                                            "No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error("   Missing mandatory parameter in the element "
                          "specification: {0}".format(err))
            return None
        except AttributeError:
            return None
        except SyntaxError:
            logging.error("   The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
            return None

    @staticmethod
    def merge_data(data):
        """Merge data from more jobs and builds to a simple data structure.

        The output data structure is:

        - test (suite) 1 ID:
          - param 1
          - param 2
          ...
          - param n
        ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
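
        .. note:: If the same test (suite) ID occurs in more jobs or builds,
            the item processed later overwrites the earlier one.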
        """

        logging.info("    Merging data ...")

        merged_data = pd.Series()
        for _, builds in data.iteritems():
            for _, item in builds.iteritems():
                for ID, item_data in item.iteritems():
                    merged_data[ID] = item_data

        return merged_data
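

# A minimal usage sketch (the Specification object "spec", the job name and
# the tags below are assumptions, not taken from this file):
#
#     input_data = InputData(spec)
#     input_data.download_and_parse_data(repeat=2)
#     element = {
#         "filter": "'NDRPDR' and '64B'",
#         "data": {"csit-vpp-perf-verify-master-3n-hsw": [1, 2]},
#         "parameters": ["throughput", "tags"]
#     }
#     filtered = input_data.filter_data(element)
#     merged = InputData.merge_data(filtered)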