# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Data pre-processing

- extract data from output.xml files generated by Jenkins jobs and store it in
  pandas' Series,
- provide access to the data,
- filter the data using tags.
"""

import re
import resource
import pandas as pd
import logging
import prettytable

from robot.api import ExecutionResult, ResultVisitor
from robot import errors
from collections import OrderedDict
from string import replace
from os import remove
from os.path import join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory

from input_data_files import download_and_unzip_data_file


# Separator used in file names
SEPARATOR = "__"
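
# Module overview (a brief orientation note): ExecutionChecker below is a
# Robot Framework ResultVisitor which walks a parsed output.xml and distills
# it into the JSON-like structure documented in its docstring; InputData
# drives the whole pipeline - it downloads the output.xml files, runs
# ExecutionChecker on them, stores the results in pandas' Series and offers
# tag-based filtering over them.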


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float
                        }
                    }
                }
            },

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            },

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": AvgStdevMetadata
                }
            },

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            # NDRPDRDISC tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "PDR" | "NDR",
                "status": "PASS" | "FAIL",
                "throughput": {  # Only type: "PDR" | "NDR"
                    "value": int,
                    "unit": "pps" | "bps" | "percentage"
                },
                "latency": {  # Only type: "PDR" | "NDR"
                    "direction1": {
                        "100": {
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "50": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "10": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        }
                    },
                    "direction2": {
                        "100": {
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "50": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "10": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        }
                    }
                },
                "lossTolerance": "lossTolerance",  # Only type: "PDR"
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run"
            },
            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::\s(\d+.\d+)')

    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')

    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')

    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"DPDK Version: (\d*.\d*)")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')

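    # Illustrative example of what REGEX_BMRR extracts (the sample message
    # text below is hypothetical, but it follows the pattern above):
    #
    #     msg = ("Maximum Receive Rate trial results"
    #            " in packets per second: [10881.3, 10997.4, 10967.6]")
    #     ExecutionChecker.REGEX_BMRR.search(msg).group(1)
    #     # -> '10881.3, 10997.4, 10967.6'
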
    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in the "metadata" part
            of the JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        # Counters of "Show Runtime" keywords and of VAT/PAPI command history
        # messages found:
        # 0 - no message
        # 1 - message for DUT1
        # 2 - message for DUT2
        self._lookup_kw_nr = 0
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 0 - no message
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of the currently processed test - the lowercase full path to
        # the test
        self._test_ID = None

        # The main data structure
        self._data = {
            "metadata": OrderedDict(),
            "suites": OrderedDict(),
            "tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data["metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
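        # (The dispatch works together with the start_*_kw() visitors: they
        # set self._msg_type according to the keyword name, and
        # start_message() then routes every visited message through this
        # dictionary.)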
        self.parse_msg = {
            "timestamp": self._get_timestamp,
            "vpp-version": self._get_vpp_version,
            "dpdk-version": self._get_dpdk_version,
            "teardown-vat-history": self._get_vat_history,
            "teardown-papi-history": self._get_papi_history,
            "test-show-runtime": self._get_show_run,
            "testbed": self._get_testbed
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("Setup of TG node"):
            reg_tg_ip = re.compile(
                r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data["metadata"]["testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT Version:") or \
            msg.message.count("VPP Version:") or \
            msg.message.count("VPP version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data["metadata"]["version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(1))
                self._data["metadata"]["version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        self._timestamp = msg.timestamp[:14]
        self._data["metadata"]["generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                          "VAT command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text

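    # Note on the " |br| " token used above and below: it is an rst
    # substitution kept in the stored strings; the report generator is
    # expected to render it as a line break in the final documents.
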
    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                          "PAPI command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("Runtime:"):
            self._show_run_lookup_nr += 1
            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
                self._data["tests"][self._test_ID]["show-run"] = str()
            if self._lookup_kw_nr > 1:
                self._msg_type = None
            if self._show_run_lookup_nr == 1:
                message = str(msg.message).replace(' ', '').replace('\n', '').\
                    replace("'", '"').replace('b"', '"').replace('u"', '"')[8:]
                runtime = loads(message)
                try:
                    threads_nr = len(runtime[0]["clocks"])
                except (IndexError, KeyError):
                    return
                tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks"]
                table = [[tbl_hdr, ] for _ in range(threads_nr)]
                for item in runtime:
                    for idx in range(threads_nr):
                        table[idx].append([
                            item["name"],
                            item["calls"][idx],
                            item["vectors"][idx],
                            item["suspends"][idx],
                            item["clocks"][idx]
                        ])
                text = ""
                for idx in range(threads_nr):
                    text += "Thread {idx} ".format(idx=idx)
                    text += "vpp_main\n" if idx == 0 else \
                        "vpp_wk_{idx}\n".format(idx=idx-1)
                    txt_table = None
                    for row in table[idx]:
                        if txt_table is None:
                            txt_table = prettytable.PrettyTable(row)
                        else:
                            if any(row[1:]):
                                txt_table.add_row(row)
                    txt_table.align["Name"] = "l"
                    txt_table.align["Calls"] = "r"
                    txt_table.align["Vectors"] = "r"
                    txt_table.align["Suspends"] = "r"
                    txt_table.align["Clocks"] = "r"

                    text += txt_table.get_string(sortby="Name") + '\n'

                text = text.replace('\n', ' |br| ').replace('\r', '').\
                    replace('"', "'")
                try:
                    self._data["tests"][self._test_ID]["show-run"] += " |br| "
                    self._data["tests"][self._test_ID]["show-run"] += \
                        "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
                except KeyError:
                    pass

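    # For orientation: the "Runtime:" message parsed by _get_show_run() is
    # expected to carry a JSON list of per-graph-node dicts with per-thread
    # counter lists, e.g. (a hypothetical, shortened sample):
    #
    #     [{"name": "ip4-lookup", "calls": [532, 530], "vectors": [981, 976],
    #       "suspends": [0, 0], "clocks": [120.3, 118.9]}, ...]
    #
    # Each list index corresponds to one VPP thread (index 0 is vpp_main).
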
    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    def _get_latency(self, msg, test_type):
        """Get the latency data from the test message.

        :param msg: Message to be parsed.
        :param test_type: Type of the test - NDR or PDR.
        :type msg: str
        :type test_type: str
        :returns: Latencies parsed from the message.
        :rtype: dict
        """

        if test_type == "NDR":
            groups = re.search(self.REGEX_LAT_NDR, msg)
            groups_range = range(1, 7)
        elif test_type == "PDR":
            groups = re.search(self.REGEX_LAT_PDR, msg)
            groups_range = range(1, 3)
        else:
            return {}

        latencies = list()
        for idx in groups_range:
            try:
                lat = [int(item) for item in str(groups.group(idx)).split('/')]
            except (AttributeError, ValueError):
                lat = [-1, -1, -1]
            latencies.append(lat)

        keys = ("min", "avg", "max")
        latency = {
            "direction1": {
            },
            "direction2": {
            }
        }

        latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
        latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
        if test_type == "NDR":
            latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
            latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
            latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
            latency["direction2"]["10"] = dict(zip(keys, latencies[5]))

        return latency

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            "NDR": {"LOWER": -1.0, "UPPER": -1.0},
            "PDR": {"LOWER": -1.0, "UPPER": -1.0}
        }
        status = "FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput["NDR"]["LOWER"] = float(groups.group(1))
                throughput["NDR"]["UPPER"] = float(groups.group(2))
                throughput["PDR"]["LOWER"] = float(groups.group(3))
                throughput["PDR"]["UPPER"] = float(groups.group(4))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

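    # Illustrative example of a message shape matched by REGEX_NDRPDR_RATE
    # (the concrete numbers and surrounding lines are hypothetical):
    #
    #     NDR_LOWER: 12157744.0 pps
    #     (one arbitrary line)
    #     NDR_UPPER: 12168747.0 pps
    #     PDR_LOWER: 12462705.0 pps
    #     (one arbitrary line)
    #     PDR_UPPER: 12473708.0 pps
    #
    # which yields throughput["NDR"]["LOWER"] == 12157744.0, and so on.
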
    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            "LOWER": -1.0,
            "UPPER": -1.0
        }
        status = "FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput["LOWER"] = float(groups.group(1))
                throughput["UPPER"] = float(groups.group(2))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        latency = {
            "NDR": {
                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            },
            "PDR": {
                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            }
        }
        status = "FAIL"
        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        if groups is not None:
            keys = ("min", "avg", "max")
            try:
                latency["NDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(1).split('/')]))
                latency["NDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(2).split('/')]))
                latency["PDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(3).split('/')]))
                latency["PDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(4).split('/')]))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return latency, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
            replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
        doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)

        self._data["suites"][suite.longname.lower().replace('"', "'").
            replace(" ", "_")] = {
                "name": suite.name.lower(),
                "doc": doc_str,
                "parent": parent_name,
                "level": len(suite.longname.split("."))
            }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        pass

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split('.')[-1]
            logging.debug("{0}\n{1}\n{2}\n{3}".format(
                self._data["metadata"], longname_orig, longname, name))
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
        # Remove TC number from the TC name (not needed):
        test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result["parent"] = test.parent.name.lower()
        test_result["tags"] = tags
        doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
            replace('\r', '').replace('[', ' |br| [')
        test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
        test_result["msg"] = test.message.replace('\n', ' |br| '). \
            replace('\r', '').replace('"', "'")
        test_result["type"] = "FUNC"
        test_result["status"] = test.status

        if "PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
            if not groups:
                tag_count = 0
                for tag in test_result["tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
                                           "-{0}-".format(tag_tc.lower()),
                                           self._test_ID,
                                           count=1)
                    test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                 "-{0}-".format(tag_tc.lower()),
                                                 test_result["name"],
                                                 count=1)
                else:
                    test_result["status"] = "FAIL"
                    self._data["tests"][self._test_ID] = test_result
                    logging.debug("The test '{0}' has none or more than one "
                                  "multi-threading tag.".format(self._test_ID))
                    logging.debug("Tags: {0}".format(test_result["tags"]))
                    return

        if test.status == "PASS" and ("NDRPDRDISC" in tags or
                                      "NDRPDR" in tags or
                                      "SOAK" in tags or
                                      "TCP" in tags or
                                      "MRR" in tags or
                                      "BMRR" in tags):
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if "NDRDISC" in tags:
                test_result["type"] = "NDR"
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            elif "PDRDISC" in tags:
                test_result["type"] = "PDR"
            elif "NDRPDR" in tags:
                test_result["type"] = "NDRPDR"
            elif "SOAK" in tags:
                test_result["type"] = "SOAK"
            elif "TCP" in tags:
                test_result["type"] = "TCP"
            elif "MRR" in tags:
                test_result["type"] = "MRR"
            elif "FRMOBL" in tags or "BMRR" in tags:
                test_result["type"] = "BMRR"
            else:
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result
                return

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if test_result["type"] in ("NDR", "PDR"):
                try:
                    rate_value = str(re.search(
                        self.REGEX_RATE, test.message).group(1))
                except AttributeError:
                    rate_value = "-1"
                try:
                    rate_unit = str(re.search(
                        self.REGEX_RATE, test.message).group(2))
                except AttributeError:
                    rate_unit = "-1"

                test_result["throughput"] = dict()
                test_result["throughput"]["value"] = \
                    int(rate_value.split('.')[0])
                test_result["throughput"]["unit"] = rate_unit
                test_result["latency"] = \
                    self._get_latency(test.message, test_result["type"])
                if test_result["type"] == "PDR":
                    test_result["lossTolerance"] = str(re.search(
                        self.REGEX_TOLERANCE, test.message).group(1))

            elif test_result["type"] in ("NDRPDR", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result["latency"], test_result["status"] = \
                    self._get_ndrpdr_latency(test.message)

            elif test_result["type"] in ("SOAK", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_plr_throughput(test.message)

            elif test_result["type"] in ("TCP", ):
                groups = re.search(self.REGEX_TCP, test.message)
                test_result["result"] = int(groups.group(2))

            elif test_result["type"] in ("MRR", "BMRR"):
                test_result["result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    metadata = AvgStdevMetadataFactory.from_data(items_float)
                    # Next two lines have been introduced in CSIT-1179,
                    # to be removed in CSIT-1180.
                    metadata.size = 1
                    metadata.stdev = 0.0
                    test_result["result"]["receive-rate"] = metadata
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result["result"]["receive-rate"] = \
                        AvgStdevMetadataFactory.from_data([
                            float(groups.group(3)) / float(groups.group(1)), ])

        self._data["tests"][self._test_ID] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        pass

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == "setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == "teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
            else:
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        pass

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count("Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = "test-show-runtime"
        elif test_kw.name.count("Install Dpdk Test") and not self._version:
            self._msg_type = "dpdk-version"
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count("Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = "vpp-version"
        elif setup_kw.name.count("Set Global Variable") \
                and not self._timestamp:
            self._msg_type = "timestamp"
        elif setup_kw.name.count("Setup Framework") and not self._testbed:
            self._msg_type = "testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts. Default implementation does
        nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count("Show Vat History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count("Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Gets the required information from the
        message using the parser selected by the current message type (e.g.
        VPP version, timestamp, show runtime output).

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        pass


class InputData(object):
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' Series.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """

    def __init__(self, spec):
        """Initialization.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data
        :rtype: pandas.Series
        """
        return self._input_data

    def metadata(self, job, build):
        """Getter - metadata

        :param job: Job whose metadata we want.
        :param build: Build whose metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata
        :rtype: pandas.Series
        """

        return self.data[job][build]["metadata"]

    def suites(self, job, build):
        """Getter - suites

        :param job: Job whose suites we want.
        :param build: Build whose suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)]["suites"]

    def tests(self, job, build):
        """Getter - tests

        :param job: Job whose tests we want.
        :param build: Build whose tests we want.
        :type job: str
        :type build: str
        :returns: Tests.
        :rtype: pandas.Series
        """

        return self.data[job][build]["tests"]

    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of the job whose build output data will be
            processed.
        :param build: The build whose output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """

        metadata = {
            "job": job,
            "build": build
        }

        with open(build["file-name"], 'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(("ERROR", "Error occurred while parsing output.xml: "
                                     "{0}".format(err)))
                return None
        checker = ExecutionChecker(metadata, self._cfg.mapping,
                                   self._cfg.ignore)
        result.visit(checker)

        return checker.data

    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download the specified number of times if it
            is not successful.
        :param pid: PID of the process executing this method.
        :type job: str
        :type build: dict
        :type repeat: int
        :type pid: int
        """

        logs = list()

        logs.append(("INFO", "  Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        state = "failed"
        success = False
        data = None
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                                   logs)
            if success:
                break
            do_repeat -= 1
        if not success:
            logs.append(("ERROR", "It is not possible to download the input "
                                  "data file from the job '{job}', build "
                                  "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))
        if success:
            logs.append(("INFO", "    Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = self._parse_tests(job, build, logs)
            if data is None:
                logs.append(("ERROR", "Input data file from the job '{job}', "
                                      "build '{build}' is damaged. Skipped.".
                             format(job=job, build=build["build"])))
            else:
                state = "processed"

            try:
                remove(build["file-name"])
            except OSError as err:
                logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                             format(build["file-name"], repr(err))))

        # If the time period is defined in the specification file, remove all
        # data and files which are outside the time period.
        timeperiod = self._cfg.input.get("time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get("metadata", None)
            if metadata:
                generated = metadata.get("generated", None)
                if generated:
                    generated = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = "removed"
                        data = None
                        logs.append(
                            ("INFO",
                             "    The build {job}/{build} is outdated, will be "
                             "removed".format(job=job, build=build["build"])))
                        file_name = self._cfg.input["file-name"]
                        full_name = join(
                            self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
                            "{job}{sep}{build}{sep}{name}".format(
                                job=job,
                                sep=SEPARATOR,
                                build=build["build"],
                                name=file_name))
                        try:
                            remove(full_name)
                            logs.append(("INFO",
                                         "    The file {name} has been removed".
                                         format(name=full_name)))
                        except OSError as err:
                            logs.append(("ERROR",
                                         "Cannot remove the file '{0}': {1}".
                                         format(full_name, repr(err))))
        logs.append(("INFO", "  Done."))

        for level, line in logs:
            if level == "INFO":
                logging.info(line)
            elif level == "ERROR":
                logging.error(line)
            elif level == "DEBUG":
                logging.debug(line)
            elif level == "CRITICAL":
                logging.critical(line)
            elif level == "WARNING":
                logging.warning(line)

        return {"data": data, "state": state, "job": job, "build": build}

    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse the input data from the input
        files and store it in pandas' Series.

        :param repeat: Repeat the download the specified number of times if it
            is not successful.
        :type repeat: int
        """

        logging.info("Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result["build"]["build"]

                if result["data"]:
                    data = result["data"]
                    build_data = pd.Series({
                        "metadata": pd.Series(
                            data["metadata"].values(),
                            index=data["metadata"].keys()),
                        "suites": pd.Series(data["suites"].values(),
                                            index=data["suites"].keys()),
                        "tests": pd.Series(data["tests"].values(),
                                           index=data["tests"].keys())})

                    if self._input_data.get(job, None) is None:
                        self._input_data[job] = pd.Series()
                    self._input_data[job][str(build_nr)] = build_data

                    self._cfg.set_input_file_name(
                        job, build_nr, result["build"]["file-name"])

                self._cfg.set_input_state(job, build_nr, result["state"])

                logging.info("Memory allocation: {0:,d}MB".format(
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))

        logging.info("Done.")

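    # Illustrative usage sketch (the job name and build number below are
    # hypothetical; "spec" stands for a parsed Specification object):
    #
    #     in_data = InputData(spec)
    #     in_data.download_and_parse_data(repeat=2)
    #     tests = in_data.tests("csit-vpp-perf-verify-master-3n-hsw", "5")
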
    @staticmethod
    def _end_of_tag(tag_filter, start=0, closer="'"):
        """Return the index of the character which closes the first tag found
        in the string.

        :param tag_filter: The string where the end of the tag is searched for.
        :param start: The index where the search is started.
        :param closer: The character which closes the tag.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer, or None if no tag is found.
        :rtype: int
        """

        try:
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None

    @staticmethod
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
        """

        index = 0
        while True:
            index = InputData._end_of_tag(tag_filter, index)
            if index is None:
                return tag_filter
            index += 1
            tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]

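    # Illustrative example of what _condition() produces; this follows
    # directly from the code above:
    #
    #     InputData._condition("'NDRPDR' and not '2T2C'")
    #     # -> "'NDRPDR' in tags and not '2T2C' in tags"
    #
    # The returned string is later evaluated per test with the test's tags
    # bound to the name "tags" (see filter_data() below).
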
    def filter_data(self, element, params=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The element will be empty in that case.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """

        try:
            if element["filter"] in ("all", "template"):
                cond = "True"
            else:
                cond = InputData._condition(element["filter"])
            logging.debug("   Filter: {0}".format(cond))
        except KeyError:
            logging.error("  No filter defined.")
            return None

        if params is None:
            params = element.get("parameters", None)
            if params:
                params.append("type")

        data = pd.Series()
        try:
            for job, builds in element["data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_iter = self.data[job][str(build)][data_set].\
                            iteritems()
                    except KeyError:
                        if continue_on_error:
                            continue
                        else:
                            return None
                    for test_ID, test_data in data_iter:
                        if eval(cond, {"tags": test_data.get("tags", "")}):
                            data[job][str(build)][test_ID] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_ID][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_ID][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_ID][param] =\
                                            "No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error("   Missing mandatory parameter in the element "
                          "specification: {0}".format(err))
            return None
        except AttributeError:
            return None
        except SyntaxError:
            logging.error("   The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
            return None

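    # Illustrative example of an element specification accepted by
    # filter_data() (the job name and build numbers are hypothetical):
    #
    #     element = {
    #         "filter": "'NDRPDR' and '64B'",
    #         "parameters": ["throughput", "tags"],
    #         "data": {"csit-vpp-perf-verify-master-3n-hsw": [1, 2]}
    #     }
    #     filtered = in_data.filter_data(element)
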
    @staticmethod
    def merge_data(data):
        """Merge data from multiple jobs and builds into a single data
        structure.

        The output data structure is:

        - test (suite) 1 ID:
          - param 1
          - param 2
          ...
          - param n
        ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
        """

        logging.info("    Merging data ...")

        merged_data = pd.Series()
        for _, builds in data.iteritems():
            for _, item in builds.iteritems():
                for ID, item_data in item.iteritems():
                    merged_data[ID] = item_data

        return merged_data
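
    # Illustrative follow-up to filter_data() above: flattening the filtered
    # per-job/per-build data into one Series keyed by test ID. Note that a
    # test present in several builds keeps only its last occurrence, because
    # later assignments overwrite earlier ones:
    #
    #     merged = InputData.merge_data(filtered)
    #     for test_id, test in merged.iteritems():
    #         print test_id, test.get("throughput", "N/A")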