resources/tools/presentation/input_data_parser.py
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """

import multiprocessing
import os
import re
import resource
import pandas as pd
import logging

from robot.api import ExecutionResult, ResultVisitor
from robot import errors
from collections import OrderedDict
from string import replace
from os import remove
from os.path import join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory

from input_data_files import download_and_unzip_data_file
from utils import Worker


# Separator used in file names
SEPARATOR = "__"
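# For illustration (job and build names hypothetical), a downloaded input
# file is stored under the working directory as
# "csit-vpp-perf-mrr-daily-master__123__output.xml", i.e. composed as
# "{job}{SEPARATOR}{build}{SEPARATOR}{file-name}".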


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a JSON structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float
                        }
                    }
                }
            }

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            }

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": AvgStdevMetadata,
                }
            }

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            # NDRPDRDISC tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "PDR" | "NDR",
                "status": "PASS" | "FAIL",
                "throughput": {  # Only type: "PDR" | "NDR"
                    "value": int,
                    "unit": "pps" | "bps" | "percentage"
                },
                "latency": {  # Only type: "PDR" | "NDR"
                    "direction1": {
                        "100": {
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "50": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "10": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        }
                    },
                    "direction2": {
                        "100": {
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "50": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "10": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        }
                    }
                },
                "lossTolerance": "lossTolerance",  # Only type: "PDR"
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run"
            },
            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::\s(\d+.\d+)')

    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')

    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')

    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
                                    r"(RTE Version: 'DPDK )(.*)(')")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')

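    # Illustrative example (hypothetical message text) of what REGEX_BMRR
    # extracts from a test message:
    #
    #   >>> msg = ("Maximum Receive Rate trial results"
    #              " in packets per second: [10.5, 11.2, 10.9]")
    #   >>> ExecutionChecker.REGEX_BMRR.search(msg).group(1)
    #   '10.5, 11.2, 10.9'
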
    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TC long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        # Number of "Show Runtime" keywords found so far
        self._lookup_kw_nr = 0
        # Number of VAT/PAPI History messages found:
        # 0 - no message
        # 1 - history of DUT1
        # 2 - history of DUT2
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 0 - no message
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of the currently processed test - the lowercase full path to
        # the test
        self._test_ID = None

        # The main data structure
        self._data = {
            "metadata": OrderedDict(),
            "suites": OrderedDict(),
            "tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data["metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            "timestamp": self._get_timestamp,
            "vpp-version": self._get_vpp_version,
            "dpdk-version": self._get_dpdk_version,
            "teardown-vat-history": self._get_vat_history,
            "teardown-papi-history": self._get_papi_history,
            "test-show-runtime": self._get_show_run,
            "testbed": self._get_testbed
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("Arguments:"):
            message = str(msg.message).replace(' ', '').replace('\n', '').\
                replace("'", '"').replace('b"', '"').\
                replace("honeycom", "honeycomb")
            message = loads(message[11:-1])
            try:
                self._testbed = message["TG"]["host"]
            except (KeyError, ValueError):
                pass
            finally:
                self._data["metadata"]["testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT Version:") or \
            msg.message.count("VPP Version:") or \
            msg.message.count("VPP version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data["metadata"]["version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT testpmd"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(4))
                self._data["metadata"]["version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        self._timestamp = msg.timestamp[:14]
        self._data["metadata"]["generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub("[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} "
                          "VAT command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub("[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} "
                          "PAPI command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("Thread 0 vpp_main"):
            self._show_run_lookup_nr += 1
            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
                self._data["tests"][self._test_ID]["show-run"] = str()
            if self._lookup_kw_nr > 1:
                self._msg_type = None
            if self._show_run_lookup_nr == 1:
                text = msg.message.replace("vat# ", "").\
                    replace("return STDOUT ", "").replace("\n\n", "\n").\
                    replace('\n', ' |br| ').\
                    replace('\r', '').replace('"', "'")
                try:
                    self._data["tests"][self._test_ID]["show-run"] += " |br| "
                    self._data["tests"][self._test_ID]["show-run"] += \
                        "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
                except KeyError:
                    pass

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    def _get_latency(self, msg, test_type):
        """Get the latency data from the test message.

        :param msg: Message to be parsed.
        :param test_type: Type of the test - NDR or PDR.
        :type msg: str
        :type test_type: str
        :returns: Latencies parsed from the message.
        :rtype: dict
        """

        if test_type == "NDR":
            groups = re.search(self.REGEX_LAT_NDR, msg)
            groups_range = range(1, 7)
        elif test_type == "PDR":
            groups = re.search(self.REGEX_LAT_PDR, msg)
            groups_range = range(1, 3)
        else:
            return {}

        latencies = list()
        for idx in groups_range:
            try:
                lat = [int(item) for item in str(groups.group(idx)).split('/')]
            except (AttributeError, ValueError):
                lat = [-1, -1, -1]
            latencies.append(lat)

        keys = ("min", "avg", "max")
        latency = {
            "direction1": {
            },
            "direction2": {
            }
        }

        latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
        latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
        if test_type == "NDR":
            latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
            latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
            latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
            latency["direction2"]["10"] = dict(zip(keys, latencies[5]))

        return latency

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            "NDR": {"LOWER": -1.0, "UPPER": -1.0},
            "PDR": {"LOWER": -1.0, "UPPER": -1.0}
        }
        status = "FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput["NDR"]["LOWER"] = float(groups.group(1))
                throughput["NDR"]["UPPER"] = float(groups.group(2))
                throughput["PDR"]["LOWER"] = float(groups.group(3))
                throughput["PDR"]["UPPER"] = float(groups.group(4))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

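    # A hedged sketch (hypothetical message text) of the shape expected by
    # REGEX_NDRPDR_RATE above; each *_LOWER line is followed by one extra
    # detail line before the matching *_UPPER line:
    #
    #   NDR_LOWER: 12000000.0 pps
    #   (details)
    #   NDR_UPPER: 12500000.0 pps
    #   PDR_LOWER: 13000000.0 pps
    #   (details)
    #   PDR_UPPER: 13500000.0 pps
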
    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            "LOWER": -1.0,
            "UPPER": -1.0
        }
        status = "FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput["LOWER"] = float(groups.group(1))
                throughput["UPPER"] = float(groups.group(2))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

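    # Hypothetical message fragment matched by REGEX_PLR_RATE (the two bounds
    # are on consecutive lines):
    #
    #   PLRsearch lower bound:: 4500000.0 pps
    #   PLRsearch upper bound:: 4600000.0 pps
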
    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        latency = {
            "NDR": {
                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            },
            "PDR": {
                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            }
        }
        status = "FAIL"
        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        if groups is not None:
            keys = ("min", "avg", "max")
            try:
                latency["NDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(1).split('/')]))
                latency["NDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(2).split('/')]))
                latency["PDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(3).split('/')]))
                latency["PDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(4).split('/')]))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return latency, status

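    # Hedged example of the latency lines expected by REGEX_NDRPDR_LAT
    # (values illustrative): NDR latencies first, PDR latencies three lines
    # later; each quoted item is a 'min/avg/max' triple, one per direction:
    #
    #   LATENCY ... ['10/15/25', '12/18/30']
    #   (two detail lines)
    #   LATENCY ... ['11/16/26', '13/19/31']
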
    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
            replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
        doc_str = replace(doc_str, ' |br| **[', '**[', maxreplace=1)

        self._data["suites"][suite.longname.lower().replace('"', "'").
            replace(" ", "_")] = {
                "name": suite.name.lower(),
                "doc": doc_str,
                "parent": parent_name,
                "level": len(suite.longname.split("."))
            }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        pass

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split('.')[-1]
            logging.debug("{0}\n{1}\n{2}\n{3}".format(
                self._data["metadata"], longname_orig, longname, name))
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
        # Remove TC number from the TC name (not needed):
        test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result["parent"] = test.parent.name.lower()
        test_result["tags"] = tags
        doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
            replace('\r', '').replace('[', ' |br| [')
        test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
        test_result["msg"] = test.message.replace('\n', ' |br| '). \
            replace('\r', '').replace('"', "'")
        test_result["type"] = "FUNC"
        test_result["status"] = test.status

        if "PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
            if not groups:
                tag_count = 0
                for tag in test_result["tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
                                           "-{0}-".format(tag_tc.lower()),
                                           self._test_ID,
                                           count=1)
                    test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                 "-{0}-".format(tag_tc.lower()),
                                                 test_result["name"],
                                                 count=1)
                else:
                    test_result["status"] = "FAIL"
                    self._data["tests"][self._test_ID] = test_result
                    logging.debug("The test '{0}' has no multi-threading tag "
                                  "or more than one.".format(self._test_ID))
                    logging.debug("Tags: {0}".format(test_result["tags"]))
                    return

        if test.status == "PASS" and ("NDRPDRDISC" in tags or
                                      "NDRPDR" in tags or
                                      "SOAK" in tags or
                                      "TCP" in tags or
                                      "MRR" in tags or
                                      "BMRR" in tags):
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if "NDRDISC" in tags:
                test_result["type"] = "NDR"
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            elif "PDRDISC" in tags:
                test_result["type"] = "PDR"
            elif "NDRPDR" in tags:
                test_result["type"] = "NDRPDR"
            elif "SOAK" in tags:
                test_result["type"] = "SOAK"
            elif "TCP" in tags:
                test_result["type"] = "TCP"
            elif "MRR" in tags:
                test_result["type"] = "MRR"
            elif "FRMOBL" in tags or "BMRR" in tags:
                test_result["type"] = "BMRR"
            else:
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result
                return

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if test_result["type"] in ("NDR", "PDR"):
                try:
                    rate_value = str(re.search(
                        self.REGEX_RATE, test.message).group(1))
                except AttributeError:
                    rate_value = "-1"
                try:
                    rate_unit = str(re.search(
                        self.REGEX_RATE, test.message).group(2))
                except AttributeError:
                    rate_unit = "-1"

                test_result["throughput"] = dict()
                test_result["throughput"]["value"] = \
                    int(rate_value.split('.')[0])
                test_result["throughput"]["unit"] = rate_unit
                test_result["latency"] = \
                    self._get_latency(test.message, test_result["type"])
                if test_result["type"] == "PDR":
                    test_result["lossTolerance"] = str(re.search(
                        self.REGEX_TOLERANCE, test.message).group(1))

            elif test_result["type"] in ("NDRPDR", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result["latency"], test_result["status"] = \
                    self._get_ndrpdr_latency(test.message)

            elif test_result["type"] in ("SOAK", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_plr_throughput(test.message)

            elif test_result["type"] in ("TCP", ):
                groups = re.search(self.REGEX_TCP, test.message)
                test_result["result"] = int(groups.group(2))

            elif test_result["type"] in ("MRR", "BMRR"):
                test_result["result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    metadata = AvgStdevMetadataFactory.from_data(items_float)
                    # Next two lines have been introduced in CSIT-1179,
                    # to be removed in CSIT-1180.
                    metadata.size = 1
                    metadata.stdev = 0.0
                    test_result["result"]["receive-rate"] = metadata
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result["result"]["receive-rate"] = \
                        AvgStdevMetadataFactory.from_data([
                            float(groups.group(3)) / float(groups.group(1)), ])

        self._data["tests"][self._test_ID] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        pass

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == "setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == "teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
            else:
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        pass

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count("Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = "test-show-runtime"
        elif test_kw.name.count("Start The L2fwd Test") and not self._version:
            self._msg_type = "dpdk-version"
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count("Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = "vpp-version"
        elif setup_kw.name.count("Set Global Variable") \
                and not self._timestamp:
            self._msg_type = "timestamp"
        elif setup_kw.name.count("Setup Framework") and not self._testbed:
            self._msg_type = "testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts. Default implementation does
        nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count("Show Vat History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count("Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Get required information from messages:
        - VPP version.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        pass


class InputData(object):
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """

    def __init__(self, spec):
        """Initialization.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data
        :rtype: pandas.Series
        """
        return self._input_data

    def metadata(self, job, build):
        """Getter - metadata

        :param job: Job whose metadata we want.
        :param build: Build whose metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata
        :rtype: pandas.Series
        """

        return self.data[job][build]["metadata"]

    def suites(self, job, build):
        """Getter - suites

        :param job: Job whose suites we want.
        :param build: Build whose suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)]["suites"]

    def tests(self, job, build):
        """Getter - tests

        :param job: Job whose tests we want.
        :param build: Build whose tests we want.
        :type job: str
        :type build: str
        :returns: Tests.
        :rtype: pandas.Series
        """

        return self.data[job][build]["tests"]

    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of the job whose build output data will be
            processed.
        :param build: The build whose output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """

        metadata = {
            "job": job,
            "build": build
        }

        with open(build["file-name"], 'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(("ERROR", "Error occurred while parsing output.xml: "
                                     "{0}".format(err)))
                return None
        checker = ExecutionChecker(metadata, self._cfg.mapping,
                                   self._cfg.ignore)
        result.visit(checker)

        return checker.data

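    # Minimal stand-alone sketch of the robot.api pattern used by
    # _parse_tests() (file name illustrative):
    #
    #   from robot.api import ExecutionResult
    #   result = ExecutionResult("output.xml")
    #   result.visit(ExecutionChecker(metadata={}, mapping={}, ignore=[]))
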
    def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
        """Download and parse the input data file.

        :param pid: PID of the process executing this method.
        :param data_queue: Shared memory between processes. Queue which keeps
            the result data. This data is then read by the main process and used
            in further processing.
        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :type pid: int
        :type data_queue: multiprocessing.Manager().Queue()
        :type job: str
        :type build: dict
        :type repeat: int
        """

        logs = list()

        logs.append(("INFO", "  Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        state = "failed"
        success = False
        data = None
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                                   logs)
            if success:
                break
            do_repeat -= 1
        if not success:
            logs.append(("ERROR", "It is not possible to download the input "
                                  "data file from the job '{job}', build "
                                  "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))
        if success:
            logs.append(("INFO", "    Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = self._parse_tests(job, build, logs)
            if data is None:
                logs.append(("ERROR", "Input data file from the job '{job}', "
                                      "build '{build}' is damaged. Skipped.".
                             format(job=job, build=build["build"])))
            else:
                state = "processed"

            try:
                remove(build["file-name"])
            except OSError as err:
                logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                             format(build["file-name"], repr(err))))

        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get("time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get("metadata", None)
            if metadata:
                generated = metadata.get("generated", None)
                if generated:
                    generated = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = "removed"
                        data = None
                        logs.append(
                            ("INFO",
                             "    The build {job}/{build} is outdated, will be "
                             "removed".format(job=job, build=build["build"])))
                        file_name = self._cfg.input["file-name"]
                        full_name = join(
                            self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
                            "{job}{sep}{build}{sep}{name}".format(
                                job=job,
                                sep=SEPARATOR,
                                build=build["build"],
                                name=file_name))
                        try:
                            remove(full_name)
                            logs.append(("INFO",
                                         "    The file {name} has been removed".
                                         format(name=full_name)))
                        except OSError as err:
                            logs.append(("ERROR",
                                         "Cannot remove the file '{0}': {1}".
                                         format(full_name, repr(err))))
        logs.append(("INFO", "  Done."))

        result = {
            "data": data,
            "state": state,
            "job": job,
            "build": build
        }
        data_queue.put(result)

        for level, line in logs:
            if level == "INFO":
                logging.info(line)
            elif level == "ERROR":
                logging.error(line)
            elif level == "DEBUG":
                logging.debug(line)
            elif level == "CRITICAL":
                logging.critical(line)
            elif level == "WARNING":
                logging.warning(line)

        logging.info("Memory allocation: {0:,d}MB".format(
            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))

    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """

        logging.info("Downloading and parsing input files ...")

        work_queue = multiprocessing.JoinableQueue()
        manager = multiprocessing.Manager()
        data_queue = manager.Queue()
        cpus = multiprocessing.cpu_count()

        workers = list()
        for cpu in range(cpus):
            worker = Worker(work_queue,
                            data_queue,
                            self._download_and_parse_build)
            worker.daemon = True
            worker.start()
            workers.append(worker)
            os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
                      format(cpu, worker.pid))

        for job, builds in self._cfg.builds.items():
            for build in builds:
                work_queue.put((job, build, repeat))

        work_queue.join()

        logging.info("Done.")
        logging.info("Collecting data:")

        while not data_queue.empty():
            result = data_queue.get()

            job = result["job"]
            build_nr = result["build"]["build"]
            logging.info("  {job}-{build}".format(job=job, build=build_nr))

            if result["data"]:
                data = result["data"]
                build_data = pd.Series({
                    "metadata": pd.Series(
                        data["metadata"].values(),
                        index=data["metadata"].keys()),
                    "suites": pd.Series(data["suites"].values(),
                                        index=data["suites"].keys()),
                    "tests": pd.Series(data["tests"].values(),
                                       index=data["tests"].keys())})

                if self._input_data.get(job, None) is None:
                    self._input_data[job] = pd.Series()
                self._input_data[job][str(build_nr)] = build_data

                self._cfg.set_input_file_name(
                    job, build_nr, result["build"]["file-name"])

            self._cfg.set_input_state(job, build_nr, result["state"])

            logging.info("Memory allocation: {0:,d}MB".format(
                resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))

        del data_queue

        # Terminate all workers
        for worker in workers:
            worker.terminate()
            worker.join()

        logging.info("Done.")

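    # The multiprocessing layout above, in brief: the main process fills a
    # JoinableQueue with (job, build, repeat) work items; one Worker process
    # per CPU consumes them via _download_and_parse_build() and puts results
    # into a Manager().Queue(), which the main process drains into the pandas
    # Series after work_queue.join() returns.
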
    @staticmethod
    def _end_of_tag(tag_filter, start=0, closer="'"):
        """Return the index of character in the string which is the end of tag.

        :param tag_filter: The string where the end of tag is being searched.
        :param start: The index where the searching is started.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer.
        :rtype: int
        """

        try:
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None

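    # Example (filter string illustrative): the closing apostrophe of the
    # first tag in "'NDRPDR' and '1T1C'" is at index 7:
    #
    #   >>> InputData._end_of_tag("'NDRPDR' and '1T1C'")
    #   7
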
    @staticmethod
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
        """

        index = 0
        while True:
            index = InputData._end_of_tag(tag_filter, index)
            if index is None:
                return tag_filter
            index += 1
            tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]

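    # Example of the translation performed by _condition() (filter string
    # illustrative):
    #
    #   >>> InputData._condition("'NDRPDR' and ('1T1C' or '2T2C')")
    #   "'NDRPDR' in tags and ('1T1C' in tags or '2T2C' in tags)"
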
    def filter_data(self, element, params=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The element is empty in that case.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """

        try:
            if element["filter"] in ("all", "template"):
                cond = "True"
            else:
                cond = InputData._condition(element["filter"])
            logging.debug("   Filter: {0}".format(cond))
        except KeyError:
            logging.error("  No filter defined.")
            return None

        if params is None:
            params = element.get("parameters", None)
            if params:
                params.append("type")

        data = pd.Series()
        try:
            for job, builds in element["data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_iter = self.data[job][str(build)][data_set].\
                            iteritems()
                    except KeyError:
                        if continue_on_error:
                            continue
                        else:
                            return None
                    for test_ID, test_data in data_iter:
                        if eval(cond, {"tags": test_data.get("tags", "")}):
                            data[job][str(build)][test_ID] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_ID][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_ID][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_ID][param] =\
                                            "No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error("   Missing mandatory parameter in the element "
                          "specification: {0}".format(err))
            return None
        except AttributeError:
            return None
        except SyntaxError:
            logging.error("   The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
            return None

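    # A hedged usage sketch of filter_data(); the element below is
    # hypothetical and mirrors the structure read from a specification file:
    #
    #   element = {
    #       "filter": "'NDRPDR' and '64B'",
    #       "parameters": ["throughput", "status"],
    #       "data": {"csit-vpp-perf-mrr-daily-master": [1, 2]}
    #   }
    #   filtered = input_data.filter_data(element, data_set="tests")
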
    @staticmethod
    def merge_data(data):
        """Merge data from multiple jobs and builds into a simple data
        structure.

        The output data structure is:

        - test (suite) 1 ID:
          - param 1
          - param 2
          ...
          - param n
        ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
        """

        logging.info("    Merging data ...")

        merged_data = pd.Series()
        for _, builds in data.iteritems():
            for _, item in builds.iteritems():
                for ID, item_data in item.iteritems():
                    merged_data[ID] = item_data

        return merged_data
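
    # Typical flow (names illustrative): filter first, then merge across jobs
    # and builds into a flat "test ID -> data" mapping:
    #
    #   filtered = input_data.filter_data(element)
    #   merged = InputData.merge_data(filtered)
    #   for test_id, test in merged.iteritems():
    #       ...  # process one test's data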