CSIT-1452: Make PAL use PapiHistory instead of VatHistory
[csit.git] resources/tools/presentation/input_data_parser.py
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """

import multiprocessing
import os
import re
import pandas as pd
import logging

from robot.api import ExecutionResult, ResultVisitor
from robot import errors
from collections import OrderedDict
from string import replace
from os import remove
from os.path import join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory

from input_data_files import download_and_unzip_data_file
from utils import Worker


# Separator used in file names
SEPARATOR = "__"
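
# Illustrative note (an assumption, not from the original source): downloaded
# input files are stored under names built as
# "{job}{SEPARATOR}{build}{SEPARATOR}{file-name}", e.g. a hypothetical
# "csit-vpp-perf-mrr-daily-master__1234__output.xml"; see the full_name
# construction in InputData._download_and_parse_build below.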


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            }
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        }
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT or PAPI command history",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float
                        }
                    }
                }
            }

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            }

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": AvgStdevMetadata,
                }
            }

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            # NDRPDRDISC tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "PDR" | "NDR",
                "status": "PASS" | "FAIL",
                "throughput": {  # Only type: "PDR" | "NDR"
                    "value": int,
                    "unit": "pps" | "bps" | "percentage"
                },
                "latency": {  # Only type: "PDR" | "NDR"
                    "direction1": {
                        "100": {
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "50": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "10": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        }
                    },
                    "direction2": {
                        "100": {
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "50": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "10": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        }
                    }
                },
                "lossTolerance": "lossTolerance",  # Only type: "PDR"
                "conf-history": "DUT1 and DUT2 VAT or PAPI command history",
                "show-run": "Show Run"
            },
            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            }
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        }
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT or PAPI command history",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::\s(\d+.\d+)')

    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')

    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')

    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
                                    r"(RTE Version: 'DPDK )(.*)(')")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
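
    # Illustrative examples (assumptions inferred from the patterns above,
    # not taken from real job output):
    #   REGEX_TC_TAG      matches tags such as "1T1C" or "2t2c",
    #   REGEX_TC_NAME_OLD matches name fragments such as "-1t1c-",
    #   REGEX_TC_NAME_NEW matches name fragments such as "-1c-",
    #   REGEX_TC_NUMBER   matches test case prefixes such as "tc01-",
    #   REGEX_BMRR        matches messages such as "Maximum Receive Rate
    #                     trial results in packets per second: [10.5, 11.2]".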

    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        # Number of VAT/PAPI History messages found:
        # 0 - no message
        # 1 - VAT/PAPI History of DUT1
        # 2 - VAT/PAPI History of DUT2
        self._lookup_kw_nr = 0
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 0 - no message
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of currently processed test - the lowercase full path to
        # the test
        self._test_ID = None

        # The main data structure
        self._data = {
            "metadata": OrderedDict(),
            "suites": OrderedDict(),
            "tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data["metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            "timestamp": self._get_timestamp,
            "vpp-version": self._get_vpp_version,
            "dpdk-version": self._get_dpdk_version,
            "teardown-vat-history": self._get_vat_history,
            "teardown-papi-history": self._get_papi_history,
            "test-show-runtime": self._get_show_run,
            "testbed": self._get_testbed
        }
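
        # Note added for clarity: self._msg_type is the dispatch key; it is
        # set by start_test_kw / start_setup_kw / start_teardown_kw, and
        # start_message() then routes each Robot message to the matching
        # parser above.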

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("Arguments:"):
            message = str(msg.message).replace(' ', '').replace('\n', '').\
                replace("'", '"').replace('b"', '"').\
                replace("honeycom", "honeycomb")
            message = loads(message[11:-1])
            try:
                self._testbed = message["TG"]["host"]
            except (KeyError, ValueError):
                pass
            finally:
                self._data["metadata"]["testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT Version:") or \
            msg.message.count("VPP Version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data["metadata"]["version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT testpmd"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(4))
                self._data["metadata"]["version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        self._timestamp = msg.timestamp[:14]
        self._data["metadata"]["generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                          "VAT command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                          "PAPI command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("return STDOUT Thread "):
            self._show_run_lookup_nr += 1
            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
                self._data["tests"][self._test_ID]["show-run"] = str()
            if self._lookup_kw_nr > 1:
                self._msg_type = None
            if self._show_run_lookup_nr == 1:
                text = msg.message.replace("vat# ", "").\
                    replace("return STDOUT ", "").replace("\n\n", "\n").\
                    replace('\n', ' |br| ').\
                    replace('\r', '').replace('"', "'")
                try:
                    self._data["tests"][self._test_ID]["show-run"] += " |br| "
                    self._data["tests"][self._test_ID]["show-run"] += \
                        "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
                except KeyError:
                    pass

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    def _get_latency(self, msg, test_type):
        """Get the latency data from the test message.

        :param msg: Message to be parsed.
        :param test_type: Type of the test - NDR or PDR.
        :type msg: str
        :type test_type: str
        :returns: Latencies parsed from the message.
        :rtype: dict
        """

        if test_type == "NDR":
            groups = re.search(self.REGEX_LAT_NDR, msg)
            groups_range = range(1, 7)
        elif test_type == "PDR":
            groups = re.search(self.REGEX_LAT_PDR, msg)
            groups_range = range(1, 3)
        else:
            return {}

        latencies = list()
        for idx in groups_range:
            try:
                lat = [int(item) for item in str(groups.group(idx)).split('/')]
            except (AttributeError, ValueError):
                lat = [-1, -1, -1]
            latencies.append(lat)

        keys = ("min", "avg", "max")
        latency = {
            "direction1": {
            },
            "direction2": {
            }
        }

        latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
        latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
        if test_type == "NDR":
            latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
            latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
            latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
            latency["direction2"]["10"] = dict(zip(keys, latencies[5]))

        return latency

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            "NDR": {"LOWER": -1.0, "UPPER": -1.0},
            "PDR": {"LOWER": -1.0, "UPPER": -1.0}
        }
        status = "FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput["NDR"]["LOWER"] = float(groups.group(1))
                throughput["NDR"]["UPPER"] = float(groups.group(2))
                throughput["PDR"]["LOWER"] = float(groups.group(3))
                throughput["PDR"]["UPPER"] = float(groups.group(4))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            "LOWER": -1.0,
            "UPPER": -1.0
        }
        status = "FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput["LOWER"] = float(groups.group(1))
                throughput["UPPER"] = float(groups.group(2))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        latency = {
            "NDR": {
                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            },
            "PDR": {
                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            }
        }
        status = "FAIL"
        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        if groups is not None:
            keys = ("min", "avg", "max")
            try:
                latency["NDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(1).split('/')]))
                latency["NDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(2).split('/')]))
                latency["PDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(3).split('/')]))
                latency["PDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(4).split('/')]))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return latency, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
            replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
        doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)

        self._data["suites"][suite.longname.lower().replace('"', "'").
            replace(" ", "_")] = {
                "name": suite.name.lower(),
                "doc": doc_str,
                "parent": parent_name,
                "level": len(suite.longname.split("."))
            }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        pass

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split('.')[-1]
            logging.debug("{0}\n{1}\n{2}\n{3}".format(
                self._data["metadata"], longname_orig, longname, name))
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
        # Remove TC number from the TC name (not needed):
        test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result["parent"] = test.parent.name.lower()
        test_result["tags"] = tags
        doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
            replace('\r', '').replace('[', ' |br| [')
        test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
        test_result["msg"] = test.message.replace('\n', ' |br| '). \
            replace('\r', '').replace('"', "'")
        test_result["type"] = "FUNC"
        test_result["status"] = test.status

        if "PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
            if not groups:
                tag_count = 0
                for tag in test_result["tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
                                           "-{0}-".format(tag_tc.lower()),
                                           self._test_ID,
                                           count=1)
                    test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                 "-{0}-".format(tag_tc.lower()),
                                                 test_result["name"],
                                                 count=1)
                else:
                    test_result["status"] = "FAIL"
                    self._data["tests"][self._test_ID] = test_result
                    logging.debug("The test '{0}' has none or more than one "
                                  "multi-threading tag.".format(self._test_ID))
                    logging.debug("Tags: {0}".format(test_result["tags"]))
                    return

        if test.status == "PASS" and ("NDRPDRDISC" in tags or
                                      "NDRPDR" in tags or
                                      "SOAK" in tags or
                                      "TCP" in tags or
                                      "MRR" in tags or
                                      "BMRR" in tags):
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if "NDRDISC" in tags:
                test_result["type"] = "NDR"
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            elif "PDRDISC" in tags:
                test_result["type"] = "PDR"
            elif "NDRPDR" in tags:
                test_result["type"] = "NDRPDR"
            elif "SOAK" in tags:
                test_result["type"] = "SOAK"
            elif "TCP" in tags:
                test_result["type"] = "TCP"
            elif "MRR" in tags:
                test_result["type"] = "MRR"
            elif "FRMOBL" in tags or "BMRR" in tags:
                test_result["type"] = "BMRR"
            else:
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result
                return

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if test_result["type"] in ("NDR", "PDR"):
                try:
                    rate_value = str(re.search(
                        self.REGEX_RATE, test.message).group(1))
                except AttributeError:
                    rate_value = "-1"
                try:
                    rate_unit = str(re.search(
                        self.REGEX_RATE, test.message).group(2))
                except AttributeError:
                    rate_unit = "-1"

                test_result["throughput"] = dict()
                test_result["throughput"]["value"] = \
                    int(rate_value.split('.')[0])
                test_result["throughput"]["unit"] = rate_unit
                test_result["latency"] = \
                    self._get_latency(test.message, test_result["type"])
                if test_result["type"] == "PDR":
                    test_result["lossTolerance"] = str(re.search(
                        self.REGEX_TOLERANCE, test.message).group(1))

            elif test_result["type"] in ("NDRPDR", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result["latency"], test_result["status"] = \
                    self._get_ndrpdr_latency(test.message)

            elif test_result["type"] in ("SOAK", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_plr_throughput(test.message)

            elif test_result["type"] in ("TCP", ):
                groups = re.search(self.REGEX_TCP, test.message)
                test_result["result"] = int(groups.group(2))

            elif test_result["type"] in ("MRR", "BMRR"):
                test_result["result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    metadata = AvgStdevMetadataFactory.from_data(items_float)
                    # Next two lines have been introduced in CSIT-1179,
                    # to be removed in CSIT-1180.
                    metadata.size = 1
                    metadata.stdev = 0.0
                    test_result["result"]["receive-rate"] = metadata
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result["result"]["receive-rate"] = \
                        AvgStdevMetadataFactory.from_data([
                            float(groups.group(3)) / float(groups.group(1)), ])

        self._data["tests"][self._test_ID] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        pass

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Dispatches to the setup, teardown or
        test keyword visitor based on the keyword type.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == "setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == "teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
            else:
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        pass

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Selects the message parser based
        on the keyword name.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count("Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = "test-show-runtime"
        elif test_kw.name.count("Start The L2fwd Test") and not self._version:
            self._msg_type = "dpdk-version"
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Selects the message parser based
        on the keyword name.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count("Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = "vpp-version"

        elif setup_kw.name.count("Setup performance global Variables") \
                and not self._timestamp:
            self._msg_type = "timestamp"
        elif setup_kw.name.count("Setup Framework") and not self._testbed:
            self._msg_type = "testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts. Selects the message parser
        based on the keyword name.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count("Show Vat History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count("Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Parses the message using the parser
        selected by the current message type (self._msg_type).

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        pass


class InputData(object):
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' Series.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """

    def __init__(self, spec):
        """Initialization.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data.
        :rtype: pandas.Series
        """
        return self._input_data

    def metadata(self, job, build):
        """Getter - metadata.

        :param job: Job whose metadata we want.
        :param build: Build whose metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata.
        :rtype: pandas.Series
        """

        return self.data[job][build]["metadata"]

    def suites(self, job, build):
        """Getter - suites.

        :param job: Job whose suites we want.
        :param build: Build whose suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)]["suites"]

    def tests(self, job, build):
        """Getter - tests.

        :param job: Job whose tests we want.
        :param build: Build whose tests we want.
        :type job: str
        :type build: str
        :returns: Tests.
        :rtype: pandas.Series
        """

        return self.data[job][build]["tests"]

    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of the job whose build output data will be
            processed.
        :param build: The build whose output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """

        metadata = {
            "job": job,
            "build": build
        }

        with open(build["file-name"], 'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(("ERROR", "Error occurred while parsing output.xml: "
                                     "{0}".format(err)))
                return None
        checker = ExecutionChecker(metadata, self._cfg.mapping,
                                   self._cfg.ignore)
        result.visit(checker)

        return checker.data

    def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
        """Download and parse the input data file.

        :param pid: PID of the process executing this method.
        :param data_queue: Shared memory between processes. Queue which keeps
            the result data. This data is then read by the main process and used
            in further processing.
        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :type pid: int
        :type data_queue: multiprocessing.Manager().Queue()
        :type job: str
        :type build: dict
        :type repeat: int
        """

        logs = list()

        logging.info("  Processing the job/build: {0}: {1}".
                     format(job, build["build"]))

        logs.append(("INFO", "  Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        state = "failed"
        success = False
        data = None
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                                   logs)
            if success:
                break
            do_repeat -= 1
        if not success:
            logs.append(("ERROR", "It is not possible to download the input "
                                  "data file from the job '{job}', build "
                                  "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))
        if success:
            logs.append(("INFO", "  Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = self._parse_tests(job, build, logs)
            if data is None:
                logs.append(("ERROR", "Input data file from the job '{job}', "
                                      "build '{build}' is damaged. Skipped.".
                             format(job=job, build=build["build"])))
            else:
                state = "processed"

            try:
                remove(build["file-name"])
            except OSError as err:
                logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                             format(build["file-name"], repr(err))))

        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get("time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get("metadata", None)
            if metadata:
                generated = metadata.get("generated", None)
                if generated:
                    generated = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = "removed"
                        data = None
                        logs.append(
                            ("INFO",
                             "    The build {job}/{build} is outdated, will be "
                             "removed".format(job=job, build=build["build"])))
                        file_name = self._cfg.input["file-name"]
                        full_name = join(
                            self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
                            "{job}{sep}{build}{sep}{name}".
                                format(job=job,
                                       sep=SEPARATOR,
                                       build=build["build"],
                                       name=file_name))
                        try:
                            remove(full_name)
                            logs.append(("INFO",
                                         "    The file {name} has been removed".
                                         format(name=full_name)))
                        except OSError as err:
                            logs.append(("ERROR",
                                        "Cannot remove the file '{0}': {1}".
                                        format(full_name, repr(err))))

        logs.append(("INFO", "  Done."))

        result = {
            "data": data,
            "state": state,
            "job": job,
            "build": build,
            "logs": logs
        }
        data_queue.put(result)

    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files
        and store them in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """

        logging.info("Downloading and parsing input files ...")

        work_queue = multiprocessing.JoinableQueue()
        manager = multiprocessing.Manager()
        data_queue = manager.Queue()
        cpus = multiprocessing.cpu_count()

        workers = list()
        for cpu in range(cpus):
            worker = Worker(work_queue,
                            data_queue,
                            self._download_and_parse_build)
            worker.daemon = True
            worker.start()
            workers.append(worker)
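            # Pin each worker process to its own CPU core; taskset output is
            # discarded, so on systems without taskset the pinning silently
            # does nothing.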
            os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
                      format(cpu, worker.pid))

        for job, builds in self._cfg.builds.items():
            for build in builds:
                work_queue.put((job, build, repeat))

        work_queue.join()

        logging.info("Done.")

        while not data_queue.empty():
            result = data_queue.get()

            job = result["job"]
            build_nr = result["build"]["build"]

            if result["data"]:
                data = result["data"]
                build_data = pd.Series({
                    "metadata": pd.Series(data["metadata"].values(),
                                          index=data["metadata"].keys()),
                    "suites": pd.Series(data["suites"].values(),
                                        index=data["suites"].keys()),
                    "tests": pd.Series(data["tests"].values(),
                                       index=data["tests"].keys())})

                if self._input_data.get(job, None) is None:
                    self._input_data[job] = pd.Series()
                self._input_data[job][str(build_nr)] = build_data

                self._cfg.set_input_file_name(job, build_nr,
                                              result["build"]["file-name"])

            self._cfg.set_input_state(job, build_nr, result["state"])

            for item in result["logs"]:
                if item[0] == "INFO":
                    logging.info(item[1])
                elif item[0] == "ERROR":
                    logging.error(item[1])
                elif item[0] == "DEBUG":
                    logging.debug(item[1])
                elif item[0] == "CRITICAL":
                    logging.critical(item[1])
                elif item[0] == "WARNING":
                    logging.warning(item[1])

        del data_queue

        # Terminate all workers
        for worker in workers:
            worker.terminate()
            worker.join()

        logging.info("Done.")

    @staticmethod
    def _end_of_tag(tag_filter, start=0, closer="'"):
        """Return the index of the character in the string which is the end
        of the tag.

        :param tag_filter: The string where the end of tag is being searched.
        :param start: The index where the searching is started.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer.
        :rtype: int
        """

        try:
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None

    @staticmethod
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
        """

        index = 0
        while True:
            index = InputData._end_of_tag(tag_filter, index)
            if index is None:
                return tag_filter
            index += 1
            tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]

    def filter_data(self, element, params=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if an error occurs while reading
            the data. The element will be empty in that case.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """

        try:
            if element["filter"] in ("all", "template"):
                cond = "True"
            else:
                cond = InputData._condition(element["filter"])
            logging.debug("   Filter: {0}".format(cond))
        except KeyError:
            logging.error("  No filter defined.")
            return None

        if params is None:
            params = element.get("parameters", None)
            if params:
                params.append("type")

        data = pd.Series()
        try:
            for job, builds in element["data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_iter = self.data[job][str(build)][data_set].\
                            iteritems()
                    except KeyError:
                        if continue_on_error:
                            continue
                        else:
                            return None
                    for test_ID, test_data in data_iter:
                        if eval(cond, {"tags": test_data.get("tags", "")}):
                            data[job][str(build)][test_ID] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_ID][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_ID][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_ID][param] =\
                                            "No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error("   Missing mandatory parameter in the element "
                          "specification: {0}".format(err))
            return None
        except AttributeError:
            return None
        except SyntaxError:
            logging.error("   The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
            return None

    @staticmethod
    def merge_data(data):
        """Merge data from multiple jobs and builds into a simple data
        structure.

        The output data structure is:

        - test (suite) 1 ID:
          - param 1
          - param 2
          ...
          - param n
        ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
        """

        logging.info("    Merging data ...")

        merged_data = pd.Series()
        for _, builds in data.iteritems():
            for _, item in builds.iteritems():
                for ID, item_data in item.iteritems():
                    merged_data[ID] = item_data

        return merged_data