# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """

import multiprocessing
import os
import re
import logging

import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors
from collections import OrderedDict
from string import replace
from os import remove
from os.path import join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory

from input_data_files import download_and_unzip_data_file
from utils import Worker


# Separator used in file names
SEPARATOR = "__"


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float
                        }
                    }
                }
            },

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            },

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": AvgStdevMetadata,
                }
            },

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            # NDRPDRDISC tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "PDR" | "NDR",
                "status": "PASS" | "FAIL",
                "throughput": {  # Only type: "PDR" | "NDR"
                    "value": int,
                    "unit": "pps" | "bps" | "percentage"
                },
                "latency": {  # Only type: "PDR" | "NDR"
                    "direction1": {
                        "100": {
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "50": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "10": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        }
                    },
                    "direction2": {
                        "100": {
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "50": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "10": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        }
                    }
                },
                "lossTolerance": "lossTolerance",  # Only type: "PDR"
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run"
            },
            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """
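
    # A consumer typically reads the parsed data via the "data" property,
    # e.g. (an illustrative sketch; "test_id" and "xml_file" stand for a
    # real test ID and an open output.xml file):
    #
    #     checker = ExecutionChecker(metadata, mapping, ignore)
    #     ExecutionResult(xml_file).visit(checker)
    #     ndr = checker.data["tests"][test_id]["throughput"]["NDR"]["LOWER"]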

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::\s(\d+.\d+)')

    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')

    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')

    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
                                    r"(RTE Version: 'DPDK )(.*)(')")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
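
    # Illustrative matches (examples, not taken from real runs):
    # REGEX_TC_TAG matches a multi-threading tag such as "1T1C",
    # REGEX_TC_NAME_OLD matches the "-1t1c-" part of an old-style test name,
    # REGEX_TC_NAME_NEW matches the "-1c-" part of a new-style test name,
    # REGEX_TC_NUMBER matches the "tc01-" prefix of a test case name.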

    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        # Number of "Show Runtime" keywords found so far
        self._lookup_kw_nr = 0

        # Number of configuration history (VAT / PAPI) messages found:
        # 0 - no message
        # 1 - history of DUT1
        # 2 - history of DUT2
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 0 - no message
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of the currently processed test - the lowercase full path to
        # the test
        self._test_ID = None

        # The main data structure
        self._data = {
            "metadata": OrderedDict(),
            "suites": OrderedDict(),
            "tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data["metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            "timestamp": self._get_timestamp,
            "vpp-version": self._get_vpp_version,
            "dpdk-version": self._get_dpdk_version,
            "teardown-vat-history": self._get_vat_history,
            "teardown-papi-history": self._get_papi_history,
            "test-show-runtime": self._get_show_run,
            "testbed": self._get_testbed
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("Arguments:"):
            message = str(msg.message).replace(' ', '').replace('\n', '').\
                replace("'", '"').replace('b"', '"').\
                replace("honeycom", "honeycomb")
            message = loads(message[11:-1])
            try:
                self._testbed = message["TG"]["host"]
            except (KeyError, ValueError):
                pass
            finally:
                self._data["metadata"]["testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT Version:") or \
            msg.message.count("VPP Version:") or \
            msg.message.count("VPP version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data["metadata"]["version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT testpmd"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(4))
                self._data["metadata"]["version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        self._timestamp = msg.timestamp[:14]
        self._data["metadata"]["generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} "
                          r"VAT command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} "
                          r"PAPI command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("return STDOUT Thread "):
            self._show_run_lookup_nr += 1
            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
                self._data["tests"][self._test_ID]["show-run"] = str()
            if self._lookup_kw_nr > 1:
                self._msg_type = None
            if self._show_run_lookup_nr == 1:
                text = msg.message.replace("vat# ", "").\
                    replace("return STDOUT ", "").replace("\n\n", "\n").\
                    replace('\n', ' |br| ').\
                    replace('\r', '').replace('"', "'")
                try:
                    self._data["tests"][self._test_ID]["show-run"] += " |br| "
                    self._data["tests"][self._test_ID]["show-run"] += \
                        "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
                except KeyError:
                    pass

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    def _get_latency(self, msg, test_type):
        """Get the latency data from the test message.

        :param msg: Message to be parsed.
        :param test_type: Type of the test - NDR or PDR.
        :type msg: str
        :type test_type: str
        :returns: Latencies parsed from the message.
        :rtype: dict
        """

        if test_type == "NDR":
            groups = re.search(self.REGEX_LAT_NDR, msg)
            groups_range = range(1, 7)
        elif test_type == "PDR":
            groups = re.search(self.REGEX_LAT_PDR, msg)
            groups_range = range(1, 3)
        else:
            return {}

        latencies = list()
        for idx in groups_range:
            try:
                lat = [int(item) for item in str(groups.group(idx)).split('/')]
            except (AttributeError, ValueError):
                lat = [-1, -1, -1]
            latencies.append(lat)

        keys = ("min", "avg", "max")
        latency = {
            "direction1": {
            },
            "direction2": {
            }
        }

        latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
        latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
        if test_type == "NDR":
            latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
            latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
            latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
            latency["direction2"]["10"] = dict(zip(keys, latencies[5]))

        return latency

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
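
        The message is expected to contain lines matched by
        REGEX_NDRPDR_RATE, e.g. (values illustrative):
        "NDR_LOWER: 12345678.0 ... NDR_UPPER: ... PDR_LOWER: ...
        PDR_UPPER: ...".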
        """

        throughput = {
            "NDR": {"LOWER": -1.0, "UPPER": -1.0},
            "PDR": {"LOWER": -1.0, "UPPER": -1.0}
        }
        status = "FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput["NDR"]["LOWER"] = float(groups.group(1))
                throughput["NDR"]["UPPER"] = float(groups.group(2))
                throughput["PDR"]["LOWER"] = float(groups.group(3))
                throughput["PDR"]["UPPER"] = float(groups.group(4))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            "LOWER": -1.0,
            "UPPER": -1.0
        }
        status = "FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput["LOWER"] = float(groups.group(1))
                throughput["UPPER"] = float(groups.group(2))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        latency = {
            "NDR": {
                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            },
            "PDR": {
                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            }
        }
        status = "FAIL"
        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        if groups is not None:
            keys = ("min", "avg", "max")
            try:
                latency["NDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(1).split('/')]))
                latency["NDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(2).split('/')]))
                latency["PDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(3).split('/')]))
                latency["PDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(4).split('/')]))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return latency, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
            replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
        doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)

        self._data["suites"][suite.longname.lower().replace('"', "'").
            replace(" ", "_")] = {
                "name": suite.name.lower(),
                "doc": doc_str,
                "parent": parent_name,
                "level": len(suite.longname.split("."))
            }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        pass

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split('.')[-1]
            logging.debug("{0}\n{1}\n{2}\n{3}".format(
                self._data["metadata"], longname_orig, longname, name))
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
        # Remove TC number from the TC name (not needed):
        test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result["parent"] = test.parent.name.lower()
        test_result["tags"] = tags
        doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
            replace('\r', '').replace('[', ' |br| [')
        test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
        test_result["msg"] = test.message.replace('\n', ' |br| '). \
            replace('\r', '').replace('"', "'")
        test_result["type"] = "FUNC"
        test_result["status"] = test.status

        if "PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
            if not groups:
                tag_count = 0
                for tag in test_result["tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
                                           "-{0}-".format(tag_tc.lower()),
                                           self._test_ID,
                                           count=1)
                    test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                 "-{0}-".format(tag_tc.lower()),
                                                 test_result["name"],
                                                 count=1)
                else:
                    test_result["status"] = "FAIL"
                    self._data["tests"][self._test_ID] = test_result
                    logging.debug("The test '{0}' has none or more than one "
                                  "multi-threading tag.".format(self._test_ID))
                    logging.debug("Tags: {0}".format(test_result["tags"]))
                    return

        if test.status == "PASS" and ("NDRPDRDISC" in tags or
                                      "NDRPDR" in tags or
                                      "SOAK" in tags or
                                      "TCP" in tags or
                                      "MRR" in tags or
                                      "BMRR" in tags):
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if "NDRDISC" in tags:
                test_result["type"] = "NDR"
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            elif "PDRDISC" in tags:
                test_result["type"] = "PDR"
            elif "NDRPDR" in tags:
                test_result["type"] = "NDRPDR"
            elif "SOAK" in tags:
                test_result["type"] = "SOAK"
            elif "TCP" in tags:
                test_result["type"] = "TCP"
            elif "MRR" in tags:
                test_result["type"] = "MRR"
            elif "FRMOBL" in tags or "BMRR" in tags:
                test_result["type"] = "BMRR"
            else:
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result
                return

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if test_result["type"] in ("NDR", "PDR"):
                try:
                    rate_value = str(re.search(
                        self.REGEX_RATE, test.message).group(1))
                except AttributeError:
                    rate_value = "-1"
                try:
                    rate_unit = str(re.search(
                        self.REGEX_RATE, test.message).group(2))
                except AttributeError:
                    rate_unit = "-1"

                test_result["throughput"] = dict()
                test_result["throughput"]["value"] = \
                    int(rate_value.split('.')[0])
                test_result["throughput"]["unit"] = rate_unit
                test_result["latency"] = \
                    self._get_latency(test.message, test_result["type"])
                if test_result["type"] == "PDR":
                    test_result["lossTolerance"] = str(re.search(
                        self.REGEX_TOLERANCE, test.message).group(1))

            elif test_result["type"] in ("NDRPDR", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result["latency"], test_result["status"] = \
                    self._get_ndrpdr_latency(test.message)

            elif test_result["type"] in ("SOAK", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_plr_throughput(test.message)

            elif test_result["type"] in ("TCP", ):
                groups = re.search(self.REGEX_TCP, test.message)
                test_result["result"] = int(groups.group(2))

            elif test_result["type"] in ("MRR", "BMRR"):
                test_result["result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    metadata = AvgStdevMetadataFactory.from_data(items_float)
                    # Next two lines have been introduced in CSIT-1179,
                    # to be removed in CSIT-1180.
                    metadata.size = 1
                    metadata.stdev = 0.0
                    test_result["result"]["receive-rate"] = metadata
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result["result"]["receive-rate"] = \
                        AvgStdevMetadataFactory.from_data([
                            float(groups.group(3)) / float(groups.group(1)), ])

        self._data["tests"][self._test_ID] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        pass

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == "setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == "teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
            else:
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        pass

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count("Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = "test-show-runtime"
        elif test_kw.name.count("Start The L2fwd Test") and not self._version:
            self._msg_type = "dpdk-version"
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count("Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = "vpp-version"

        elif setup_kw.name.count("Setup performance global Variables") \
                and not self._timestamp:
            self._msg_type = "timestamp"
        elif setup_kw.name.count("Setup Framework") and not self._testbed:
            self._msg_type = "testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts. Default implementation does
        nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count("Show Vat History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count("Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Get required information from messages:
        - VPP version.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        pass


class InputData(object):
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' Series.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """
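
    # Access pattern sketch (the job name and build number below are
    # illustrative placeholders):
    #
    #     input_data = InputData(spec)
    #     input_data.download_and_parse_data()
    #     tests = input_data.tests("csit-vpp-perf-mrr-daily-master", "123")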

    def __init__(self, spec):
        """Initialization.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data
        :rtype: pandas.Series
        """
        return self._input_data

    def metadata(self, job, build):
        """Getter - metadata

        :param job: Job whose metadata we want.
        :param build: Build whose metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata
        :rtype: pandas.Series
        """

        return self.data[job][build]["metadata"]

    def suites(self, job, build):
        """Getter - suites

        :param job: Job whose suites we want.
        :param build: Build whose suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)]["suites"]

    def tests(self, job, build):
        """Getter - tests

        :param job: Job whose tests we want.
        :param build: Build whose tests we want.
        :type job: str
        :type build: str
        :returns: Tests.
        :rtype: pandas.Series
        """

        return self.data[job][build]["tests"]

    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of the job whose build output data will be
            processed.
        :param build: The build whose output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """

        metadata = {
            "job": job,
            "build": build
        }

        with open(build["file-name"], 'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(("ERROR", "Error occurred while parsing output.xml: "
                                     "{0}".format(err)))
                return None
        checker = ExecutionChecker(metadata, self._cfg.mapping,
                                   self._cfg.ignore)
        result.visit(checker)

        return checker.data

    def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
        """Download and parse the input data file.

        :param pid: PID of the process executing this method.
        :param data_queue: Shared memory between processes. Queue which keeps
            the result data. This data is then read by the main process and used
            in further processing.
        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :type pid: int
        :type data_queue: multiprocessing.Manager().Queue()
        :type job: str
        :type build: dict
        :type repeat: int
        """

        logs = list()

        logging.info("  Processing the job/build: {0}: {1}".
                     format(job, build["build"]))

        logs.append(("INFO", "  Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        state = "failed"
        success = False
        data = None
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                                   logs)
            if success:
                break
            do_repeat -= 1
        if not success:
            logs.append(("ERROR", "It is not possible to download the input "
                                  "data file from the job '{job}', build "
                                  "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))
        if success:
            logs.append(("INFO", "  Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = self._parse_tests(job, build, logs)
            if data is None:
                logs.append(("ERROR", "Input data file from the job '{job}', "
                                      "build '{build}' is damaged. Skipped.".
                             format(job=job, build=build["build"])))
            else:
                state = "processed"

            try:
                remove(build["file-name"])
            except OSError as err:
                logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                             format(build["file-name"], repr(err))))

        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get("time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get("metadata", None)
            if metadata:
                generated = metadata.get("generated", None)
                if generated:
                    generated = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = "removed"
                        data = None
                        logs.append(
                            ("INFO",
                             "    The build {job}/{build} is outdated, will be "
                             "removed".format(job=job, build=build["build"])))
                        file_name = self._cfg.input["file-name"]
                        full_name = join(
                            self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
                            "{job}{sep}{build}{sep}{name}".
                                format(job=job,
                                       sep=SEPARATOR,
                                       build=build["build"],
                                       name=file_name))
                        try:
                            remove(full_name)
                            logs.append(("INFO",
                                         "    The file {name} has been removed".
                                         format(name=full_name)))
                        except OSError as err:
                            logs.append(("ERROR",
                                        "Cannot remove the file '{0}': {1}".
                                        format(full_name, repr(err))))

        logs.append(("INFO", "  Done."))

        result = {
            "data": data,
            "state": state,
            "job": job,
            "build": build,
            "logs": logs
        }
        data_queue.put(result)

    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """

        logging.info("Downloading and parsing input files ...")

        work_queue = multiprocessing.JoinableQueue()
        manager = multiprocessing.Manager()
        data_queue = manager.Queue()
        cpus = multiprocessing.cpu_count()

        workers = list()
        for cpu in range(cpus):
            worker = Worker(work_queue,
                            data_queue,
                            self._download_and_parse_build)
            worker.daemon = True
            worker.start()
            workers.append(worker)
            os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
                      format(cpu, worker.pid))

        for job, builds in self._cfg.builds.items():
            for build in builds:
                work_queue.put((job, build, repeat))

        work_queue.join()

        logging.info("Done.")

        while not data_queue.empty():
            result = data_queue.get()

            job = result["job"]
            build_nr = result["build"]["build"]

            if result["data"]:
                data = result["data"]
                build_data = pd.Series({
                    "metadata": pd.Series(data["metadata"].values(),
                                          index=data["metadata"].keys()),
                    "suites": pd.Series(data["suites"].values(),
                                        index=data["suites"].keys()),
                    "tests": pd.Series(data["tests"].values(),
                                       index=data["tests"].keys())})

                if self._input_data.get(job, None) is None:
                    self._input_data[job] = pd.Series()
                self._input_data[job][str(build_nr)] = build_data

                self._cfg.set_input_file_name(job, build_nr,
                                              result["build"]["file-name"])

            self._cfg.set_input_state(job, build_nr, result["state"])

            for item in result["logs"]:
                if item[0] == "INFO":
                    logging.info(item[1])
                elif item[0] == "ERROR":
                    logging.error(item[1])
                elif item[0] == "DEBUG":
                    logging.debug(item[1])
                elif item[0] == "CRITICAL":
                    logging.critical(item[1])
                elif item[0] == "WARNING":
                    logging.warning(item[1])

        del data_queue

        # Terminate all workers
        for worker in workers:
            worker.terminate()
            worker.join()

        logging.info("Done.")

    @staticmethod
    def _end_of_tag(tag_filter, start=0, closer="'"):
        """Return the index of character in the string which is the end of tag.

        :param tag_filter: The string where the end of tag is being searched.
        :param start: The index where the searching is started.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer.
        :rtype: int
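
        Example (illustrative): in "'MRR' and '1T1C'" the closer of the first
        tag is at index 4, so _end_of_tag("'MRR' and '1T1C'") returns 4.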
        """

        try:
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None

    @staticmethod
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
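
        Example (an illustrative filter, not from a real specification):
        "'NDRPDR' and '1T1C'" becomes
        "'NDRPDR' in tags and '1T1C' in tags".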
        """

        index = 0
        while True:
            index = InputData._end_of_tag(tag_filter, index)
            if index is None:
                return tag_filter
            index += 1
            tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]

    def filter_data(self, element, params=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The element will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
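
        An illustrative filter (not from a real specification):
        "'NDRPDR' and '64B' and not 'VHOST'" selects tests tagged both
        NDRPDR and 64B but not VHOST.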
        """

        try:
            if element["filter"] in ("all", "template"):
                cond = "True"
            else:
                cond = InputData._condition(element["filter"])
            logging.debug("   Filter: {0}".format(cond))
        except KeyError:
            logging.error("  No filter defined.")
            return None

        if params is None:
            params = element.get("parameters", None)
            if params:
                params.append("type")

        data = pd.Series()
        try:
            for job, builds in element["data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_iter = self.data[job][str(build)][data_set].\
                            iteritems()
                    except KeyError:
                        if continue_on_error:
                            continue
                        else:
                            return None
                    for test_ID, test_data in data_iter:
                        if eval(cond, {"tags": test_data.get("tags", "")}):
                            data[job][str(build)][test_ID] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_ID][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_ID][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_ID][param] =\
                                            "No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error("   Missing mandatory parameter in the element "
                          "specification: {0}".format(err))
            return None
        except AttributeError:
            return None
        except SyntaxError:
            logging.error("   The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
            return None

    @staticmethod
    def merge_data(data):
        """Merge data from more jobs and builds to a simple data structure.

        The output data structure is:

        - test (suite) 1 ID:
          - param 1
          - param 2
          ...
          - param n
        ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
        """

        logging.info("    Merging data ...")

        merged_data = pd.Series()
        for _, builds in data.iteritems():
            for _, item in builds.iteritems():
                for ID, item_data in item.iteritems():
                    merged_data[ID] = item_data

        return merged_data
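

# A minimal end-to-end usage sketch (assuming a Specification object "spec"
# and a filter element "element" prepared by the presentation tooling; both
# names are placeholders):
#
#     input_data = InputData(spec)
#     input_data.download_and_parse_data(repeat=2)
#     filtered = input_data.filter_data(element, data_set="tests")
#     merged = InputData.merge_data(filtered)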