CSIT-1483: Add processing of output_info.xml as the first choice
[csit.git] / resources / tools / presentation / input_data_parser.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """
21
22 import multiprocessing
23 import os
24 import re
25 import pandas as pd
26 import logging
27
28 from robot.api import ExecutionResult, ResultVisitor
29 from robot import errors
30 from collections import OrderedDict
31 from string import replace
32 from os import remove
33 from os.path import join
34 from datetime import datetime as dt
35 from datetime import timedelta
36 from json import loads
37 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
38
39 from input_data_files import download_and_unzip_data_file
40 from utils import Worker
41
42
43 # Separator used in file names
44 SEPARATOR = "__"
45
46
47 class ExecutionChecker(ResultVisitor):
48     """Class to traverse through the test suite structure.
49
50     The functionality implemented in this class generates a json structure:
51
52     Performance tests:
53
54     {
55         "metadata": {
56             "generated": "Timestamp",
57             "version": "SUT version",
58             "job": "Jenkins job name",
59             "build": "Information about the build"
60         },
61         "suites": {
62             "Suite long name 1": {
63                 "name": Suite name,
64                 "doc": "Suite 1 documentation",
65                 "parent": "Suite 1 parent",
66                 "level": "Level of the suite in the suite hierarchy"
67             }
68             "Suite long name N": {
69                 "name": Suite name,
70                 "doc": "Suite N documentation",
                "parent": "Suite N parent",
72                 "level": "Level of the suite in the suite hierarchy"
73             }
74         }
75         "tests": {
76             # NDRPDR tests:
77             "ID": {
78                 "name": "Test name",
79                 "parent": "Name of the parent of the test",
80                 "doc": "Test documentation",
81                 "msg": "Test message",
82                 "conf-history": "DUT1 and DUT2 VAT History",
83                 "show-run": "Show Run",
84                 "tags": ["tag 1", "tag 2", "tag n"],
85                 "type": "NDRPDR",
86                 "status": "PASS" | "FAIL",
87                 "throughput": {
88                     "NDR": {
89                         "LOWER": float,
90                         "UPPER": float
91                     },
92                     "PDR": {
93                         "LOWER": float,
94                         "UPPER": float
95                     }
96                 },
97                 "latency": {
98                     "NDR": {
99                         "direction1": {
100                             "min": float,
101                             "avg": float,
102                             "max": float
103                         },
104                         "direction2": {
105                             "min": float,
106                             "avg": float,
107                             "max": float
108                         }
109                     },
110                     "PDR": {
111                         "direction1": {
112                             "min": float,
113                             "avg": float,
114                             "max": float
115                         },
116                         "direction2": {
117                             "min": float,
118                             "avg": float,
119                             "max": float
120                         }
121                     }
122                 }
123             }
124
125             # TCP tests:
126             "ID": {
127                 "name": "Test name",
128                 "parent": "Name of the parent of the test",
129                 "doc": "Test documentation",
130                 "msg": "Test message",
131                 "tags": ["tag 1", "tag 2", "tag n"],
132                 "type": "TCP",
133                 "status": "PASS" | "FAIL",
134                 "result": int
135             }
136
137             # MRR, BMRR tests:
138             "ID": {
139                 "name": "Test name",
140                 "parent": "Name of the parent of the test",
141                 "doc": "Test documentation",
142                 "msg": "Test message",
143                 "tags": ["tag 1", "tag 2", "tag n"],
144                 "type": "MRR" | "BMRR",
145                 "status": "PASS" | "FAIL",
146                 "result": {
147                     "receive-rate": AvgStdevMetadata,
148                 }
149             }
150
151             # TODO: Remove when definitely no NDRPDRDISC tests are used:
152             # NDRPDRDISC tests:
153             "ID": {
154                 "name": "Test name",
155                 "parent": "Name of the parent of the test",
156                 "doc": "Test documentation",
157                 "msg": "Test message",
158                 "tags": ["tag 1", "tag 2", "tag n"],
159                 "type": "PDR" | "NDR",
160                 "status": "PASS" | "FAIL",
161                 "throughput": {  # Only type: "PDR" | "NDR"
162                     "value": int,
163                     "unit": "pps" | "bps" | "percentage"
164                 },
165                 "latency": {  # Only type: "PDR" | "NDR"
166                     "direction1": {
167                         "100": {
168                             "min": int,
169                             "avg": int,
170                             "max": int
171                         },
172                         "50": {  # Only for NDR
173                             "min": int,
174                             "avg": int,
175                             "max": int
176                         },
177                         "10": {  # Only for NDR
178                             "min": int,
179                             "avg": int,
180                             "max": int
181                         }
182                     },
183                     "direction2": {
184                         "100": {
185                             "min": int,
186                             "avg": int,
187                             "max": int
188                         },
189                         "50": {  # Only for NDR
190                             "min": int,
191                             "avg": int,
192                             "max": int
193                         },
194                         "10": {  # Only for NDR
195                             "min": int,
196                             "avg": int,
197                             "max": int
198                         }
199                     }
200                 },
201                 "lossTolerance": "lossTolerance",  # Only type: "PDR"
202                 "conf-history": "DUT1 and DUT2 VAT History"
203                 "show-run": "Show Run"
204             },
205             "ID" {
206                 # next test
207             }
208         }
209     }
210
211
212     Functional tests:
213
214     {
215         "metadata": {  # Optional
216             "version": "VPP version",
217             "job": "Jenkins job name",
218             "build": "Information about the build"
219         },
220         "suites": {
221             "Suite name 1": {
222                 "doc": "Suite 1 documentation",
223                 "parent": "Suite 1 parent",
224                 "level": "Level of the suite in the suite hierarchy"
225             }
226             "Suite name N": {
227                 "doc": "Suite N documentation",
                "parent": "Suite N parent",
229                 "level": "Level of the suite in the suite hierarchy"
230             }
231         }
232         "tests": {
233             "ID": {
234                 "name": "Test name",
235                 "parent": "Name of the parent of the test",
236                 "doc": "Test documentation"
237                 "msg": "Test message"
238                 "tags": ["tag 1", "tag 2", "tag n"],
239                 "conf-history": "DUT1 and DUT2 VAT History"
240                 "show-run": "Show Run"
241                 "status": "PASS" | "FAIL"
242             },
243             "ID" {
244                 # next test
245             }
246         }
247     }
248
249     .. note:: ID is the lowercase full path to the test.
250     """
251
    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    # PLRsearch (SOAK) lower and upper bound rates.
    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::\s(\d+.\d+)')

    # NDR and PDR lower/upper bound rates from NDRPDR test messages.
    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')

    # Latency min/avg/max triples for both directions (PDR tests).
    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')

    # Latency strings for both directions, NDR block then PDR block.
    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    # Loss acceptance value reported by PDR tests.
    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    # VPP version as reported in the test output (several known formats).
    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    # DPDK version from testpmd output ("RTE Version: 'DPDK <version>'").
    REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
                                    r"(RTE Version: 'DPDK )(.*)(')")

    # TCP test result: total requests/connections per second or throughput.
    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    # Single-trial MRR results: trial duration, tx and rx packet counts.
    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    # Multi-trial (B)MRR results: comma-separated list of rates.
    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    # Threads/cores tag, e.g. "2T1C".
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    # Old-style TC name fragment with threads and cores, e.g. "-1t1c-".
    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    # New-style TC name fragment with cores only, e.g. "-1c-".
    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    # Test case number prefix, e.g. "tc01-".
    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
303
304     def __init__(self, metadata, mapping, ignore):
305         """Initialisation.
306
307         :param metadata: Key-value pairs to be included in "metadata" part of
308             JSON structure.
309         :param mapping: Mapping of the old names of test cases to the new
310             (actual) one.
311         :param ignore: List of TCs to be ignored.
312         :type metadata: dict
313         :type mapping: dict
314         :type ignore: list
315         """
316
317         # Type of message to parse out from the test messages
318         self._msg_type = None
319
320         # VPP version
321         self._version = None
322
323         # Timestamp
324         self._timestamp = None
325
326         # Testbed. The testbed is identified by TG node IP address.
327         self._testbed = None
328
329         # Mapping of TCs long names
330         self._mapping = mapping
331
332         # Ignore list
333         self._ignore = ignore
334
335         # Number of VAT History messages found:
336         # 0 - no message
337         # 1 - VAT History of DUT1
338         # 2 - VAT History of DUT2
339         self._lookup_kw_nr = 0
340         self._conf_history_lookup_nr = 0
341
342         # Number of Show Running messages found
343         # 0 - no message
344         # 1 - Show run message found
345         self._show_run_lookup_nr = 0
346
347         # Test ID of currently processed test- the lowercase full path to the
348         # test
349         self._test_ID = None
350
351         # The main data structure
352         self._data = {
353             "metadata": OrderedDict(),
354             "suites": OrderedDict(),
355             "tests": OrderedDict()
356         }
357
358         # Save the provided metadata
359         for key, val in metadata.items():
360             self._data["metadata"][key] = val
361
362         # Dictionary defining the methods used to parse different types of
363         # messages
364         self.parse_msg = {
365             "timestamp": self._get_timestamp,
366             "vpp-version": self._get_vpp_version,
367             "dpdk-version": self._get_dpdk_version,
368             "teardown-vat-history": self._get_vat_history,
369             "teardown-papi-history": self._get_papi_history,
370             "test-show-runtime": self._get_show_run,
371             "testbed": self._get_testbed
372         }
373
374     @property
375     def data(self):
376         """Getter - Data parsed from the XML file.
377
378         :returns: Data parsed from the XML file.
379         :rtype: dict
380         """
381         return self._data
382
383     def _get_testbed(self, msg):
384         """Called when extraction of testbed IP is required.
385         The testbed is identified by TG node IP address.
386
387         :param msg: Message to process.
388         :type msg: Message
389         :returns: Nothing.
390         """
391
392         if msg.message.count("Arguments:"):
393             message = str(msg.message).replace(' ', '').replace('\n', '').\
394                 replace("'", '"').replace('b"', '"').\
395                 replace("honeycom", "honeycomb")
396             message = loads(message[11:-1])
397             try:
398                 self._testbed = message["TG"]["host"]
399             except (KeyError, ValueError):
400                 pass
401             finally:
402                 self._data["metadata"]["testbed"] = self._testbed
403                 self._msg_type = None
404
405     def _get_vpp_version(self, msg):
406         """Called when extraction of VPP version is required.
407
408         :param msg: Message to process.
409         :type msg: Message
410         :returns: Nothing.
411         """
412
413         if msg.message.count("return STDOUT Version:") or \
414             msg.message.count("VPP Version:") or \
415             msg.message.count("VPP version:"):
416             self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
417                                 group(2))
418             self._data["metadata"]["version"] = self._version
419             self._msg_type = None
420
421     def _get_dpdk_version(self, msg):
422         """Called when extraction of DPDK version is required.
423
424         :param msg: Message to process.
425         :type msg: Message
426         :returns: Nothing.
427         """
428
429         if msg.message.count("return STDOUT testpmd"):
430             try:
431                 self._version = str(re.search(
432                     self.REGEX_VERSION_DPDK, msg.message). group(4))
433                 self._data["metadata"]["version"] = self._version
434             except IndexError:
435                 pass
436             finally:
437                 self._msg_type = None
438
439     def _get_timestamp(self, msg):
440         """Called when extraction of timestamp is required.
441
442         :param msg: Message to process.
443         :type msg: Message
444         :returns: Nothing.
445         """
446
447         self._timestamp = msg.timestamp[:14]
448         self._data["metadata"]["generated"] = self._timestamp
449         self._msg_type = None
450
451     def _get_vat_history(self, msg):
452         """Called when extraction of VAT command history is required.
453
454         :param msg: Message to process.
455         :type msg: Message
456         :returns: Nothing.
457         """
458         if msg.message.count("VAT command history:"):
459             self._conf_history_lookup_nr += 1
460             if self._conf_history_lookup_nr == 1:
461                 self._data["tests"][self._test_ID]["conf-history"] = str()
462             else:
463                 self._msg_type = None
464             text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
465                           "VAT command history:", "", msg.message, count=1). \
466                 replace("\n\n", "\n").replace('\n', ' |br| ').\
467                 replace('\r', '').replace('"', "'")
468
469             self._data["tests"][self._test_ID]["conf-history"] += " |br| "
470             self._data["tests"][self._test_ID]["conf-history"] += \
471                 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
472
473     def _get_papi_history(self, msg):
474         """Called when extraction of PAPI command history is required.
475
476         :param msg: Message to process.
477         :type msg: Message
478         :returns: Nothing.
479         """
480         if msg.message.count("PAPI command history:"):
481             self._conf_history_lookup_nr += 1
482             if self._conf_history_lookup_nr == 1:
483                 self._data["tests"][self._test_ID]["conf-history"] = str()
484             else:
485                 self._msg_type = None
486             text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
487                           "PAPI command history:", "", msg.message, count=1). \
488                 replace("\n\n", "\n").replace('\n', ' |br| ').\
489                 replace('\r', '').replace('"', "'")
490
491             self._data["tests"][self._test_ID]["conf-history"] += " |br| "
492             self._data["tests"][self._test_ID]["conf-history"] += \
493                 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
494
    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # "Thread 0 vpp_main" marks the beginning of a "show runtime" dump.
        if msg.message.count("Thread 0 vpp_main"):
            self._show_run_lookup_nr += 1
            # NOTE(review): self._lookup_kw_nr is incremented outside this
            # method (presumably when the show-runtime keyword starts) --
            # confirm against the keyword visitor.
            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
                # First message of the first keyword: initialise the
                # accumulated "show-run" string.
                self._data["tests"][self._test_ID]["show-run"] = str()
            if self._lookup_kw_nr > 1:
                # More than one keyword processed: stop parsing this type.
                self._msg_type = None
            if self._show_run_lookup_nr == 1:
                # Normalise the output for reST: strip prompts/prefixes,
                # convert newlines to |br| markers, double quotes to single.
                text = msg.message.replace("vat# ", "").\
                    replace("return STDOUT ", "").replace("\n\n", "\n").\
                    replace('\n', ' |br| ').\
                    replace('\r', '').replace('"', "'")
                try:
                    self._data["tests"][self._test_ID]["show-run"] += " |br| "
                    self._data["tests"][self._test_ID]["show-run"] += \
                        "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
                except KeyError:
                    # The test entry may be missing; ignore in that case.
                    pass
520
521     # TODO: Remove when definitely no NDRPDRDISC tests are used:
522     def _get_latency(self, msg, test_type):
523         """Get the latency data from the test message.
524
525         :param msg: Message to be parsed.
526         :param test_type: Type of the test - NDR or PDR.
527         :type msg: str
528         :type test_type: str
529         :returns: Latencies parsed from the message.
530         :rtype: dict
531         """
532
533         if test_type == "NDR":
534             groups = re.search(self.REGEX_LAT_NDR, msg)
535             groups_range = range(1, 7)
536         elif test_type == "PDR":
537             groups = re.search(self.REGEX_LAT_PDR, msg)
538             groups_range = range(1, 3)
539         else:
540             return {}
541
542         latencies = list()
543         for idx in groups_range:
544             try:
545                 lat = [int(item) for item in str(groups.group(idx)).split('/')]
546             except (AttributeError, ValueError):
547                 lat = [-1, -1, -1]
548             latencies.append(lat)
549
550         keys = ("min", "avg", "max")
551         latency = {
552             "direction1": {
553             },
554             "direction2": {
555             }
556         }
557
558         latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
559         latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
560         if test_type == "NDR":
561             latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
562             latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
563             latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
564             latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
565
566         return latency
567
568     def _get_ndrpdr_throughput(self, msg):
569         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
570         message.
571
572         :param msg: The test message to be parsed.
573         :type msg: str
574         :returns: Parsed data as a dict and the status (PASS/FAIL).
575         :rtype: tuple(dict, str)
576         """
577
578         throughput = {
579             "NDR": {"LOWER": -1.0, "UPPER": -1.0},
580             "PDR": {"LOWER": -1.0, "UPPER": -1.0}
581         }
582         status = "FAIL"
583         groups = re.search(self.REGEX_NDRPDR_RATE, msg)
584
585         if groups is not None:
586             try:
587                 throughput["NDR"]["LOWER"] = float(groups.group(1))
588                 throughput["NDR"]["UPPER"] = float(groups.group(2))
589                 throughput["PDR"]["LOWER"] = float(groups.group(3))
590                 throughput["PDR"]["UPPER"] = float(groups.group(4))
591                 status = "PASS"
592             except (IndexError, ValueError):
593                 pass
594
595         return throughput, status
596
597     def _get_plr_throughput(self, msg):
598         """Get PLRsearch lower bound and PLRsearch upper bound from the test
599         message.
600
601         :param msg: The test message to be parsed.
602         :type msg: str
603         :returns: Parsed data as a dict and the status (PASS/FAIL).
604         :rtype: tuple(dict, str)
605         """
606
607         throughput = {
608             "LOWER": -1.0,
609             "UPPER": -1.0
610         }
611         status = "FAIL"
612         groups = re.search(self.REGEX_PLR_RATE, msg)
613
614         if groups is not None:
615             try:
616                 throughput["LOWER"] = float(groups.group(1))
617                 throughput["UPPER"] = float(groups.group(2))
618                 status = "PASS"
619             except (IndexError, ValueError):
620                 pass
621
622         return throughput, status
623
624     def _get_ndrpdr_latency(self, msg):
625         """Get LATENCY from the test message.
626
627         :param msg: The test message to be parsed.
628         :type msg: str
629         :returns: Parsed data as a dict and the status (PASS/FAIL).
630         :rtype: tuple(dict, str)
631         """
632
633         latency = {
634             "NDR": {
635                 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
636                 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
637             },
638             "PDR": {
639                 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
640                 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
641             }
642         }
643         status = "FAIL"
644         groups = re.search(self.REGEX_NDRPDR_LAT, msg)
645
646         if groups is not None:
647             keys = ("min", "avg", "max")
648             try:
649                 latency["NDR"]["direction1"] = dict(
650                     zip(keys, [float(l) for l in groups.group(1).split('/')]))
651                 latency["NDR"]["direction2"] = dict(
652                     zip(keys, [float(l) for l in groups.group(2).split('/')]))
653                 latency["PDR"]["direction1"] = dict(
654                     zip(keys, [float(l) for l in groups.group(3).split('/')]))
655                 latency["PDR"]["direction2"] = dict(
656                     zip(keys, [float(l) for l in groups.group(4).split('/')]))
657                 status = "PASS"
658             except (IndexError, ValueError):
659                 pass
660
661         return latency, status
662
663     def visit_suite(self, suite):
664         """Implements traversing through the suite and its direct children.
665
666         :param suite: Suite to process.
667         :type suite: Suite
668         :returns: Nothing.
669         """
670         if self.start_suite(suite) is not False:
671             suite.suites.visit(self)
672             suite.tests.visit(self)
673             self.end_suite(suite)
674
675     def start_suite(self, suite):
676         """Called when suite starts.
677
678         :param suite: Suite to process.
679         :type suite: Suite
680         :returns: Nothing.
681         """
682
683         try:
684             parent_name = suite.parent.name
685         except AttributeError:
686             return
687
688         doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
689             replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
690         doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
691
692         self._data["suites"][suite.longname.lower().replace('"', "'").
693             replace(" ", "_")] = {
694                 "name": suite.name.lower(),
695                 "doc": doc_str,
696                 "parent": parent_name,
697                 "level": len(suite.longname.split("."))
698             }
699
700         suite.keywords.visit(self)
701
    def end_suite(self, suite):
        """Called when suite ends.

        No post-processing is needed when a suite ends; this no-op is kept
        to satisfy the visitor interface.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        pass
710
711     def visit_test(self, test):
712         """Implements traversing through the test.
713
714         :param test: Test to process.
715         :type test: Test
716         :returns: Nothing.
717         """
718         if self.start_test(test) is not False:
719             test.keywords.visit(self)
720             self.end_test(test)
721
722     def start_test(self, test):
723         """Called when test starts.
724
725         :param test: Test to process.
726         :type test: Test
727         :returns: Nothing.
728         """
729
730         longname_orig = test.longname.lower()
731
732         # Check the ignore list
733         if longname_orig in self._ignore:
734             return
735
736         tags = [str(tag) for tag in test.tags]
737         test_result = dict()
738
739         # Change the TC long name and name if defined in the mapping table
740         longname = self._mapping.get(longname_orig, None)
741         if longname is not None:
742             name = longname.split('.')[-1]
743             logging.debug("{0}\n{1}\n{2}\n{3}".format(
744                 self._data["metadata"], longname_orig, longname, name))
745         else:
746             longname = longname_orig
747             name = test.name.lower()
748
749         # Remove TC number from the TC long name (backward compatibility):
750         self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
751         # Remove TC number from the TC name (not needed):
752         test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
753
754         test_result["parent"] = test.parent.name.lower()
755         test_result["tags"] = tags
756         doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
757             replace('\r', '').replace('[', ' |br| [')
758         test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
759         test_result["msg"] = test.message.replace('\n', ' |br| '). \
760             replace('\r', '').replace('"', "'")
761         test_result["type"] = "FUNC"
762         test_result["status"] = test.status
763
764         if "PERFTEST" in tags:
765             # Replace info about cores (e.g. -1c-) with the info about threads
766             # and cores (e.g. -1t1c-) in the long test case names and in the
767             # test case names if necessary.
768             groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
769             if not groups:
770                 tag_count = 0
771                 for tag in test_result["tags"]:
772                     groups = re.search(self.REGEX_TC_TAG, tag)
773                     if groups:
774                         tag_count += 1
775                         tag_tc = tag
776
777                 if tag_count == 1:
778                     self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
779                                            "-{0}-".format(tag_tc.lower()),
780                                            self._test_ID,
781                                            count=1)
782                     test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
783                                                  "-{0}-".format(tag_tc.lower()),
784                                                  test_result["name"],
785                                                  count=1)
786                 else:
787                     test_result["status"] = "FAIL"
788                     self._data["tests"][self._test_ID] = test_result
789                     logging.debug("The test '{0}' has no or more than one "
790                                   "multi-threading tags.".format(self._test_ID))
791                     logging.debug("Tags: {0}".format(test_result["tags"]))
792                     return
793
794         if test.status == "PASS" and ("NDRPDRDISC" in tags or
795                                       "NDRPDR" in tags or
796                                       "SOAK" in tags or
797                                       "TCP" in tags or
798                                       "MRR" in tags or
799                                       "BMRR" in tags):
800             # TODO: Remove when definitely no NDRPDRDISC tests are used:
801             if "NDRDISC" in tags:
802                 test_result["type"] = "NDR"
803             # TODO: Remove when definitely no NDRPDRDISC tests are used:
804             elif "PDRDISC" in tags:
805                 test_result["type"] = "PDR"
806             elif "NDRPDR" in tags:
807                 test_result["type"] = "NDRPDR"
808             elif "SOAK" in tags:
809                 test_result["type"] = "SOAK"
810             elif "TCP" in tags:
811                 test_result["type"] = "TCP"
812             elif "MRR" in tags:
813                 test_result["type"] = "MRR"
814             elif "FRMOBL" in tags or "BMRR" in tags:
815                 test_result["type"] = "BMRR"
816             else:
817                 test_result["status"] = "FAIL"
818                 self._data["tests"][self._test_ID] = test_result
819                 return
820
821             # TODO: Remove when definitely no NDRPDRDISC tests are used:
822             if test_result["type"] in ("NDR", "PDR"):
823                 try:
824                     rate_value = str(re.search(
825                         self.REGEX_RATE, test.message).group(1))
826                 except AttributeError:
827                     rate_value = "-1"
828                 try:
829                     rate_unit = str(re.search(
830                         self.REGEX_RATE, test.message).group(2))
831                 except AttributeError:
832                     rate_unit = "-1"
833
834                 test_result["throughput"] = dict()
835                 test_result["throughput"]["value"] = \
836                     int(rate_value.split('.')[0])
837                 test_result["throughput"]["unit"] = rate_unit
838                 test_result["latency"] = \
839                     self._get_latency(test.message, test_result["type"])
840                 if test_result["type"] == "PDR":
841                     test_result["lossTolerance"] = str(re.search(
842                         self.REGEX_TOLERANCE, test.message).group(1))
843
844             elif test_result["type"] in ("NDRPDR", ):
845                 test_result["throughput"], test_result["status"] = \
846                     self._get_ndrpdr_throughput(test.message)
847                 test_result["latency"], test_result["status"] = \
848                     self._get_ndrpdr_latency(test.message)
849
850             elif test_result["type"] in ("SOAK", ):
851                 test_result["throughput"], test_result["status"] = \
852                     self._get_plr_throughput(test.message)
853
854             elif test_result["type"] in ("TCP", ):
855                 groups = re.search(self.REGEX_TCP, test.message)
856                 test_result["result"] = int(groups.group(2))
857
858             elif test_result["type"] in ("MRR", "BMRR"):
859                 test_result["result"] = dict()
860                 groups = re.search(self.REGEX_BMRR, test.message)
861                 if groups is not None:
862                     items_str = groups.group(1)
863                     items_float = [float(item.strip()) for item
864                                    in items_str.split(",")]
865                     metadata = AvgStdevMetadataFactory.from_data(items_float)
866                     # Next two lines have been introduced in CSIT-1179,
867                     # to be removed in CSIT-1180.
868                     metadata.size = 1
869                     metadata.stdev = 0.0
870                     test_result["result"]["receive-rate"] = metadata
871                 else:
872                     groups = re.search(self.REGEX_MRR, test.message)
873                     test_result["result"]["receive-rate"] = \
874                         AvgStdevMetadataFactory.from_data([
875                             float(groups.group(3)) / float(groups.group(1)), ])
876
877         self._data["tests"][self._test_ID] = test_result
878
879     def end_test(self, test):
880         """Called when test ends.
881
882         :param test: Test to process.
883         :type test: Test
884         :returns: Nothing.
885         """
886         pass
887
888     def visit_keyword(self, keyword):
889         """Implements traversing through the keyword and its child keywords.
890
891         :param keyword: Keyword to process.
892         :type keyword: Keyword
893         :returns: Nothing.
894         """
895         if self.start_keyword(keyword) is not False:
896             self.end_keyword(keyword)
897
898     def start_keyword(self, keyword):
899         """Called when keyword starts. Default implementation does nothing.
900
901         :param keyword: Keyword to process.
902         :type keyword: Keyword
903         :returns: Nothing.
904         """
905         try:
906             if keyword.type == "setup":
907                 self.visit_setup_kw(keyword)
908             elif keyword.type == "teardown":
909                 self._lookup_kw_nr = 0
910                 self.visit_teardown_kw(keyword)
911             else:
912                 self._lookup_kw_nr = 0
913                 self.visit_test_kw(keyword)
914         except AttributeError:
915             pass
916
917     def end_keyword(self, keyword):
918         """Called when keyword ends. Default implementation does nothing.
919
920         :param keyword: Keyword to process.
921         :type keyword: Keyword
922         :returns: Nothing.
923         """
924         pass
925
926     def visit_test_kw(self, test_kw):
927         """Implements traversing through the test keyword and its child
928         keywords.
929
930         :param test_kw: Keyword to process.
931         :type test_kw: Keyword
932         :returns: Nothing.
933         """
934         for keyword in test_kw.keywords:
935             if self.start_test_kw(keyword) is not False:
936                 self.visit_test_kw(keyword)
937                 self.end_test_kw(keyword)
938
939     def start_test_kw(self, test_kw):
940         """Called when test keyword starts. Default implementation does
941         nothing.
942
943         :param test_kw: Keyword to process.
944         :type test_kw: Keyword
945         :returns: Nothing.
946         """
947         if test_kw.name.count("Show Runtime Counters On All Duts"):
948             self._lookup_kw_nr += 1
949             self._show_run_lookup_nr = 0
950             self._msg_type = "test-show-runtime"
951         elif test_kw.name.count("Start The L2fwd Test") and not self._version:
952             self._msg_type = "dpdk-version"
953         else:
954             return
955         test_kw.messages.visit(self)
956
957     def end_test_kw(self, test_kw):
958         """Called when keyword ends. Default implementation does nothing.
959
960         :param test_kw: Keyword to process.
961         :type test_kw: Keyword
962         :returns: Nothing.
963         """
964         pass
965
966     def visit_setup_kw(self, setup_kw):
967         """Implements traversing through the teardown keyword and its child
968         keywords.
969
970         :param setup_kw: Keyword to process.
971         :type setup_kw: Keyword
972         :returns: Nothing.
973         """
974         for keyword in setup_kw.keywords:
975             if self.start_setup_kw(keyword) is not False:
976                 self.visit_setup_kw(keyword)
977                 self.end_setup_kw(keyword)
978
979     def start_setup_kw(self, setup_kw):
980         """Called when teardown keyword starts. Default implementation does
981         nothing.
982
983         :param setup_kw: Keyword to process.
984         :type setup_kw: Keyword
985         :returns: Nothing.
986         """
987         if setup_kw.name.count("Show Vpp Version On All Duts") \
988                 and not self._version:
989             self._msg_type = "vpp-version"
990         elif setup_kw.name.count("Set Global Variable") \
991                 and not self._timestamp:
992             self._msg_type = "timestamp"
993         elif setup_kw.name.count("Setup Framework") and not self._testbed:
994             self._msg_type = "testbed"
995         else:
996             return
997         setup_kw.messages.visit(self)
998
999     def end_setup_kw(self, setup_kw):
1000         """Called when keyword ends. Default implementation does nothing.
1001
1002         :param setup_kw: Keyword to process.
1003         :type setup_kw: Keyword
1004         :returns: Nothing.
1005         """
1006         pass
1007
1008     def visit_teardown_kw(self, teardown_kw):
1009         """Implements traversing through the teardown keyword and its child
1010         keywords.
1011
1012         :param teardown_kw: Keyword to process.
1013         :type teardown_kw: Keyword
1014         :returns: Nothing.
1015         """
1016         for keyword in teardown_kw.keywords:
1017             if self.start_teardown_kw(keyword) is not False:
1018                 self.visit_teardown_kw(keyword)
1019                 self.end_teardown_kw(keyword)
1020
1021     def start_teardown_kw(self, teardown_kw):
1022         """Called when teardown keyword starts. Default implementation does
1023         nothing.
1024
1025         :param teardown_kw: Keyword to process.
1026         :type teardown_kw: Keyword
1027         :returns: Nothing.
1028         """
1029
1030         if teardown_kw.name.count("Show Vat History On All Duts"):
1031             self._conf_history_lookup_nr = 0
1032             self._msg_type = "teardown-vat-history"
1033             teardown_kw.messages.visit(self)
1034         elif teardown_kw.name.count("Show Papi History On All Duts"):
1035             self._conf_history_lookup_nr = 0
1036             self._msg_type = "teardown-papi-history"
1037             teardown_kw.messages.visit(self)
1038
1039     def end_teardown_kw(self, teardown_kw):
1040         """Called when keyword ends. Default implementation does nothing.
1041
1042         :param teardown_kw: Keyword to process.
1043         :type teardown_kw: Keyword
1044         :returns: Nothing.
1045         """
1046         pass
1047
1048     def visit_message(self, msg):
1049         """Implements visiting the message.
1050
1051         :param msg: Message to process.
1052         :type msg: Message
1053         :returns: Nothing.
1054         """
1055         if self.start_message(msg) is not False:
1056             self.end_message(msg)
1057
1058     def start_message(self, msg):
1059         """Called when message starts. Get required information from messages:
1060         - VPP version.
1061
1062         :param msg: Message to process.
1063         :type msg: Message
1064         :returns: Nothing.
1065         """
1066
1067         if self._msg_type:
1068             self.parse_msg[self._msg_type](msg)
1069
1070     def end_message(self, msg):
1071         """Called when message ends. Default implementation does nothing.
1072
1073         :param msg: Message to process.
1074         :type msg: Message
1075         :returns: Nothing.
1076         """
1077         pass
1078
1079
class InputData(object):
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number (stored as a string key)
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """

    def __init__(self, spec):
        """Initialization.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data
        :rtype: pandas.Series
        """
        return self._input_data

    def metadata(self, job, build):
        """Getter - metadata

        :param job: Job which metadata we want.
        :param build: Build which metadata we want.
        :type job: str
        :type build: str or int
        :returns: Metadata
        :rtype: pandas.Series
        """

        # Builds are stored under string keys (see download_and_parse_data),
        # so normalize the build number here, the same way suites() does.
        return self.data[job][str(build)]["metadata"]

    def suites(self, job, build):
        """Getter - suites

        :param job: Job which suites we want.
        :param build: Build which suites we want.
        :type job: str
        :type build: str or int
        :returns: Suites.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)]["suites"]

    def tests(self, job, build):
        """Getter - tests

        :param job: Job which tests we want.
        :param build: Build which tests we want.
        :type job: str
        :type build: str or int
        :returns: Tests.
        :rtype: pandas.Series
        """

        # Builds are stored under string keys (see download_and_parse_data),
        # so normalize the build number here, the same way suites() does.
        return self.data[job][str(build)]["tests"]

    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of job which build output data will be processed.
        :param build: The build which output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """

        metadata = {
            "job": job,
            "build": build
        }

        with open(build["file-name"], 'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(("ERROR", "Error occurred while parsing output.xml: "
                                     "{0}".format(err)))
                return None
        checker = ExecutionChecker(metadata, self._cfg.mapping,
                                   self._cfg.ignore)
        result.visit(checker)

        return checker.data

    def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
        """Download and parse the input data file.

        :param pid: PID of the process executing this method.
        :param data_queue: Shared memory between processes. Queue which keeps
            the result data. This data is then read by the main process and used
            in further processing.
        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :type pid: int
        :type data_queue: multiprocessing.Manager().Queue()
        :type job: str
        :type build: dict
        :type repeat: int
        """

        # Log messages are collected locally and emitted at the end, so the
        # output of parallel workers is not interleaved line by line.
        logs = list()

        logs.append(("INFO", "  Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        state = "failed"
        success = False
        data = None
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                                   logs)
            if success:
                break
            do_repeat -= 1
        if not success:
            logs.append(("ERROR", "It is not possible to download the input "
                                  "data file from the job '{job}', build "
                                  "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))
        if success:
            logs.append(("INFO", "    Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = self._parse_tests(job, build, logs)
            if data is None:
                logs.append(("ERROR", "Input data file from the job '{job}', "
                                      "build '{build}' is damaged. Skipped.".
                             format(job=job, build=build["build"])))
            else:
                state = "processed"

            try:
                remove(build["file-name"])
            except OSError as err:
                logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                             format(build["file-name"], repr(err))))

        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get("time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get("metadata", None)
            if metadata:
                generated = metadata.get("generated", None)
                if generated:
                    generated = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = "removed"
                        data = None
                        logs.append(
                            ("INFO",
                             "    The build {job}/{build} is outdated, will be "
                             "removed".format(job=job, build=build["build"])))
                        file_name = self._cfg.input["file-name"]
                        full_name = join(
                            self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
                            "{job}{sep}{build}{sep}{name}".
                                format(job=job,
                                       sep=SEPARATOR,
                                       build=build["build"],
                                       name=file_name))
                        try:
                            remove(full_name)
                            logs.append(("INFO",
                                         "    The file {name} has been removed".
                                         format(name=full_name)))
                        except OSError as err:
                            logs.append(("ERROR",
                                        "Cannot remove the file '{0}': {1}".
                                        format(full_name, repr(err))))
        logs.append(("INFO", "  Done."))

        for level, line in logs:
            if level == "INFO":
                logging.info(line)
            elif level == "ERROR":
                logging.error(line)
            elif level == "DEBUG":
                logging.debug(line)
            elif level == "CRITICAL":
                logging.critical(line)
            elif level == "WARNING":
                logging.warning(line)

        result = {
            "data": data,
            "state": state,
            "job": job,
            "build": build
        }
        data_queue.put(result)

    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """

        logging.info("Downloading and parsing input files ...")

        work_queue = multiprocessing.JoinableQueue()
        manager = multiprocessing.Manager()
        data_queue = manager.Queue()
        cpus = multiprocessing.cpu_count()

        workers = list()
        for cpu in range(cpus):
            worker = Worker(work_queue,
                            data_queue,
                            self._download_and_parse_build)
            worker.daemon = True
            worker.start()
            workers.append(worker)
            # Pin each worker process to its own CPU; failures are ignored
            # (output discarded) as pinning is only an optimization.
            os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
                      format(cpu, worker.pid))

        for job, builds in self._cfg.builds.items():
            for build in builds:
                work_queue.put((job, build, repeat))

        work_queue.join()

        logging.info("Done.")

        while not data_queue.empty():
            result = data_queue.get()

            job = result["job"]
            build_nr = result["build"]["build"]

            if result["data"]:
                data = result["data"]
                build_data = pd.Series({
                    "metadata": pd.Series(data["metadata"].values(),
                                          index=data["metadata"].keys()),
                    "suites": pd.Series(data["suites"].values(),
                                        index=data["suites"].keys()),
                    "tests": pd.Series(data["tests"].values(),
                                       index=data["tests"].keys())})

                if self._input_data.get(job, None) is None:
                    self._input_data[job] = pd.Series()
                # Builds are keyed by the string form of the build number;
                # all getters must use str(build) as well.
                self._input_data[job][str(build_nr)] = build_data

                self._cfg.set_input_file_name(job, build_nr,
                                              result["build"]["file-name"])

            self._cfg.set_input_state(job, build_nr, result["state"])

        del data_queue

        # Terminate all workers
        for worker in workers:
            worker.terminate()
            worker.join()

        logging.info("Done.")

    @staticmethod
    def _end_of_tag(tag_filter, start=0, closer="'"):
        """Return the index of character in the string which is the end of tag.

        :param tag_filter: The string where the end of tag is being searched.
        :param start: The index where the searching is stated.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer, or None if no complete tag
            (opener + closer pair) is found.
        :rtype: int
        """

        try:
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None

    @staticmethod
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        Each quoted tag 'TAG' in the filter is turned into the expression
        'TAG' in tags, e.g. "'A' and 'B'" -> "'A' in tags and 'B' in tags".

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
        """

        index = 0
        while True:
            index = InputData._end_of_tag(tag_filter, index)
            if index is None:
                return tag_filter
            index += 1
            tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]

    def filter_data(self, element, params=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
        all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
        metadata.
        :param continue_on_error: Continue if there is error while reading the
        data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """

        try:
            if element["filter"] in ("all", "template"):
                cond = "True"
            else:
                cond = InputData._condition(element["filter"])
            logging.debug("   Filter: {0}".format(cond))
        except KeyError:
            logging.error("  No filter defined.")
            return None

        if params is None:
            params = element.get("parameters", None)
            if params:
                params.append("type")

        data = pd.Series()
        try:
            for job, builds in element["data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_iter = self.data[job][str(build)][data_set].\
                            iteritems()
                    except KeyError:
                        if continue_on_error:
                            continue
                        else:
                            return None
                    for test_ID, test_data in data_iter:
                        # NOTE(review): eval of a generated expression -
                        # acceptable only because the filter comes from the
                        # local specification file (trusted input), never
                        # from an external source.
                        if eval(cond, {"tags": test_data.get("tags", "")}):
                            data[job][str(build)][test_ID] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_ID][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_ID][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_ID][param] =\
                                            "No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error("   Missing mandatory parameter in the element "
                          "specification: {0}".format(err))
            return None
        except AttributeError:
            return None
        except SyntaxError:
            logging.error("   The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
            return None

    @staticmethod
    def merge_data(data):
        """Merge data from more jobs and builds to a simple data structure.

        The output data structure is:

        - test (suite) 1 ID:
          - param 1
          - param 2
          ...
          - param n
        ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
        """

        logging.info("    Merging data ...")

        merged_data = pd.Series()
        for _, builds in data.iteritems():
            for _, item in builds.iteritems():
                for ID, item_data in item.iteritems():
                    merged_data[ID] = item_data

        return merged_data

©2016 FD.io a Linux Foundation Collaborative Project. All Rights Reserved.
Linux Foundation is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds.
Please see our privacy policy and terms of use.