[csit.git] resources/tools/presentation/input_data_parser.py
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """

import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove
from datetime import datetime as dt
from datetime import timedelta
from json import loads

import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file


# Separator used in file names
SEPARATOR = u"__"


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a JSON structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    }
                }
            }

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            }

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            }

            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
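
    Example of use (a sketch, assuming a Robot Framework output.xml file is
    available locally; InputData._parse_tests below shows the real call site):

        result = ExecutionResult(u"output.xml")
        checker = ExecutionChecker(metadata=dict(), mapping=dict(),
                                   ignore=list())
        result.visit(checker)
        # checker.data now holds the structure described above.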
    """

    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::?\s(\d+.\d+)')

    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
    REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')

    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')

    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        # Number of "Show Runtime Counters On All Duts" keywords found
        self._lookup_kw_nr = 0

        # Number of PAPI/VAT History messages found:
        # 0 - no message
        # 1 - history of DUT1
        # 2 - history of DUT2
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 0 - no message
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of the currently processed test - the lowercase full path to
        # the test
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"timestamp": self._get_timestamp,
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            # TODO: Remove when not needed:
            u"teardown-vat-history": self._get_vat_history,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"return STDOUT Version:") or \
            msg.message.count(u"VPP Version:") or \
            msg.message.count(u"VPP version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        self._timestamp = msg.timestamp[:14]
        self._data[u"metadata"][u"generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        TODO: Remove when not needed.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          r"VAT command history:", u"",
                          msg.message, count=1).replace(u'\n', u' |br| ').\
                replace(u'"', u"'")

            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          r"PAPI command history:", u"",
                          msg.message, count=1).replace(u'\n', u' |br| ').\
                replace(u'"', u"'")

            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
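        # The message carries, after the first ":", a JSON-like list of
        # per-node runtime dicts, e.g. (a sketch, not verbatim output):
        #     [{"name": "ip4-input", "calls": [...], "vectors": [...],
        #       "suspends": [...], "clocks": [...]}, ...]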

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = str()

        if msg.message.count(u"stats runtime") or \
                msg.message.count(u"Runtime"):
            try:
                host = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
                           group(1))
            except (AttributeError, IndexError):
                host = self._data[u"tests"][self._test_id][u"show-run"].\
                           count(u"DUT:") + 1
            try:
                socket = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
                             group(2))
                socket = f"/{socket}"
            except (AttributeError, IndexError):
                socket = u""
            runtime = loads(
                str(msg.message).
                replace(u' ', u'').
                replace(u'\n', u'').
                replace(u"'", u'"').
                replace(u'b"', u'"').
                replace(u'u"', u'"').
                split(u":", 1)[1]
            )
            try:
                threads_nr = len(runtime[0][u"clocks"])
            except (IndexError, KeyError):
                return
            tbl_hdr = [
                u"Name",
                u"Calls",
                u"Vectors",
                u"Suspends",
                u"Clocks",
                u"Vectors/Calls"
            ]
            table = [[tbl_hdr, ] for _ in range(threads_nr)]
            for item in runtime:
                for idx in range(threads_nr):
                    name = format(item[u"name"])
                    calls = format(item[u"calls"][idx])
                    vectors = format(item[u"vectors"][idx])
                    suspends = format(item[u"suspends"][idx])
                    if item[u"vectors"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"vectors"][idx], u".2e")
                    elif item[u"calls"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"calls"][idx], u".2e")
                    elif item[u"suspends"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"suspends"][idx], u".2e")
                    else:
                        clocks = 0
                    if item[u"calls"][idx] > 0:
                        vectors_call = format(
                            item[u"vectors"][idx]/item[u"calls"][idx], u".2f")
                    else:
                        vectors_call = format(0, u".2f")
                    if int(calls) + int(vectors) + int(suspends):
                        table[idx].append([
                            name, calls, vectors, suspends, clocks, vectors_call
                        ])
            text = u""
            for idx in range(threads_nr):
                text += f"Thread {idx} "
                text += u"vpp_main\n" if idx == 0 else f"vpp_wk_{idx-1}\n"
                txt_table = None
                for row in table[idx]:
                    if txt_table is None:
                        txt_table = prettytable.PrettyTable(row)
                    else:
                        if any(row[1:]):
                            txt_table.add_row(row)
                txt_table.set_style(prettytable.MSWORD_FRIENDLY)
                txt_table.align[u"Name"] = u"l"
                txt_table.align[u"Calls"] = u"r"
                txt_table.align[u"Vectors"] = u"r"
                txt_table.align[u"Suspends"] = u"r"
                txt_table.align[u"Clocks"] = u"r"
                txt_table.align[u"Vectors/Calls"] = u"r"

                text += txt_table.get_string(sortby=u"Name") + u'\n'
            text = f" \n**DUT: {host}{socket}**\n{text}".\
                replace(u'\n', u' |br| ').\
                replace(u'\r', u'').\
                replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"show-run"] += text

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
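
        Example of the expected message fragment (a sketch; the rates and the
        intervening lines are illustrative only):

            NDR_LOWER: 12345678.0 pps
            <additional info>
            NDR_UPPER: 12345679.0 pps
            PDR_LOWER: 12345680.0 pps
            <additional info>
            PDR_UPPER: 12345681.0 pps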
        """

        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0,
            u"avg": -1.0,
            u"max": -1.0,
            u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            }
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with float values under the corresponding keys,
                except hdrh, which stays a string.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
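
            Example (hypothetical input):
                process_latency(u"1/2/3/encoded-hdrh") returns
                {u"min": 1.0, u"avg": 2.0, u"max": 3.0,
                 u"hdrh": u"encoded-hdrh"}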
            """
            in_list = in_str.split('/', 3)

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

        if groups is not None:
            try:
                latency[u"NDR"][u"direction1"] = \
                    process_latency(groups.group(1))
                latency[u"NDR"][u"direction2"] = \
                    process_latency(groups.group(2))
                latency[u"PDR"][u"direction1"] = \
                    process_latency(groups.group(3))
                latency[u"PDR"][u"direction2"] = \
                    process_latency(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return latency, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'*[', u' |br| *[').\
            replace(u"*", u"**").\
            replace(u' |br| *[', u'*[', 1)

        self._data[u"suites"][suite.longname.lower().
                              replace(u'"', u"'").
                              replace(u" ", u"_")] = {
                                  u"name": suite.name.lower(),
                                  u"doc": doc_str,
                                  u"parent": parent_name,
                                  u"level": len(suite.longname.split(u"."))
                              }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            logging.debug(
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
                f"{name}"
            )
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, u"", name)

        test_result[u"parent"] = test.parent.name.lower()
        test_result[u"tags"] = tags
        test_result[u"doc"] = test.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'[', u' |br| [').\
            replace(u' |br| [', u'[', 1)
        test_result[u"msg"] = test.message.\
            replace(u'\n', u' |br| ').\
            replace(u'\r', u'').\
            replace(u'"', u"'")
        test_result[u"type"] = u"FUNC"
        test_result[u"status"] = test.status

        if u"PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
            if not groups:
                tag_count = 0
                tag_tc = str()
                for tag in test_result[u"tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
                                           f"-{tag_tc.lower()}-",
                                           self._test_id,
                                           count=1)
                    test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                  f"-{tag_tc.lower()}-",
                                                  test_result[u"name"],
                                                  count=1)
                else:
                    test_result[u"status"] = u"FAIL"
                    self._data[u"tests"][self._test_id] = test_result
                    logging.debug(
                        f"The test {self._test_id} has either no "
                        f"multi-threading tag or more than one.\n"
                        f"Tags: {test_result[u'tags']}"
                    )
                    return

        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                test_result[u"type"] = u"NDRPDR"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
            elif u"SOAK" in tags:
                test_result[u"type"] = u"SOAK"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
            elif u"TCP" in tags:
                test_result[u"type"] = u"TCP"
                groups = re.search(self.REGEX_TCP, test.message)
                test_result[u"result"] = int(groups.group(2))
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                if u"MRR" in tags:
                    test_result[u"type"] = u"MRR"
                else:
                    test_result[u"type"] = u"BMRR"

                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    # Use whole list in CSIT-1180.
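                    # AvgStdevStats computes the average of the trial results;
                    # e.g. for_runs([10.0, 12.0]).avg == 11.0.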
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"receive-rate"] = stats.avg
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
            elif u"RECONF" in tags:
                test_result[u"type"] = u"RECONF"
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
            else:
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                return

        self._data[u"tests"][self._test_id] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == u"setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == u"teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
            else:
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count(u"Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = u"test-show-runtime"
        elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
            self._msg_type = u"dpdk-version"
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Set Global Variable") \
                and not self._timestamp:
            self._msg_type = u"timestamp"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count(u"Show Vat History On All Duts"):
            # TODO: Remove when not needed:
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Get required information from messages:
        - VPP version.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """


class InputData:
    """Input data.

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' Series.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
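
    Example of use (a sketch; "spec" stands for a parsed specification object
    providing the builds, mapping, ignore and input attributes used below):

        input_data = InputData(spec)
        input_data.download_and_parse_data(repeat=2)
        tests = input_data.tests(u"job-name", u"1")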
    """

    def __init__(self, spec):
        """Initialization.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data.
        :rtype: pandas.Series
        """
        return self._input_data

    def metadata(self, job, build):
        """Getter - metadata.

        :param job: Job whose metadata we want.
        :param build: Build whose metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata.
        :rtype: pandas.Series
        """

        return self.data[job][build][u"metadata"]

    def suites(self, job, build):
        """Getter - suites.

        :param job: Job whose suites we want.
        :param build: Build whose suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)][u"suites"]

    def tests(self, job, build):
        """Getter - tests.

        :param job: Job whose tests we want.
        :param build: Build whose tests we want.
        :type job: str
        :type build: str
        :returns: Tests.
        :rtype: pandas.Series
        """

        return self.data[job][build][u"tests"]

    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of the job whose build output data will be
            processed.
        :param build: The build whose output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """

        metadata = {
            u"job": job,
            u"build": build
        }

        with open(build[u"file-name"], u'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(
                    (u"ERROR", f"Error occurred while parsing output.xml: "
                               f"{repr(err)}")
                )
                return None
        checker = ExecutionChecker(metadata, self._cfg.mapping,
                                   self._cfg.ignore)
        result.visit(checker)

        return checker.data

    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download the given number of times if not
            successful.
        :param pid: PID of the process executing this method.
        :type job: str
        :type build: dict
        :type repeat: int
        :type pid: int
        """

        logs = list()

        logs.append(
            (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
        )

        state = u"failed"
        success = False
        data = None
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                                   logs)
            if success:
                break
            do_repeat -= 1
        if not success:
            logs.append(
                (u"ERROR",
                 f"It is not possible to download the input data file from the "
                 f"job {job}, build {build[u'build']}, or it is damaged. "
                 f"Skipped.")
            )
        if success:
            logs.append(
                (u"INFO",
                 f"    Processing data from the build {build[u'build']} ...")
            )
            data = self._parse_tests(job, build, logs)
            if data is None:
                logs.append(
                    (u"ERROR",
                     f"Input data file from the job {job}, build "
                     f"{build[u'build']} is damaged. Skipped.")
                )
            else:
                state = u"processed"

            try:
                remove(build[u"file-name"])
            except OSError as err:
                logs.append(
                    ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
                              f"{repr(err)}")
                )

        # If the time-period is defined in the specification file, discard the
        # data generated outside that period.
        timeperiod = self._cfg.input.get(u"time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get(u"metadata", None)
            if metadata:
                generated = metadata.get(u"generated", None)
                if generated:
                    generated = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = u"removed"
                        data = None
                        logs.append(
                            (u"INFO",
                             f"    The build {job}/{build[u'build']} is "
                             f"outdated, will be removed.")
                        )
        logs.append((u"INFO", u"  Done."))

        for level, line in logs:
            if level == u"INFO":
                logging.info(line)
            elif level == u"ERROR":
                logging.error(line)
            elif level == u"DEBUG":
                logging.debug(line)
            elif level == u"CRITICAL":
                logging.critical(line)
            elif level == u"WARNING":
                logging.warning(line)

        return {u"data": data, u"state": state, u"job": job, u"build": build}

    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download the given number of times if not
            successful.
        :type repeat: int
        """

        logging.info(u"Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result[u"build"][u"build"]

                if result[u"data"]:
                    data = result[u"data"]
                    build_data = pd.Series({
                        u"metadata": pd.Series(
                            list(data[u"metadata"].values()),
                            index=list(data[u"metadata"].keys())
                        ),
                        u"suites": pd.Series(
                            list(data[u"suites"].values()),
                            index=list(data[u"suites"].keys())
                        ),
                        u"tests": pd.Series(
                            list(data[u"tests"].values()),
                            index=list(data[u"tests"].keys())
                        )
                    })

                    if self._input_data.get(job, None) is None:
                        self._input_data[job] = pd.Series()
                    self._input_data[job][str(build_nr)] = build_data

                    self._cfg.set_input_file_name(
                        job, build_nr, result[u"build"][u"file-name"])

                self._cfg.set_input_state(job, build_nr, result[u"state"])

                mem_alloc = \
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
                logging.info(f"Memory allocation: {mem_alloc:.0f}MB")

        logging.info(u"Done.")

    @staticmethod
    def _end_of_tag(tag_filter, start=0, closer=u"'"):
        """Return the index of the character in the string which is the end of
        the tag.

        :param tag_filter: The string where the end of the tag is being
            searched.
        :param start: The index where the searching is started.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer.
        :rtype: int
        """

        try:
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None

    @staticmethod
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
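
        Example (hypothetical filter):
            _condition(u"'NDRPDR' and not '2T2C'") returns
            u"'NDRPDR' in tags and not '2T2C' in tags"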
        """

        index = 0
        while True:
            index = InputData._end_of_tag(tag_filter, index)
            if index is None:
                return tag_filter
            index += 1
            tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]

    def filter_data(self, element, params=None, data=None, data_set=u"tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data: If not None, this data is used instead of data specified
            in the element.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """

        try:
            if element[u"filter"] in (u"all", u"template"):
                cond = u"True"
            else:
                cond = InputData._condition(element[u"filter"])
            logging.debug(f"   Filter: {cond}")
        except KeyError:
            logging.error(u"  No filter defined.")
            return None

        if params is None:
            params = element.get(u"parameters", None)
            if params:
                params.append(u"type")

        data_to_filter = data if data else element[u"data"]
        data = pd.Series()
        try:
            for job, builds in data_to_filter.items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_dict = dict(
                            self.data[job][str(build)][data_set].items())
                    except KeyError:
                        if continue_on_error:
                            continue
                        return None

                    for test_id, test_data in data_dict.items():
                        if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
                            data[job][str(build)][test_id] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_id][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_id][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_id][param] =\
                                            u"No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(
                f"Missing mandatory parameter in the element specification: "
                f"{repr(err)}"
            )
            return None
        except AttributeError as err:
            logging.error(repr(err))
            return None
        except SyntaxError as err:
            logging.error(
                f"The filter {cond} is not correct. Check if all tags are "
                f"enclosed by apostrophes.\n{repr(err)}"
            )
            return None

    def filter_tests_by_name(self, element, params=None, data_set=u"tests",
                             continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
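
        Example of an include list (a hypothetical test-name regexp):

            element[u"include"] = [
                u".*-64b-1t1c-eth-l2xcbase-ndrpdr",
            ]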
        """

        include = element.get(u"include", None)
        if not include:
            logging.warning(u"No tests to include, skipping the element.")
            return None

        if params is None:
            params = element.get(u"parameters", None)
            if params:
                params.append(u"type")

        data = pd.Series()
        try:
            for job, builds in element[u"data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    for test in include:
                        try:
                            reg_ex = re.compile(str(test).lower())
                            for test_id in self.data[job][
                                    str(build)][data_set].keys():
                                if re.match(reg_ex, str(test_id).lower()):
                                    test_data = self.data[job][
                                        str(build)][data_set][test_id]
                                    data[job][str(build)][test_id] = pd.Series()
                                    if params is None:
                                        for param, val in test_data.items():
                                            data[job][str(build)][test_id]\
                                                [param] = val
                                    else:
                                        for param in params:
                                            try:
                                                data[job][str(build)][
                                                    test_id][param] = \
                                                    test_data[param]
                                            except KeyError:
                                                data[job][str(build)][
                                                    test_id][param] = u"No Data"
                        except KeyError as err:
                            logging.error(repr(err))
                            if continue_on_error:
                                continue
                            return None
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(
                f"Missing mandatory parameter in the element "
                f"specification: {repr(err)}"
            )
            return None
        except AttributeError as err:
            logging.error(repr(err))
            return None

    @staticmethod
    def merge_data(data):
        """Merge data from more jobs and builds to a simple data structure.

        The output data structure is:

        - test (suite) 1 ID:
          - param 1
          - param 2
          ...
          - param n
        ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
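
        Note: if the same item ID occurs in more than one build, the entry
        from the build processed last overwrites the earlier ones.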
        """

        logging.info(u"    Merging data ...")

        merged_data = pd.Series()
        for builds in data.values:
            for item in builds.values:
                for item_id, item_data in item.items():
                    merged_data[item_id] = item_data

        return merged_data