Telemetry: Add more operational data
[csit.git] / resources / tools / presentation / input_data_parser.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """
21
22 import re
23 import copy
24 import resource
25 import logging
26
27 from collections import OrderedDict
28 from os import remove
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
32
33 import prettytable
34 import pandas as pd
35
36 from robot.api import ExecutionResult, ResultVisitor
37 from robot import errors
38
39 from resources.libraries.python import jumpavg
40 from input_data_files import download_and_unzip_data_file
41
42
# Separator used between name components in generated file names.
SEPARATOR = u"__"
45
46
47 class ExecutionChecker(ResultVisitor):
48     """Class to traverse through the test suite structure.
49
50     The functionality implemented in this class generates a json structure:
51
52     Performance tests:
53
54     {
55         "metadata": {
56             "generated": "Timestamp",
57             "version": "SUT version",
58             "job": "Jenkins job name",
59             "build": "Information about the build"
60         },
61         "suites": {
62             "Suite long name 1": {
63                 "name": Suite name,
64                 "doc": "Suite 1 documentation",
65                 "parent": "Suite 1 parent",
66                 "level": "Level of the suite in the suite hierarchy"
67             }
68             "Suite long name N": {
69                 "name": Suite name,
70                 "doc": "Suite N documentation",
71                 "parent": "Suite 2 parent",
72                 "level": "Level of the suite in the suite hierarchy"
73             }
74         }
75         "tests": {
76             # NDRPDR tests:
77             "ID": {
78                 "name": "Test name",
79                 "parent": "Name of the parent of the test",
80                 "doc": "Test documentation",
81                 "msg": "Test message",
82                 "conf-history": "DUT1 and DUT2 VAT History",
83                 "show-run": "Show Run",
84                 "tags": ["tag 1", "tag 2", "tag n"],
85                 "type": "NDRPDR",
86                 "status": "PASS" | "FAIL",
87                 "throughput": {
88                     "NDR": {
89                         "LOWER": float,
90                         "UPPER": float
91                     },
92                     "PDR": {
93                         "LOWER": float,
94                         "UPPER": float
95                     }
96                 },
97                 "latency": {
98                     "NDR": {
99                         "direction1": {
100                             "min": float,
101                             "avg": float,
102                             "max": float,
103                             "hdrh": str
104                         },
105                         "direction2": {
106                             "min": float,
107                             "avg": float,
108                             "max": float,
109                             "hdrh": str
110                         }
111                     },
112                     "PDR": {
113                         "direction1": {
114                             "min": float,
115                             "avg": float,
116                             "max": float,
117                             "hdrh": str
118                         },
119                         "direction2": {
120                             "min": float,
121                             "avg": float,
122                             "max": float,
123                             "hdrh": str
124                         }
125                     }
126                 }
127             }
128
129             # TCP tests:
130             "ID": {
131                 "name": "Test name",
132                 "parent": "Name of the parent of the test",
133                 "doc": "Test documentation",
134                 "msg": "Test message",
135                 "tags": ["tag 1", "tag 2", "tag n"],
136                 "type": "TCP",
137                 "status": "PASS" | "FAIL",
138                 "result": int
139             }
140
141             # MRR, BMRR tests:
142             "ID": {
143                 "name": "Test name",
144                 "parent": "Name of the parent of the test",
145                 "doc": "Test documentation",
146                 "msg": "Test message",
147                 "tags": ["tag 1", "tag 2", "tag n"],
148                 "type": "MRR" | "BMRR",
149                 "status": "PASS" | "FAIL",
150                 "result": {
151                     "receive-rate": float,
152                     # Average of a list, computed using AvgStdevStats.
153                     # In CSIT-1180, replace with List[float].
154                 }
155             }
156
157             "ID" {
158                 # next test
159             }
160         }
161     }
162
163
164     Functional tests:
165
166     {
167         "metadata": {  # Optional
168             "version": "VPP version",
169             "job": "Jenkins job name",
170             "build": "Information about the build"
171         },
172         "suites": {
173             "Suite name 1": {
174                 "doc": "Suite 1 documentation",
175                 "parent": "Suite 1 parent",
176                 "level": "Level of the suite in the suite hierarchy"
177             }
178             "Suite name N": {
179                 "doc": "Suite N documentation",
180                 "parent": "Suite 2 parent",
181                 "level": "Level of the suite in the suite hierarchy"
182             }
183         }
184         "tests": {
185             "ID": {
186                 "name": "Test name",
187                 "parent": "Name of the parent of the test",
188                 "doc": "Test documentation"
189                 "msg": "Test message"
190                 "tags": ["tag 1", "tag 2", "tag n"],
191                 "conf-history": "DUT1 and DUT2 VAT History"
192                 "show-run": "Show Run"
193                 "status": "PASS" | "FAIL"
194             },
195             "ID" {
196                 # next test
197             }
198         }
199     }
200
201     .. note:: ID is the lowercase full path to the test.
202     """
203
    # PLRsearch (SOAK test) lower / upper bound rates.
    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::?\s(\d+.\d+)')

    # NDR and PDR lower / upper bound rates from an NDRPDR test message.
    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # Two LATENCY lines (NDR, then PDR), each with two directions.
    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    # Loss acceptance ratio reported by the test.
    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    # VPP version string; several historical message prefixes are accepted.
    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    # DPDK version string; both capitalisations of "version" occur.
    REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")

    # TCP test result (requests / connections per second or throughput).
    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')

    # Old-style MRR result: duration, tx and rx packet counts.
    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    # BMRR result: comma-separated list of per-trial receive rates.
    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    # RECONF test results: packets lost and implied time lost.
    REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
    REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')

    # Threads/cores tag, e.g. "2T1C".
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    # Old test-case name infix with threads and cores, e.g. "-1t1c-".
    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    # New test-case name infix with cores only, e.g. "-1c-".
    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    # Leading test-case number, e.g. "tc01-".
    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    # PAPI/CLI message header: "(host - socket)".
    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
243
244     def __init__(self, metadata, mapping, ignore):
245         """Initialisation.
246
247         :param metadata: Key-value pairs to be included in "metadata" part of
248             JSON structure.
249         :param mapping: Mapping of the old names of test cases to the new
250             (actual) one.
251         :param ignore: List of TCs to be ignored.
252         :type metadata: dict
253         :type mapping: dict
254         :type ignore: list
255         """
256
257         # Type of message to parse out from the test messages
258         self._msg_type = None
259
260         # VPP version
261         self._version = None
262
263         # Timestamp
264         self._timestamp = None
265
266         # Testbed. The testbed is identified by TG node IP address.
267         self._testbed = None
268
269         # Mapping of TCs long names
270         self._mapping = mapping
271
272         # Ignore list
273         self._ignore = ignore
274
275         # Number of PAPI History messages found:
276         # 0 - no message
277         # 1 - PAPI History of DUT1
278         # 2 - PAPI History of DUT2
279         self._lookup_kw_nr = 0
280         self._conf_history_lookup_nr = 0
281
282         # Number of Show Running messages found
283         # 0 - no message
284         # 1 - Show run message found
285         self._show_run_lookup_nr = 0
286
287         # Test ID of currently processed test- the lowercase full path to the
288         # test
289         self._test_id = None
290
291         # The main data structure
292         self._data = {
293             u"metadata": OrderedDict(),
294             u"suites": OrderedDict(),
295             u"tests": OrderedDict()
296         }
297
298         # Save the provided metadata
299         for key, val in metadata.items():
300             self._data[u"metadata"][key] = val
301
302         # Dictionary defining the methods used to parse different types of
303         # messages
304         self.parse_msg = {
305             u"timestamp": self._get_timestamp,
306             u"vpp-version": self._get_vpp_version,
307             u"dpdk-version": self._get_dpdk_version,
308             # TODO: Remove when not needed:
309             u"teardown-vat-history": self._get_vat_history,
310             u"teardown-papi-history": self._get_papi_history,
311             u"test-show-runtime": self._get_show_run,
312             u"testbed": self._get_testbed
313         }
314
315     @property
316     def data(self):
317         """Getter - Data parsed from the XML file.
318
319         :returns: Data parsed from the XML file.
320         :rtype: dict
321         """
322         return self._data
323
324     def _get_testbed(self, msg):
325         """Called when extraction of testbed IP is required.
326         The testbed is identified by TG node IP address.
327
328         :param msg: Message to process.
329         :type msg: Message
330         :returns: Nothing.
331         """
332
333         if msg.message.count(u"Setup of TG node") or \
334                 msg.message.count(u"Setup of node TG host"):
335             reg_tg_ip = re.compile(
336                 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
337             try:
338                 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
339             except (KeyError, ValueError, IndexError, AttributeError):
340                 pass
341             finally:
342                 self._data[u"metadata"][u"testbed"] = self._testbed
343                 self._msg_type = None
344
345     def _get_vpp_version(self, msg):
346         """Called when extraction of VPP version is required.
347
348         :param msg: Message to process.
349         :type msg: Message
350         :returns: Nothing.
351         """
352
353         if msg.message.count(u"return STDOUT Version:") or \
354             msg.message.count(u"VPP Version:") or \
355             msg.message.count(u"VPP version:"):
356             self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
357                                 group(2))
358             self._data[u"metadata"][u"version"] = self._version
359             self._msg_type = None
360
361     def _get_dpdk_version(self, msg):
362         """Called when extraction of DPDK version is required.
363
364         :param msg: Message to process.
365         :type msg: Message
366         :returns: Nothing.
367         """
368
369         if msg.message.count(u"DPDK Version:"):
370             try:
371                 self._version = str(re.search(
372                     self.REGEX_VERSION_DPDK, msg.message).group(2))
373                 self._data[u"metadata"][u"version"] = self._version
374             except IndexError:
375                 pass
376             finally:
377                 self._msg_type = None
378
379     def _get_timestamp(self, msg):
380         """Called when extraction of timestamp is required.
381
382         :param msg: Message to process.
383         :type msg: Message
384         :returns: Nothing.
385         """
386
387         self._timestamp = msg.timestamp[:14]
388         self._data[u"metadata"][u"generated"] = self._timestamp
389         self._msg_type = None
390
391     def _get_vat_history(self, msg):
392         """Called when extraction of VAT command history is required.
393
394         TODO: Remove when not needed.
395
396         :param msg: Message to process.
397         :type msg: Message
398         :returns: Nothing.
399         """
400         if msg.message.count(u"VAT command history:"):
401             self._conf_history_lookup_nr += 1
402             if self._conf_history_lookup_nr == 1:
403                 self._data[u"tests"][self._test_id][u"conf-history"] = str()
404             else:
405                 self._msg_type = None
406             text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
407                           r"VAT command history:", u"",
408                           msg.message, count=1).replace(u'\n', u' |br| ').\
409                 replace(u'"', u"'")
410
411             self._data[u"tests"][self._test_id][u"conf-history"] += (
412                 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
413             )
414
415     def _get_papi_history(self, msg):
416         """Called when extraction of PAPI command history is required.
417
418         :param msg: Message to process.
419         :type msg: Message
420         :returns: Nothing.
421         """
422         if msg.message.count(u"PAPI command history:"):
423             self._conf_history_lookup_nr += 1
424             if self._conf_history_lookup_nr == 1:
425                 self._data[u"tests"][self._test_id][u"conf-history"] = str()
426             else:
427                 self._msg_type = None
428             text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
429                           r"PAPI command history:", u"",
430                           msg.message, count=1).replace(u'\n', u' |br| ').\
431                 replace(u'"', u"'")
432             self._data[u"tests"][self._test_id][u"conf-history"] += (
433                 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
434             )
435
    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        Parses the JSON-like runtime dump out of the message, formats one
        table per VPP thread and appends the result to the "show-run" field
        of the currently processed test.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        # Make sure the target field exists before appending to it.
        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = str()

        if msg.message.count(u"stats runtime") or \
                msg.message.count(u"Runtime"):
            # DUT host IP from the message header; when absent, fall back to
            # an ordinal derived from previously appended DUT outputs.
            try:
                host = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
                           group(1))
            except (AttributeError, IndexError):
                host = self._data[u"tests"][self._test_id][u"show-run"].\
                           count(u"DUT:") + 1
            # Optional PAPI socket path; empty string when not present.
            try:
                socket = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
                             group(2))
                socket = f"/{socket}"
            except (AttributeError, IndexError):
                socket = u""
            # Normalise the python-repr-like payload into valid JSON and
            # parse everything after the first colon.
            runtime = loads(
                str(msg.message).
                replace(u' ', u'').
                replace(u'\n', u'').
                replace(u"'", u'"').
                replace(u'b"', u'"').
                replace(u'u"', u'"').
                split(u":", 1)[1]
            )
            # Thread count is taken from the first node's "clocks" list.
            try:
                threads_nr = len(runtime[0][u"clocks"])
            except (IndexError, KeyError):
                return
            tbl_hdr = [
                u"Name",
                u"Calls",
                u"Vectors",
                u"Suspends",
                u"Clocks",
                u"Vectors/Calls"
            ]
            # One table per thread, each starting with the header row.
            table = [[tbl_hdr, ] for _ in range(threads_nr)]
            for item in runtime:
                for idx in range(threads_nr):
                    name = format(item[u"name"])
                    calls = format(item[u"calls"][idx])
                    vectors = format(item[u"vectors"][idx])
                    suspends = format(item[u"suspends"][idx])
                    # Clocks per vector, per call or per suspend - whichever
                    # denominator is non-zero first; 0 when all are zero.
                    if item[u"vectors"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"vectors"][idx], u".2e")
                    elif item[u"calls"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"calls"][idx], u".2e")
                    elif item[u"suspends"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"suspends"][idx], u".2e")
                    else:
                        clocks = 0
                    if item[u"calls"][idx] > 0:
                        vectors_call = format(
                            item[u"vectors"][idx]/item[u"calls"][idx], u".2f")
                    else:
                        vectors_call = format(0, u".2f")
                    # Skip graph nodes with no activity on this thread.
                    if int(calls) + int(vectors) + int(suspends):
                        table[idx].append([
                            name, calls, vectors, suspends, clocks, vectors_call
                        ])
            text = ""
            for idx in range(threads_nr):
                # Thread 0 is vpp_main, the others are workers.
                text += f"Thread {idx} "
                text += u"vpp_main\n" if idx == 0 else f"vpp_wk_{idx-1}\n"
                txt_table = None
                for row in table[idx]:
                    if txt_table is None:
                        # First row of each table is the header.
                        txt_table = prettytable.PrettyTable(row)
                    else:
                        if any(row[1:]):
                            txt_table.add_row(row)
                txt_table.set_style(prettytable.MSWORD_FRIENDLY)
                txt_table.align[u"Name"] = u"l"
                txt_table.align[u"Calls"] = u"r"
                txt_table.align[u"Vectors"] = u"r"
                txt_table.align[u"Suspends"] = u"r"
                txt_table.align[u"Clocks"] = u"r"
                txt_table.align[u"Vectors/Calls"] = u"r"

                text += txt_table.get_string(sortby=u"Name") + u'\n'
            # Convert to the reST-friendly format used in the final report.
            text = f"\n**DUT: {host}{socket}**\n{text}".\
                replace(u'\n', u' |br| ').\
                replace(u'\r', u'').\
                replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"show-run"] += text
535
536     def _get_ndrpdr_throughput(self, msg):
537         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
538         message.
539
540         :param msg: The test message to be parsed.
541         :type msg: str
542         :returns: Parsed data as a dict and the status (PASS/FAIL).
543         :rtype: tuple(dict, str)
544         """
545
546         throughput = {
547             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
548             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
549         }
550         status = u"FAIL"
551         groups = re.search(self.REGEX_NDRPDR_RATE, msg)
552
553         if groups is not None:
554             try:
555                 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
556                 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
557                 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
558                 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
559                 status = u"PASS"
560             except (IndexError, ValueError):
561                 pass
562
563         return throughput, status
564
565     def _get_plr_throughput(self, msg):
566         """Get PLRsearch lower bound and PLRsearch upper bound from the test
567         message.
568
569         :param msg: The test message to be parsed.
570         :type msg: str
571         :returns: Parsed data as a dict and the status (PASS/FAIL).
572         :rtype: tuple(dict, str)
573         """
574
575         throughput = {
576             u"LOWER": -1.0,
577             u"UPPER": -1.0
578         }
579         status = u"FAIL"
580         groups = re.search(self.REGEX_PLR_RATE, msg)
581
582         if groups is not None:
583             try:
584                 throughput[u"LOWER"] = float(groups.group(1))
585                 throughput[u"UPPER"] = float(groups.group(2))
586                 status = u"PASS"
587             except (IndexError, ValueError):
588                 pass
589
590         return throughput, status
591
592     def _get_ndrpdr_latency(self, msg):
593         """Get LATENCY from the test message.
594
595         :param msg: The test message to be parsed.
596         :type msg: str
597         :returns: Parsed data as a dict and the status (PASS/FAIL).
598         :rtype: tuple(dict, str)
599         """
600         latency_default = {
601             u"min": -1.0,
602             u"avg": -1.0,
603             u"max": -1.0,
604             u"hdrh": u""
605         }
606         latency = {
607             u"NDR": {
608                 u"direction1": copy.copy(latency_default),
609                 u"direction2": copy.copy(latency_default)
610             },
611             u"PDR": {
612                 u"direction1": copy.copy(latency_default),
613                 u"direction2": copy.copy(latency_default)
614             }
615         }
616         status = u"FAIL"
617         groups = re.search(self.REGEX_NDRPDR_LAT, msg)
618
619         def process_latency(in_str):
620             """Return object with parsed latency values.
621
622             TODO: Define class for the return type.
623
624             :param in_str: Input string, min/avg/max/hdrh format.
625             :type in_str: str
626             :returns: Dict with corresponding keys, except hdrh float values.
627             :rtype dict:
628             :throws IndexError: If in_str does not have enough substrings.
629             :throws ValueError: If a substring does not convert to float.
630             """
631             in_list = in_str.split('/', 3)
632
633             rval = {
634                 u"min": float(in_list[0]),
635                 u"avg": float(in_list[1]),
636                 u"max": float(in_list[2]),
637                 u"hdrh": u""
638             }
639
640             if len(in_list) == 4:
641                 rval[u"hdrh"] = str(in_list[3])
642
643             return rval
644
645         if groups is not None:
646             try:
647                 latency[u"NDR"][u"direction1"] = \
648                     process_latency(groups.group(1))
649                 latency[u"NDR"][u"direction2"] = \
650                     process_latency(groups.group(2))
651                 latency[u"PDR"][u"direction1"] = \
652                     process_latency(groups.group(3))
653                 latency[u"PDR"][u"direction2"] = \
654                     process_latency(groups.group(4))
655                 status = u"PASS"
656             except (IndexError, ValueError):
657                 pass
658
659         return latency, status
660
661     def visit_suite(self, suite):
662         """Implements traversing through the suite and its direct children.
663
664         :param suite: Suite to process.
665         :type suite: Suite
666         :returns: Nothing.
667         """
668         if self.start_suite(suite) is not False:
669             suite.suites.visit(self)
670             suite.tests.visit(self)
671             self.end_suite(suite)
672
673     def start_suite(self, suite):
674         """Called when suite starts.
675
676         :param suite: Suite to process.
677         :type suite: Suite
678         :returns: Nothing.
679         """
680
681         try:
682             parent_name = suite.parent.name
683         except AttributeError:
684             return
685
686         doc_str = suite.doc.\
687             replace(u'"', u"'").\
688             replace(u'\n', u' ').\
689             replace(u'\r', u'').\
690             replace(u'*[', u' |br| *[').\
691             replace(u"*", u"**").\
692             replace(u' |br| *[', u'*[', 1)
693
694         self._data[u"suites"][suite.longname.lower().
695                               replace(u'"', u"'").
696                               replace(u" ", u"_")] = {
697                                   u"name": suite.name.lower(),
698                                   u"doc": doc_str,
699                                   u"parent": parent_name,
700                                   u"level": len(suite.longname.split(u"."))
701                               }
702
703         suite.keywords.visit(self)
704
705     def end_suite(self, suite):
706         """Called when suite ends.
707
708         :param suite: Suite to process.
709         :type suite: Suite
710         :returns: Nothing.
711         """
712
713     def visit_test(self, test):
714         """Implements traversing through the test.
715
716         :param test: Test to process.
717         :type test: Test
718         :returns: Nothing.
719         """
720         if self.start_test(test) is not False:
721             test.keywords.visit(self)
722             self.end_test(test)
723
    def start_test(self, test):
        """Called when a test starts.

        Builds the result dictionary for one test case (name, tags, type,
        status and parsed measurement results) and stores it under the test
        ID in self._data[u"tests"].

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            logging.debug(
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
                f"{name}"
            )
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result[u"parent"] = test.parent.name.lower()
        test_result[u"tags"] = tags
        # Normalise documentation and message strings for reST output.
        test_result["doc"] = test.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'[', u' |br| [').\
            replace(u' |br| [', u'[', 1)
        test_result[u"msg"] = test.message.\
            replace(u'\n', u' |br| ').\
            replace(u'\r', u'').\
            replace(u'"', u"'")
        # Defaults; overwritten below for recognised performance test types.
        test_result[u"type"] = u"FUNC"
        test_result[u"status"] = test.status

        if u"PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
            if not groups:
                tag_count = 0
                tag_tc = str()
                for tag in test_result[u"tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
                                           f"-{tag_tc.lower()}-",
                                           self._test_id,
                                           count=1)
                    test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                  f"-{tag_tc.lower()}-",
                                                  test_result["name"],
                                                  count=1)
                else:
                    # Exactly one threads/cores tag is required for the
                    # rename; otherwise store the test as FAIL and stop here.
                    test_result[u"status"] = u"FAIL"
                    self._data[u"tests"][self._test_id] = test_result
                    logging.debug(
                        f"The test {self._test_id} has no or more than one "
                        f"multi-threading tags.\n"
                        f"Tags: {test_result[u'tags']}"
                    )
                    return

        # For passing tests, classify by tag and parse the type-specific
        # result out of the test message.
        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                test_result[u"type"] = u"NDRPDR"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
            elif u"SOAK" in tags:
                test_result[u"type"] = u"SOAK"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
            elif u"TCP" in tags:
                test_result[u"type"] = u"TCP"
                # NOTE(review): re.search may return None here; a message
                # without the expected pattern would raise AttributeError.
                groups = re.search(self.REGEX_TCP, test.message)
                test_result[u"result"] = int(groups.group(2))
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                if u"MRR" in tags:
                    test_result[u"type"] = u"MRR"
                else:
                    test_result[u"type"] = u"BMRR"

                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"receive-rate"] = stats.avg
                else:
                    # Fall back to the older MRR message format
                    # (rx packets / duration).
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
            elif u"RECONF" in tags:
                test_result[u"type"] = u"RECONF"
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
            else:
                # A passing test with none of the recognised type tags is
                # stored with status FAIL (type stays FUNC).
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                return

        self._data[u"tests"][self._test_id] = test_result
858
859     def end_test(self, test):
860         """Called when test ends.
861
862         :param test: Test to process.
863         :type test: Test
864         :returns: Nothing.
865         """
866
867     def visit_keyword(self, keyword):
868         """Implements traversing through the keyword and its child keywords.
869
870         :param keyword: Keyword to process.
871         :type keyword: Keyword
872         :returns: Nothing.
873         """
874         if self.start_keyword(keyword) is not False:
875             self.end_keyword(keyword)
876
877     def start_keyword(self, keyword):
878         """Called when keyword starts. Default implementation does nothing.
879
880         :param keyword: Keyword to process.
881         :type keyword: Keyword
882         :returns: Nothing.
883         """
884         try:
885             if keyword.type == u"setup":
886                 self.visit_setup_kw(keyword)
887             elif keyword.type == u"teardown":
888                 self._lookup_kw_nr = 0
889                 self.visit_teardown_kw(keyword)
890             else:
891                 self._lookup_kw_nr = 0
892                 self.visit_test_kw(keyword)
893         except AttributeError:
894             pass
895
896     def end_keyword(self, keyword):
897         """Called when keyword ends. Default implementation does nothing.
898
899         :param keyword: Keyword to process.
900         :type keyword: Keyword
901         :returns: Nothing.
902         """
903
904     def visit_test_kw(self, test_kw):
905         """Implements traversing through the test keyword and its child
906         keywords.
907
908         :param test_kw: Keyword to process.
909         :type test_kw: Keyword
910         :returns: Nothing.
911         """
912         for keyword in test_kw.keywords:
913             if self.start_test_kw(keyword) is not False:
914                 self.visit_test_kw(keyword)
915                 self.end_test_kw(keyword)
916
917     def start_test_kw(self, test_kw):
918         """Called when test keyword starts. Default implementation does
919         nothing.
920
921         :param test_kw: Keyword to process.
922         :type test_kw: Keyword
923         :returns: Nothing.
924         """
925         if test_kw.name.count(u"Show Runtime On All Duts") or
926                 test_kw.name.count(u"Show Runtime Counters On All Duts"):
927             self._lookup_kw_nr += 1
928             self._show_run_lookup_nr = 0
929             self._msg_type = u"test-show-runtime"
930         elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
931             self._msg_type = u"dpdk-version"
932         else:
933             return
934         test_kw.messages.visit(self)
935
936     def end_test_kw(self, test_kw):
937         """Called when keyword ends. Default implementation does nothing.
938
939         :param test_kw: Keyword to process.
940         :type test_kw: Keyword
941         :returns: Nothing.
942         """
943
944     def visit_setup_kw(self, setup_kw):
945         """Implements traversing through the teardown keyword and its child
946         keywords.
947
948         :param setup_kw: Keyword to process.
949         :type setup_kw: Keyword
950         :returns: Nothing.
951         """
952         for keyword in setup_kw.keywords:
953             if self.start_setup_kw(keyword) is not False:
954                 self.visit_setup_kw(keyword)
955                 self.end_setup_kw(keyword)
956
957     def start_setup_kw(self, setup_kw):
958         """Called when teardown keyword starts. Default implementation does
959         nothing.
960
961         :param setup_kw: Keyword to process.
962         :type setup_kw: Keyword
963         :returns: Nothing.
964         """
965         if setup_kw.name.count(u"Show Vpp Version On All Duts") \
966                 and not self._version:
967             self._msg_type = u"vpp-version"
968         elif setup_kw.name.count(u"Set Global Variable") \
969                 and not self._timestamp:
970             self._msg_type = u"timestamp"
971         elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
972             self._msg_type = u"testbed"
973         else:
974             return
975         setup_kw.messages.visit(self)
976
977     def end_setup_kw(self, setup_kw):
978         """Called when keyword ends. Default implementation does nothing.
979
980         :param setup_kw: Keyword to process.
981         :type setup_kw: Keyword
982         :returns: Nothing.
983         """
984
985     def visit_teardown_kw(self, teardown_kw):
986         """Implements traversing through the teardown keyword and its child
987         keywords.
988
989         :param teardown_kw: Keyword to process.
990         :type teardown_kw: Keyword
991         :returns: Nothing.
992         """
993         for keyword in teardown_kw.keywords:
994             if self.start_teardown_kw(keyword) is not False:
995                 self.visit_teardown_kw(keyword)
996                 self.end_teardown_kw(keyword)
997
998     def start_teardown_kw(self, teardown_kw):
999         """Called when teardown keyword starts
1000
1001         :param teardown_kw: Keyword to process.
1002         :type teardown_kw: Keyword
1003         :returns: Nothing.
1004         """
1005
1006         if teardown_kw.name.count(u"Show Vat History On All Duts"):
1007             # TODO: Remove when not needed:
1008             self._conf_history_lookup_nr = 0
1009             self._msg_type = u"teardown-vat-history"
1010             teardown_kw.messages.visit(self)
1011         elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1012             self._conf_history_lookup_nr = 0
1013             self._msg_type = u"teardown-papi-history"
1014             teardown_kw.messages.visit(self)
1015
1016     def end_teardown_kw(self, teardown_kw):
1017         """Called when keyword ends. Default implementation does nothing.
1018
1019         :param teardown_kw: Keyword to process.
1020         :type teardown_kw: Keyword
1021         :returns: Nothing.
1022         """
1023
1024     def visit_message(self, msg):
1025         """Implements visiting the message.
1026
1027         :param msg: Message to process.
1028         :type msg: Message
1029         :returns: Nothing.
1030         """
1031         if self.start_message(msg) is not False:
1032             self.end_message(msg)
1033
1034     def start_message(self, msg):
1035         """Called when message starts. Get required information from messages:
1036         - VPP version.
1037
1038         :param msg: Message to process.
1039         :type msg: Message
1040         :returns: Nothing.
1041         """
1042
1043         if self._msg_type:
1044             self.parse_msg[self._msg_type](msg)
1045
1046     def end_message(self, msg):
1047         """Called when message ends. Default implementation does nothing.
1048
1049         :param msg: Message to process.
1050         :type msg: Message
1051         :returns: Nothing.
1052         """
1053
1054
1055 class InputData:
1056     """Input data
1057
1058     The data is extracted from output.xml files generated by Jenkins jobs and
1059     stored in pandas' DataFrames.
1060
1061     The data structure:
1062     - job name
1063       - build number
1064         - metadata
1065           (as described in ExecutionChecker documentation)
1066         - suites
1067           (as described in ExecutionChecker documentation)
1068         - tests
1069           (as described in ExecutionChecker documentation)
1070     """
1071
1072     def __init__(self, spec):
1073         """Initialization.
1074
1075         :param spec: Specification.
1076         :type spec: Specification
1077         """
1078
1079         # Specification:
1080         self._cfg = spec
1081
1082         # Data store:
1083         self._input_data = pd.Series()
1084
1085     @property
1086     def data(self):
1087         """Getter - Input data.
1088
1089         :returns: Input data
1090         :rtype: pandas.Series
1091         """
1092         return self._input_data
1093
1094     def metadata(self, job, build):
1095         """Getter - metadata
1096
1097         :param job: Job which metadata we want.
1098         :param build: Build which metadata we want.
1099         :type job: str
1100         :type build: str
1101         :returns: Metadata
1102         :rtype: pandas.Series
1103         """
1104
1105         return self.data[job][build][u"metadata"]
1106
1107     def suites(self, job, build):
1108         """Getter - suites
1109
1110         :param job: Job which suites we want.
1111         :param build: Build which suites we want.
1112         :type job: str
1113         :type build: str
1114         :returns: Suites.
1115         :rtype: pandas.Series
1116         """
1117
1118         return self.data[job][str(build)][u"suites"]
1119
1120     def tests(self, job, build):
1121         """Getter - tests
1122
1123         :param job: Job which tests we want.
1124         :param build: Build which tests we want.
1125         :type job: str
1126         :type build: str
1127         :returns: Tests.
1128         :rtype: pandas.Series
1129         """
1130
1131         return self.data[job][build][u"tests"]
1132
1133     def _parse_tests(self, job, build, log):
1134         """Process data from robot output.xml file and return JSON structured
1135         data.
1136
1137         :param job: The name of job which build output data will be processed.
1138         :param build: The build which output data will be processed.
1139         :param log: List of log messages.
1140         :type job: str
1141         :type build: dict
1142         :type log: list of tuples (severity, msg)
1143         :returns: JSON data structure.
1144         :rtype: dict
1145         """
1146
1147         metadata = {
1148             u"job": job,
1149             u"build": build
1150         }
1151
1152         with open(build[u"file-name"], u'r') as data_file:
1153             try:
1154                 result = ExecutionResult(data_file)
1155             except errors.DataError as err:
1156                 log.append(
1157                     (u"ERROR", f"Error occurred while parsing output.xml: "
1158                                f"{repr(err)}")
1159                 )
1160                 return None
1161         checker = ExecutionChecker(metadata, self._cfg.mapping,
1162                                    self._cfg.ignore)
1163         result.visit(checker)
1164
1165         return checker.data
1166
    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        The build ends in one of three states:
        - u"failed":    the file could not be downloaded or parsed,
        - u"processed": the data was successfully extracted,
        - u"removed":   the parsed data is older than the configured
          time period and is dropped.

        :param pid: PID of the process executing this method.
        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :type pid: int
        :type job: str
        :type build: dict
        :type repeat: int
        :returns: Dictionary with the parsed data (or None), the resulting
            state and the job/build information.
        :rtype: dict
        """

        # Log messages are buffered and flushed at the end — presumably so
        # the output of parallel workers (note the pid parameter) is not
        # interleaved; TODO confirm against the caller.
        logs = list()

        logs.append(
            (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
        )

        state = u"failed"
        success = False
        data = None
        do_repeat = repeat
        # Retry the download up to `repeat` times.
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                                   logs)
            if success:
                break
            do_repeat -= 1
        if not success:
            logs.append(
                (u"ERROR",
                 f"It is not possible to download the input data file from the "
                 f"job {job}, build {build[u'build']}, or it is damaged. "
                 f"Skipped.")
            )
        if success:
            logs.append(
                (u"INFO",
                 f"    Processing data from the build {build[u'build']} ...")
            )
            data = self._parse_tests(job, build, logs)
            if data is None:
                logs.append(
                    (u"ERROR",
                     f"Input data file from the job {job}, build "
                     f"{build[u'build']} is damaged. Skipped.")
                )
            else:
                state = u"processed"

            # The downloaded file is no longer needed once parsed (or found
            # damaged); failure to remove it is logged but not fatal.
            try:
                remove(build[u"file-name"])
            except OSError as err:
                logs.append(
                    ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
                              f"{repr(err)}")
                )

        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get(u"time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get(u"metadata", None)
            if metadata:
                generated = metadata.get(u"generated", None)
                if generated:
                    generated = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = u"removed"
                        data = None
                        logs.append(
                            (u"INFO",
                             f"    The build {job}/{build[u'build']} is "
                             f"outdated, will be removed.")
                        )
        logs.append((u"INFO", u"  Done."))

        # Flush the buffered messages with their recorded severities.
        for level, line in logs:
            if level == u"INFO":
                logging.info(line)
            elif level == u"ERROR":
                logging.error(line)
            elif level == u"DEBUG":
                logging.debug(line)
            elif level == u"CRITICAL":
                logging.critical(line)
            elif level == u"WARNING":
                logging.warning(line)

        return {u"data": data, u"state": state, u"job": job, u"build": build}
1264
    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        For each configured job/build the file is downloaded and parsed via
        _download_and_parse_build(); successfully parsed data is stored in
        self._input_data and the processing state is written back to the
        specification.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """

        logging.info(u"Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result[u"build"][u"build"]

                if result[u"data"]:
                    data = result[u"data"]
                    # Re-pack each section of the parsed data as a pandas
                    # Series keyed the same way as the source dict.
                    build_data = pd.Series({
                        u"metadata": pd.Series(
                            list(data[u"metadata"].values()),
                            index=list(data[u"metadata"].keys())
                        ),
                        u"suites": pd.Series(
                            list(data[u"suites"].values()),
                            index=list(data[u"suites"].keys())
                        ),
                        u"tests": pd.Series(
                            list(data[u"tests"].values()),
                            index=list(data[u"tests"].keys())
                        )
                    })

                    # Builds are stored under their job, indexed by the
                    # stringified build number.
                    if self._input_data.get(job, None) is None:
                        self._input_data[job] = pd.Series()
                    self._input_data[job][str(build_nr)] = build_data

                    self._cfg.set_input_file_name(
                        job, build_nr, result[u"build"][u"file-name"])

                # The state is recorded even for failed/removed builds.
                self._cfg.set_input_state(job, build_nr, result[u"state"])

                # Report peak memory usage (ru_maxrss is in kB on Linux,
                # hence the division to get MB).
                mem_alloc = \
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
                logging.info(f"Memory allocation: {mem_alloc:.0f}MB")

        logging.info(u"Done.")
1313
1314     @staticmethod
1315     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1316         """Return the index of character in the string which is the end of tag.
1317
1318         :param tag_filter: The string where the end of tag is being searched.
1319         :param start: The index where the searching is stated.
1320         :param closer: The character which is the tag closer.
1321         :type tag_filter: str
1322         :type start: int
1323         :type closer: str
1324         :returns: The index of the tag closer.
1325         :rtype: int
1326         """
1327
1328         try:
1329             idx_opener = tag_filter.index(closer, start)
1330             return tag_filter.index(closer, idx_opener + 1)
1331         except ValueError:
1332             return None
1333
1334     @staticmethod
1335     def _condition(tag_filter):
1336         """Create a conditional statement from the given tag filter.
1337
1338         :param tag_filter: Filter based on tags from the element specification.
1339         :type tag_filter: str
1340         :returns: Conditional statement which can be evaluated.
1341         :rtype: str
1342         """
1343
1344         index = 0
1345         while True:
1346             index = InputData._end_of_tag(tag_filter, index)
1347             if index is None:
1348                 return tag_filter
1349             index += 1
1350             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1351
    def filter_data(self, element, params=None, data=None, data_set=u"tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data: If not None, this data is used instead of data specified
            in the element.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """

        # Build the tag condition; the special filters u"all" and u"template"
        # match every item.
        try:
            if element[u"filter"] in (u"all", u"template"):
                cond = u"True"
            else:
                cond = InputData._condition(element[u"filter"])
            logging.debug(f"   Filter: {cond}")
        except KeyError:
            logging.error(u"  No filter defined.")
            return None

        # Without an explicit parameter list, take it from the element;
        # u"type" is always appended as it is needed by the consumers.
        if params is None:
            params = element.get(u"parameters", None)
            if params:
                params.append(u"type")

        # NOTE: `data` is re-bound here from input argument to output store.
        data_to_filter = data if data else element[u"data"]
        data = pd.Series()
        try:
            for job, builds in data_to_filter.items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_dict = dict(
                            self.data[job][str(build)][data_set].items())
                    except KeyError:
                        if continue_on_error:
                            continue
                        return None

                    for test_id, test_data in data_dict.items():
                        # SECURITY NOTE: the condition is evaluated with
                        # eval(); `cond` is derived from the element
                        # specification, not from untrusted input.
                        if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
                            data[job][str(build)][test_id] = pd.Series()
                            if params is None:
                                # Copy all parameters of the matching item.
                                for param, val in test_data.items():
                                    data[job][str(build)][test_id][param] = val
                            else:
                                # Copy only the requested parameters; missing
                                # ones are marked with u"No Data".
                                for param in params:
                                    try:
                                        data[job][str(build)][test_id][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_id][param] =\
                                            u"No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(
                f"Missing mandatory parameter in the element specification: "
                f"{repr(err)}"
            )
            return None
        except AttributeError as err:
            logging.error(repr(err))
            return None
        except SyntaxError as err:
            # Raised by eval() when the filter string is malformed.
            logging.error(
                f"The filter {cond} is not correct. Check if all tags are "
                f"enclosed by apostrophes.\n{repr(err)}"
            )
            return None
1452
    def filter_tests_by_name(self, element, params=None, data_set=u"tests",
                             continue_on_error=False):
        """Filter required data from the given jobs and builds.

        Unlike filter_data(), items are selected by matching their IDs against
        the regular expressions listed in the element's u"include" list.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
        all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
        metadata.
        :param continue_on_error: Continue if there is error while reading the
        data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """

        include = element.get(u"include", None)
        if not include:
            logging.warning(u"No tests to include, skipping the element.")
            return None

        # Without an explicit parameter list, take it from the element;
        # u"type" is always appended as it is needed by the consumers.
        if params is None:
            params = element.get(u"parameters", None)
            if params:
                params.append(u"type")

        data = pd.Series()
        try:
            for job, builds in element[u"data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    # Each u"include" entry is treated as a case-insensitive
                    # regular expression matched against item IDs.
                    for test in include:
                        try:
                            reg_ex = re.compile(str(test).lower())
                            for test_id in self.data[job][
                                    str(build)][data_set].keys():
                                if re.match(reg_ex, str(test_id).lower()):
                                    test_data = self.data[job][
                                        str(build)][data_set][test_id]
                                    data[job][str(build)][test_id] = pd.Series()
                                    if params is None:
                                        # Copy all parameters of the item.
                                        for param, val in test_data.items():
                                            data[job][str(build)][test_id]\
                                                [param] = val
                                    else:
                                        # Copy only the requested parameters;
                                        # missing ones become u"No Data".
                                        for param in params:
                                            try:
                                                data[job][str(build)][
                                                    test_id][param] = \
                                                    test_data[param]
                                            except KeyError:
                                                data[job][str(build)][
                                                    test_id][param] = u"No Data"
                        except KeyError as err:
                            logging.error(repr(err))
                            if continue_on_error:
                                continue
                            return None
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(
                f"Missing mandatory parameter in the element "
                f"specification: {repr(err)}"
            )
            return None
        except AttributeError as err:
            logging.error(repr(err))
            return None
1543
1544     @staticmethod
1545     def merge_data(data):
1546         """Merge data from more jobs and builds to a simple data structure.
1547
1548         The output data structure is:
1549
1550         - test (suite) 1 ID:
1551           - param 1
1552           - param 2
1553           ...
1554           - param n
1555         ...
1556         - test (suite) n ID:
1557         ...
1558
1559         :param data: Data to merge.
1560         :type data: pandas.Series
1561         :returns: Merged data.
1562         :rtype: pandas.Series
1563         """
1564
1565         logging.info(u"    Merging data ...")
1566
1567         merged_data = pd.Series()
1568         for builds in data.values:
1569             for item in builds.values:
1570                 for item_id, item_data in item.items():
1571                     merged_data[item_id] = item_data
1572
1573         return merged_data