# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

14 """Data pre-processing
15
16 - extract data from output.xml files generated by Jenkins jobs and store in
17   pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
20 """

import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove
from datetime import datetime as dt
from datetime import timedelta
from json import loads

import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file


# Separator used in file names
SEPARATOR = u"__"


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    }
                }
            },

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            },

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            },

            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
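
    A minimal usage sketch (the file name and metadata values below are
    placeholders, not part of this module)::

        from robot.api import ExecutionResult

        result = ExecutionResult(u"output.xml")
        checker = ExecutionChecker(
            metadata={u"job": u"example-job", u"build": u"1"},
            mapping=dict(),
            ignore=list()
        )
        result.visit(checker)
        # checker.data now holds the structure described above.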
    """

    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+\.\d+).*\n'
                                r'PLRsearch upper bound::?\s(\d+\.\d+)')

    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+\.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+\.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+\.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+\.\d+)')

    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
    REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')

    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+\.\d+\.\d+\.\d+) - (.*)\)')

    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        # Number of "Show Runtime" keywords found in the current test
        self._lookup_kw_nr = 0

        # Number of PAPI/VAT History messages found:
        # 0 - no message
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 0 - no message
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of currently processed test - the lowercase full path to
        # the test
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"timestamp": self._get_timestamp,
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            # TODO: Remove when not needed:
            u"teardown-vat-history": self._get_vat_history,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"return STDOUT Version:") or \
                msg.message.count(u"VPP Version:") or \
                msg.message.count(u"VPP version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        self._timestamp = msg.timestamp[:14]
        self._data[u"metadata"][u"generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        TODO: Remove when not needed.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} "
                          r"VAT command history:", u"",
                          msg.message, count=1).replace(u'\n', u' |br| ').\
                replace(u'"', u"'")

            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} "
                          r"PAPI command history:", u"",
                          msg.message, count=1).replace(u'\n', u' |br| ').\
                replace(u'"', u"'")

            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
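
        The message is expected to carry, after the first colon, a JSON-like
        list of per-node runtime records (a sketch; names and values are
        placeholders)::

            ... Runtime: [{'name': 'ip4-input', 'calls': [100],
            'vectors': [200], 'suspends': [0], 'clocks': [1.5e2]}, ...]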
        """

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = str()

        if msg.message.count(u"stats runtime") or \
                msg.message.count(u"Runtime"):
            try:
                host = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
                           group(1))
            except (AttributeError, IndexError):
                host = u""
            try:
                socket = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
                             group(2))
            except (AttributeError, IndexError):
                socket = u""
            runtime = loads(
                str(msg.message).
                replace(u' ', u'').
                replace(u'\n', u'').
                replace(u"'", u'"').
                replace(u'b"', u'"').
                replace(u'u"', u'"').
                split(u":", 1)[1]
            )
            try:
                threads_nr = len(runtime[0][u"clocks"])
            except (IndexError, KeyError):
                return
            tbl_hdr = [
                u"Name",
                u"Calls",
                u"Vectors",
                u"Suspends",
                u"Clocks",
                u"Vectors/Calls"
            ]
            table = [[tbl_hdr, ] for _ in range(threads_nr)]
            for item in runtime:
                for idx in range(threads_nr):
                    name = format(item[u"name"])
                    calls = format(item[u"calls"][idx])
                    vectors = format(item[u"vectors"][idx])
                    suspends = format(item[u"suspends"][idx])
                    if item[u"vectors"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"vectors"][idx], u".2e")
                    elif item[u"calls"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"calls"][idx], u".2e")
                    elif item[u"suspends"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"suspends"][idx], u".2e")
                    else:
                        clocks = 0
                    if item[u"calls"][idx] > 0:
                        vectors_call = format(
                            item[u"vectors"][idx]/item[u"calls"][idx], u".2f")
                    else:
                        vectors_call = format(0, u".2f")
                    if int(calls) + int(vectors) + int(suspends):
                        table[idx].append([
                            name, calls, vectors, suspends, clocks, vectors_call
                        ])
            text = u""
            for idx in range(threads_nr):
                text += f"Thread {idx} "
                text += u"vpp_main\n" if idx == 0 else f"vpp_wk_{idx-1}\n"
                txt_table = None
                for row in table[idx]:
                    if txt_table is None:
                        txt_table = prettytable.PrettyTable(row)
                    else:
                        if any(row[1:]):
                            txt_table.add_row(row)
                txt_table.set_style(prettytable.MSWORD_FRIENDLY)
                txt_table.align[u"Name"] = u"l"
                txt_table.align[u"Calls"] = u"r"
                txt_table.align[u"Vectors"] = u"r"
                txt_table.align[u"Suspends"] = u"r"
                txt_table.align[u"Clocks"] = u"r"
                txt_table.align[u"Vectors/Calls"] = u"r"

                text += txt_table.get_string(sortby=u"Name") + u'\n'
            text = f" \n **DUT: {host}/{socket}** \n {text}".\
                replace(u'\n', u' |br| ').\
                replace(u'\r', u'').\
                replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"show-run"] += text

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
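
        Illustrative fragment of a matching message (placeholder values;
        the exact layout is given by REGEX_NDRPDR_RATE, including one extra
        line after each LOWER line)::

            NDR_LOWER: 12345678.0 pps
            (illustrative extra line, e.g. the rate in Gbps)
            NDR_UPPER: 12350000.0 pps
            PDR_LOWER: 12400000.0 pps
            (illustrative extra line, e.g. the rate in Gbps)
            PDR_UPPER: 12450000.0 pps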
        """

        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
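
        Illustrative fragment of a matching message (placeholder values;
        the exact layout is given by REGEX_PLR_RATE)::

            PLRsearch lower bound: 12345678.9 pps ...
            PLRsearch upper bound: 12399999.9 pps ...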
        """

        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0,
            u"avg": -1.0,
            u"max": -1.0,
            u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            }
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with corresponding keys; float values, except hdrh.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
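
            Example (illustrative)::

                process_latency(u"1/2/4/hdrh_data")
                # -> {u"min": 1.0, u"avg": 2.0, u"max": 4.0,
                #     u"hdrh": u"hdrh_data"}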
            """
            in_list = in_str.split('/', 3)

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

        if groups is not None:
            try:
                latency[u"NDR"][u"direction1"] = \
                    process_latency(groups.group(1))
                latency[u"NDR"][u"direction2"] = \
                    process_latency(groups.group(2))
                latency[u"PDR"][u"direction1"] = \
                    process_latency(groups.group(3))
                latency[u"PDR"][u"direction2"] = \
                    process_latency(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return latency, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'*[', u' |br| *[').\
            replace(u"*", u"**").\
            replace(u' |br| *[', u'*[', 1)

        self._data[u"suites"][suite.longname.lower().
                              replace(u'"', u"'").
                              replace(u" ", u"_")] = {
                                  u"name": suite.name.lower(),
                                  u"doc": doc_str,
                                  u"parent": parent_name,
                                  u"level": len(suite.longname.split(u"."))
                              }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            logging.debug(
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
                f"{name}"
            )
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, u"", name)

        test_result[u"parent"] = test.parent.name.lower()
        test_result[u"tags"] = tags
        test_result[u"doc"] = test.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'[', u' |br| [').\
            replace(u' |br| [', u'[', 1)
        test_result[u"msg"] = test.message.\
            replace(u'\n', u' |br| ').\
            replace(u'\r', u'').\
            replace(u'"', u"'")
        test_result[u"type"] = u"FUNC"
        test_result[u"status"] = test.status

        if u"PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
            if not groups:
                tag_count = 0
                tag_tc = str()
                for tag in test_result[u"tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
                                           f"-{tag_tc.lower()}-",
                                           self._test_id,
                                           count=1)
                    test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                  f"-{tag_tc.lower()}-",
                                                  test_result[u"name"],
                                                  count=1)
                else:
                    test_result[u"status"] = u"FAIL"
                    self._data[u"tests"][self._test_id] = test_result
                    logging.debug(
                        f"The test {self._test_id} has none or more than one "
                        f"multi-threading tag.\n"
                        f"Tags: {test_result[u'tags']}"
                    )
                    return

        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                test_result[u"type"] = u"NDRPDR"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
            elif u"SOAK" in tags:
                test_result[u"type"] = u"SOAK"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
            elif u"TCP" in tags:
                test_result[u"type"] = u"TCP"
                groups = re.search(self.REGEX_TCP, test.message)
                test_result[u"result"] = int(groups.group(2))
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                if u"MRR" in tags:
                    test_result[u"type"] = u"MRR"
                else:
                    test_result[u"type"] = u"BMRR"

                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"receive-rate"] = stats.avg
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
            elif u"RECONF" in tags:
                test_result[u"type"] = u"RECONF"
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
            else:
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                return

        self._data[u"tests"][self._test_id] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == u"setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == u"teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
            else:
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count(u"Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = u"test-show-runtime"
        elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
            self._msg_type = u"dpdk-version"
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Set Global Variable") \
                and not self._timestamp:
            self._msg_type = u"timestamp"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count(u"Show Vat History On All Duts"):
            # TODO: Remove when not needed:
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Parse the message using the parser
        selected by the current message type (e.g. VPP version, timestamp).

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """


class InputData:
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' Series.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
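
    Example of typical use (a sketch; the specification object comes from
    the presentation tooling, the job and build names are placeholders)::

        in_data = InputData(spec)
        in_data.download_and_parse_data(repeat=2)
        tests = in_data.tests(u"example-job", u"1")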
    """

    def __init__(self, spec):
        """Initialisation.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data
        :rtype: pandas.Series
        """
        return self._input_data

    def metadata(self, job, build):
        """Getter - metadata.

        :param job: Job whose metadata we want.
        :param build: Build whose metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)][u"metadata"]

    def suites(self, job, build):
        """Getter - suites.

        :param job: Job whose suites we want.
        :param build: Build whose suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)][u"suites"]

    def tests(self, job, build):
        """Getter - tests.

        :param job: Job whose tests we want.
        :param build: Build whose tests we want.
        :type job: str
        :type build: str
        :returns: Tests.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)][u"tests"]

    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of the job whose build output data will be
            processed.
        :param build: The build whose output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
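
        The build argument is expected to carry at least the build number
        and the name of the downloaded file, e.g. (a sketch with placeholder
        values)::

            build = {u"build": 1, u"file-name": u"output.xml"}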
        """

        metadata = {
            u"job": job,
            u"build": build
        }

        with open(build[u"file-name"], u'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(
                    (u"ERROR", f"Error occurred while parsing output.xml: "
                               f"{repr(err)}")
                )
                return None
        checker = ExecutionChecker(metadata, self._cfg.mapping,
                                   self._cfg.ignore)
        result.visit(checker)

        return checker.data

    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :param pid: PID of the process executing this method.
        :type job: str
        :type build: dict
        :type repeat: int
        :type pid: int
        :returns: Dictionary with the parsed data, the state ("processed",
            "failed" or "removed"), the job name and the build information.
        :rtype: dict
        """

        logs = list()

        logs.append(
            (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
        )

        state = u"failed"
        success = False
        data = None
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                                   logs)
            if success:
                break
            do_repeat -= 1
        if not success:
            logs.append(
                (u"ERROR",
                 f"It is not possible to download the input data file from the "
                 f"job {job}, build {build[u'build']}, or it is damaged. "
                 f"Skipped.")
            )
        if success:
            logs.append(
                (u"INFO",
                 f"    Processing data from the build {build[u'build']} ...")
            )
            data = self._parse_tests(job, build, logs)
            if data is None:
                logs.append(
                    (u"ERROR",
                     f"Input data file from the job {job}, build "
                     f"{build[u'build']} is damaged. Skipped.")
                )
            else:
                state = u"processed"

            try:
                remove(build[u"file-name"])
            except OSError as err:
                logs.append(
                    (u"ERROR", f"Cannot remove the file {build[u'file-name']}: "
                               f"{repr(err)}")
                )

        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get(u"time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get(u"metadata", None)
            if metadata:
                generated = metadata.get(u"generated", None)
                if generated:
                    generated = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = u"removed"
                        data = None
                        logs.append(
                            (u"INFO",
                             f"    The build {job}/{build[u'build']} is "
                             f"outdated, will be removed.")
                        )
        logs.append((u"INFO", u"  Done."))

        for level, line in logs:
            if level == u"INFO":
                logging.info(line)
            elif level == u"ERROR":
                logging.error(line)
            elif level == u"DEBUG":
                logging.debug(line)
            elif level == u"CRITICAL":
                logging.critical(line)
            elif level == u"WARNING":
                logging.warning(line)

        return {u"data": data, u"state": state, u"job": job, u"build": build}

    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """

        logging.info(u"Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result[u"build"][u"build"]

                if result[u"data"]:
                    data = result[u"data"]
                    build_data = pd.Series({
                        u"metadata": pd.Series(
                            list(data[u"metadata"].values()),
                            index=list(data[u"metadata"].keys())
                        ),
                        u"suites": pd.Series(
                            list(data[u"suites"].values()),
                            index=list(data[u"suites"].keys())
                        ),
                        u"tests": pd.Series(
                            list(data[u"tests"].values()),
                            index=list(data[u"tests"].keys())
                        )
                    })

                    if self._input_data.get(job, None) is None:
                        self._input_data[job] = pd.Series()
                    self._input_data[job][str(build_nr)] = build_data

                    self._cfg.set_input_file_name(
                        job, build_nr, result[u"build"][u"file-name"])

                self._cfg.set_input_state(job, build_nr, result[u"state"])

                mem_alloc = \
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
                logging.info(f"Memory allocation: {mem_alloc:.0f}MB")

        logging.info(u"Done.")

    @staticmethod
    def _end_of_tag(tag_filter, start=0, closer=u"'"):
        """Return the index of the character which closes the tag.

        :param tag_filter: The string in which the end of the tag is searched.
        :param start: The index where the searching is started.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer.
        :rtype: int
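
        Example (illustrative)::

            InputData._end_of_tag(u"'64B' and '1T1C'")  # -> 4
            # Index 4 is the apostrophe closing the first tag.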
        """

        try:
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None

    @staticmethod
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
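
        Example (illustrative)::

            InputData._condition(u"'64B' and '1T1C'")
            # -> u"'64B' in tags and '1T1C' in tags"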
        """

        index = 0
        while True:
            index = InputData._end_of_tag(tag_filter, index)
            if index is None:
                return tag_filter
            index += 1
            tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]

    def filter_data(self, element, params=None, data=None, data_set=u"tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data: If not None, this data is used instead of data specified
            in the element.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The affected item will then be empty.
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
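
        A sketch of the expected element (field names as used by this method;
        the job name, build numbers and the instance name input_data are
        placeholders)::

            element = {
                u"filter": u"'64B' and '1T1C'",
                u"data": {u"example-job": [1, 2]},
                u"parameters": [u"throughput"]
            }
            filtered = input_data.filter_data(element)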
        """

        try:
            if element[u"filter"] in (u"all", u"template"):
                cond = u"True"
            else:
                cond = InputData._condition(element[u"filter"])
            logging.debug(f"   Filter: {cond}")
        except KeyError:
            logging.error(u"  No filter defined.")
            return None

        if params is None:
            params = element.get(u"parameters", None)
            if params:
                params.append(u"type")

        data_to_filter = data if data else element[u"data"]
        data = pd.Series()
        try:
            for job, builds in data_to_filter.items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_dict = dict(
                            self.data[job][str(build)][data_set].items())
                    except KeyError:
                        if continue_on_error:
                            continue
                        return None

                    for test_id, test_data in data_dict.items():
                        if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
                            data[job][str(build)][test_id] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_id][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_id][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_id][param] =\
                                            u"No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(
                f"Missing mandatory parameter in the element specification: "
                f"{repr(err)}"
            )
            return None
        except AttributeError as err:
            logging.error(repr(err))
            return None
        except SyntaxError as err:
            logging.error(
                f"The filter {cond} is not correct. Check if all tags are "
                f"enclosed by apostrophes.\n{repr(err)}"
            )
            return None

    def filter_tests_by_name(self, element, params=None, data_set=u"tests",
                             continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The affected item will then be empty.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
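
        A sketch of the expected element (placeholder values; "include"
        holds regular expressions matched against the test IDs)::

            element = {
                u"include": [u".*ndrpdr.*"],
                u"data": {u"example-job": [1]},
                u"parameters": [u"throughput"]
            }
            filtered = input_data.filter_tests_by_name(element)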
        """

        include = element.get(u"include", None)
        if not include:
            logging.warning(u"No tests to include, skipping the element.")
            return None

        if params is None:
            params = element.get(u"parameters", None)
            if params:
                params.append(u"type")

        data = pd.Series()
        try:
            for job, builds in element[u"data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    for test in include:
                        try:
                            reg_ex = re.compile(str(test).lower())
                            for test_id in self.data[job][
                                    str(build)][data_set].keys():
                                if re.match(reg_ex, str(test_id).lower()):
                                    test_data = self.data[job][
                                        str(build)][data_set][test_id]
                                    data[job][str(build)][test_id] = pd.Series()
                                    if params is None:
                                        for param, val in test_data.items():
                                            data[job][str(build)][test_id]\
                                                [param] = val
                                    else:
                                        for param in params:
                                            try:
                                                data[job][str(build)][
                                                    test_id][param] = \
                                                    test_data[param]
                                            except KeyError:
                                                data[job][str(build)][
                                                    test_id][param] = u"No Data"
                        except KeyError as err:
                            logging.error(repr(err))
                            if continue_on_error:
                                continue
                            return None
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(
                f"Missing mandatory parameter in the element "
                f"specification: {repr(err)}"
            )
            return None
        except AttributeError as err:
            logging.error(repr(err))
            return None

    @staticmethod
    def merge_data(data):
        """Merge data from multiple jobs and builds into a simple data
        structure.

        The output data structure is:

        - test (suite) 1 ID:
          - param 1
          - param 2
          ...
          - param n
        ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
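
        Example (a sketch; assumes filtered_data was produced by
        filter_data or filter_tests_by_name)::

            merged = InputData.merge_data(filtered_data)
            for test_id, test_data in merged.items():
                ...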
        """

        logging.info(u"    Merging data ...")

        merged_data = pd.Series()
        for builds in data.values:
            for item in builds.values:
                for item_id, item_data in item.items():
                    merged_data[item_id] = item_data

        return merged_data