# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Data pre-processing

- extract data from output.xml files generated by Jenkins jobs and store in
  pandas' Series,
- provide access to the data,
- filter the data using tags.
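
Example usage (an illustrative sketch; ``spec`` is a Specification object as
consumed by InputData below, ``job_name`` and ``build_nr`` are placeholders):

    data = InputData(spec)
    data.download_and_parse_data(repeat=2)
    tests = data.tests(job_name, build_nr)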
"""

import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove
from datetime import datetime as dt
from datetime import timedelta
from json import loads

import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file


# Separator used in file names
SEPARATOR = u"__"


class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a JSON structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            }
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        }
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float,
                            "hdrh": str
                        }
                    }
                }
            }

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            }

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            }

            "ID" {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            }
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        }
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID" {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
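
    A minimal usage sketch (mirroring InputData._parse_tests below;
    ``metadata``, ``mapping`` and ``ignore`` are supplied by the caller):

        result = ExecutionResult(u"output.xml")
        checker = ExecutionChecker(metadata, mapping, ignore)
        result.visit(checker)
        json_data = checker.data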
    """

    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::?\s(\d+.\d+)')

    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
    REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')

    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')

    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        # Number of PAPI History messages found:
        # 0 - no message
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._lookup_kw_nr = 0
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 0 - no message
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of the currently processed test, i.e. the lowercase full
        # path to the test
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"timestamp": self._get_timestamp,
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            # TODO: Remove when not needed:
            u"teardown-vat-history": self._get_vat_history,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"return STDOUT Version:") or \
            msg.message.count(u"VPP Version:") or \
            msg.message.count(u"VPP version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

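        # Keep only the date and time part of the Robot timestamp
        # ("%Y%m%d %H:%M", 14 characters); _download_and_parse_build parses
        # it back with the same format string.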
        self._timestamp = msg.timestamp[:14]
        self._data[u"metadata"][u"generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        TODO: Remove when not needed.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          r"VAT command history:", u"",
                          msg.message, count=1).replace(u'\n', u' |br| ').\
                replace(u'"', u"'")

            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          r"PAPI command history:", u"",
                          msg.message, count=1).replace(u'\n', u' |br| ').\
                replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = str()

        if msg.message.count(u"stats runtime") or \
                msg.message.count(u"Runtime"):
            try:
                host = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
                           group(1))
            except (AttributeError, IndexError):
                host = self._data[u"tests"][self._test_id][u"show-run"].\
                           count(u"DUT:") + 1
            try:
                socket = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
                             group(2))
                socket = f"/{socket}"
            except (AttributeError, IndexError):
                socket = u""
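            # The runtime data arrives as a Python-style repr embedded in
            # the message; normalise the quoting and strip the b/u string
            # prefixes so the part after the first colon decodes as JSON.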
            runtime = loads(
                str(msg.message).
                replace(u' ', u'').
                replace(u'\n', u'').
                replace(u"'", u'"').
                replace(u'b"', u'"').
                replace(u'u"', u'"').
                split(u":", 1)[1]
            )
            try:
                threads_nr = len(runtime[0][u"clocks"])
            except (IndexError, KeyError):
                return
            tbl_hdr = [
                u"Name",
                u"Calls",
                u"Vectors",
                u"Suspends",
                u"Clocks",
                u"Vectors/Calls"
            ]
            table = [[tbl_hdr, ] for _ in range(threads_nr)]
            for item in runtime:
                for idx in range(threads_nr):
                    name = format(item[u"name"])
                    calls = format(item[u"calls"][idx])
                    vectors = format(item[u"vectors"][idx])
                    suspends = format(item[u"suspends"][idx])
                    if item[u"vectors"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"vectors"][idx], u".2e")
                    elif item[u"calls"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"calls"][idx], u".2e")
                    elif item[u"suspends"][idx] > 0:
                        clocks = format(
                            item[u"clocks"][idx]/item[u"suspends"][idx], u".2e")
                    else:
                        clocks = 0
                    if item[u"calls"][idx] > 0:
                        vectors_call = format(
                            item[u"vectors"][idx]/item[u"calls"][idx], u".2f")
                    else:
                        vectors_call = format(0, u".2f")
                    if int(calls) + int(vectors) + int(suspends):
                        table[idx].append([
                            name, calls, vectors, suspends, clocks, vectors_call
                        ])
            text = u""
            for idx in range(threads_nr):
                text += f"Thread {idx} "
                text += u"vpp_main\n" if idx == 0 else f"vpp_wk_{idx-1}\n"
                txt_table = None
                for row in table[idx]:
                    if txt_table is None:
                        txt_table = prettytable.PrettyTable(row)
                    else:
                        if any(row[1:]):
                            txt_table.add_row(row)
                txt_table.set_style(prettytable.MSWORD_FRIENDLY)
                txt_table.align[u"Name"] = u"l"
                txt_table.align[u"Calls"] = u"r"
                txt_table.align[u"Vectors"] = u"r"
                txt_table.align[u"Suspends"] = u"r"
                txt_table.align[u"Clocks"] = u"r"
                txt_table.align[u"Vectors/Calls"] = u"r"

                text += txt_table.get_string(sortby=u"Name") + u'\n'
            text = f"\n**DUT: {host}{socket}**\n{text}".\
                replace(u'\n', u' |br| ').\
                replace(u'\r', u'').\
                replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"show-run"] += text

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
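
        A message fragment matched by REGEX_NDRPDR_RATE looks roughly like
        this (values and surrounding text are illustrative):

            NDR_LOWER: 12345678.9 ...
            ...
            NDR_UPPER: 12345890.1 ...
            PDR_LOWER: 12346000.2 ...
            ...
            PDR_UPPER: 12346111.3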
        """

        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0,
            u"avg": -1.0,
            u"max": -1.0,
            u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            }
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with corresponding keys; values are floats except
                for hdrh.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
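
            Example (illustrative): "10/15/20/encoded-hdrh" yields
            {"min": 10.0, "avg": 15.0, "max": 20.0, "hdrh": "encoded-hdrh"}.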
            """
            in_list = in_str.split('/', 3)

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

        if groups is not None:
            try:
                latency[u"NDR"][u"direction1"] = \
                    process_latency(groups.group(1))
                latency[u"NDR"][u"direction2"] = \
                    process_latency(groups.group(2))
                latency[u"PDR"][u"direction1"] = \
                    process_latency(groups.group(3))
                latency[u"PDR"][u"direction2"] = \
                    process_latency(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return latency, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'*[', u' |br| *[').\
            replace(u"*", u"**").\
            replace(u' |br| *[', u'*[', 1)

        self._data[u"suites"][suite.longname.lower().
                              replace(u'"', u"'").
                              replace(u" ", u"_")] = {
                                  u"name": suite.name.lower(),
                                  u"doc": doc_str,
                                  u"parent": parent_name,
                                  u"level": len(suite.longname.split(u"."))
                              }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            logging.debug(
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
                f"{name}"
            )
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, u"", name)

        test_result[u"parent"] = test.parent.name.lower()
        test_result[u"tags"] = tags
        test_result[u"doc"] = test.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'[', u' |br| [').\
            replace(u' |br| [', u'[', 1)
        test_result[u"msg"] = test.message.\
            replace(u'\n', u' |br| ').\
            replace(u'\r', u'').\
            replace(u'"', u"'")
        test_result[u"type"] = u"FUNC"
        test_result[u"status"] = test.status

        if u"PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
            if not groups:
                tag_count = 0
                tag_tc = str()
                for tag in test_result[u"tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
                                           f"-{tag_tc.lower()}-",
                                           self._test_id,
                                           count=1)
                    test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                  f"-{tag_tc.lower()}-",
                                                  test_result[u"name"],
                                                  count=1)
                else:
                    test_result[u"status"] = u"FAIL"
                    self._data[u"tests"][self._test_id] = test_result
                    logging.debug(
                        f"The test {self._test_id} has none or more than one "
                        f"multi-threading tag.\n"
                        f"Tags: {test_result[u'tags']}"
                    )
                    return

        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                test_result[u"type"] = u"NDRPDR"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
            elif u"SOAK" in tags:
                test_result[u"type"] = u"SOAK"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
            elif u"TCP" in tags:
                test_result[u"type"] = u"TCP"
                groups = re.search(self.REGEX_TCP, test.message)
                test_result[u"result"] = int(groups.group(2))
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                if u"MRR" in tags:
                    test_result[u"type"] = u"MRR"
                else:
                    test_result[u"type"] = u"BMRR"

                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"receive-rate"] = stats.avg
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
            elif u"RECONF" in tags:
                test_result[u"type"] = u"RECONF"
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
            else:
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                return

        self._data[u"tests"][self._test_id] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == u"setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == u"teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
            else:
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count(u"Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = u"test-show-runtime"
        elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
            self._msg_type = u"dpdk-version"
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Set Global Variable") \
                and not self._timestamp:
            self._msg_type = u"timestamp"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count(u"Show Vat History On All Duts"):
            # TODO: Remove when not needed:
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Get required information from messages:
        - VPP version.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """


class InputData:
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """

    def __init__(self, spec):
        """Initialization.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data
        :rtype: pandas.Series
        """
        return self._input_data

    def metadata(self, job, build):
        """Getter - metadata

        :param job: Job which metadata we want.
        :param build: Build which metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata
        :rtype: pandas.Series
        """

        return self.data[job][str(build)][u"metadata"]

    def suites(self, job, build):
        """Getter - suites

        :param job: Job which suites we want.
        :param build: Build which suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)][u"suites"]

    def tests(self, job, build):
        """Getter - tests

        :param job: Job which tests we want.
        :param build: Build which tests we want.
        :type job: str
        :type build: str
        :returns: Tests.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)][u"tests"]

    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of job which build output data will be processed.
        :param build: The build which output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """

        metadata = {
            u"job": job,
            u"build": build
        }

        with open(build[u"file-name"], u'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(
                    (u"ERROR", f"Error occurred while parsing output.xml: "
                               f"{repr(err)}")
                )
                return None
        checker = ExecutionChecker(metadata, self._cfg.mapping,
                                   self._cfg.ignore)
        result.visit(checker)

        return checker.data

    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :param pid: PID of the process executing this method.
        :type job: str
        :type build: dict
        :type repeat: int
        :type pid: int
        """

        logs = list()

        logs.append(
            (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
        )

        state = u"failed"
        success = False
        data = None
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                                   logs)
            if success:
                break
            do_repeat -= 1
        if not success:
            logs.append(
                (u"ERROR",
                 f"It is not possible to download the input data file from the "
                 f"job {job}, build {build[u'build']}, or it is damaged. "
                 f"Skipped.")
            )
        if success:
            logs.append(
                (u"INFO",
                 f"    Processing data from the build {build[u'build']} ...")
            )
            data = self._parse_tests(job, build, logs)
            if data is None:
                logs.append(
                    (u"ERROR",
                     f"Input data file from the job {job}, build "
                     f"{build[u'build']} is damaged. Skipped.")
                )
            else:
                state = u"processed"

            try:
                remove(build[u"file-name"])
            except OSError as err:
                logs.append(
                    (u"ERROR", f"Cannot remove the file {build[u'file-name']}: "
                               f"{repr(err)}")
                )

        # If the time period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get(u"time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get(u"metadata", None)
            if metadata:
                generated = metadata.get(u"generated", None)
                if generated:
                    generated = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = u"removed"
                        data = None
                        logs.append(
                            (u"INFO",
                             f"    The build {job}/{build[u'build']} is "
                             f"outdated, will be removed.")
                        )
        logs.append((u"INFO", u"  Done."))

        for level, line in logs:
            if level == u"INFO":
                logging.info(line)
            elif level == u"ERROR":
                logging.error(line)
            elif level == u"DEBUG":
                logging.debug(line)
            elif level == u"CRITICAL":
                logging.critical(line)
            elif level == u"WARNING":
                logging.warning(line)

        return {u"data": data, u"state": state, u"job": job, u"build": build}

    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
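
        After this call the parsed data is accessible as, e.g.
        (an illustrative sketch, test_id being a lowercase full test path):

            self.data[job][str(build)][u"tests"][test_id]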
        """

        logging.info(u"Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result[u"build"][u"build"]

                if result[u"data"]:
                    data = result[u"data"]
                    build_data = pd.Series({
                        u"metadata": pd.Series(
                            list(data[u"metadata"].values()),
                            index=list(data[u"metadata"].keys())
                        ),
                        u"suites": pd.Series(
                            list(data[u"suites"].values()),
                            index=list(data[u"suites"].keys())
                        ),
                        u"tests": pd.Series(
                            list(data[u"tests"].values()),
                            index=list(data[u"tests"].keys())
                        )
                    })

                    if self._input_data.get(job, None) is None:
                        self._input_data[job] = pd.Series()
                    self._input_data[job][str(build_nr)] = build_data

                    self._cfg.set_input_file_name(
                        job, build_nr, result[u"build"][u"file-name"])

                self._cfg.set_input_state(job, build_nr, result[u"state"])

                mem_alloc = \
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
                logging.info(f"Memory allocation: {mem_alloc:.0f}MB")

        logging.info(u"Done.")

    @staticmethod
    def _end_of_tag(tag_filter, start=0, closer=u"'"):
        """Return the index of the character in the string which is the end of
        the tag.

        :param tag_filter: The string where the end of the tag is being
            searched.
        :param start: The index where the searching is started.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer.
        :rtype: int
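
        Example (illustrative): for tag_filter "'ab' and 'cd'" with start=0,
        the returned index points at the apostrophe closing 'ab'.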
        """

        try:
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None

    @staticmethod
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
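
        Example (illustrative): the filter "'NDRPDR' and '1T1C'" becomes
        "'NDRPDR' in tags and '1T1C' in tags", which filter_data evaluates
        against each test's tag list.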
        """

        index = 0
        while True:
            index = InputData._end_of_tag(tag_filter, index)
            if index is None:
                return tag_filter
            index += 1
            tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]

    def filter_data(self, element, params=None, data=None, data_set=u"tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data: If not None, this data is used instead of data specified
            in the element.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
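
        Example element (an illustrative sketch of the keys used here; the
        tag and parameter names are hypothetical):

            element = {
                u"filter": u"'NDRPDR' and '1T1C'",
                u"parameters": [u"throughput", u"latency"],
                u"data": {u"job-name": [1, 2]}
            }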
        """

        try:
            if element[u"filter"] in (u"all", u"template"):
                cond = u"True"
            else:
                cond = InputData._condition(element[u"filter"])
            logging.debug(f"   Filter: {cond}")
        except KeyError:
            logging.error(u"  No filter defined.")
            return None

        if params is None:
            params = element.get(u"parameters", None)
            if params:
                params.append(u"type")

        data_to_filter = data if data else element[u"data"]
        data = pd.Series()
        try:
            for job, builds in data_to_filter.items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_dict = dict(
                            self.data[job][str(build)][data_set].items())
                    except KeyError:
                        if continue_on_error:
                            continue
                        return None

                    for test_id, test_data in data_dict.items():
                        if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
                            data[job][str(build)][test_id] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_id][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_id][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_id][param] =\
                                            u"No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(
                f"Missing mandatory parameter in the element specification: "
                f"{repr(err)}"
            )
            return None
        except AttributeError as err:
            logging.error(repr(err))
            return None
        except SyntaxError as err:
            logging.error(
                f"The filter {cond} is not correct. Check if all tags are "
                f"enclosed by apostrophes.\n{repr(err)}"
            )
            return None

    def filter_tests_by_name(self, element, params=None, data_set=u"tests",
                             continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
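
        Each item of element[u"include"] is treated as a regular expression,
        e.g. (an illustrative pattern) u".*-ndrpdr" matches every test ID
        ending with "-ndrpdr".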
        """

        include = element.get(u"include", None)
        if not include:
            logging.warning(u"No tests to include, skipping the element.")
            return None

        if params is None:
            params = element.get(u"parameters", None)
            if params:
                params.append(u"type")

        data = pd.Series()
        try:
            for job, builds in element[u"data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    for test in include:
                        try:
                            reg_ex = re.compile(str(test).lower())
                            for test_id in self.data[job][
                                    str(build)][data_set].keys():
                                if re.match(reg_ex, str(test_id).lower()):
                                    test_data = self.data[job][
                                        str(build)][data_set][test_id]
                                    data[job][str(build)][test_id] = pd.Series()
                                    if params is None:
                                        for param, val in test_data.items():
                                            data[job][str(build)][test_id]\
                                                [param] = val
                                    else:
                                        for param in params:
                                            try:
                                                data[job][str(build)][
                                                    test_id][param] = \
                                                    test_data[param]
                                            except KeyError:
                                                data[job][str(build)][
                                                    test_id][param] = u"No Data"
                        except KeyError as err:
                            logging.error(repr(err))
                            if continue_on_error:
                                continue
                            return None
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(
                f"Missing mandatory parameter in the element "
                f"specification: {repr(err)}"
            )
            return None
        except AttributeError as err:
            logging.error(repr(err))
            return None

    @staticmethod
    def merge_data(data):
        """Merge data from multiple jobs and builds to a simple data structure.

        The output data structure is:

        - test (suite) 1 ID:
          - param 1
          - param 2
          ...
          - param n
        ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
        """

        logging.info(u"    Merging data ...")

        merged_data = pd.Series()
        for builds in data.values:
            for item in builds.values:
                for item_id, item_data in item.items():
                    merged_data[item_id] = item_data

        return merged_data