PAL: Get DPDK Version
[csit.git] resources/tools/presentation/input_data_parser.py
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Data pre-processing

- extract data from output.xml files generated by Jenkins jobs and store in
  pandas' Series,
- provide access to the data,
- filter the data using tags.
"""

import re
import resource
import pandas as pd
import logging

from robot.api import ExecutionResult, ResultVisitor
from robot import errors
from collections import OrderedDict
from string import replace
from os import remove
from os.path import join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory

from input_data_files import download_and_unzip_data_file


# Separator used in file names
SEPARATOR = "__"

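# Illustrative example (layout assumed from the usage in
# InputData._download_and_parse_build below; the job name is hypothetical):
# a job "csit-vpp-perf-1904-all", build "12" and file "output.xml" are
# combined into "csit-vpp-perf-1904-all__12__output.xml".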

class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "name": "Suite name",
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                "throughput": {
                    "NDR": {
                        "LOWER": float,
                        "UPPER": float
                    },
                    "PDR": {
                        "LOWER": float,
                        "UPPER": float
                    }
                },
                "latency": {
                    "NDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float
                        }
                    },
                    "PDR": {
                        "direction1": {
                            "min": float,
                            "avg": float,
                            "max": float
                        },
                        "direction2": {
                            "min": float,
                            "avg": float,
                            "max": float
                        }
                    }
                }
            },

            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            },

            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": AvgStdevMetadata,
                }
            },

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            # NDRPDRDISC tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "PDR" | "NDR",
                "status": "PASS" | "FAIL",
                "throughput": {  # Only type: "PDR" | "NDR"
                    "value": int,
                    "unit": "pps" | "bps" | "percentage"
                },
                "latency": {  # Only type: "PDR" | "NDR"
                    "direction1": {
                        "100": {
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "50": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "10": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        }
                    },
                    "direction2": {
                        "100": {
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "50": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        },
                        "10": {  # Only for NDR
                            "min": int,
                            "avg": int,
                            "max": int
                        }
                    }
                },
                "lossTolerance": "lossTolerance",  # Only type: "PDR"
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run"
            },
            "ID": {
                # next test
            }
        }
    }


    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            },
            "ID": {
                # next test
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """

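    # Minimal usage sketch, mirroring InputData._parse_tests below; the
    # metadata keys shown here are an assumption, not a fixed schema:
    #
    #     result = ExecutionResult("output.xml")
    #     checker = ExecutionChecker(metadata={"job": "job-name", "build": "1"},
    #                                mapping=dict(), ignore=list())
    #     result.visit(checker)
    #     json_data = checker.data
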
    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::\s(\d+.\d+)')

    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')

    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')

    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"DPDK Version: (\d*.\d*)")
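    # Illustrative match (message format assumed from the regexp above):
    # a line such as "DPDK Version: 19.02" yields group(1) == "19.02".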

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')
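    # Illustrative match (message format assumed from the regexp above):
    #     "Maximum Receive Rate trial results in packets per second:
    #      [10.5, 11.2]" (one line)
    # yields group(1) == "10.5, 11.2".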

    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
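    # Illustrative use (the test name itself is hypothetical):
    #     re.sub(REGEX_TC_NUMBER, "", "tc01-64b-1t1c-eth-l2xcbase-ndrpdr")
    # returns "64b-1t1c-eth-l2xcbase-ndrpdr".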

    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # VPP version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        # Number of VAT History messages found:
        # 0 - no message
        # 1 - VAT History of DUT1
        # 2 - VAT History of DUT2
        self._lookup_kw_nr = 0
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 0 - no message
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of the currently processed test - the lowercase full path
        # to the test
        self._test_ID = None

        # The main data structure
        self._data = {
            "metadata": OrderedDict(),
            "suites": OrderedDict(),
            "tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data["metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            "timestamp": self._get_timestamp,
            "vpp-version": self._get_vpp_version,
            "dpdk-version": self._get_dpdk_version,
            "teardown-vat-history": self._get_vat_history,
            "teardown-papi-history": self._get_papi_history,
            "test-show-runtime": self._get_show_run,
            "testbed": self._get_testbed
        }

    @property
    def data(self):
        """Getter - Data parsed from the XML file.

        :returns: Data parsed from the XML file.
        :rtype: dict
        """
        return self._data

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("Setup of TG node"):
            reg_tg_ip = re.compile(
                r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data["metadata"]["testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT Version:") or \
            msg.message.count("VPP Version:") or \
            msg.message.count("VPP version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data["metadata"]["version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(1))
                self._data["metadata"]["version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        self._timestamp = msg.timestamp[:14]
        self._data["metadata"]["generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                          "VAT command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                          "PAPI command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("Thread 0 vpp_main"):
            self._show_run_lookup_nr += 1
            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
                self._data["tests"][self._test_ID]["show-run"] = str()
            if self._lookup_kw_nr > 1:
                self._msg_type = None
            if self._show_run_lookup_nr == 1:
                text = msg.message.replace("vat# ", "").\
                    replace("return STDOUT ", "").replace("\n\n", "\n").\
                    replace('\n', ' |br| ').\
                    replace('\r', '').replace('"', "'")
                try:
                    self._data["tests"][self._test_ID]["show-run"] += " |br| "
                    self._data["tests"][self._test_ID]["show-run"] += \
                        "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
                except KeyError:
                    pass

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    def _get_latency(self, msg, test_type):
        """Get the latency data from the test message.

        :param msg: Message to be parsed.
        :param test_type: Type of the test - NDR or PDR.
        :type msg: str
        :type test_type: str
        :returns: Latencies parsed from the message.
        :rtype: dict
        """

        if test_type == "NDR":
            groups = re.search(self.REGEX_LAT_NDR, msg)
            groups_range = range(1, 7)
        elif test_type == "PDR":
            groups = re.search(self.REGEX_LAT_PDR, msg)
            groups_range = range(1, 3)
        else:
            return {}

        latencies = list()
        for idx in groups_range:
            try:
                lat = [int(item) for item in str(groups.group(idx)).split('/')]
            except (AttributeError, ValueError):
                lat = [-1, -1, -1]
            latencies.append(lat)

        keys = ("min", "avg", "max")
        latency = {
            "direction1": {
            },
            "direction2": {
            }
        }

        latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
        latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
        if test_type == "NDR":
            latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
            latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
            latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
            latency["direction2"]["10"] = dict(zip(keys, latencies[5]))

        return latency

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

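        # Illustrative input (message shape assumed from REGEX_NDRPDR_RATE):
        #
        #     NDR_LOWER: 12345678.0 pps
        #     <any single line>
        #     NDR_UPPER: 12350000.0 pps
        #     PDR_LOWER: 12400000.0 pps
        #     <any single line>
        #     PDR_UPPER: 12450000.0 pps
        #
        # would return ({"NDR": {"LOWER": 12345678.0, "UPPER": 12350000.0},
        # "PDR": {"LOWER": 12400000.0, "UPPER": 12450000.0}}, "PASS").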
        throughput = {
            "NDR": {"LOWER": -1.0, "UPPER": -1.0},
            "PDR": {"LOWER": -1.0, "UPPER": -1.0}
        }
        status = "FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput["NDR"]["LOWER"] = float(groups.group(1))
                throughput["NDR"]["UPPER"] = float(groups.group(2))
                throughput["PDR"]["LOWER"] = float(groups.group(3))
                throughput["PDR"]["UPPER"] = float(groups.group(4))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        throughput = {
            "LOWER": -1.0,
            "UPPER": -1.0
        }
        status = "FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput["LOWER"] = float(groups.group(1))
                throughput["UPPER"] = float(groups.group(2))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

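        # Illustrative input (message shape assumed from REGEX_NDRPDR_LAT;
        # the pattern expects a whitespace character after the first closing
        # bracket and two arbitrary lines between the LATENCY lines):
        #
        #     LATENCY ... ['1/2/3', '4/5/6'] <two arbitrary lines follow>
        #     LATENCY ... ['7/8/9', '10/11/12']
        #
        # group(1) == '1/2/3' then yields latency["NDR"]["direction1"] ==
        # {"min": 1.0, "avg": 2.0, "max": 3.0}, and status "PASS".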
        latency = {
            "NDR": {
                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            },
            "PDR": {
                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            }
        }
        status = "FAIL"
        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        if groups is not None:
            keys = ("min", "avg", "max")
            try:
                latency["NDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(1).split('/')]))
                latency["NDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(2).split('/')]))
                latency["PDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(3).split('/')]))
                latency["PDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(4).split('/')]))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return latency, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
            replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
        doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)

        self._data["suites"][suite.longname.lower().replace('"', "'").
            replace(" ", "_")] = {
                "name": suite.name.lower(),
                "doc": doc_str,
                "parent": parent_name,
                "level": len(suite.longname.split("."))
            }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        pass

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split('.')[-1]
            logging.debug("{0}\n{1}\n{2}\n{3}".format(
                self._data["metadata"], longname_orig, longname, name))
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
        # Remove TC number from the TC name (not needed):
        test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result["parent"] = test.parent.name.lower()
        test_result["tags"] = tags
        doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
            replace('\r', '').replace('[', ' |br| [')
        test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
        test_result["msg"] = test.message.replace('\n', ' |br| '). \
            replace('\r', '').replace('"', "'")
        test_result["type"] = "FUNC"
        test_result["status"] = test.status

        if "PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
            if not groups:
                tag_count = 0
                for tag in test_result["tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
                                           "-{0}-".format(tag_tc.lower()),
                                           self._test_ID,
                                           count=1)
                    test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                 "-{0}-".format(tag_tc.lower()),
                                                 test_result["name"],
                                                 count=1)
                else:
                    test_result["status"] = "FAIL"
                    self._data["tests"][self._test_ID] = test_result
                    logging.debug("The test '{0}' has none or more than one "
                                  "multi-threading tag.".format(self._test_ID))
                    logging.debug("Tags: {0}".format(test_result["tags"]))
                    return

        if test.status == "PASS" and ("NDRPDRDISC" in tags or
                                      "NDRPDR" in tags or
                                      "SOAK" in tags or
                                      "TCP" in tags or
                                      "MRR" in tags or
                                      "BMRR" in tags):
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if "NDRDISC" in tags:
                test_result["type"] = "NDR"
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            elif "PDRDISC" in tags:
                test_result["type"] = "PDR"
            elif "NDRPDR" in tags:
                test_result["type"] = "NDRPDR"
            elif "SOAK" in tags:
                test_result["type"] = "SOAK"
            elif "TCP" in tags:
                test_result["type"] = "TCP"
            elif "MRR" in tags:
                test_result["type"] = "MRR"
            elif "FRMOBL" in tags or "BMRR" in tags:
                test_result["type"] = "BMRR"
            else:
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result
                return

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if test_result["type"] in ("NDR", "PDR"):
                try:
                    rate_value = str(re.search(
                        self.REGEX_RATE, test.message).group(1))
                except AttributeError:
                    rate_value = "-1"
                try:
                    rate_unit = str(re.search(
                        self.REGEX_RATE, test.message).group(2))
                except AttributeError:
                    rate_unit = "-1"

                test_result["throughput"] = dict()
                test_result["throughput"]["value"] = \
                    int(rate_value.split('.')[0])
                test_result["throughput"]["unit"] = rate_unit
                test_result["latency"] = \
                    self._get_latency(test.message, test_result["type"])
                if test_result["type"] == "PDR":
                    test_result["lossTolerance"] = str(re.search(
                        self.REGEX_TOLERANCE, test.message).group(1))

            elif test_result["type"] in ("NDRPDR", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result["latency"], test_result["status"] = \
                    self._get_ndrpdr_latency(test.message)

            elif test_result["type"] in ("SOAK", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_plr_throughput(test.message)

            elif test_result["type"] in ("TCP", ):
                groups = re.search(self.REGEX_TCP, test.message)
                test_result["result"] = int(groups.group(2))

            elif test_result["type"] in ("MRR", "BMRR"):
                test_result["result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    metadata = AvgStdevMetadataFactory.from_data(items_float)
                    # Next two lines have been introduced in CSIT-1179,
                    # to be removed in CSIT-1180.
                    metadata.size = 1
                    metadata.stdev = 0.0
                    test_result["result"]["receive-rate"] = metadata
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result["result"]["receive-rate"] = \
                        AvgStdevMetadataFactory.from_data([
                            float(groups.group(3)) / float(groups.group(1)), ])

        self._data["tests"][self._test_ID] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        pass

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == "setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == "teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
            else:
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        pass

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count("Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = "test-show-runtime"
        elif test_kw.name.count("Install Dpdk Test") and not self._version:
            self._msg_type = "dpdk-version"
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count("Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = "vpp-version"
        elif setup_kw.name.count("Set Global Variable") \
                and not self._timestamp:
            self._msg_type = "timestamp"
        elif setup_kw.name.count("Setup Framework") and not self._testbed:
            self._msg_type = "testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts. Default implementation does
        nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count("Show Vat History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count("Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        pass

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Gets the required information from the
        message using the parser registered in self.parse_msg for the current
        message type, e.g. VPP/DPDK version, timestamp, configuration history.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        pass


class InputData(object):
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """

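    # Typical flow (a sketch; "spec" is a Specification object, assumed to be
    # built elsewhere by the PAL tooling, and "element" is a specification
    # element as accepted by filter_data below):
    #
    #     input_data = InputData(spec)
    #     input_data.download_and_parse_data(repeat=2)
    #     filtered = input_data.filter_data(element)
    #     merged = InputData.merge_data(filtered)
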
    def __init__(self, spec):
        """Initialization.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data
        :rtype: pandas.Series
        """
        return self._input_data

    def metadata(self, job, build):
        """Getter - metadata

        :param job: The job whose metadata we want.
        :param build: The build whose metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata
        :rtype: pandas.Series
        """

        return self.data[job][build]["metadata"]

    def suites(self, job, build):
        """Getter - suites

        :param job: The job whose suites we want.
        :param build: The build whose suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)]["suites"]

    def tests(self, job, build):
        """Getter - tests

        :param job: The job whose tests we want.
        :param build: The build whose tests we want.
        :type job: str
        :type build: str
        :returns: Tests.
        :rtype: pandas.Series
        """

        return self.data[job][build]["tests"]

    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of the job whose build output data will be
            processed.
        :param build: The build whose output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """

        metadata = {
            "job": job,
            "build": build
        }

        with open(build["file-name"], 'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(("ERROR", "Error occurred while parsing output.xml: "
                                     "{0}".format(err)))
                return None
        checker = ExecutionChecker(metadata, self._cfg.mapping,
                                   self._cfg.ignore)
        result.visit(checker)

        return checker.data

    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param pid: PID of the process executing this method.
        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :type pid: int
        :type job: str
        :type build: dict
        :type repeat: int
        """

        logs = list()

        logs.append(("INFO", "  Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        state = "failed"
        success = False
        data = None
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                                   logs)
            if success:
                break
            do_repeat -= 1
        if not success:
            logs.append(("ERROR", "It is not possible to download the input "
                                  "data file from the job '{job}', build "
                                  "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))
        if success:
            logs.append(("INFO", "    Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = self._parse_tests(job, build, logs)
            if data is None:
                logs.append(("ERROR", "Input data file from the job '{job}', "
                                      "build '{build}' is damaged. Skipped.".
                             format(job=job, build=build["build"])))
            else:
                state = "processed"

            try:
                remove(build["file-name"])
            except OSError as err:
                logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                             format(build["file-name"], repr(err))))

        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get("time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get("metadata", None)
            if metadata:
                generated = metadata.get("generated", None)
                if generated:
                    generated = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = "removed"
                        data = None
                        logs.append(
                            ("INFO",
                             "    The build {job}/{build} is outdated, will be "
                             "removed".format(job=job, build=build["build"])))
                        file_name = self._cfg.input["file-name"]
                        full_name = join(
                            self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
                            "{job}{sep}{build}{sep}{name}".format(
                                job=job,
                                sep=SEPARATOR,
                                build=build["build"],
                                name=file_name))
                        try:
                            remove(full_name)
                            logs.append(("INFO",
                                         "    The file {name} has been removed".
                                         format(name=full_name)))
                        except OSError as err:
                            logs.append(("ERROR",
                                         "Cannot remove the file '{0}': {1}".
                                         format(full_name, repr(err))))
        logs.append(("INFO", "  Done."))

        for level, line in logs:
            if level == "INFO":
                logging.info(line)
            elif level == "ERROR":
                logging.error(line)
            elif level == "DEBUG":
                logging.debug(line)
            elif level == "CRITICAL":
                logging.critical(line)
            elif level == "WARNING":
                logging.warning(line)

        return {"data": data, "state": state, "job": job, "build": build}

    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """

        logging.info("Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result["build"]["build"]

                if result["data"]:
                    data = result["data"]
                    build_data = pd.Series({
                        "metadata": pd.Series(
                            data["metadata"].values(),
                            index=data["metadata"].keys()),
                        "suites": pd.Series(data["suites"].values(),
                                            index=data["suites"].keys()),
                        "tests": pd.Series(data["tests"].values(),
                                           index=data["tests"].keys())})

                    if self._input_data.get(job, None) is None:
                        self._input_data[job] = pd.Series()
                    self._input_data[job][str(build_nr)] = build_data

                    self._cfg.set_input_file_name(
                        job, build_nr, result["build"]["file-name"])

                self._cfg.set_input_state(job, build_nr, result["state"])

                logging.info("Memory allocation: {0:,d}MB".format(
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))

        logging.info("Done.")

    @staticmethod
    def _end_of_tag(tag_filter, start=0, closer="'"):
        """Return the index of the character in the string which is the end of
        the tag.

        :param tag_filter: The string where the end of the tag is being
            searched.
        :param start: The index where the searching is started.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer.
        :rtype: int
        """

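        # Illustrative call: for tag_filter == "'NDRPDR' and '1T1C'" and
        # start == 0, the method returns 7, the index of the apostrophe
        # closing 'NDRPDR'.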
        try:
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None

    @staticmethod
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
        """

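        # Illustrative transformation: "'NDRPDR' and '1T1C'" becomes
        # "'NDRPDR' in tags and '1T1C' in tags", which filter_data() then
        # evaluates against each test's tags.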
        index = 0
        while True:
            index = InputData._end_of_tag(tag_filter, index)
            if index is None:
                return tag_filter
            index += 1
            tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]

    def filter_data(self, element, params=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """

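        # Illustrative element (keys assumed from the code below; the job name
        # is hypothetical):
        #
        #     element = {
        #         "filter": "'NDRPDR' and '1T1C'",
        #         "parameters": ["throughput", "tags"],
        #         "data": {"csit-vpp-perf-1904-all": [1, 2]}
        #     }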
        try:
            if element["filter"] in ("all", "template"):
                cond = "True"
            else:
                cond = InputData._condition(element["filter"])
            logging.debug("   Filter: {0}".format(cond))
        except KeyError:
            logging.error("  No filter defined.")
            return None

        if params is None:
            params = element.get("parameters", None)
            if params:
                params.append("type")

        data = pd.Series()
        try:
            for job, builds in element["data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_iter = self.data[job][str(build)][data_set].\
                            iteritems()
                    except KeyError:
                        if continue_on_error:
                            continue
                        else:
                            return None
                    for test_ID, test_data in data_iter:
                        if eval(cond, {"tags": test_data.get("tags", "")}):
                            data[job][str(build)][test_ID] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_ID][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_ID][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_ID][param] =\
                                            "No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error("   Missing mandatory parameter in the element "
                          "specification: {0}".format(err))
            return None
        except AttributeError:
            return None
        except SyntaxError:
            logging.error("   The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
            return None

    @staticmethod
    def merge_data(data):
        """Merge data from more jobs and builds to a simple data structure.

        The output data structure is:

        - test (suite) 1 ID:
          - param 1
          - param 2
          ...
          - param n
        ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
        """

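        # Illustrative behaviour: if two builds both contain the same test ID,
        # the one iterated last wins, as each assignment below overwrites the
        # previously stored value for that ID.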
        logging.info("    Merging data ...")

        merged_data = pd.Series()
        for _, builds in data.iteritems():
            for _, item in builds.iteritems():
                for ID, item_data in item.iteritems():
                    merged_data[ID] = item_data

        return merged_data