1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
28 from robot.api import ExecutionResult, ResultVisitor
29 from robot import errors
30 from collections import OrderedDict
31 from string import replace
33 from datetime import datetime as dt
34 from datetime import timedelta
35 from json import loads
36 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
38 from input_data_files import download_and_unzip_data_file
41 # Separator used in file names
# Visitor over a Robot Framework result tree; collects metadata, suites and
# test results into self._data (a dict of OrderedDicts) as it walks.
# NOTE(review): this excerpt is sampled — many original lines are missing
# from the class docstring and the constant definitions below.
45 class ExecutionChecker(ResultVisitor):
46 """Class to traverse through the test suite structure.
48 The functionality implemented in this class generates a json structure:
54 "generated": "Timestamp",
55 "version": "SUT version",
56 "job": "Jenkins job name",
57 "build": "Information about the build"
60 "Suite long name 1": {
62 "doc": "Suite 1 documentation",
63 "parent": "Suite 1 parent",
64 "level": "Level of the suite in the suite hierarchy"
66 "Suite long name N": {
68 "doc": "Suite N documentation",
69 "parent": "Suite 2 parent",
70 "level": "Level of the suite in the suite hierarchy"
77 "parent": "Name of the parent of the test",
78 "doc": "Test documentation",
79 "msg": "Test message",
80 "conf-history": "DUT1 and DUT2 VAT History",
81 "show-run": "Show Run",
82 "tags": ["tag 1", "tag 2", "tag n"],
84 "status": "PASS" | "FAIL",
126 "parent": "Name of the parent of the test",
127 "doc": "Test documentation",
128 "msg": "Test message",
129 "tags": ["tag 1", "tag 2", "tag n"],
131 "status": "PASS" | "FAIL",
138 "parent": "Name of the parent of the test",
139 "doc": "Test documentation",
140 "msg": "Test message",
141 "tags": ["tag 1", "tag 2", "tag n"],
142 "type": "MRR" | "BMRR",
143 "status": "PASS" | "FAIL",
145 "receive-rate": AvgStdevMetadata,
149 # TODO: Remove when definitely no NDRPDRDISC tests are used:
153 "parent": "Name of the parent of the test",
154 "doc": "Test documentation",
155 "msg": "Test message",
156 "tags": ["tag 1", "tag 2", "tag n"],
157 "type": "PDR" | "NDR",
158 "status": "PASS" | "FAIL",
159 "throughput": { # Only type: "PDR" | "NDR"
161 "unit": "pps" | "bps" | "percentage"
163 "latency": { # Only type: "PDR" | "NDR"
170 "50": { # Only for NDR
175 "10": { # Only for NDR
187 "50": { # Only for NDR
192 "10": { # Only for NDR
199 "lossTolerance": "lossTolerance", # Only type: "PDR"
200 "conf-history": "DUT1 and DUT2 VAT History"
201 "show-run": "Show Run"
213 "metadata": { # Optional
214 "version": "VPP version",
215 "job": "Jenkins job name",
216 "build": "Information about the build"
220 "doc": "Suite 1 documentation",
221 "parent": "Suite 1 parent",
222 "level": "Level of the suite in the suite hierarchy"
225 "doc": "Suite N documentation",
226 "parent": "Suite 2 parent",
227 "level": "Level of the suite in the suite hierarchy"
233 "parent": "Name of the parent of the test",
234 "doc": "Test documentation"
235 "msg": "Test message"
236 "tags": ["tag 1", "tag 2", "tag n"],
237 "conf-history": "DUT1 and DUT2 VAT History"
238 "show-run": "Show Run"
239 "status": "PASS" | "FAIL"
247 .. note:: ID is the lowercase full path to the test.
# Pre-compiled regexes used to pull measurement values out of Robot test
# messages. They are class-level so they compile once.
250 # TODO: Remove when definitely no NDRPDRDISC tests are used:
251 REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
253 REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
254 r'PLRsearch upper bound::?\s(\d+.\d+)')
256 REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
257 r'NDR_UPPER:\s(\d+.\d+).*\n'
258 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
259 r'PDR_UPPER:\s(\d+.\d+)')
261 # TODO: Remove when definitely no NDRPDRDISC tests are used:
262 REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
263 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
264 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
265 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
266 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
267 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
268 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')
270 REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
271 r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
272 r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')
274 REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
275 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
# NOTE(review): unescaped '.' in several patterns (e.g. \d+.\d+) matches any
# character, not just a literal dot — harmless here but worth tightening.
277 REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
280 REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
281 r"VPP Version:\s*|VPP version:\s*)(.*)")
283 REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")
285 REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
287 REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
288 r'tx\s(\d*),\srx\s(\d*)')
290 REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
291 r' in packets per second: \[(.*)\]')
293 REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
294 REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.\d*)')
296 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
298 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
300 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
302 REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
# Constructor: records the parser configuration and initialises the result
# structure plus the message-type -> parser-method dispatch table.
# NOTE(review): sampled excerpt — e.g. the `self._data = {` line and several
# attribute initialisations are missing from this view.
304 def __init__(self, metadata, mapping, ignore):
307 :param metadata: Key-value pairs to be included in "metadata" part of
309 :param mapping: Mapping of the old names of test cases to the new
311 :param ignore: List of TCs to be ignored.
317 # Type of message to parse out from the test messages
318 self._msg_type = None
324 self._timestamp = None
326 # Testbed. The testbed is identified by TG node IP address.
329 # Mapping of TCs long names
330 self._mapping = mapping
333 self._ignore = ignore
335 # Number of VAT History messages found:
337 # 1 - VAT History of DUT1
338 # 2 - VAT History of DUT2
339 self._lookup_kw_nr = 0
340 self._conf_history_lookup_nr = 0
342 # Number of Show Running messages found
344 # 1 - Show run message found
345 self._show_run_lookup_nr = 0
347 # Test ID of currently processed test- the lowercase full path to the
351 # The main data structure
353 "metadata": OrderedDict(),
354 "suites": OrderedDict(),
355 "tests": OrderedDict()
358 # Save the provided metadata
359 for key, val in metadata.items():
360 self._data["metadata"][key] = val
362 # Dictionary defining the methods used to parse different types of
# Dispatch table: self._msg_type selects which _get_* parser handles the
# next visited message.
365 "timestamp": self._get_timestamp,
366 "vpp-version": self._get_vpp_version,
367 "dpdk-version": self._get_dpdk_version,
368 "teardown-vat-history": self._get_vat_history,
369 "teardown-papi-history": self._get_papi_history,
370 "test-show-runtime": self._get_show_run,
371 "testbed": self._get_testbed
# Read-only `data` property (def line missing from this excerpt): exposes
# the accumulated self._data structure.
376 """Getter - Data parsed from the XML file.
378 :returns: Data parsed from the XML file.
# Message parsers for setup-phase information. Each one reads a Robot
# message, stores what it finds in self._data["metadata"], and resets
# self._msg_type so later messages are ignored.
# NOTE(review): sampled excerpt — the `try:` / `pass` / `finally:` lines of
# these methods are missing from this view.
383 def _get_testbed(self, msg):
384 """Called when extraction of testbed IP is required.
385 The testbed is identified by TG node IP address.
387 :param msg: Message to process.
392 if msg.message.count("Setup of TG node"):
393 reg_tg_ip = re.compile(
# NOTE(review): unescaped dots in the IP pattern match any character.
394 r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
396 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
397 except (KeyError, ValueError, IndexError, AttributeError):
400 self._data["metadata"]["testbed"] = self._testbed
401 self._msg_type = None
# Extract the VPP version; group(2) of REGEX_VERSION_VPP is the version
# string itself.
403 def _get_vpp_version(self, msg):
404 """Called when extraction of VPP version is required.
406 :param msg: Message to process.
411 if msg.message.count("return STDOUT Version:") or \
412 msg.message.count("VPP Version:") or \
413 msg.message.count("VPP version:"):
414 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
416 self._data["metadata"]["version"] = self._version
417 self._msg_type = None
# Extract the DPDK version analogously.
419 def _get_dpdk_version(self, msg):
420 """Called when extraction of DPDK version is required.
422 :param msg: Message to process.
427 if msg.message.count("DPDK Version:"):
429 self._version = str(re.search(
430 self.REGEX_VERSION_DPDK, msg.message). group(2))
431 self._data["metadata"]["version"] = self._version
435 self._msg_type = None
def _get_timestamp(self, msg):
    """Called when extraction of the timestamp is required.

    Stores the first 14 characters of the message timestamp as the
    "generated" metadata entry, then stops further timestamp parsing.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    stamp = msg.timestamp[:14]
    self._timestamp = stamp
    self._data["metadata"]["generated"] = stamp
    self._msg_type = None
# Collect the VAT command history printed by the teardown keyword. Each
# matching message increments the per-DUT counter and appends a
# "**DUTn:** ..." section to the test's "conf-history" field.
# NOTE(review): sampled excerpt — the `else:` line pairing with the
# counter check is missing from this view.
449 def _get_vat_history(self, msg):
450 """Called when extraction of VAT command history is required.
452 :param msg: Message to process.
456 if msg.message.count("VAT command history:"):
457 self._conf_history_lookup_nr += 1
458 if self._conf_history_lookup_nr == 1:
459 self._data["tests"][self._test_ID]["conf-history"] = str()
461 self._msg_type = None
# Strip the leading "<ip> VAT command history:" prefix and normalise the
# text into the " |br| "-separated form used by the report generator.
462 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
463 "VAT command history:", "", msg.message, count=1). \
464 replace("\n\n", "\n").replace('\n', ' |br| ').\
465 replace('\r', '').replace('"', "'")
467 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
468 self._data["tests"][self._test_ID]["conf-history"] += \
469 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
# Same as _get_vat_history but for PAPI command history messages.
471 def _get_papi_history(self, msg):
472 """Called when extraction of PAPI command history is required.
474 :param msg: Message to process.
478 if msg.message.count("PAPI command history:"):
479 self._conf_history_lookup_nr += 1
480 if self._conf_history_lookup_nr == 1:
481 self._data["tests"][self._test_ID]["conf-history"] = str()
483 self._msg_type = None
484 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
485 "PAPI command history:", "", msg.message, count=1). \
486 replace("\n\n", "\n").replace('\n', ' |br| ').\
487 replace('\r', '').replace('"', "'")
489 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
490 self._data["tests"][self._test_ID]["conf-history"] += \
491 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
# Parse the JSON payload of a "Show Runtime" message into a per-thread
# prettytable rendering and append it to the test's "show-run" field.
# NOTE(review): sampled excerpt — several lines (e.g. `try:`, the table
# row construction, the final string assembly) are missing from this view.
493 def _get_show_run(self, msg):
494 """Called when extraction of VPP operational data (output of CLI command
495 Show Runtime) is required.
497 :param msg: Message to process.
501 if msg.message.count("Runtime:"):
502 self._show_run_lookup_nr += 1
503 if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
504 self._data["tests"][self._test_ID]["show-run"] = str()
505 if self._lookup_kw_nr > 1:
506 self._msg_type = None
507 if self._show_run_lookup_nr > 0:
# Normalise the message into parseable JSON; [8:] drops the leading
# "Runtime:" prefix after whitespace removal.
508 message = str(msg.message).replace(' ', '').replace('\n', '').\
509 replace("'", '"').replace('b"', '"').replace('u"', '"')[8:]
510 runtime = loads(message)
# The number of worker threads is taken from the length of the "clocks"
# list of the first runtime entry.
512 threads_nr = len(runtime[0]["clocks"])
513 except (IndexError, KeyError):
515 tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks"]
516 table = [[tbl_hdr, ] for _ in range(threads_nr)]
518 for idx in range(threads_nr):
522 item["vectors"][idx],
523 item["suspends"][idx],
# Render one table per thread; thread 0 is vpp_main, the rest are
# vpp_wk_<n-1> workers.
527 for idx in range(threads_nr):
528 text += "Thread {idx} ".format(idx=idx)
529 text += "vpp_main\n" if idx == 0 else \
530 "vpp_wk_{idx}\n".format(idx=idx-1)
532 for row in table[idx]:
533 if txt_table is None:
534 txt_table = prettytable.PrettyTable(row)
537 txt_table.add_row(row)
538 txt_table.set_style(prettytable.MSWORD_FRIENDLY)
539 txt_table.align["Name"] = "l"
540 txt_table.align["Calls"] = "r"
541 txt_table.align["Vectors"] = "r"
542 txt_table.align["Suspends"] = "r"
543 txt_table.align["Clocks"] = "r"
545 text += txt_table.get_string(sortby="Name") + '\n'
547 text = text.replace('\n', ' |br| ').replace('\r', '').\
550 self._data["tests"][self._test_ID]["show-run"] += " |br| "
551 self._data["tests"][self._test_ID]["show-run"] += \
552 "**DUT" + str(self._show_run_lookup_nr) + ":** |br| " \
# Measurement-message parsers. Each returns the parsed structure (and,
# where applicable, a PASS/FAIL status) extracted via the class regexes.
# NOTE(review): sampled excerpt — `try:` / `status = ...` / initialiser
# lines are missing from this view of every parser below.
557 # TODO: Remove when definitely no NDRPDRDISC tests are used:
558 def _get_latency(self, msg, test_type):
559 """Get the latency data from the test message.
561 :param msg: Message to be parsed.
562 :param test_type: Type of the test - NDR or PDR.
565 :returns: Latencies parsed from the message.
# NDR messages carry 6 latency triplets (100/50/10% per direction),
# PDR messages only 2 (100% per direction).
569 if test_type == "NDR":
570 groups = re.search(self.REGEX_LAT_NDR, msg)
571 groups_range = range(1, 7)
572 elif test_type == "PDR":
573 groups = re.search(self.REGEX_LAT_PDR, msg)
574 groups_range = range(1, 3)
579 for idx in groups_range:
# Each group is a "min/avg/max" triplet of integers.
581 lat = [int(item) for item in str(groups.group(idx)).split('/')]
582 except (AttributeError, ValueError):
584 latencies.append(lat)
586 keys = ("min", "avg", "max")
594 latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
595 latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
596 if test_type == "NDR":
597 latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
598 latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
599 latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
600 latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
# Parse NDR/PDR lower and upper bounds; -1.0 sentinels mean "not found".
604 def _get_ndrpdr_throughput(self, msg):
605 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
608 :param msg: The test message to be parsed.
610 :returns: Parsed data as a dict and the status (PASS/FAIL).
611 :rtype: tuple(dict, str)
615 "NDR": {"LOWER": -1.0, "UPPER": -1.0},
616 "PDR": {"LOWER": -1.0, "UPPER": -1.0}
619 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
621 if groups is not None:
623 throughput["NDR"]["LOWER"] = float(groups.group(1))
624 throughput["NDR"]["UPPER"] = float(groups.group(2))
625 throughput["PDR"]["LOWER"] = float(groups.group(3))
626 throughput["PDR"]["UPPER"] = float(groups.group(4))
628 except (IndexError, ValueError):
631 return throughput, status
# Parse PLRsearch (soak test) lower/upper bounds.
633 def _get_plr_throughput(self, msg):
634 """Get PLRsearch lower bound and PLRsearch upper bound from the test
637 :param msg: The test message to be parsed.
639 :returns: Parsed data as a dict and the status (PASS/FAIL).
640 :rtype: tuple(dict, str)
648 groups = re.search(self.REGEX_PLR_RATE, msg)
650 if groups is not None:
652 throughput["LOWER"] = float(groups.group(1))
653 throughput["UPPER"] = float(groups.group(2))
655 except (IndexError, ValueError):
658 return throughput, status
# Parse per-direction min/avg/max latency for NDR and PDR results.
660 def _get_ndrpdr_latency(self, msg):
661 """Get LATENCY from the test message.
663 :param msg: The test message to be parsed.
665 :returns: Parsed data as a dict and the status (PASS/FAIL).
666 :rtype: tuple(dict, str)
671 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
672 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
675 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
676 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
680 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
682 if groups is not None:
683 keys = ("min", "avg", "max")
685 latency["NDR"]["direction1"] = dict(
686 zip(keys, [float(l) for l in groups.group(1).split('/')]))
687 latency["NDR"]["direction2"] = dict(
688 zip(keys, [float(l) for l in groups.group(2).split('/')]))
689 latency["PDR"]["direction1"] = dict(
690 zip(keys, [float(l) for l in groups.group(3).split('/')]))
691 latency["PDR"]["direction2"] = dict(
692 zip(keys, [float(l) for l in groups.group(4).split('/')]))
694 except (IndexError, ValueError):
697 return latency, status
def visit_suite(self, suite):
    """Implements traversing through the suite and its direct children.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """
    # Guard clause: a False return from start_suite skips the subtree.
    if self.start_suite(suite) is False:
        return
    suite.suites.visit(self)
    suite.tests.visit(self)
    self.end_suite(suite)
# Record the suite's name, documentation, parent and nesting level under
# its normalised long name, then visit its keywords.
# NOTE(review): sampled excerpt — `try:`, the parent-name fallback and the
# "doc" key assignment are missing from this view.
711 def start_suite(self, suite):
712 """Called when suite starts.
714 :param suite: Suite to process.
720 parent_name = suite.parent.name
721 except AttributeError:
# Normalise the suite documentation into the |br|-separated report form.
724 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
725 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
# Python 2 string.replace with maxreplace: undo only the first insertion.
726 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
728 self._data["suites"][suite.longname.lower().replace('"', "'").
729 replace(" ", "_")] = {
730 "name": suite.name.lower(),
732 "parent": parent_name,
733 "level": len(suite.longname.split("."))
736 suite.keywords.visit(self)
# end_suite: intentionally a no-op hook.
738 def end_suite(self, suite):
739 """Called when suite ends.
741 :param suite: Suite to process.
# visit_test: traverse one test's keywords if start_test accepts it.
# NOTE(review): the trailing `self.end_test(test)` line is missing from
# this view.
747 def visit_test(self, test):
748 """Implements traversing through the test.
750 :param test: Test to process.
754 if self.start_test(test) is not False:
755 test.keywords.visit(self)
# Build the test_result dict for one test: identity, tags, docs, type
# classification, and type-specific measurement parsing; finally store it
# under self._test_ID in self._data["tests"].
# NOTE(review): sampled excerpt — `return` statements, `test_result = ...`
# initialisation, several `elif`/`else:`/`try:` lines and closing braces
# are missing from this view.
758 def start_test(self, test):
759 """Called when test starts.
761 :param test: Test to process.
766 longname_orig = test.longname.lower()
768 # Check the ignore list
769 if longname_orig in self._ignore:
772 tags = [str(tag) for tag in test.tags]
775 # Change the TC long name and name if defined in the mapping table
776 longname = self._mapping.get(longname_orig, None)
777 if longname is not None:
778 name = longname.split('.')[-1]
779 logging.debug("{0}\n{1}\n{2}\n{3}".format(
780 self._data["metadata"], longname_orig, longname, name))
782 longname = longname_orig
783 name = test.name.lower()
785 # Remove TC number from the TC long name (backward compatibility):
786 self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
787 # Remove TC number from the TC name (not needed):
788 test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
790 test_result["parent"] = test.parent.name.lower()
791 test_result["tags"] = tags
792 doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
793 replace('\r', '').replace('[', ' |br| [')
794 test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
795 test_result["msg"] = test.message.replace('\n', ' |br| '). \
796 replace('\r', '').replace('"', "'")
# Default classification until a performance tag overrides it below.
797 test_result["type"] = "FUNC"
798 test_result["status"] = test.status
800 if "PERFTEST" in tags:
801 # Replace info about cores (e.g. -1c-) with the info about threads
802 # and cores (e.g. -1t1c-) in the long test case names and in the
803 # test case names if necessary.
804 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
808 for tag in test_result["tags"]:
809 groups = re.search(self.REGEX_TC_TAG, tag)
815 self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
816 "-{0}-".format(tag_tc.lower()),
819 test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
820 "-{0}-".format(tag_tc.lower()),
# Ambiguous/missing multi-threading tag: mark the test FAIL and bail out.
824 test_result["status"] = "FAIL"
825 self._data["tests"][self._test_ID] = test_result
826 logging.debug("The test '{0}' has no or more than one "
827 "multi-threading tags.".format(self._test_ID))
828 logging.debug("Tags: {0}".format(test_result["tags"]))
# Classify passing performance tests by their tag into a result "type".
831 if test.status == "PASS" and ("NDRPDRDISC" in tags or
838 # TODO: Remove when definitely no NDRPDRDISC tests are used:
839 if "NDRDISC" in tags:
840 test_result["type"] = "NDR"
841 # TODO: Remove when definitely no NDRPDRDISC tests are used:
842 elif "PDRDISC" in tags:
843 test_result["type"] = "PDR"
844 elif "NDRPDR" in tags:
845 test_result["type"] = "NDRPDR"
847 test_result["type"] = "SOAK"
849 test_result["type"] = "TCP"
851 test_result["type"] = "MRR"
852 elif "FRMOBL" in tags or "BMRR" in tags:
853 test_result["type"] = "BMRR"
854 elif "RECONF" in tags:
855 test_result["type"] = "RECONF"
857 test_result["status"] = "FAIL"
858 self._data["tests"][self._test_ID] = test_result
# Type-specific parsing of the test message.
861 # TODO: Remove when definitely no NDRPDRDISC tests are used:
862 if test_result["type"] in ("NDR", "PDR"):
864 rate_value = str(re.search(
865 self.REGEX_RATE, test.message).group(1))
866 except AttributeError:
869 rate_unit = str(re.search(
870 self.REGEX_RATE, test.message).group(2))
871 except AttributeError:
874 test_result["throughput"] = dict()
875 test_result["throughput"]["value"] = \
876 int(rate_value.split('.')[0])
877 test_result["throughput"]["unit"] = rate_unit
878 test_result["latency"] = \
879 self._get_latency(test.message, test_result["type"])
880 if test_result["type"] == "PDR":
881 test_result["lossTolerance"] = str(re.search(
882 self.REGEX_TOLERANCE, test.message).group(1))
884 elif test_result["type"] in ("NDRPDR", ):
885 test_result["throughput"], test_result["status"] = \
886 self._get_ndrpdr_throughput(test.message)
887 test_result["latency"], test_result["status"] = \
888 self._get_ndrpdr_latency(test.message)
890 elif test_result["type"] in ("SOAK", ):
891 test_result["throughput"], test_result["status"] = \
892 self._get_plr_throughput(test.message)
894 elif test_result["type"] in ("TCP", ):
895 groups = re.search(self.REGEX_TCP, test.message)
896 test_result["result"] = int(groups.group(2))
898 elif test_result["type"] in ("MRR", "BMRR"):
899 test_result["result"] = dict()
900 groups = re.search(self.REGEX_BMRR, test.message)
901 if groups is not None:
902 items_str = groups.group(1)
903 items_float = [float(item.strip()) for item
904 in items_str.split(",")]
905 metadata = AvgStdevMetadataFactory.from_data(items_float)
906 # Next two lines have been introduced in CSIT-1179,
907 # to be removed in CSIT-1180.
910 test_result["result"]["receive-rate"] = metadata
# Fallback for legacy single-sample MRR messages: rx / duration.
912 groups = re.search(self.REGEX_MRR, test.message)
913 test_result["result"]["receive-rate"] = \
914 AvgStdevMetadataFactory.from_data([
915 float(groups.group(3)) / float(groups.group(1)), ])
917 elif test_result["type"] == "RECONF":
918 test_result["result"] = None
920 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
921 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
922 test_result["result"] = {
923 "loss": int(grps_loss.group(1)),
924 "time": float(grps_time.group(1))
926 except (AttributeError, IndexError, ValueError, TypeError):
927 test_result["status"] = "FAIL"
929 self._data["tests"][self._test_ID] = test_result
# No-op hook called when a test ends (body `pass` missing from this view).
931 def end_test(self, test):
932 """Called when test ends.
934 :param test: Test to process.
def visit_keyword(self, keyword):
    """Implements traversing through the keyword and its child keywords.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
    # Guard clause: a False return from start_keyword skips this keyword.
    if self.start_keyword(keyword) is False:
        return
    self.end_keyword(keyword)
# Route the keyword to the setup / teardown / test traversal and reset
# the show-run keyword counter for non-setup keywords.
# NOTE(review): sampled excerpt — the `try:` and `else:` lines around the
# type dispatch are missing from this view.
950 def start_keyword(self, keyword):
951 """Called when keyword starts. Default implementation does nothing.
953 :param keyword: Keyword to process.
954 :type keyword: Keyword
958 if keyword.type == "setup":
959 self.visit_setup_kw(keyword)
960 elif keyword.type == "teardown":
961 self._lookup_kw_nr = 0
962 self.visit_teardown_kw(keyword)
964 self._lookup_kw_nr = 0
965 self.visit_test_kw(keyword)
966 except AttributeError:
# No-op hook for keyword end (body `pass` missing from this view).
969 def end_keyword(self, keyword):
970 """Called when keyword ends. Default implementation does nothing.
972 :param keyword: Keyword to process.
973 :type keyword: Keyword
def visit_test_kw(self, test_kw):
    """Implements traversing through the test keyword and its child
    keywords.

    :param test_kw: Keyword to process.
    :type test_kw: Keyword
    :returns: Nothing.
    """
    for child_kw in test_kw.keywords:
        # A False return from start_test_kw skips this child entirely.
        if self.start_test_kw(child_kw) is False:
            continue
        self.visit_test_kw(child_kw)
        self.end_test_kw(child_kw)
# Select the message parser for recognised test keywords: show-runtime
# counters or the DPDK install step (first occurrence only).
# NOTE(review): sampled excerpt — lines between the elif and the final
# messages.visit call (orig. 1005-1006) are missing from this view.
991 def start_test_kw(self, test_kw):
992 """Called when test keyword starts. Default implementation does
995 :param test_kw: Keyword to process.
996 :type test_kw: Keyword
999 if test_kw.name.count("Show Runtime Counters On All Duts"):
1000 self._lookup_kw_nr += 1
1001 self._show_run_lookup_nr = 0
1002 self._msg_type = "test-show-runtime"
1003 elif test_kw.name.count("Install Dpdk Test") and not self._version:
1004 self._msg_type = "dpdk-version"
1007 test_kw.messages.visit(self)
# No-op hook for test-keyword end (body `pass` missing from this view).
1009 def end_test_kw(self, test_kw):
1010 """Called when keyword ends. Default implementation does nothing.
1012 :param test_kw: Keyword to process.
1013 :type test_kw: Keyword
def visit_setup_kw(self, setup_kw):
    """Implements traversing through the setup keyword and its child
    keywords.

    :param setup_kw: Keyword to process.
    :type setup_kw: Keyword
    :returns: Nothing.
    """
    for child_kw in setup_kw.keywords:
        # A False return from start_setup_kw skips this child entirely.
        if self.start_setup_kw(child_kw) is False:
            continue
        self.visit_setup_kw(child_kw)
        self.end_setup_kw(child_kw)
# Select the message parser for recognised setup keywords: VPP version,
# timestamp and testbed identification (first occurrence only each).
# NOTE(review): sampled excerpt — lines between the last elif and the
# messages.visit call (orig. 1047-1048) are missing from this view.
1031 def start_setup_kw(self, setup_kw):
1032 """Called when setup keyword starts. Default implementation does
1035 :param setup_kw: Keyword to process.
1036 :type setup_kw: Keyword
1039 if setup_kw.name.count("Show Vpp Version On All Duts") \
1040 and not self._version:
1041 self._msg_type = "vpp-version"
1042 elif setup_kw.name.count("Set Global Variable") \
1043 and not self._timestamp:
1044 self._msg_type = "timestamp"
1045 elif setup_kw.name.count("Setup Framework") and not self._testbed:
1046 self._msg_type = "testbed"
1049 setup_kw.messages.visit(self)
# No-op hook for setup-keyword end (body `pass` missing from this view).
1051 def end_setup_kw(self, setup_kw):
1052 """Called when keyword ends. Default implementation does nothing.
1054 :param setup_kw: Keyword to process.
1055 :type setup_kw: Keyword
def visit_teardown_kw(self, teardown_kw):
    """Implements traversing through the teardown keyword and its child
    keywords.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
    for child_kw in teardown_kw.keywords:
        # A False return from start_teardown_kw skips this child entirely.
        if self.start_teardown_kw(child_kw) is False:
            continue
        self.visit_teardown_kw(child_kw)
        self.end_teardown_kw(child_kw)
def start_teardown_kw(self, teardown_kw):
    """Called when teardown keyword starts. Default implementation does
    nothing.

    Detects the VAT / PAPI configuration-history keywords, resets the
    per-DUT history counter, sets the matching message type and parses
    the keyword's messages.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
    kw_name = teardown_kw.name
    if kw_name.count("Show Vat History On All Duts"):
        history_type = "teardown-vat-history"
    elif kw_name.count("Show Papi History On All Duts"):
        history_type = "teardown-papi-history"
    else:
        return
    self._conf_history_lookup_nr = 0
    self._msg_type = history_type
    teardown_kw.messages.visit(self)
# No-op hook for teardown-keyword end (body `pass` missing from this view).
1091 def end_teardown_kw(self, teardown_kw):
1092 """Called when keyword ends. Default implementation does nothing.
1094 :param teardown_kw: Keyword to process.
1095 :type teardown_kw: Keyword
def visit_message(self, msg):
    """Implements visiting the message.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    # Guard clause: a False return from start_message skips the message.
    if self.start_message(msg) is False:
        return
    self.end_message(msg)
# Dispatch the message to the parser selected by self._msg_type.
# NOTE(review): sampled excerpt — the `if self._msg_type:` guard line
# (orig. 1119) is missing from this view.
1110 def start_message(self, msg):
1111 """Called when message starts. Get required information from messages:
1114 :param msg: Message to process.
1120 self.parse_msg[self._msg_type](msg)
# No-op hook for message end (body `pass` missing from this view).
1122 def end_message(self, msg):
1123 """Called when message ends. Default implementation does nothing.
1125 :param msg: Message to process.
# Container for all parsed job/build data, keyed job -> build -> section.
# NOTE(review): sampled excerpt — parts of the class docstring, the
# `self._cfg = spec` assignment and the `data` property's decorator/def
# lines are missing from this view.
1132 class InputData(object):
1135 The data is extracted from output.xml files generated by Jenkins jobs and
1136 stored in pandas' DataFrames.
1142 (as described in ExecutionChecker documentation)
1144 (as described in ExecutionChecker documentation)
1146 (as described in ExecutionChecker documentation)
1149 def __init__(self, spec):
1152 :param spec: Specification.
1153 :type spec: Specification
# Top-level store: pandas Series of per-job Series of per-build data.
1160 self._input_data = pd.Series()
1164 """Getter - Input data.
1166 :returns: Input data
1167 :rtype: pandas.Series
1169 return self._input_data
def metadata(self, job, build):
    """Getter - metadata.

    :param job: Job which metadata we want.
    :param build: Build which metadata we want.
    :type job: str
    :returns: Metadata of the given job/build.
    :rtype: pandas.Series
    """
    build_data = self.data[job][build]
    return build_data["metadata"]
def suites(self, job, build):
    """Getter - suites.

    :param job: Job which suites we want.
    :param build: Build which suites we want.
    :type job: str
    :returns: Suites of the given job/build.
    :rtype: pandas.Series
    """
    # Builds are keyed by their string form.
    build_key = str(build)
    return self.data[job][build_key]["suites"]
def tests(self, job, build):
    """Getter - tests.

    :param job: Job which tests we want.
    :param build: Build which tests we want.
    :type job: str
    :returns: Tests of the given job/build.
    :rtype: pandas.Series
    """
    job_data = self.data[job]
    return job_data[build]["tests"]
# Parse one downloaded output.xml with Robot's ExecutionResult and run the
# ExecutionChecker visitor over it; returns the checker's data structure.
# NOTE(review): sampled excerpt — the `try:`/`return None` error path and
# the `return checker.data` line are missing from this view.
1210 def _parse_tests(self, job, build, log):
1211 """Process data from robot output.xml file and return JSON structured
1214 :param job: The name of job which build output data will be processed.
1215 :param build: The build which output data will be processed.
1216 :param log: List of log messages.
1219 :type log: list of tuples (severity, msg)
1220 :returns: JSON data structure.
1229 with open(build["file-name"], 'r') as data_file:
1231 result = ExecutionResult(data_file)
1232 except errors.DataError as err:
1233 log.append(("ERROR", "Error occurred while parsing output.xml: "
1236 checker = ExecutionChecker(metadata, self._cfg.mapping,
1238 result.visit(checker)
# Download one build's output.xml, parse it, delete the local file, drop
# data outside the configured time period, flush the accumulated log
# messages to the logging module, and return a result dict.
# NOTE(review): sampled excerpt — the state variable handling, several
# `if`/`else:`/`try:` lines and the "time-period" removal body are missing
# from this view.
1242 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1243 """Download and parse the input data file.
1245 :param pid: PID of the process executing this method.
1246 :param job: Name of the Jenkins job which generated the processed input
1248 :param build: Information about the Jenkins build which generated the
1249 processed input file.
1250 :param repeat: Repeat the download specified number of times if not
1260 logs.append(("INFO", "  Processing the job/build: {0}: {1}".
1261 format(job, build["build"])))
1268 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1274 logs.append(("ERROR", "It is not possible to download the input "
1275 "data file from the job '{job}', build "
1276 "'{build}', or it is damaged. Skipped.".
1277 format(job=job, build=build["build"])))
1279 logs.append(("INFO", "    Processing data from the build '{0}' ...".
1280 format(build["build"])))
1281 data = self._parse_tests(job, build, logs)
1283 logs.append(("ERROR", "Input data file from the job '{job}', "
1284 "build '{build}' is damaged. Skipped.".
1285 format(job=job, build=build["build"])))
# Best-effort removal of the downloaded file; failure is only logged.
1290 remove(build["file-name"])
1291 except OSError as err:
1292 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
1293 format(build["file-name"], repr(err))))
1295 # If the time-period is defined in the specification file, remove all
1296 # files which are outside the time period.
1297 timeperiod = self._cfg.input.get("time-period", None)
1298 if timeperiod and data:
1300 timeperiod = timedelta(int(timeperiod))
1301 metadata = data.get("metadata", None)
1303 generated = metadata.get("generated", None)
1305 generated = dt.strptime(generated, "%Y%m%d %H:%M")
1306 if (now - generated) > timeperiod:
1307 # Remove the data and the file:
1312 "    The build {job}/{build} is outdated, will be "
1313 "removed".format(job=job, build=build["build"])))
1314 logs.append(("INFO", "  Done."))
# Replay the collected (severity, message) tuples through logging.
1316 for level, line in logs:
1319 elif level == "ERROR":
1321 elif level == "DEBUG":
1323 elif level == "CRITICAL":
1324 logging.critical(line)
1325 elif level == "WARNING":
1326 logging.warning(line)
1328 return {"data": data, "state": state, "job": job, "build": build}
# Drive the whole pipeline: for every configured job/build download+parse,
# wrap the result dicts in pandas Series, and record file name and state
# back into the configuration.
# NOTE(review): sampled excerpt — the state check between orig. lines
# 1345 and 1348 is missing from this view.
1330 def download_and_parse_data(self, repeat=1):
1331 """Download the input data files, parse input data from input files and
1332 store in pandas' Series.
1334 :param repeat: Repeat the download specified number of times if not
1339 logging.info("Downloading and parsing input files ...")
1341 for job, builds in self._cfg.builds.items():
1342 for build in builds:
1344 result = self._download_and_parse_build(job, build, repeat)
1345 build_nr = result["build"]["build"]
1348 data = result["data"]
# Re-shape the checker's OrderedDicts into nested pandas Series.
1349 build_data = pd.Series({
1350 "metadata": pd.Series(
1351 data["metadata"].values(),
1352 index=data["metadata"].keys()),
1353 "suites": pd.Series(data["suites"].values(),
1354 index=data["suites"].keys()),
1355 "tests": pd.Series(data["tests"].values(),
1356 index=data["tests"].keys())})
1358 if self._input_data.get(job, None) is None:
1359 self._input_data[job] = pd.Series()
1360 self._input_data[job][str(build_nr)] = build_data
1362 self._cfg.set_input_file_name(
1363 job, build_nr, result["build"]["file-name"])
1365 self._cfg.set_input_state(job, build_nr, result["state"])
# ru_maxrss is reported in kB on Linux, hence the /1000 to approximate MB.
1367 logging.info("Memory allocation: {0:,d}MB".format(
1368 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
1370 logging.info("Done.")
1373 def _end_of_tag(tag_filter, start=0, closer="'"):
1374 """Return the index of character in the string which is the end of tag.
1376 :param tag_filter: The string where the end of tag is being searched.
1377 :param start: The index where the searching is stated.
1378 :param closer: The character which is the tag closer.
1379 :type tag_filter: str
1382 :returns: The index of the tag closer.
1387 idx_opener = tag_filter.index(closer, start)
1388 return tag_filter.index(closer, idx_opener + 1)
def _condition(tag_filter):
    """Create a conditional statement from the given tag filter.

    :param tag_filter: Filter based on tags from the element specification.
    :type tag_filter: str
    :returns: Conditional statement which can be evaluated.
    :rtype: str
    """

    # NOTE(review): this copy is truncated -- the initialisation of
    # `index` and the loop that repeats the step below for every quoted
    # tag in the filter are elided; only one rewriting step is visible.
    index = InputData._end_of_tag(tag_filter, index)
    # Splice " in tags" after the tag's closing apostrophe so each quoted
    # tag literal becomes a membership test against `tags` when the
    # resulting string is eval-uated.
    tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
def filter_data(self, element, params=None, data=None, data_set="tests",
                continue_on_error=False):
    """Filter required data from the given jobs and builds.

    The output data structure is:

    - job 1
      - build 1
        - test (or suite) 1 ID:
          - param 1
          - param 2
          ...
        - test (or suite) n ID:
          ...
      ...
    ...

    :param element: Element which will use the filtered data.
    :param params: Parameters which will be included in the output. If None,
        all parameters are included.
    :param data: If not None, this data is used instead of data specified
        in the element.
    :param data_set: The set of data to be filtered: tests, suites,
        metadata.
    :param continue_on_error: Continue if there is error while reading the
        data. The Item will be empty then.
    :type element: pandas.Series
    :type params: list
    :type data: dict
    :type data_set: str
    :type continue_on_error: bool
    :returns: Filtered data.
    :rtype: pandas.Series
    """

    # Build an eval-able condition string from the element's tag filter.
    # NOTE(review): this copy is truncated -- the "all"/"template" case
    # presumably short-circuited to a trivially-true condition and only
    # the other branch called _condition(), inside a try/except KeyError;
    # confirm against the full source.
    if element["filter"] in ("all", "template"):
        cond = InputData._condition(element["filter"])
    logging.debug(" Filter: {0}".format(cond))
    # NOTE(review): presumably the KeyError handler for a missing
    # "filter" key in the element specification.
    logging.error(" No filter defined.")

    # NOTE(review): guards (likely `if params is None:` and `if params:`)
    # appear elided around the two statements below.
    params = element.get("parameters", None)
    params.append("type")

    # Explicitly passed data wins over the jobs/builds named by the
    # element itself.
    data_to_filter = data if data else element["data"]
    # NOTE(review): a `data = pd.Series()` re-initialisation and a
    # surrounding `try:` appear to be elided here -- as written, `data`
    # may be None when indexed below.
    for job, builds in data_to_filter.items():
        data[job] = pd.Series()
        for build in builds:
            data[job][str(build)] = pd.Series()
            # NOTE(review): the continuation target (presumably
            # `iteritems()`) and its except KeyError handler are elided
            # after the backslash; `continue_on_error` originally guarded
            # that read-failure path.
            data_iter = self.data[job][str(build)][data_set].\
            if continue_on_error:
                # Keep only items whose tags satisfy the filter; `cond`
                # is evaluated with the item's tags bound to `tags`.
                for test_ID, test_data in data_iter:
                    if eval(cond, {"tags": test_data.get("tags", "")}):
                        data[job][str(build)][test_ID] = pd.Series()
                        # Copy all parameters of the matching item ...
                        for param, val in test_data.items():
                            data[job][str(build)][test_ID][param] = val
                        # ... or only the requested ones.
                        for param in params:
                            # NOTE(review): both continuation targets
                            # (presumably `test_data[param]` and a
                            # KeyError fallback value) are elided after
                            # the backslashes.
                            data[job][str(build)][test_ID][param] =\
                            data[job][str(build)][test_ID][param] =\

    # NOTE(review): outer exception handlers; their `try:` is elided in
    # this copy.
    except (KeyError, IndexError, ValueError) as err:
        logging.error(" Missing mandatory parameter in the element "
                      "specification: {0}".format(err))
    except AttributeError:
        logging.error(" The filter '{0}' is not correct. Check if all "
                      "tags are enclosed by apostrophes.".format(cond))
def filter_tests_by_name(self, element, params=None, data_set="tests",
                         continue_on_error=False):
    """Filter required data from the given jobs and builds.

    The output data structure is:

    - job 1
      - build 1
        - test (or suite) 1 ID:
          - param 1
          - param 2
          ...
        - test (or suite) n ID:
          ...
      ...
    ...

    :param element: Element which will use the filtered data.
    :param params: Parameters which will be included in the output. If None,
        all parameters are included.
    :param data_set: The set of data to be filtered: tests, suites,
        metadata.
    :param continue_on_error: Continue if there is error while reading the
        data. The Item will be empty then.
    :type element: pandas.Series
    :type params: list
    :type data_set: str
    :type continue_on_error: bool
    :returns: Filtered data.
    :rtype: pandas.Series
    """

    # Tests are selected by name patterns listed under "include" in the
    # element specification.
    include = element.get("include", None)
    # NOTE(review): an `if not include:` guard (followed by an early
    # return) appears to be elided before this warning in this copy.
    logging.warning("No tests to include, skipping the element.")

    # NOTE(review): guards (likely `if params is None:` and `if params:`)
    # appear elided around the two statements below.
    params = element.get("parameters", None)
    params.append("type")

    # NOTE(review): a `data = pd.Series()` initialisation and a
    # surrounding `try:` appear to be elided here.
    for job, builds in element["data"].items():
        data[job] = pd.Series()
        for build in builds:
            data[job][str(build)] = pd.Series()
            for test in include:
                # Compile once per include pattern; matching is
                # case-insensitive via lower-casing both sides.
                reg_ex = re.compile(str(test).lower())
                # NOTE(review): the continuation target (presumably
                # `[data_set].keys():`) is elided after the backslash.
                for test_ID in self.data[job][str(build)]\
                    if re.match(reg_ex, str(test_ID).lower()):
                        # NOTE(review): continuation target (presumably
                        # `[data_set][test_ID]`) is elided.
                        test_data = self.data[job][str(build)]\
                        data[job][str(build)][test_ID] = pd.Series()
                        # Copy all parameters of the matching test ...
                        for param, val in test_data.items():
                            # NOTE(review): continuation target
                            # (presumably `[param] = val`) is elided.
                            data[job][str(build)][test_ID]\
                        # ... or only the requested ones.
                        for param in params:
                            data[job][str(build)][test_ID]\
                            [param] = test_data[param]
                            # NOTE(review): a KeyError fallback branch
                            # (likely storing a "no data" placeholder)
                            # appears elided around the line below.
                            data[job][str(build)][test_ID]\
                        except KeyError as err:
                            logging.error("{err!r}".format(err=err))
                            # NOTE(review): the suite of this guard
                            # (likely `continue` / `return None`) is
                            # elided.
                            if continue_on_error:

    # NOTE(review): outer exception handlers; their `try:` is elided in
    # this copy.
    except (KeyError, IndexError, ValueError) as err:
        logging.error("Missing mandatory parameter in the element "
                      "specification: {err!r}".format(err=err))
    except AttributeError as err:
        logging.error("{err!r}".format(err=err))
def merge_data(data):
    """Merge data from more jobs and builds to a simple data structure.

    The output data structure is:

    - test (suite) 1 ID:
      - param 1
      - param 2
      ...
    - test (suite) n ID:
    ...

    :param data: Data to merge.
    :type data: pandas.Series
    :returns: Merged data.
    :rtype: pandas.Series
    """

    logging.info(" Merging data ...")

    merged_data = pd.Series()
    # Flatten the job -> build -> item hierarchy into one Series keyed by
    # item ID; a later build's entry overwrites an earlier one with the
    # same ID (last writer wins).
    for _, builds in data.iteritems():
        for _, item in builds.iteritems():
            for ID, item_data in item.iteritems():
                merged_data[ID] = item_data