1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
28 from robot.api import ExecutionResult, ResultVisitor
29 from robot import errors
30 from collections import OrderedDict
31 from string import replace
33 from os.path import join
34 from datetime import datetime as dt
35 from datetime import timedelta
36 from json import loads
37 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
39 from input_data_files import download_and_unzip_data_file
42 # Separator used in file names
46 class ExecutionChecker(ResultVisitor):
47 """Class to traverse through the test suite structure.
49 The functionality implemented in this class generates a json structure:
55 "generated": "Timestamp",
56 "version": "SUT version",
57 "job": "Jenkins job name",
58 "build": "Information about the build"
61 "Suite long name 1": {
63 "doc": "Suite 1 documentation",
64 "parent": "Suite 1 parent",
65 "level": "Level of the suite in the suite hierarchy"
67 "Suite long name N": {
69 "doc": "Suite N documentation",
70 "parent": "Suite 2 parent",
71 "level": "Level of the suite in the suite hierarchy"
78 "parent": "Name of the parent of the test",
79 "doc": "Test documentation",
80 "msg": "Test message",
81 "conf-history": "DUT1 and DUT2 VAT History",
82 "show-run": "Show Run",
83 "tags": ["tag 1", "tag 2", "tag n"],
85 "status": "PASS" | "FAIL",
127 "parent": "Name of the parent of the test",
128 "doc": "Test documentation",
129 "msg": "Test message",
130 "tags": ["tag 1", "tag 2", "tag n"],
132 "status": "PASS" | "FAIL",
139 "parent": "Name of the parent of the test",
140 "doc": "Test documentation",
141 "msg": "Test message",
142 "tags": ["tag 1", "tag 2", "tag n"],
143 "type": "MRR" | "BMRR",
144 "status": "PASS" | "FAIL",
146 "receive-rate": AvgStdevMetadata,
150 # TODO: Remove when definitely no NDRPDRDISC tests are used:
154 "parent": "Name of the parent of the test",
155 "doc": "Test documentation",
156 "msg": "Test message",
157 "tags": ["tag 1", "tag 2", "tag n"],
158 "type": "PDR" | "NDR",
159 "status": "PASS" | "FAIL",
160 "throughput": { # Only type: "PDR" | "NDR"
162 "unit": "pps" | "bps" | "percentage"
164 "latency": { # Only type: "PDR" | "NDR"
171 "50": { # Only for NDR
176 "10": { # Only for NDR
188 "50": { # Only for NDR
193 "10": { # Only for NDR
200 "lossTolerance": "lossTolerance", # Only type: "PDR"
201 "conf-history": "DUT1 and DUT2 VAT History"
202 "show-run": "Show Run"
214 "metadata": { # Optional
215 "version": "VPP version",
216 "job": "Jenkins job name",
217 "build": "Information about the build"
221 "doc": "Suite 1 documentation",
222 "parent": "Suite 1 parent",
223 "level": "Level of the suite in the suite hierarchy"
226 "doc": "Suite N documentation",
227 "parent": "Suite 2 parent",
228 "level": "Level of the suite in the suite hierarchy"
234 "parent": "Name of the parent of the test",
235 "doc": "Test documentation"
236 "msg": "Test message"
237 "tags": ["tag 1", "tag 2", "tag n"],
238 "conf-history": "DUT1 and DUT2 VAT History"
239 "show-run": "Show Run"
240 "status": "PASS" | "FAIL"
248 .. note:: ID is the lowercase full path to the test.
251 # TODO: Remove when definitely no NDRPDRDISC tests are used:
252 REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
254 REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
255 r'PLRsearch upper bound::\s(\d+.\d+)')
257 REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
258 r'NDR_UPPER:\s(\d+.\d+).*\n'
259 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
260 r'PDR_UPPER:\s(\d+.\d+)')
262 # TODO: Remove when definitely no NDRPDRDISC tests are used:
263 REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
264 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
265 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
266 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
267 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
268 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
269 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')
271 REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
272 r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
273 r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')
275 REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
276 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
278 REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
281 REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
282 r"VPP Version:\s*|VPP version:\s*)(.*)")
284 REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")
286 REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
288 REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
289 r'tx\s(\d*),\srx\s(\d*)')
291 REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
292 r' in packets per second: \[(.*)\]')
294 REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
295 REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.\d*)')
297 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
299 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
301 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
303 REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
def __init__(self, metadata, mapping, ignore):
    """Initialisation.

    :param metadata: Key-value pairs to be included in "metadata" part of
        the generated JSON structure.
    :param mapping: Mapping of the old names of test cases to the new
        (actual) ones.
    :param ignore: List of TCs to be ignored.
    :type metadata: dict
    :type mapping: dict
    :type ignore: list
    """

    # Type of message to parse out from the test messages
    self._msg_type = None

    # SUT (VPP / DPDK) version, filled in while parsing
    self._version = None

    # Timestamp of the build
    self._timestamp = None

    # Testbed. The testbed is identified by TG node IP address.
    self._testbed = None

    # Mapping of TCs long names
    self._mapping = mapping

    # Ignore list
    self._ignore = ignore

    # Number of VAT History messages found:
    # 0 - no message
    # 1 - VAT History of DUT1
    # 2 - VAT History of DUT2
    self._lookup_kw_nr = 0
    self._conf_history_lookup_nr = 0

    # Number of Show Running messages found
    # 0 - no message
    # 1 - Show run message found
    self._show_run_lookup_nr = 0

    # Test ID of currently processed test - the lowercase full path to the
    # test.
    self._test_ID = None

    # The main data structure
    self._data = {
        "metadata": OrderedDict(),
        "suites": OrderedDict(),
        "tests": OrderedDict()
    }

    # Save the provided metadata
    for key, val in metadata.items():
        self._data["metadata"][key] = val

    # Dictionary defining the methods used to parse different types of
    # messages; dispatched on self._msg_type by start_message().
    self.parse_msg = {
        "timestamp": self._get_timestamp,
        "vpp-version": self._get_vpp_version,
        "dpdk-version": self._get_dpdk_version,
        "teardown-vat-history": self._get_vat_history,
        "teardown-papi-history": self._get_papi_history,
        "test-show-runtime": self._get_show_run,
        "testbed": self._get_testbed
    }
377 """Getter - Data parsed from the XML file.
379 :returns: Data parsed from the XML file.
def _get_testbed(self, msg):
    """Called when extraction of testbed IP is required.
    The testbed is identified by TG node IP address.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """

    if msg.message.count("Setup of TG node"):
        reg_tg_ip = re.compile(
            r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
        try:
            self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
        except (KeyError, ValueError, IndexError, AttributeError):
            # Best effort: leave self._testbed unchanged if the IP cannot
            # be extracted from the message.
            pass
        finally:
            self._data["metadata"]["testbed"] = self._testbed
            self._msg_type = None
def _get_vpp_version(self, msg):
    """Called when extraction of VPP version is required.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """

    if msg.message.count("return STDOUT Version:") or \
            msg.message.count("VPP Version:") or \
            msg.message.count("VPP version:"):
        # Group 2 of REGEX_VERSION_VPP is the version string itself.
        self._version = str(
            re.search(self.REGEX_VERSION_VPP, msg.message).group(2))
        self._data["metadata"]["version"] = self._version
        self._msg_type = None
def _get_dpdk_version(self, msg):
    """Called when extraction of DPDK version is required.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """

    if msg.message.count("DPDK Version:"):
        try:
            # Group 2 of REGEX_VERSION_DPDK is the version string itself.
            self._version = str(re.search(
                self.REGEX_VERSION_DPDK, msg.message).group(2))
            self._data["metadata"]["version"] = self._version
        except IndexError:
            pass
        finally:
            self._msg_type = None
def _get_timestamp(self, msg):
    """Called when extraction of timestamp is required.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """

    # Keep only the leading "YYYYMMDD HH:MM" part of the timestamp.
    self._timestamp = msg.timestamp[:14]
    self._data["metadata"]["generated"] = self._timestamp
    self._msg_type = None
def _get_vat_history(self, msg):
    """Called when extraction of VAT command history is required.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """

    if msg.message.count("VAT command history:"):
        self._conf_history_lookup_nr += 1
        if self._conf_history_lookup_nr == 1:
            # First DUT: initialise the accumulated history string.
            self._data["tests"][self._test_ID]["conf-history"] = str()
        else:
            self._msg_type = None
        # Strip the "<IP> VAT command history:" prefix and convert the
        # message into the |br|-separated form used in the reports.
        text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                      "VAT command history:", "", msg.message, count=1). \
            replace("\n\n", "\n").replace('\n', ' |br| ').\
            replace('\r', '').replace('"', "'")

        self._data["tests"][self._test_ID]["conf-history"] += " |br| "
        self._data["tests"][self._test_ID]["conf-history"] += \
            "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
def _get_papi_history(self, msg):
    """Called when extraction of PAPI command history is required.

    NOTE(review): this duplicates _get_vat_history except for the marker
    string; a shared helper would remove the duplication.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """

    if msg.message.count("PAPI command history:"):
        self._conf_history_lookup_nr += 1
        if self._conf_history_lookup_nr == 1:
            # First DUT: initialise the accumulated history string.
            self._data["tests"][self._test_ID]["conf-history"] = str()
        else:
            self._msg_type = None
        # Strip the "<IP> PAPI command history:" prefix and convert the
        # message into the |br|-separated form used in the reports.
        text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                      "PAPI command history:", "", msg.message, count=1). \
            replace("\n\n", "\n").replace('\n', ' |br| ').\
            replace('\r', '').replace('"', "'")

        self._data["tests"][self._test_ID]["conf-history"] += " |br| "
        self._data["tests"][self._test_ID]["conf-history"] += \
            "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
def _get_show_run(self, msg):
    """Called when extraction of VPP operational data (output of CLI command
    Show Runtime) is required.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """

    if msg.message.count("Runtime:"):
        self._show_run_lookup_nr += 1
        if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
            self._data["tests"][self._test_ID]["show-run"] = str()
        if self._lookup_kw_nr > 1:
            self._msg_type = None
        if self._show_run_lookup_nr > 0:
            # Strip whitespace / newlines, normalise quoting and drop the
            # leading "Runtime:" marker so the payload parses as JSON.
            message = str(msg.message).replace(' ', '').replace('\n', '').\
                replace("'", '"').replace('b"', '"').replace('u"', '"')[8:]
            runtime = loads(message)
            try:
                threads_nr = len(runtime[0]["clocks"])
            except (IndexError, KeyError):
                return
            tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks"]
            table = [[tbl_hdr, ] for _ in range(threads_nr)]
            # One row per graph node per thread.
            for item in runtime:
                for idx in range(threads_nr):
                    table[idx].append([
                        item["name"],
                        item["calls"][idx],
                        item["vectors"][idx],
                        item["suspends"][idx],
                        item["clocks"][idx]
                    ])
            text = ""
            for idx in range(threads_nr):
                # Thread 0 is vpp_main, others are workers vpp_wk_<n>.
                text += "Thread {idx} ".format(idx=idx)
                text += "vpp_main\n" if idx == 0 else \
                    "vpp_wk_{idx}\n".format(idx=idx-1)
                txt_table = None
                for row in table[idx]:
                    if txt_table is None:
                        # First row carries the header.
                        txt_table = prettytable.PrettyTable(row)
                    else:
                        txt_table.add_row(row)
                txt_table.set_style(prettytable.MSWORD_FRIENDLY)
                txt_table.align["Name"] = "l"
                txt_table.align["Calls"] = "r"
                txt_table.align["Vectors"] = "r"
                txt_table.align["Suspends"] = "r"
                txt_table.align["Clocks"] = "r"
                text += txt_table.get_string(sortby="Name") + '\n'
            text = text.replace('\n', ' |br| ').replace('\r', '').\
                replace('"', "'")
            self._data["tests"][self._test_ID]["show-run"] += " |br| "
            self._data["tests"][self._test_ID]["show-run"] += \
                "**DUT" + str(self._show_run_lookup_nr) + ":** |br| " \
                + text
# TODO: Remove when definitely no NDRPDRDISC tests are used:
def _get_latency(self, msg, test_type):
    """Get the latency data from the test message.

    :param msg: Message to be parsed.
    :param test_type: Type of the test - NDR or PDR.
    :type msg: str
    :type test_type: str
    :returns: Latencies parsed from the message.
    :rtype: dict
    """

    if test_type == "NDR":
        groups = re.search(self.REGEX_LAT_NDR, msg)
        groups_range = range(1, 7)
    elif test_type == "PDR":
        groups = re.search(self.REGEX_LAT_PDR, msg)
        groups_range = range(1, 3)
    else:
        # Unknown test type: nothing to parse.
        groups = None
        groups_range = range(0, 0)

    latencies = list()
    for idx in groups_range:
        try:
            lat = [int(item) for item in str(groups.group(idx)).split('/')]
        except (AttributeError, ValueError):
            # Placeholder for a missing / malformed min/avg/max triplet.
            lat = [-1, -1, -1]
        latencies.append(lat)

    keys = ("min", "avg", "max")
    latency = {
        "direction1": {
        },
        "direction2": {
        }
    }

    latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
    latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
    if test_type == "NDR":
        latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
        latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
        latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
        latency["direction2"]["10"] = dict(zip(keys, latencies[5]))

    return latency
def _get_ndrpdr_throughput(self, msg):
    """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
    message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """

    # -1.0 marks values which could not be parsed.
    throughput = {
        "NDR": {"LOWER": -1.0, "UPPER": -1.0},
        "PDR": {"LOWER": -1.0, "UPPER": -1.0}
    }
    status = "FAIL"
    groups = re.search(self.REGEX_NDRPDR_RATE, msg)

    if groups is not None:
        try:
            throughput["NDR"]["LOWER"] = float(groups.group(1))
            throughput["NDR"]["UPPER"] = float(groups.group(2))
            throughput["PDR"]["LOWER"] = float(groups.group(3))
            throughput["PDR"]["UPPER"] = float(groups.group(4))
            status = "PASS"
        except (IndexError, ValueError):
            pass

    return throughput, status
def _get_plr_throughput(self, msg):
    """Get PLRsearch lower bound and PLRsearch upper bound from the test
    message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """

    # -1.0 marks values which could not be parsed.
    throughput = {
        "LOWER": -1.0,
        "UPPER": -1.0
    }
    status = "FAIL"
    groups = re.search(self.REGEX_PLR_RATE, msg)

    if groups is not None:
        try:
            throughput["LOWER"] = float(groups.group(1))
            throughput["UPPER"] = float(groups.group(2))
            status = "PASS"
        except (IndexError, ValueError):
            pass

    return throughput, status
def _get_ndrpdr_latency(self, msg):
    """Get LATENCY from the test message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """

    # -1.0 marks values which could not be parsed.
    latency = {
        "NDR": {
            "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
            "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
        },
        "PDR": {
            "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
            "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
        }
    }
    status = "FAIL"
    groups = re.search(self.REGEX_NDRPDR_LAT, msg)

    if groups is not None:
        keys = ("min", "avg", "max")
        try:
            # Each group is a "min/avg/max" triplet.
            latency["NDR"]["direction1"] = dict(
                zip(keys, [float(l) for l in groups.group(1).split('/')]))
            latency["NDR"]["direction2"] = dict(
                zip(keys, [float(l) for l in groups.group(2).split('/')]))
            latency["PDR"]["direction1"] = dict(
                zip(keys, [float(l) for l in groups.group(3).split('/')]))
            latency["PDR"]["direction2"] = dict(
                zip(keys, [float(l) for l in groups.group(4).split('/')]))
            status = "PASS"
        except (IndexError, ValueError):
            pass

    return latency, status
def visit_suite(self, suite):
    """Implements traversing through the suite and its direct children.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """
    if self.start_suite(suite) is not False:
        suite.suites.visit(self)
        suite.tests.visit(self)
        self.end_suite(suite)
def start_suite(self, suite):
    """Called when suite starts.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """

    try:
        parent_name = suite.parent.name
    except AttributeError:
        return

    doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
        replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
    # Fix: string.replace() is a Python-2-only free function; the str
    # method with a count argument behaves identically and works on
    # Python 2 and Python 3 alike.
    doc_str = doc_str.replace(' |br| *[', '*[', 1)

    self._data["suites"][suite.longname.lower().replace('"', "'").
        replace(" ", "_")] = {
            "name": suite.name.lower(),
            "doc": doc_str,
            "parent": parent_name,
            "level": len(suite.longname.split("."))
        }

    suite.keywords.visit(self)
def end_suite(self, suite):
    """Called when suite ends.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """
    pass
def visit_test(self, test):
    """Implements traversing through the test.

    :param test: Test to process.
    :type test: Test
    :returns: Nothing.
    """
    if self.start_test(test) is not False:
        test.keywords.visit(self)
        self.end_test(test)
def start_test(self, test):
    """Called when test starts.

    Builds the per-test result dictionary and stores it under the test ID
    in self._data["tests"].

    :param test: Test to process.
    :type test: Test
    :returns: Nothing.
    """

    longname_orig = test.longname.lower()

    # Check the ignore list
    if longname_orig in self._ignore:
        return

    tags = [str(tag) for tag in test.tags]
    test_result = dict()

    # Change the TC long name and name if defined in the mapping table
    longname = self._mapping.get(longname_orig, None)
    if longname is not None:
        name = longname.split('.')[-1]
        logging.debug("{0}\n{1}\n{2}\n{3}".format(
            self._data["metadata"], longname_orig, longname, name))
    else:
        longname = longname_orig
        name = test.name.lower()

    # Remove TC number from the TC long name (backward compatibility):
    self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
    # Remove TC number from the TC name (not needed):
    test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

    test_result["parent"] = test.parent.name.lower()
    test_result["tags"] = tags
    doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
        replace('\r', '').replace('[', ' |br| [')
    # Fix: string.replace() is Python-2-only; use the str method with a
    # count argument (identical behavior, Python 2/3 compatible).
    test_result["doc"] = doc_str.replace(' |br| [', '[', 1)
    test_result["msg"] = test.message.replace('\n', ' |br| '). \
        replace('\r', '').replace('"', "'")
    test_result["type"] = "FUNC"
    test_result["status"] = test.status

    if "PERFTEST" in tags:
        # Replace info about cores (e.g. -1c-) with the info about threads
        # and cores (e.g. -1t1c-) in the long test case names and in the
        # test case names if necessary.
        groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
        if not groups:
            tag_count = 0
            for tag in test_result["tags"]:
                groups = re.search(self.REGEX_TC_TAG, tag)
                if groups:
                    tag_count += 1
                    tag_tc = tag

            if tag_count == 1:
                self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
                                       "-{0}-".format(tag_tc.lower()),
                                       self._test_ID,
                                       count=1)
                test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                             "-{0}-".format(tag_tc.lower()),
                                             test_result["name"],
                                             count=1)
            else:
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result
                logging.debug("The test '{0}' has no or more than one "
                              "multi-threading tags.".format(self._test_ID))
                logging.debug("Tags: {0}".format(test_result["tags"]))
                return

    if test.status == "PASS" and ("NDRPDRDISC" in tags or
                                  "NDRPDR" in tags or
                                  "SOAK" in tags or
                                  "TCP" in tags or
                                  "MRR" in tags or
                                  "FRMOBL" in tags or
                                  "BMRR" in tags or
                                  "RECONF" in tags):
        # TODO: Remove when definitely no NDRPDRDISC tests are used:
        if "NDRDISC" in tags:
            test_result["type"] = "NDR"
        # TODO: Remove when definitely no NDRPDRDISC tests are used:
        elif "PDRDISC" in tags:
            test_result["type"] = "PDR"
        elif "NDRPDR" in tags:
            test_result["type"] = "NDRPDR"
        elif "SOAK" in tags:
            test_result["type"] = "SOAK"
        elif "TCP" in tags:
            test_result["type"] = "TCP"
        elif "MRR" in tags:
            test_result["type"] = "MRR"
        elif "FRMOBL" in tags or "BMRR" in tags:
            test_result["type"] = "BMRR"
        elif "RECONF" in tags:
            test_result["type"] = "RECONF"
        else:
            test_result["status"] = "FAIL"
            self._data["tests"][self._test_ID] = test_result
            return

        # TODO: Remove when definitely no NDRPDRDISC tests are used:
        if test_result["type"] in ("NDR", "PDR"):
            try:
                rate_value = str(re.search(
                    self.REGEX_RATE, test.message).group(1))
            except AttributeError:
                rate_value = "-1"
            try:
                rate_unit = str(re.search(
                    self.REGEX_RATE, test.message).group(2))
            except AttributeError:
                rate_unit = "-1"

            test_result["throughput"] = dict()
            test_result["throughput"]["value"] = \
                int(rate_value.split('.')[0])
            test_result["throughput"]["unit"] = rate_unit
            test_result["latency"] = \
                self._get_latency(test.message, test_result["type"])
            if test_result["type"] == "PDR":
                test_result["lossTolerance"] = str(re.search(
                    self.REGEX_TOLERANCE, test.message).group(1))

        elif test_result["type"] in ("NDRPDR", ):
            test_result["throughput"], test_result["status"] = \
                self._get_ndrpdr_throughput(test.message)
            test_result["latency"], test_result["status"] = \
                self._get_ndrpdr_latency(test.message)

        elif test_result["type"] in ("SOAK", ):
            test_result["throughput"], test_result["status"] = \
                self._get_plr_throughput(test.message)

        elif test_result["type"] in ("TCP", ):
            groups = re.search(self.REGEX_TCP, test.message)
            test_result["result"] = int(groups.group(2))

        elif test_result["type"] in ("MRR", "BMRR"):
            test_result["result"] = dict()
            groups = re.search(self.REGEX_BMRR, test.message)
            if groups is not None:
                items_str = groups.group(1)
                items_float = [float(item.strip()) for item
                               in items_str.split(",")]
                metadata = AvgStdevMetadataFactory.from_data(items_float)
                # Next two lines have been introduced in CSIT-1179,
                # to be removed in CSIT-1180.
                metadata.size = 1
                metadata.stdev = 0.0
                test_result["result"]["receive-rate"] = metadata
            else:
                # Legacy MRR message format.
                groups = re.search(self.REGEX_MRR, test.message)
                test_result["result"]["receive-rate"] = \
                    AvgStdevMetadataFactory.from_data([
                        float(groups.group(3)) / float(groups.group(1)), ])

        elif test_result["type"] == "RECONF":
            test_result["result"] = None
            try:
                grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                test_result["result"] = {
                    "loss": int(grps_loss.group(1)),
                    "time": float(grps_time.group(1))
                }
            except (AttributeError, IndexError, ValueError, TypeError):
                test_result["status"] = "FAIL"

    self._data["tests"][self._test_ID] = test_result
def end_test(self, test):
    """Called when test ends.

    :param test: Test to process.
    :type test: Test
    :returns: Nothing.
    """
    pass
def visit_keyword(self, keyword):
    """Implements traversing through the keyword and its child keywords.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
    if self.start_keyword(keyword) is not False:
        self.end_keyword(keyword)
def start_keyword(self, keyword):
    """Called when keyword starts. Default implementation does nothing.

    Dispatches on the keyword type (setup / teardown / test).

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
    try:
        if keyword.type == "setup":
            self.visit_setup_kw(keyword)
        elif keyword.type == "teardown":
            self._lookup_kw_nr = 0
            self.visit_teardown_kw(keyword)
        else:
            self._lookup_kw_nr = 0
            self.visit_test_kw(keyword)
    except AttributeError:
        pass
def end_keyword(self, keyword):
    """Called when keyword ends. Default implementation does nothing.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
    pass
def visit_test_kw(self, test_kw):
    """Implements traversing through the test keyword and its child
    keywords.

    :param test_kw: Keyword to process.
    :type test_kw: Keyword
    :returns: Nothing.
    """
    for keyword in test_kw.keywords:
        if self.start_test_kw(keyword) is not False:
            self.visit_test_kw(keyword)
            self.end_test_kw(keyword)
def start_test_kw(self, test_kw):
    """Called when test keyword starts. Default implementation does
    nothing.

    :param test_kw: Keyword to process.
    :type test_kw: Keyword
    :returns: Nothing.
    """
    if test_kw.name.count("Show Runtime Counters On All Duts"):
        self._lookup_kw_nr += 1
        self._show_run_lookup_nr = 0
        self._msg_type = "test-show-runtime"
    elif test_kw.name.count("Install Dpdk Test") and not self._version:
        self._msg_type = "dpdk-version"
    else:
        return
    test_kw.messages.visit(self)
def end_test_kw(self, test_kw):
    """Called when keyword ends. Default implementation does nothing.

    :param test_kw: Keyword to process.
    :type test_kw: Keyword
    :returns: Nothing.
    """
    pass
def visit_setup_kw(self, setup_kw):
    """Implements traversing through the setup keyword and its child
    keywords.

    :param setup_kw: Keyword to process.
    :type setup_kw: Keyword
    :returns: Nothing.
    """
    for keyword in setup_kw.keywords:
        if self.start_setup_kw(keyword) is not False:
            self.visit_setup_kw(keyword)
            self.end_setup_kw(keyword)
def start_setup_kw(self, setup_kw):
    """Called when setup keyword starts. Default implementation does
    nothing.

    :param setup_kw: Keyword to process.
    :type setup_kw: Keyword
    :returns: Nothing.
    """
    if setup_kw.name.count("Show Vpp Version On All Duts") \
            and not self._version:
        self._msg_type = "vpp-version"
    elif setup_kw.name.count("Set Global Variable") \
            and not self._timestamp:
        self._msg_type = "timestamp"
    elif setup_kw.name.count("Setup Framework") and not self._testbed:
        self._msg_type = "testbed"
    else:
        return
    setup_kw.messages.visit(self)
def end_setup_kw(self, setup_kw):
    """Called when keyword ends. Default implementation does nothing.

    :param setup_kw: Keyword to process.
    :type setup_kw: Keyword
    :returns: Nothing.
    """
    pass
def visit_teardown_kw(self, teardown_kw):
    """Implements traversing through the teardown keyword and its child
    keywords.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
    for keyword in teardown_kw.keywords:
        if self.start_teardown_kw(keyword) is not False:
            self.visit_teardown_kw(keyword)
            self.end_teardown_kw(keyword)
def start_teardown_kw(self, teardown_kw):
    """Called when teardown keyword starts. Default implementation does
    nothing.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
    if teardown_kw.name.count("Show Vat History On All Duts"):
        # Reset the per-test DUT counter before collecting the history.
        self._conf_history_lookup_nr = 0
        self._msg_type = "teardown-vat-history"
        teardown_kw.messages.visit(self)
    elif teardown_kw.name.count("Show Papi History On All Duts"):
        self._conf_history_lookup_nr = 0
        self._msg_type = "teardown-papi-history"
        teardown_kw.messages.visit(self)
def end_teardown_kw(self, teardown_kw):
    """Called when keyword ends. Default implementation does nothing.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
    pass
def visit_message(self, msg):
    """Implements visiting the message.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    if self.start_message(msg) is not False:
        self.end_message(msg)
def start_message(self, msg):
    """Called when message starts. Get required information from messages:
    - VPP version.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    # Dispatch to the parser registered for the currently expected
    # message type (set by the start_*_kw callbacks).
    if self._msg_type:
        self.parse_msg[self._msg_type](msg)
def end_message(self, msg):
    """Called when message ends. Default implementation does nothing.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    pass
1133 class InputData(object):
1136 The data is extracted from output.xml files generated by Jenkins jobs and
1137 stored in pandas' DataFrames.
1143 (as described in ExecutionChecker documentation)
1145 (as described in ExecutionChecker documentation)
1147 (as described in ExecutionChecker documentation)
def __init__(self, spec):
    """Initialisation.

    :param spec: Specification.
    :type spec: Specification
    """

    # Specification:
    self._cfg = spec

    # Data store, keyed by job name, then build number:
    self._input_data = pd.Series()
1165 """Getter - Input data.
1167 :returns: Input data
1168 :rtype: pandas.Series
1170 return self._input_data
def metadata(self, job, build):
    """Getter - metadata

    :param job: Job which metadata we want.
    :param build: Build which metadata we want.
    :type job: str
    :type build: str
    :returns: Metadata of the given build of the given job.
    :rtype: pandas.Series
    """
    return self.data[job][build]["metadata"]
def suites(self, job, build):
    """Getter - suites

    :param job: Job which suites we want.
    :param build: Build which suites we want.
    :type job: str
    :type build: str
    :returns: Suites of the given build of the given job.
    :rtype: pandas.Series
    """
    # Builds are stored under string keys.
    return self.data[job][str(build)]["suites"]
def tests(self, job, build):
    """Getter - tests

    :param job: Job which tests we want.
    :param build: Build which tests we want.
    :type job: str
    :type build: str
    :returns: Tests of the given build of the given job.
    :rtype: pandas.Series
    """
    return self.data[job][build]["tests"]
def _parse_tests(self, job, build, log):
    """Process data from robot output.xml file and return JSON structured
    data.

    :param job: The name of job which build output data will be processed.
    :param build: The build which output data will be processed.
    :param log: List of log messages.
    :type job: str
    :type build: dict
    :type log: list of tuples (severity, msg)
    :returns: JSON data structure.
    :rtype: dict
    """

    metadata = {
        "job": job,
        "build": build
    }

    with open(build["file-name"], 'r') as data_file:
        try:
            result = ExecutionResult(data_file)
        except errors.DataError as err:
            log.append(("ERROR", "Error occurred while parsing output.xml: "
                                 "{0}".format(err)))
            return None
    checker = ExecutionChecker(metadata, self._cfg.mapping,
                               self._cfg.ignore)
    result.visit(checker)

    return checker.data
def _download_and_parse_build(self, job, build, repeat, pid=10000):
    """Download and parse the input data file.

    :param pid: PID of the process executing this method.
    :param job: Name of the Jenkins job which generated the processed input
        file.
    :param build: Information about the Jenkins build which generated the
        processed input file.
    :param repeat: Repeat the download specified number of times if not
        successful.
    :type pid: int
    :type job: str
    :type build: dict
    :type repeat: int
    """

    logs = list()

    logs.append(("INFO", " Processing the job/build: {0}: {1}".
                 format(job, build["build"])))

    state = "failed"
    success = False
    data = None
    do_repeat = repeat
    while do_repeat:
        success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                               logs)
        if success:
            break
        do_repeat -= 1
    if not success:
        logs.append(("ERROR", "It is not possible to download the input "
                              "data file from the job '{job}', build "
                              "'{build}', or it is damaged. Skipped.".
                     format(job=job, build=build["build"])))
    if success:
        logs.append(("INFO", " Processing data from the build '{0}' ...".
                     format(build["build"])))
        data = self._parse_tests(job, build, logs)
        if data is None:
            logs.append(("ERROR", "Input data file from the job '{job}', "
                                  "build '{build}' is damaged. Skipped.".
                         format(job=job, build=build["build"])))
        else:
            state = "processed"

        try:
            remove(build["file-name"])
        except OSError as err:
            logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                         format(build["file-name"], repr(err))))

    # If the time-period is defined in the specification file, remove all
    # files which are outside the time period.
    timeperiod = self._cfg.input.get("time-period", None)
    if timeperiod and data:
        now = dt.utcnow()
        timeperiod = timedelta(int(timeperiod))
        metadata = data.get("metadata", None)
        if metadata:
            generated = metadata.get("generated", None)
            if generated:
                generated = dt.strptime(generated, "%Y%m%d %H:%M")
                if (now - generated) > timeperiod:
                    # Remove the data and the file:
                    state = "removed"
                    data = None
                    logs.append(("INFO",
                                 " The build {job}/{build} is outdated, will be "
                                 "removed".format(job=job,
                                                  build=build["build"])))
                    file_name = self._cfg.input["file-name"]
                    full_name = join(
                        self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
                        "{job}{sep}{build}{sep}{name}".format(
                            job=job,
                            sep=SEPARATOR,
                            build=build["build"],
                            name=file_name))
                    try:
                        remove(full_name)
                        logs.append(("INFO",
                                     " The file {name} has been removed".
                                     format(name=full_name)))
                    except OSError as err:
                        logs.append(("ERROR",
                                     "Cannot remove the file '{0}': {1}".
                                     format(full_name, repr(err))))
    logs.append(("INFO", " Done."))

    # Flush the collected log lines to the logging module.
    for level, line in logs:
        if level == "INFO":
            logging.info(line)
        elif level == "ERROR":
            logging.error(line)
        elif level == "DEBUG":
            logging.debug(line)
        elif level == "CRITICAL":
            logging.critical(line)
        elif level == "WARNING":
            logging.warning(line)

    return {"data": data, "state": state, "job": job, "build": build}
def download_and_parse_data(self, repeat=1):
    """Download the input data files, parse input data from input files and
    store in pandas' Series.

    :param repeat: Repeat the download specified number of times if not
        successful.
    :type repeat: int
    """

    logging.info("Downloading and parsing input files ...")

    for job, builds in self._cfg.builds.items():
        for build in builds:

            result = self._download_and_parse_build(job, build, repeat)
            build_nr = result["build"]["build"]

            if result["data"]:
                data = result["data"]
                build_data = pd.Series({
                    "metadata": pd.Series(
                        data["metadata"].values(),
                        index=data["metadata"].keys()),
                    "suites": pd.Series(data["suites"].values(),
                                        index=data["suites"].keys()),
                    "tests": pd.Series(data["tests"].values(),
                                       index=data["tests"].keys())})

                if self._input_data.get(job, None) is None:
                    self._input_data[job] = pd.Series()
                self._input_data[job][str(build_nr)] = build_data

                self._cfg.set_input_file_name(
                    job, build_nr, result["build"]["file-name"])

            self._cfg.set_input_state(job, build_nr, result["state"])

            # Fix: use floor division so the value stays an integer; with
            # Python 3 true division '/' yields a float and the ':,d'
            # format specifier raises ValueError. Identical on Python 2.
            logging.info("Memory allocation: {0:,d}MB".format(
                resource.getrusage(resource.RUSAGE_SELF).ru_maxrss // 1000))

    logging.info("Done.")
1391 def _end_of_tag(tag_filter, start=0, closer="'"):
1392 """Return the index of character in the string which is the end of tag.
1394 :param tag_filter: The string where the end of tag is being searched.
1395 :param start: The index where the searching is stated.
1396 :param closer: The character which is the tag closer.
1397 :type tag_filter: str
1400 :returns: The index of the tag closer.
1405 idx_opener = tag_filter.index(closer, start)
1406 return tag_filter.index(closer, idx_opener + 1)
def _condition(tag_filter):
    """Create a conditional statement from the given tag filter.

    Every quoted tag in the filter is rewritten into a membership test
    ("'TAG'" --> "'TAG' in tags") so the whole expression can later be
    evaluated against the set of tags of a test.

    :param tag_filter: Filter based on tags from the element specification.
    :type tag_filter: str
    :returns: Conditional statement which can be evaluated.
    :rtype: str
    """
    index = 0
    while True:
        # Advance to the closing quote of the next tag; None means done.
        index = InputData._end_of_tag(tag_filter, index)
        if index is None:
            return tag_filter
        index += 1
        tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
def filter_data(self, element, params=None, data=None, data_set="tests",
                continue_on_error=False):
    """Filter required data from the given jobs and builds.

    The output data structure is:

    - job 1
      - build 1
        - test (or suite) 1 ID:
          - param 1
          ...
          - param n
        ...
        - test (or suite) n ID:
        ...
      ...
      - build n
    ...
    - job n

    :param element: Element which will use the filtered data.
    :param params: Parameters which will be included in the output. If None,
        all parameters are included.
    :param data: If not None, this data is used instead of data specified
        in the element.
    :param data_set: The set of data to be filtered: tests, suites,
        metadata.
    :param continue_on_error: Continue if there is error while reading the
        data. The Item will be empty then
    :type element: pandas.Series
    :type params: list
    :type data: dict
    :type data_set: str
    :type continue_on_error: bool
    :returns: Filtered data.
    :rtype pandas.Series
    """
    # Build the eval()-able condition from the element's tag filter.
    try:
        if element["filter"] in ("all", "template"):
            cond = "True"
        else:
            cond = InputData._condition(element["filter"])
        logging.debug("  Filter: {0}".format(cond))
    except KeyError:
        logging.error("  No filter defined.")
        return None

    if params is None:
        params = element.get("parameters", None)
        if params:
            params.append("type")

    data_to_filter = data if data else element["data"]
    data = pd.Series()
    try:
        for job, builds in data_to_filter.items():
            data[job] = pd.Series()
            for build in builds:
                data[job][str(build)] = pd.Series()
                try:
                    data_iter = self.data[job][str(build)][data_set].\
                        iteritems()
                except KeyError:
                    if continue_on_error:
                        continue
                    return None
                for test_ID, test_data in data_iter:
                    # The condition sees only the test's tags.
                    if eval(cond, {"tags": test_data.get("tags", "")}):
                        data[job][str(build)][test_ID] = pd.Series()
                        if params is None:
                            # No explicit list: copy everything.
                            for param, val in test_data.items():
                                data[job][str(build)][test_ID][param] = val
                        else:
                            for param in params:
                                try:
                                    data[job][str(build)][test_ID][param] =\
                                        test_data[param]
                                except KeyError:
                                    data[job][str(build)][test_ID][param] =\
                                        "No Data"
        return data

    except (KeyError, IndexError, ValueError) as err:
        logging.error("  Missing mandatory parameter in the element "
                      "specification: {0}".format(err))
        return None
    except AttributeError:
        return None
    except SyntaxError:
        logging.error("  The filter '{0}' is not correct. Check if all "
                      "tags are enclosed by apostrophes.".format(cond))
        return None
def filter_tests_by_name(self, element, params=None, data_set="tests",
                         continue_on_error=False):
    """Filter required data from the given jobs and builds.

    The output data structure is:

    - job 1
      - build 1
        - test (or suite) 1 ID:
          - param 1
          ...
          - param n
        ...
        - test (or suite) n ID:
        ...
      ...
      - build n
    ...
    - job n

    :param element: Element which will use the filtered data.
    :param params: Parameters which will be included in the output. If None,
        all parameters are included.
    :param data_set: The set of data to be filtered: tests, suites,
        metadata.
    :param continue_on_error: Continue if there is error while reading the
        data. The Item will be empty then
    :type element: pandas.Series
    :type params: list
    :type data_set: str
    :type continue_on_error: bool
    :returns: Filtered data.
    :rtype pandas.Series
    """
    include = element.get("include", None)
    if not include:
        logging.warning("No tests to include, skipping the element.")
        return None

    if params is None:
        params = element.get("parameters", None)
        if params:
            params.append("type")

    data = pd.Series()
    try:
        for job, builds in element["data"].items():
            data[job] = pd.Series()
            for build in builds:
                data[job][str(build)] = pd.Series()
                for test in include:
                    try:
                        # Each "include" entry is a regex matched against
                        # lower-cased test IDs.
                        reg_ex = re.compile(str(test).lower())
                        for test_ID in self.data[job][str(build)]\
                                [data_set].keys():
                            if re.match(reg_ex, str(test_ID).lower()):
                                test_data = self.data[job][str(build)]\
                                    [data_set][test_ID]
                                data[job][str(build)][test_ID] = pd.Series()
                                if params is None:
                                    for param, val in test_data.items():
                                        data[job][str(build)][test_ID]\
                                            [param] = val
                                else:
                                    for param in params:
                                        try:
                                            data[job][str(build)][test_ID]\
                                                [param] = test_data[param]
                                        except KeyError:
                                            data[job][str(build)][test_ID]\
                                                [param] = "No Data"
                    except KeyError as err:
                        logging.error("{err!r}".format(err=err))
                        if continue_on_error:
                            continue
                        return None
        return data

    except (KeyError, IndexError, ValueError) as err:
        logging.error("Missing mandatory parameter in the element "
                      "specification: {err!r}".format(err=err))
        return None
    except AttributeError as err:
        logging.error("{err!r}".format(err=err))
        return None
1615 def merge_data(data):
1616 """Merge data from more jobs and builds to a simple data structure.
1618 The output data structure is:
1620 - test (suite) 1 ID:
1626 - test (suite) n ID:
1629 :param data: Data to merge.
1630 :type data: pandas.Series
1631 :returns: Merged data.
1632 :rtype: pandas.Series
1635 logging.info(" Merging data ...")
1637 merged_data = pd.Series()
1638 for _, builds in data.iteritems():
1639 for _, item in builds.iteritems():
1640 for ID, item_data in item.iteritems():
1641 merged_data[ID] = item_data