1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
28 from robot.api import ExecutionResult, ResultVisitor
29 from robot import errors
30 from collections import OrderedDict
31 from string import replace
33 from os.path import join
34 from datetime import datetime as dt
35 from datetime import timedelta
36 from json import loads
37 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
39 from input_data_files import download_and_unzip_data_file
42 # Separator used in file names
46 class ExecutionChecker(ResultVisitor):
47 """Class to traverse through the test suite structure.
49 The functionality implemented in this class generates a json structure:
55 "generated": "Timestamp",
56 "version": "SUT version",
57 "job": "Jenkins job name",
58 "build": "Information about the build"
61 "Suite long name 1": {
63 "doc": "Suite 1 documentation",
64 "parent": "Suite 1 parent",
65 "level": "Level of the suite in the suite hierarchy"
67 "Suite long name N": {
69 "doc": "Suite N documentation",
70 "parent": "Suite 2 parent",
71 "level": "Level of the suite in the suite hierarchy"
78 "parent": "Name of the parent of the test",
79 "doc": "Test documentation",
80 "msg": "Test message",
81 "conf-history": "DUT1 and DUT2 VAT History",
82 "show-run": "Show Run",
83 "tags": ["tag 1", "tag 2", "tag n"],
85 "status": "PASS" | "FAIL",
127 "parent": "Name of the parent of the test",
128 "doc": "Test documentation",
129 "msg": "Test message",
130 "tags": ["tag 1", "tag 2", "tag n"],
132 "status": "PASS" | "FAIL",
139 "parent": "Name of the parent of the test",
140 "doc": "Test documentation",
141 "msg": "Test message",
142 "tags": ["tag 1", "tag 2", "tag n"],
143 "type": "MRR" | "BMRR",
144 "status": "PASS" | "FAIL",
146 "receive-rate": AvgStdevMetadata,
150 # TODO: Remove when definitely no NDRPDRDISC tests are used:
154 "parent": "Name of the parent of the test",
155 "doc": "Test documentation",
156 "msg": "Test message",
157 "tags": ["tag 1", "tag 2", "tag n"],
158 "type": "PDR" | "NDR",
159 "status": "PASS" | "FAIL",
160 "throughput": { # Only type: "PDR" | "NDR"
162 "unit": "pps" | "bps" | "percentage"
164 "latency": { # Only type: "PDR" | "NDR"
171 "50": { # Only for NDR
176 "10": { # Only for NDR
188 "50": { # Only for NDR
193 "10": { # Only for NDR
200 "lossTolerance": "lossTolerance", # Only type: "PDR"
201 "conf-history": "DUT1 and DUT2 VAT History"
202 "show-run": "Show Run"
214 "metadata": { # Optional
215 "version": "VPP version",
216 "job": "Jenkins job name",
217 "build": "Information about the build"
221 "doc": "Suite 1 documentation",
222 "parent": "Suite 1 parent",
223 "level": "Level of the suite in the suite hierarchy"
226 "doc": "Suite N documentation",
227 "parent": "Suite 2 parent",
228 "level": "Level of the suite in the suite hierarchy"
234 "parent": "Name of the parent of the test",
235 "doc": "Test documentation"
236 "msg": "Test message"
237 "tags": ["tag 1", "tag 2", "tag n"],
238 "conf-history": "DUT1 and DUT2 VAT History"
239 "show-run": "Show Run"
240 "status": "PASS" | "FAIL"
248 .. note:: ID is the lowercase full path to the test.
251 # TODO: Remove when definitely no NDRPDRDISC tests are used:
252 REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
254 REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
255 r'PLRsearch upper bound::\s(\d+.\d+)')
257 REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
258 r'NDR_UPPER:\s(\d+.\d+).*\n'
259 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
260 r'PDR_UPPER:\s(\d+.\d+)')
262 # TODO: Remove when definitely no NDRPDRDISC tests are used:
263 REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
264 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
265 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
266 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
267 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
268 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
269 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')
271 REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
272 r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
273 r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')
275 REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
276 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
278 REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
281 REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
282 r"VPP Version:\s*|VPP version:\s*)(.*)")
284 REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")
286 REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
288 REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
289 r'tx\s(\d*),\srx\s(\d*)')
291 REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
292 r' in packets per second: \[(.*)\]')
294 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
296 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
298 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
300 REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
302 def __init__(self, metadata, mapping, ignore):
305 :param metadata: Key-value pairs to be included in "metadata" part of
307 :param mapping: Mapping of the old names of test cases to the new
309 :param ignore: List of TCs to be ignored.
315 # Type of message to parse out from the test messages
316 self._msg_type = None
322 self._timestamp = None
324 # Testbed. The testbed is identified by TG node IP address.
327 # Mapping of TCs long names
328 self._mapping = mapping
331 self._ignore = ignore
333 # Number of VAT History messages found:
335 # 1 - VAT History of DUT1
336 # 2 - VAT History of DUT2
337 self._lookup_kw_nr = 0
338 self._conf_history_lookup_nr = 0
340 # Number of Show Running messages found
342 # 1 - Show run message found
343 self._show_run_lookup_nr = 0
345 # Test ID of currently processed test- the lowercase full path to the
349 # The main data structure
351 "metadata": OrderedDict(),
352 "suites": OrderedDict(),
353 "tests": OrderedDict()
356 # Save the provided metadata
357 for key, val in metadata.items():
358 self._data["metadata"][key] = val
360 # Dictionary defining the methods used to parse different types of
363 "timestamp": self._get_timestamp,
364 "vpp-version": self._get_vpp_version,
365 "dpdk-version": self._get_dpdk_version,
366 "teardown-vat-history": self._get_vat_history,
367 "teardown-papi-history": self._get_papi_history,
368 "test-show-runtime": self._get_show_run,
369 "testbed": self._get_testbed
374 """Getter - Data parsed from the XML file.
376 :returns: Data parsed from the XML file.
381 def _get_testbed(self, msg):
382 """Called when extraction of testbed IP is required.
383 The testbed is identified by TG node IP address.
385 :param msg: Message to process.
390 if msg.message.count("Setup of TG node"):
391 reg_tg_ip = re.compile(
392 r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
394 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
395 except (KeyError, ValueError, IndexError, AttributeError):
398 self._data["metadata"]["testbed"] = self._testbed
399 self._msg_type = None
401 def _get_vpp_version(self, msg):
402 """Called when extraction of VPP version is required.
404 :param msg: Message to process.
409 if msg.message.count("return STDOUT Version:") or \
410 msg.message.count("VPP Version:") or \
411 msg.message.count("VPP version:"):
412 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
414 self._data["metadata"]["version"] = self._version
415 self._msg_type = None
417 def _get_dpdk_version(self, msg):
418 """Called when extraction of DPDK version is required.
420 :param msg: Message to process.
425 if msg.message.count("DPDK Version:"):
427 self._version = str(re.search(
428 self.REGEX_VERSION_DPDK, msg.message). group(2))
429 self._data["metadata"]["version"] = self._version
433 self._msg_type = None
435 def _get_timestamp(self, msg):
436 """Called when extraction of timestamp is required.
438 :param msg: Message to process.
443 self._timestamp = msg.timestamp[:14]
444 self._data["metadata"]["generated"] = self._timestamp
445 self._msg_type = None
447 def _get_vat_history(self, msg):
448 """Called when extraction of VAT command history is required.
450 :param msg: Message to process.
454 if msg.message.count("VAT command history:"):
455 self._conf_history_lookup_nr += 1
456 if self._conf_history_lookup_nr == 1:
457 self._data["tests"][self._test_ID]["conf-history"] = str()
459 self._msg_type = None
460 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
461 "VAT command history:", "", msg.message, count=1). \
462 replace("\n\n", "\n").replace('\n', ' |br| ').\
463 replace('\r', '').replace('"', "'")
465 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
466 self._data["tests"][self._test_ID]["conf-history"] += \
467 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
469 def _get_papi_history(self, msg):
470 """Called when extraction of PAPI command history is required.
472 :param msg: Message to process.
476 if msg.message.count("PAPI command history:"):
477 self._conf_history_lookup_nr += 1
478 if self._conf_history_lookup_nr == 1:
479 self._data["tests"][self._test_ID]["conf-history"] = str()
481 self._msg_type = None
482 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
483 "PAPI command history:", "", msg.message, count=1). \
484 replace("\n\n", "\n").replace('\n', ' |br| ').\
485 replace('\r', '').replace('"', "'")
487 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
488 self._data["tests"][self._test_ID]["conf-history"] += \
489 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
491 def _get_show_run(self, msg):
492 """Called when extraction of VPP operational data (output of CLI command
493 Show Runtime) is required.
495 :param msg: Message to process.
499 if msg.message.count("Runtime:"):
500 self._show_run_lookup_nr += 1
501 if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
502 self._data["tests"][self._test_ID]["show-run"] = str()
503 if self._lookup_kw_nr > 1:
504 self._msg_type = None
505 if self._show_run_lookup_nr > 0:
506 message = str(msg.message).replace(' ', '').replace('\n', '').\
507 replace("'", '"').replace('b"', '"').replace('u"', '"')[8:]
508 runtime = loads(message)
510 threads_nr = len(runtime[0]["clocks"])
511 except (IndexError, KeyError):
513 tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks"]
514 table = [[tbl_hdr, ] for _ in range(threads_nr)]
516 for idx in range(threads_nr):
520 item["vectors"][idx],
521 item["suspends"][idx],
525 for idx in range(threads_nr):
526 text += "Thread {idx} ".format(idx=idx)
527 text += "vpp_main\n" if idx == 0 else \
528 "vpp_wk_{idx}\n".format(idx=idx-1)
530 for row in table[idx]:
531 if txt_table is None:
532 txt_table = prettytable.PrettyTable(row)
535 txt_table.add_row(row)
536 txt_table.set_style(prettytable.MSWORD_FRIENDLY)
537 txt_table.align["Name"] = "l"
538 txt_table.align["Calls"] = "r"
539 txt_table.align["Vectors"] = "r"
540 txt_table.align["Suspends"] = "r"
541 txt_table.align["Clocks"] = "r"
543 text += txt_table.get_string(sortby="Name") + '\n'
545 text = text.replace('\n', ' |br| ').replace('\r', '').\
548 self._data["tests"][self._test_ID]["show-run"] += " |br| "
549 self._data["tests"][self._test_ID]["show-run"] += \
550 "**DUT" + str(self._show_run_lookup_nr) + ":** |br| " \
555 # TODO: Remove when definitely no NDRPDRDISC tests are used:
556 def _get_latency(self, msg, test_type):
557 """Get the latency data from the test message.
559 :param msg: Message to be parsed.
560 :param test_type: Type of the test - NDR or PDR.
563 :returns: Latencies parsed from the message.
567 if test_type == "NDR":
568 groups = re.search(self.REGEX_LAT_NDR, msg)
569 groups_range = range(1, 7)
570 elif test_type == "PDR":
571 groups = re.search(self.REGEX_LAT_PDR, msg)
572 groups_range = range(1, 3)
577 for idx in groups_range:
579 lat = [int(item) for item in str(groups.group(idx)).split('/')]
580 except (AttributeError, ValueError):
582 latencies.append(lat)
584 keys = ("min", "avg", "max")
592 latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
593 latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
594 if test_type == "NDR":
595 latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
596 latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
597 latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
598 latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
602 def _get_ndrpdr_throughput(self, msg):
603 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
606 :param msg: The test message to be parsed.
608 :returns: Parsed data as a dict and the status (PASS/FAIL).
609 :rtype: tuple(dict, str)
613 "NDR": {"LOWER": -1.0, "UPPER": -1.0},
614 "PDR": {"LOWER": -1.0, "UPPER": -1.0}
617 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
619 if groups is not None:
621 throughput["NDR"]["LOWER"] = float(groups.group(1))
622 throughput["NDR"]["UPPER"] = float(groups.group(2))
623 throughput["PDR"]["LOWER"] = float(groups.group(3))
624 throughput["PDR"]["UPPER"] = float(groups.group(4))
626 except (IndexError, ValueError):
629 return throughput, status
631 def _get_plr_throughput(self, msg):
632 """Get PLRsearch lower bound and PLRsearch upper bound from the test
635 :param msg: The test message to be parsed.
637 :returns: Parsed data as a dict and the status (PASS/FAIL).
638 :rtype: tuple(dict, str)
646 groups = re.search(self.REGEX_PLR_RATE, msg)
648 if groups is not None:
650 throughput["LOWER"] = float(groups.group(1))
651 throughput["UPPER"] = float(groups.group(2))
653 except (IndexError, ValueError):
656 return throughput, status
658 def _get_ndrpdr_latency(self, msg):
659 """Get LATENCY from the test message.
661 :param msg: The test message to be parsed.
663 :returns: Parsed data as a dict and the status (PASS/FAIL).
664 :rtype: tuple(dict, str)
669 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
670 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
673 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
674 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
678 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
680 if groups is not None:
681 keys = ("min", "avg", "max")
683 latency["NDR"]["direction1"] = dict(
684 zip(keys, [float(l) for l in groups.group(1).split('/')]))
685 latency["NDR"]["direction2"] = dict(
686 zip(keys, [float(l) for l in groups.group(2).split('/')]))
687 latency["PDR"]["direction1"] = dict(
688 zip(keys, [float(l) for l in groups.group(3).split('/')]))
689 latency["PDR"]["direction2"] = dict(
690 zip(keys, [float(l) for l in groups.group(4).split('/')]))
692 except (IndexError, ValueError):
695 return latency, status
697 def visit_suite(self, suite):
698 """Implements traversing through the suite and its direct children.
700 :param suite: Suite to process.
704 if self.start_suite(suite) is not False:
705 suite.suites.visit(self)
706 suite.tests.visit(self)
707 self.end_suite(suite)
709 def start_suite(self, suite):
710 """Called when suite starts.
712 :param suite: Suite to process.
718 parent_name = suite.parent.name
719 except AttributeError:
722 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
723 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
724 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
726 self._data["suites"][suite.longname.lower().replace('"', "'").
727 replace(" ", "_")] = {
728 "name": suite.name.lower(),
730 "parent": parent_name,
731 "level": len(suite.longname.split("."))
734 suite.keywords.visit(self)
736 def end_suite(self, suite):
737 """Called when suite ends.
739 :param suite: Suite to process.
745 def visit_test(self, test):
746 """Implements traversing through the test.
748 :param test: Test to process.
752 if self.start_test(test) is not False:
753 test.keywords.visit(self)
756 def start_test(self, test):
757 """Called when test starts.
759 :param test: Test to process.
764 longname_orig = test.longname.lower()
766 # Check the ignore list
767 if longname_orig in self._ignore:
770 tags = [str(tag) for tag in test.tags]
773 # Change the TC long name and name if defined in the mapping table
774 longname = self._mapping.get(longname_orig, None)
775 if longname is not None:
776 name = longname.split('.')[-1]
777 logging.debug("{0}\n{1}\n{2}\n{3}".format(
778 self._data["metadata"], longname_orig, longname, name))
780 longname = longname_orig
781 name = test.name.lower()
783 # Remove TC number from the TC long name (backward compatibility):
784 self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
785 # Remove TC number from the TC name (not needed):
786 test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
788 test_result["parent"] = test.parent.name.lower()
789 test_result["tags"] = tags
790 doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
791 replace('\r', '').replace('[', ' |br| [')
792 test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
793 test_result["msg"] = test.message.replace('\n', ' |br| '). \
794 replace('\r', '').replace('"', "'")
795 test_result["type"] = "FUNC"
796 test_result["status"] = test.status
798 if "PERFTEST" in tags:
799 # Replace info about cores (e.g. -1c-) with the info about threads
800 # and cores (e.g. -1t1c-) in the long test case names and in the
801 # test case names if necessary.
802 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
805 for tag in test_result["tags"]:
806 groups = re.search(self.REGEX_TC_TAG, tag)
812 self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
813 "-{0}-".format(tag_tc.lower()),
816 test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
817 "-{0}-".format(tag_tc.lower()),
821 test_result["status"] = "FAIL"
822 self._data["tests"][self._test_ID] = test_result
823 logging.debug("The test '{0}' has no or more than one "
824 "multi-threading tags.".format(self._test_ID))
825 logging.debug("Tags: {0}".format(test_result["tags"]))
828 if test.status == "PASS" and ("NDRPDRDISC" in tags or
834 # TODO: Remove when definitely no NDRPDRDISC tests are used:
835 if "NDRDISC" in tags:
836 test_result["type"] = "NDR"
837 # TODO: Remove when definitely no NDRPDRDISC tests are used:
838 elif "PDRDISC" in tags:
839 test_result["type"] = "PDR"
840 elif "NDRPDR" in tags:
841 test_result["type"] = "NDRPDR"
843 test_result["type"] = "SOAK"
845 test_result["type"] = "TCP"
847 test_result["type"] = "MRR"
848 elif "FRMOBL" in tags or "BMRR" in tags:
849 test_result["type"] = "BMRR"
851 test_result["status"] = "FAIL"
852 self._data["tests"][self._test_ID] = test_result
855 # TODO: Remove when definitely no NDRPDRDISC tests are used:
856 if test_result["type"] in ("NDR", "PDR"):
858 rate_value = str(re.search(
859 self.REGEX_RATE, test.message).group(1))
860 except AttributeError:
863 rate_unit = str(re.search(
864 self.REGEX_RATE, test.message).group(2))
865 except AttributeError:
868 test_result["throughput"] = dict()
869 test_result["throughput"]["value"] = \
870 int(rate_value.split('.')[0])
871 test_result["throughput"]["unit"] = rate_unit
872 test_result["latency"] = \
873 self._get_latency(test.message, test_result["type"])
874 if test_result["type"] == "PDR":
875 test_result["lossTolerance"] = str(re.search(
876 self.REGEX_TOLERANCE, test.message).group(1))
878 elif test_result["type"] in ("NDRPDR", ):
879 test_result["throughput"], test_result["status"] = \
880 self._get_ndrpdr_throughput(test.message)
881 test_result["latency"], test_result["status"] = \
882 self._get_ndrpdr_latency(test.message)
884 elif test_result["type"] in ("SOAK", ):
885 test_result["throughput"], test_result["status"] = \
886 self._get_plr_throughput(test.message)
888 elif test_result["type"] in ("TCP", ):
889 groups = re.search(self.REGEX_TCP, test.message)
890 test_result["result"] = int(groups.group(2))
892 elif test_result["type"] in ("MRR", "BMRR"):
893 test_result["result"] = dict()
894 groups = re.search(self.REGEX_BMRR, test.message)
895 if groups is not None:
896 items_str = groups.group(1)
897 items_float = [float(item.strip()) for item
898 in items_str.split(",")]
899 metadata = AvgStdevMetadataFactory.from_data(items_float)
900 # Next two lines have been introduced in CSIT-1179,
901 # to be removed in CSIT-1180.
904 test_result["result"]["receive-rate"] = metadata
906 groups = re.search(self.REGEX_MRR, test.message)
907 test_result["result"]["receive-rate"] = \
908 AvgStdevMetadataFactory.from_data([
909 float(groups.group(3)) / float(groups.group(1)), ])
911 self._data["tests"][self._test_ID] = test_result
913 def end_test(self, test):
914 """Called when test ends.
916 :param test: Test to process.
922 def visit_keyword(self, keyword):
923 """Implements traversing through the keyword and its child keywords.
925 :param keyword: Keyword to process.
926 :type keyword: Keyword
929 if self.start_keyword(keyword) is not False:
930 self.end_keyword(keyword)
932 def start_keyword(self, keyword):
933 """Called when keyword starts. Default implementation does nothing.
935 :param keyword: Keyword to process.
936 :type keyword: Keyword
940 if keyword.type == "setup":
941 self.visit_setup_kw(keyword)
942 elif keyword.type == "teardown":
943 self._lookup_kw_nr = 0
944 self.visit_teardown_kw(keyword)
946 self._lookup_kw_nr = 0
947 self.visit_test_kw(keyword)
948 except AttributeError:
951 def end_keyword(self, keyword):
952 """Called when keyword ends. Default implementation does nothing.
954 :param keyword: Keyword to process.
955 :type keyword: Keyword
960 def visit_test_kw(self, test_kw):
961 """Implements traversing through the test keyword and its child
964 :param test_kw: Keyword to process.
965 :type test_kw: Keyword
968 for keyword in test_kw.keywords:
969 if self.start_test_kw(keyword) is not False:
970 self.visit_test_kw(keyword)
971 self.end_test_kw(keyword)
973 def start_test_kw(self, test_kw):
974 """Called when test keyword starts. Default implementation does
977 :param test_kw: Keyword to process.
978 :type test_kw: Keyword
981 if test_kw.name.count("Show Runtime Counters On All Duts"):
982 self._lookup_kw_nr += 1
983 self._show_run_lookup_nr = 0
984 self._msg_type = "test-show-runtime"
985 elif test_kw.name.count("Install Dpdk Test") and not self._version:
986 self._msg_type = "dpdk-version"
989 test_kw.messages.visit(self)
991 def end_test_kw(self, test_kw):
992 """Called when keyword ends. Default implementation does nothing.
994 :param test_kw: Keyword to process.
995 :type test_kw: Keyword
1000 def visit_setup_kw(self, setup_kw):
1001 """Implements traversing through the teardown keyword and its child
1004 :param setup_kw: Keyword to process.
1005 :type setup_kw: Keyword
1008 for keyword in setup_kw.keywords:
1009 if self.start_setup_kw(keyword) is not False:
1010 self.visit_setup_kw(keyword)
1011 self.end_setup_kw(keyword)
1013 def start_setup_kw(self, setup_kw):
1014 """Called when teardown keyword starts. Default implementation does
1017 :param setup_kw: Keyword to process.
1018 :type setup_kw: Keyword
1021 if setup_kw.name.count("Show Vpp Version On All Duts") \
1022 and not self._version:
1023 self._msg_type = "vpp-version"
1024 elif setup_kw.name.count("Set Global Variable") \
1025 and not self._timestamp:
1026 self._msg_type = "timestamp"
1027 elif setup_kw.name.count("Setup Framework") and not self._testbed:
1028 self._msg_type = "testbed"
1031 setup_kw.messages.visit(self)
1033 def end_setup_kw(self, setup_kw):
1034 """Called when keyword ends. Default implementation does nothing.
1036 :param setup_kw: Keyword to process.
1037 :type setup_kw: Keyword
1042 def visit_teardown_kw(self, teardown_kw):
1043 """Implements traversing through the teardown keyword and its child
1046 :param teardown_kw: Keyword to process.
1047 :type teardown_kw: Keyword
1050 for keyword in teardown_kw.keywords:
1051 if self.start_teardown_kw(keyword) is not False:
1052 self.visit_teardown_kw(keyword)
1053 self.end_teardown_kw(keyword)
1055 def start_teardown_kw(self, teardown_kw):
1056 """Called when teardown keyword starts. Default implementation does
1059 :param teardown_kw: Keyword to process.
1060 :type teardown_kw: Keyword
1064 if teardown_kw.name.count("Show Vat History On All Duts"):
1065 self._conf_history_lookup_nr = 0
1066 self._msg_type = "teardown-vat-history"
1067 teardown_kw.messages.visit(self)
1068 elif teardown_kw.name.count("Show Papi History On All Duts"):
1069 self._conf_history_lookup_nr = 0
1070 self._msg_type = "teardown-papi-history"
1071 teardown_kw.messages.visit(self)
1073 def end_teardown_kw(self, teardown_kw):
1074 """Called when keyword ends. Default implementation does nothing.
1076 :param teardown_kw: Keyword to process.
1077 :type teardown_kw: Keyword
1082 def visit_message(self, msg):
1083 """Implements visiting the message.
1085 :param msg: Message to process.
1089 if self.start_message(msg) is not False:
1090 self.end_message(msg)
1092 def start_message(self, msg):
1093 """Called when message starts. Get required information from messages:
1096 :param msg: Message to process.
1102 self.parse_msg[self._msg_type](msg)
1104 def end_message(self, msg):
1105 """Called when message ends. Default implementation does nothing.
1107 :param msg: Message to process.
1114 class InputData(object):
1117 The data is extracted from output.xml files generated by Jenkins jobs and
1118 stored in pandas' DataFrames.
1124 (as described in ExecutionChecker documentation)
1126 (as described in ExecutionChecker documentation)
1128 (as described in ExecutionChecker documentation)
1131 def __init__(self, spec):
1134 :param spec: Specification.
1135 :type spec: Specification
1142 self._input_data = pd.Series()
1146 """Getter - Input data.
1148 :returns: Input data
1149 :rtype: pandas.Series
1151 return self._input_data
1153 def metadata(self, job, build):
1154 """Getter - metadata
1156 :param job: Job which metadata we want.
1157 :param build: Build which metadata we want.
1161 :rtype: pandas.Series
1164 return self.data[job][build]["metadata"]
1166 def suites(self, job, build):
1169 :param job: Job which suites we want.
1170 :param build: Build which suites we want.
1174 :rtype: pandas.Series
1177 return self.data[job][str(build)]["suites"]
1179 def tests(self, job, build):
1182 :param job: Job which tests we want.
1183 :param build: Build which tests we want.
1187 :rtype: pandas.Series
1190 return self.data[job][build]["tests"]
1192 def _parse_tests(self, job, build, log):
1193 """Process data from robot output.xml file and return JSON structured
1196 :param job: The name of job which build output data will be processed.
1197 :param build: The build which output data will be processed.
1198 :param log: List of log messages.
1201 :type log: list of tuples (severity, msg)
1202 :returns: JSON data structure.
1211 with open(build["file-name"], 'r') as data_file:
1213 result = ExecutionResult(data_file)
1214 except errors.DataError as err:
1215 log.append(("ERROR", "Error occurred while parsing output.xml: "
1218 checker = ExecutionChecker(metadata, self._cfg.mapping,
1220 result.visit(checker)
1224 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1225 """Download and parse the input data file.
1227 :param pid: PID of the process executing this method.
1228 :param job: Name of the Jenkins job which generated the processed input
1230 :param build: Information about the Jenkins build which generated the
1231 processed input file.
1232 :param repeat: Repeat the download specified number of times if not
1242 logs.append(("INFO", " Processing the job/build: {0}: {1}".
1243 format(job, build["build"])))
1250 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1256 logs.append(("ERROR", "It is not possible to download the input "
1257 "data file from the job '{job}', build "
1258 "'{build}', or it is damaged. Skipped.".
1259 format(job=job, build=build["build"])))
1261 logs.append(("INFO", " Processing data from the build '{0}' ...".
1262 format(build["build"])))
1263 data = self._parse_tests(job, build, logs)
1265 logs.append(("ERROR", "Input data file from the job '{job}', "
1266 "build '{build}' is damaged. Skipped.".
1267 format(job=job, build=build["build"])))
1272 remove(build["file-name"])
1273 except OSError as err:
1274 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
1275 format(build["file-name"], repr(err))))
1277 # If the time-period is defined in the specification file, remove all
1278 # files which are outside the time period.
1279 timeperiod = self._cfg.input.get("time-period", None)
1280 if timeperiod and data:
1282 timeperiod = timedelta(int(timeperiod))
1283 metadata = data.get("metadata", None)
1285 generated = metadata.get("generated", None)
1287 generated = dt.strptime(generated, "%Y%m%d %H:%M")
1288 if (now - generated) > timeperiod:
1289 # Remove the data and the file:
1294 " The build {job}/{build} is outdated, will be "
1295 "removed".format(job=job, build=build["build"])))
1296 file_name = self._cfg.input["file-name"]
1298 self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
1299 "{job}{sep}{build}{sep}{name}".format(
1302 build=build["build"],
1306 logs.append(("INFO",
1307 " The file {name} has been removed".
1308 format(name=full_name)))
1309 except OSError as err:
1310 logs.append(("ERROR",
1311 "Cannot remove the file '{0}': {1}".
1312 format(full_name, repr(err))))
1313 logs.append(("INFO", " Done."))
1315 for level, line in logs:
1318 elif level == "ERROR":
1320 elif level == "DEBUG":
1322 elif level == "CRITICAL":
1323 logging.critical(line)
1324 elif level == "WARNING":
1325 logging.warning(line)
1327 return {"data": data, "state": state, "job": job, "build": build}
1329 def download_and_parse_data(self, repeat=1):
1330 """Download the input data files, parse input data from input files and
1331 store in pandas' Series.
1333 :param repeat: Repeat the download specified number of times if not
1338 logging.info("Downloading and parsing input files ...")
1340 for job, builds in self._cfg.builds.items():
1341 for build in builds:
1343 result = self._download_and_parse_build(job, build, repeat)
1344 build_nr = result["build"]["build"]
1347 data = result["data"]
1348 build_data = pd.Series({
1349 "metadata": pd.Series(
1350 data["metadata"].values(),
1351 index=data["metadata"].keys()),
1352 "suites": pd.Series(data["suites"].values(),
1353 index=data["suites"].keys()),
1354 "tests": pd.Series(data["tests"].values(),
1355 index=data["tests"].keys())})
1357 if self._input_data.get(job, None) is None:
1358 self._input_data[job] = pd.Series()
1359 self._input_data[job][str(build_nr)] = build_data
1361 self._cfg.set_input_file_name(
1362 job, build_nr, result["build"]["file-name"])
1364 self._cfg.set_input_state(job, build_nr, result["state"])
1366 logging.info("Memory allocation: {0:,d}MB".format(
1367 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
1369 logging.info("Done.")
1372 def _end_of_tag(tag_filter, start=0, closer="'"):
1373 """Return the index of character in the string which is the end of tag.
1375 :param tag_filter: The string where the end of tag is being searched.
1376 :param start: The index where the searching is stated.
1377 :param closer: The character which is the tag closer.
1378 :type tag_filter: str
1381 :returns: The index of the tag closer.
1386 idx_opener = tag_filter.index(closer, start)
1387 return tag_filter.index(closer, idx_opener + 1)
1392 def _condition(tag_filter):
1393 """Create a conditional statement from the given tag filter.
1395 :param tag_filter: Filter based on tags from the element specification.
1396 :type tag_filter: str
1397 :returns: Conditional statement which can be evaluated.
1403 index = InputData._end_of_tag(tag_filter, index)
1407 tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
1409 def filter_data(self, element, params=None, data_set="tests",
1410 continue_on_error=False):
1411 """Filter required data from the given jobs and builds.
1413 The output data structure is:
1417 - test (or suite) 1 ID:
1423 - test (or suite) n ID:
1430 :param element: Element which will use the filtered data.
1431 :param params: Parameters which will be included in the output. If None,
1432 all parameters are included.
1433 :param data_set: The set of data to be filtered: tests, suites,
1435 :param continue_on_error: Continue if there is error while reading the
1436 data. The Item will be empty then
1437 :type element: pandas.Series
1440 :type continue_on_error: bool
1441 :returns: Filtered data.
1442 :rtype pandas.Series
1446 if element["filter"] in ("all", "template"):
1449 cond = InputData._condition(element["filter"])
1450 logging.debug(" Filter: {0}".format(cond))
1452 logging.error(" No filter defined.")
1456 params = element.get("parameters", None)
1458 params.append("type")
1462 for job, builds in element["data"].items():
1463 data[job] = pd.Series()
1464 for build in builds:
1465 data[job][str(build)] = pd.Series()
1467 data_iter = self.data[job][str(build)][data_set].\
1470 if continue_on_error:
1474 for test_ID, test_data in data_iter:
1475 if eval(cond, {"tags": test_data.get("tags", "")}):
1476 data[job][str(build)][test_ID] = pd.Series()
1478 for param, val in test_data.items():
1479 data[job][str(build)][test_ID][param] = val
1481 for param in params:
1483 data[job][str(build)][test_ID][param] =\
1486 data[job][str(build)][test_ID][param] =\
1490 except (KeyError, IndexError, ValueError) as err:
1491 logging.error(" Missing mandatory parameter in the element "
1492 "specification: {0}".format(err))
1494 except AttributeError:
1497 logging.error(" The filter '{0}' is not correct. Check if all "
1498 "tags are enclosed by apostrophes.".format(cond))
1501 def filter_tests_by_name(self, element, params=None, data_set="tests",
1502 continue_on_error=False):
1503 """Filter required data from the given jobs and builds.
1505 The output data structure is:
1509 - test (or suite) 1 ID:
1515 - test (or suite) n ID:
1522 :param element: Element which will use the filtered data.
1523 :param params: Parameters which will be included in the output. If None,
1524 all parameters are included.
1525 :param data_set: The set of data to be filtered: tests, suites,
1527 :param continue_on_error: Continue if there is error while reading the
1528 data. The Item will be empty then
1529 :type element: pandas.Series
1532 :type continue_on_error: bool
1533 :returns: Filtered data.
1534 :rtype pandas.Series
1537 include = element.get("include", None)
1539 logging.warning("No tests to include, skipping the element.")
1543 params = element.get("parameters", None)
1545 params.append("type")
1549 for job, builds in element["data"].items():
1550 data[job] = pd.Series()
1551 for build in builds:
1552 data[job][str(build)] = pd.Series()
1553 for test in include:
1555 reg_ex = re.compile(str(test).lower())
1556 for test_ID in self.data[job][str(build)]\
1558 if re.match(reg_ex, str(test_ID).lower()):
1559 test_data = self.data[job][str(build)]\
1561 data[job][str(build)][test_ID] = pd.Series()
1563 for param, val in test_data.items():
1564 data[job][str(build)][test_ID]\
1567 for param in params:
1569 data[job][str(build)][test_ID]\
1570 [param] = test_data[param]
1572 data[job][str(build)][test_ID]\
1574 except KeyError as err:
1575 logging.error("{err!r}".format(err=err))
1576 if continue_on_error:
1582 except (KeyError, IndexError, ValueError) as err:
1583 logging.error("Missing mandatory parameter in the element "
1584 "specification: {err!r}".format(err=err))
1586 except AttributeError as err:
1587 logging.error("{err!r}".format(err=err))
1592 def merge_data(data):
1593 """Merge data from more jobs and builds to a simple data structure.
1595 The output data structure is:
1597 - test (suite) 1 ID:
1603 - test (suite) n ID:
1606 :param data: Data to merge.
1607 :type data: pandas.Series
1608 :returns: Merged data.
1609 :rtype: pandas.Series
1612 logging.info(" Merging data ...")
1614 merged_data = pd.Series()
1615 for _, builds in data.iteritems():
1616 for _, item in builds.iteritems():
1617 for ID, item_data in item.iteritems():
1618 merged_data[ID] = item_data