1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
29 from robot.api import ExecutionResult, ResultVisitor
30 from robot import errors
31 from collections import OrderedDict
32 from string import replace
34 from datetime import datetime as dt
35 from datetime import timedelta
36 from json import loads
37 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
39 from input_data_files import download_and_unzip_data_file
42 # Separator used in file names
46 class ExecutionChecker(ResultVisitor):
47 """Class to traverse through the test suite structure.
49 The functionality implemented in this class generates a json structure:
55 "generated": "Timestamp",
56 "version": "SUT version",
57 "job": "Jenkins job name",
58 "build": "Information about the build"
61 "Suite long name 1": {
63 "doc": "Suite 1 documentation",
64 "parent": "Suite 1 parent",
65 "level": "Level of the suite in the suite hierarchy"
67 "Suite long name N": {
69 "doc": "Suite N documentation",
70 "parent": "Suite 2 parent",
71 "level": "Level of the suite in the suite hierarchy"
78 "parent": "Name of the parent of the test",
79 "doc": "Test documentation",
80 "msg": "Test message",
81 "conf-history": "DUT1 and DUT2 VAT History",
82 "show-run": "Show Run",
83 "tags": ["tag 1", "tag 2", "tag n"],
85 "status": "PASS" | "FAIL",
131 "parent": "Name of the parent of the test",
132 "doc": "Test documentation",
133 "msg": "Test message",
134 "tags": ["tag 1", "tag 2", "tag n"],
136 "status": "PASS" | "FAIL",
143 "parent": "Name of the parent of the test",
144 "doc": "Test documentation",
145 "msg": "Test message",
146 "tags": ["tag 1", "tag 2", "tag n"],
147 "type": "MRR" | "BMRR",
148 "status": "PASS" | "FAIL",
150 "receive-rate": AvgStdevMetadata,
164 "metadata": { # Optional
165 "version": "VPP version",
166 "job": "Jenkins job name",
167 "build": "Information about the build"
171 "doc": "Suite 1 documentation",
172 "parent": "Suite 1 parent",
173 "level": "Level of the suite in the suite hierarchy"
176 "doc": "Suite N documentation",
177 "parent": "Suite 2 parent",
178 "level": "Level of the suite in the suite hierarchy"
184 "parent": "Name of the parent of the test",
185 "doc": "Test documentation"
186 "msg": "Test message"
187 "tags": ["tag 1", "tag 2", "tag n"],
188 "conf-history": "DUT1 and DUT2 VAT History"
189 "show-run": "Show Run"
190 "status": "PASS" | "FAIL"
198 .. note:: ID is the lowercase full path to the test.
    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    # PLRsearch (soak test) lower and upper bound lines of the test message.
    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::?\s(\d+.\d+)')

    # NDR/PDR throughput bounds reported by the NDRPDR search.
    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # Two LATENCY lines, each with a ['direction1', 'direction2'] pair.
    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    # Loss tolerance of PDR tests.
    # NOTE(review): the continuation line of this pattern is missing from
    # this excerpt.
    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'

    # VPP version as reported in the test log (several known prefixes).
    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    # DPDK version as reported in the test log.
    REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")

    # Result line of TCP (HTTP server performance) tests.
    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    # Single-trial MRR result: trial interval, tx and rx packet counts.
    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    # BMRR trial result list in packets per second.
    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    # Reconfiguration test results: packets lost and implied time lost.
    REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
    REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.\d*)')

    # "2t4c"-style tag identifying threads/cores.
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    # Old-style ("-2t4c-") and new-style ("-4c-") test-name fragments.
    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    # Leading "tcNN-" test-case number.
    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            the generated JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (currently used) names.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # NOTE(review): several source lines of this excerpt are missing
        # (e.g. some attribute initialisations and the opening lines of the
        # self._data and parse_msg dict literals); comments below annotate
        # only the visible code.

        # Type of message to parse out from the test messages
        self._msg_type = None

        # Timestamp of the processed build
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.

        # Mapping of TCs long names
        self._mapping = mapping

        # List of test IDs to skip entirely
        self._ignore = ignore

        # Number of VAT History messages found:
        # 1 - VAT History of DUT1
        # 2 - VAT History of DUT2
        self._lookup_kw_nr = 0
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of currently processed test- the lowercase full path to the

        # The main data structure
        # NOTE(review): the opening "self._data = {" of this literal is not
        # visible in this excerpt.
            "metadata": OrderedDict(),
            "suites": OrderedDict(),
            "tests": OrderedDict()

        # Save the provided metadata
        for key, val in metadata.items():
            self._data["metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages; keys are the values taken by self._msg_type.
        # NOTE(review): the opening "self.parse_msg = {" is not visible here.
            "timestamp": self._get_timestamp,
            "vpp-version": self._get_vpp_version,
            "dpdk-version": self._get_dpdk_version,
            "teardown-vat-history": self._get_vat_history,
            "teardown-papi-history": self._get_papi_history,
            "test-show-runtime": self._get_show_run,
            "testbed": self._get_testbed
314 """Getter - Data parsed from the XML file.
316 :returns: Data parsed from the XML file.
321 def _get_testbed(self, msg):
322 """Called when extraction of testbed IP is required.
323 The testbed is identified by TG node IP address.
325 :param msg: Message to process.
330 if msg.message.count("Setup of TG node"):
331 reg_tg_ip = re.compile(
332 r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
334 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
335 except (KeyError, ValueError, IndexError, AttributeError):
338 self._data["metadata"]["testbed"] = self._testbed
339 self._msg_type = None
341 def _get_vpp_version(self, msg):
342 """Called when extraction of VPP version is required.
344 :param msg: Message to process.
349 if msg.message.count("return STDOUT Version:") or \
350 msg.message.count("VPP Version:") or \
351 msg.message.count("VPP version:"):
352 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
354 self._data["metadata"]["version"] = self._version
355 self._msg_type = None
357 def _get_dpdk_version(self, msg):
358 """Called when extraction of DPDK version is required.
360 :param msg: Message to process.
365 if msg.message.count("DPDK Version:"):
367 self._version = str(re.search(
368 self.REGEX_VERSION_DPDK, msg.message). group(2))
369 self._data["metadata"]["version"] = self._version
373 self._msg_type = None
375 def _get_timestamp(self, msg):
376 """Called when extraction of timestamp is required.
378 :param msg: Message to process.
383 self._timestamp = msg.timestamp[:14]
384 self._data["metadata"]["generated"] = self._timestamp
385 self._msg_type = None
387 def _get_vat_history(self, msg):
388 """Called when extraction of VAT command history is required.
390 :param msg: Message to process.
394 if msg.message.count("VAT command history:"):
395 self._conf_history_lookup_nr += 1
396 if self._conf_history_lookup_nr == 1:
397 self._data["tests"][self._test_ID]["conf-history"] = str()
399 self._msg_type = None
400 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
401 "VAT command history:", "", msg.message, count=1). \
402 replace("\n\n", "\n").replace('\n', ' |br| ').\
403 replace('\r', '').replace('"', "'")
405 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
406 self._data["tests"][self._test_ID]["conf-history"] += \
407 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
409 def _get_papi_history(self, msg):
410 """Called when extraction of PAPI command history is required.
412 :param msg: Message to process.
416 if msg.message.count("PAPI command history:"):
417 self._conf_history_lookup_nr += 1
418 if self._conf_history_lookup_nr == 1:
419 self._data["tests"][self._test_ID]["conf-history"] = str()
421 self._msg_type = None
422 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
423 "PAPI command history:", "", msg.message, count=1). \
424 replace("\n\n", "\n").replace('\n', ' |br| ').\
425 replace('\r', '').replace('"', "'")
427 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
428 self._data["tests"][self._test_ID]["conf-history"] += \
429 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # NOTE(review): this excerpt is missing several source lines (the
        # try/except around the JSON load, the per-item row construction and
        # the initialisation of "text"/"txt_table"); comments annotate only
        # the visible code.
        if msg.message.count("Runtime:"):
            self._show_run_lookup_nr += 1
            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
                # First "Show Runtime" of the test: start a fresh string.
                self._data["tests"][self._test_ID]["show-run"] = str()
            if self._lookup_kw_nr > 1:
                # Only the first "Show Runtime" keyword is processed.
                self._msg_type = None
            if self._show_run_lookup_nr > 0:
                # Normalise the message into JSON text; [8:] strips the
                # leading "Runtime:" (8 chars once spaces are removed).
                message = str(msg.message).replace(' ', '').replace('\n', '').\
                    replace("'", '"').replace('b"', '"').replace('u"', '"')[8:]
                runtime = loads(message)
                    threads_nr = len(runtime[0]["clocks"])
                except (IndexError, KeyError):
                tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks"]
                # One table per thread, each seeded with the header row.
                table = [[tbl_hdr, ] for _ in range(threads_nr)]
                for idx in range(threads_nr):
                        item["vectors"][idx],
                        item["suspends"][idx],
                for idx in range(threads_nr):
                    text += "Thread {idx} ".format(idx=idx)
                    # Thread 0 is vpp_main; workers are vpp_wk_0, vpp_wk_1...
                    text += "vpp_main\n" if idx == 0 else \
                        "vpp_wk_{idx}\n".format(idx=idx-1)
                    for row in table[idx]:
                        if txt_table is None:
                            txt_table = prettytable.PrettyTable(row)
                            txt_table.add_row(row)
                    txt_table.set_style(prettytable.MSWORD_FRIENDLY)
                    txt_table.align["Name"] = "l"
                    txt_table.align["Calls"] = "r"
                    txt_table.align["Vectors"] = "r"
                    txt_table.align["Suspends"] = "r"
                    txt_table.align["Clocks"] = "r"
                    text += txt_table.get_string(sortby="Name") + '\n'
                text = text.replace('\n', ' |br| ').replace('\r', '').\
            self._data["tests"][self._test_ID]["show-run"] += " |br| "
            self._data["tests"][self._test_ID]["show-run"] += \
                "**DUT" + str(self._show_run_lookup_nr) + ":** |br| " \
495 def _get_ndrpdr_throughput(self, msg):
496 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
499 :param msg: The test message to be parsed.
501 :returns: Parsed data as a dict and the status (PASS/FAIL).
502 :rtype: tuple(dict, str)
506 "NDR": {"LOWER": -1.0, "UPPER": -1.0},
507 "PDR": {"LOWER": -1.0, "UPPER": -1.0}
510 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
512 if groups is not None:
514 throughput["NDR"]["LOWER"] = float(groups.group(1))
515 throughput["NDR"]["UPPER"] = float(groups.group(2))
516 throughput["PDR"]["LOWER"] = float(groups.group(3))
517 throughput["PDR"]["UPPER"] = float(groups.group(4))
519 except (IndexError, ValueError):
522 return throughput, status
524 def _get_plr_throughput(self, msg):
525 """Get PLRsearch lower bound and PLRsearch upper bound from the test
528 :param msg: The test message to be parsed.
530 :returns: Parsed data as a dict and the status (PASS/FAIL).
531 :rtype: tuple(dict, str)
539 groups = re.search(self.REGEX_PLR_RATE, msg)
541 if groups is not None:
543 throughput["LOWER"] = float(groups.group(1))
544 throughput["UPPER"] = float(groups.group(2))
546 except (IndexError, ValueError):
549 return throughput, status
551 def _get_ndrpdr_latency(self, msg):
552 """Get LATENCY from the test message.
554 :param msg: The test message to be parsed.
556 :returns: Parsed data as a dict and the status (PASS/FAIL).
557 :rtype: tuple(dict, str)
559 latency_default = {"min": -1.0, "avg": -1.0, "max": -1.0, "hdrh": ""}
562 "direction1": copy.copy(latency_default),
563 "direction2": copy.copy(latency_default)
566 "direction1": copy.copy(latency_default),
567 "direction2": copy.copy(latency_default)
571 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
573 def process_latency(in_str):
574 """Return object with parsed latency values.
576 TODO: Define class for the return type.
578 :param in_str: Input string, min/avg/max/hdrh format.
580 :returns: Dict with corresponding keys, except hdrh float values.
582 :throws IndexError: If in_str does not have enough substrings.
583 :throws ValueError: If a substring does not convert to float.
585 in_list = in_str.split('/')
588 "min": float(in_list[0]),
589 "avg": float(in_list[1]),
590 "max": float(in_list[2]),
594 if len(in_list) == 4:
595 rval["hdrh"] = str(in_list[3])
599 if groups is not None:
601 latency["NDR"]["direction1"] = process_latency(groups.group(1))
602 latency["NDR"]["direction2"] = process_latency(groups.group(2))
603 latency["PDR"]["direction1"] = process_latency(groups.group(3))
604 latency["PDR"]["direction2"] = process_latency(groups.group(4))
606 except (IndexError, ValueError):
609 return latency, status
611 def visit_suite(self, suite):
612 """Implements traversing through the suite and its direct children.
614 :param suite: Suite to process.
618 if self.start_suite(suite) is not False:
619 suite.suites.visit(self)
620 suite.tests.visit(self)
621 self.end_suite(suite)
623 def start_suite(self, suite):
624 """Called when suite starts.
626 :param suite: Suite to process.
632 parent_name = suite.parent.name
633 except AttributeError:
636 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
637 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
638 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
640 self._data["suites"][suite.longname.lower().replace('"', "'").
641 replace(" ", "_")] = {
642 "name": suite.name.lower(),
644 "parent": parent_name,
645 "level": len(suite.longname.split("."))
648 suite.keywords.visit(self)
650 def end_suite(self, suite):
651 """Called when suite ends.
653 :param suite: Suite to process.
659 def visit_test(self, test):
660 """Implements traversing through the test.
662 :param test: Test to process.
666 if self.start_test(test) is not False:
667 test.keywords.visit(self)
    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        # NOTE(review): this excerpt is missing a number of source lines
        # (among others the creation of the "test_result" dict and several
        # try/else/return branches); comments annotate only the visible code.

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:

        tags = [str(tag) for tag in test.tags]

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split('.')[-1]
            logging.debug("{0}\n{1}\n{2}\n{3}".format(
                self._data["metadata"], longname_orig, longname, name))
            # NOTE(review): the "else:" introducing the next two lines is not
            # visible in this excerpt.
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
        # Remove TC number from the TC name (not needed):
        test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result["parent"] = test.parent.name.lower()
        test_result["tags"] = tags
        # Normalise documentation/message text; " |br| " is the report
        # generator's line-break marker.
        doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
            replace('\r', '').replace('[', ' |br| [')
        # Python-2-only string.replace; drops the first inserted marker.
        test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
        test_result["msg"] = test.message.replace('\n', ' |br| '). \
            replace('\r', '').replace('"', "'")
        test_result["type"] = "FUNC"
        test_result["status"] = test.status

        if "PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
            for tag in test_result["tags"]:
                groups = re.search(self.REGEX_TC_TAG, tag)
                self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
                                       "-{0}-".format(tag_tc.lower()),
                test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                             "-{0}-".format(tag_tc.lower()),
                # NOTE(review): failure branch when no single threading tag
                # was found:
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result
                logging.debug("The test '{0}' has no or more than one "
                              "multi-threading tags.".format(self._test_ID))
                logging.debug("Tags: {0}".format(test_result["tags"]))

        if test.status == "PASS" and ("NDRPDRDISC" in tags or
            # Classify the test type from its tags.
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if "NDRDISC" in tags:
                test_result["type"] = "NDR"
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            elif "PDRDISC" in tags:
                test_result["type"] = "PDR"
            elif "NDRPDR" in tags:
                test_result["type"] = "NDRPDR"
                test_result["type"] = "SOAK"
                test_result["type"] = "TCP"
                test_result["type"] = "MRR"
            elif "FRMOBL" in tags or "BMRR" in tags:
                test_result["type"] = "BMRR"
            elif "RECONF" in tags:
                test_result["type"] = "RECONF"
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result

            # Extract the type-specific results from the test message.
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if test_result["type"] in ("NDR", "PDR"):
                    rate_value = str(re.search(
                        self.REGEX_RATE, test.message).group(1))
                except AttributeError:
                    rate_unit = str(re.search(
                        self.REGEX_RATE, test.message).group(2))
                except AttributeError:
                test_result["throughput"] = dict()
                test_result["throughput"]["value"] = \
                    int(rate_value.split('.')[0])
                test_result["throughput"]["unit"] = rate_unit
                test_result["latency"] = \
                    self._get_latency(test.message, test_result["type"])
                if test_result["type"] == "PDR":
                    test_result["lossTolerance"] = str(re.search(
                        self.REGEX_TOLERANCE, test.message).group(1))

            elif test_result["type"] in ("NDRPDR", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result["latency"], test_result["status"] = \
                    self._get_ndrpdr_latency(test.message)

            elif test_result["type"] in ("SOAK", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_plr_throughput(test.message)

            elif test_result["type"] in ("TCP", ):
                groups = re.search(self.REGEX_TCP, test.message)
                test_result["result"] = int(groups.group(2))

            elif test_result["type"] in ("MRR", "BMRR"):
                test_result["result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    metadata = AvgStdevMetadataFactory.from_data(items_float)
                    # Next two lines have been introduced in CSIT-1179,
                    # to be removed in CSIT-1180.
                    test_result["result"]["receive-rate"] = metadata
                    # NOTE(review): the "else:" MRR fallback introducing the
                    # next lines is not visible in this excerpt.
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result["result"]["receive-rate"] = \
                        AvgStdevMetadataFactory.from_data([
                            float(groups.group(3)) / float(groups.group(1)), ])

            elif test_result["type"] == "RECONF":
                test_result["result"] = None
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result["result"] = {
                        "loss": int(grps_loss.group(1)),
                        "time": float(grps_time.group(1))
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result["status"] = "FAIL"

        self._data["tests"][self._test_ID] = test_result
843 def end_test(self, test):
844 """Called when test ends.
846 :param test: Test to process.
852 def visit_keyword(self, keyword):
853 """Implements traversing through the keyword and its child keywords.
855 :param keyword: Keyword to process.
856 :type keyword: Keyword
859 if self.start_keyword(keyword) is not False:
860 self.end_keyword(keyword)
862 def start_keyword(self, keyword):
863 """Called when keyword starts. Default implementation does nothing.
865 :param keyword: Keyword to process.
866 :type keyword: Keyword
870 if keyword.type == "setup":
871 self.visit_setup_kw(keyword)
872 elif keyword.type == "teardown":
873 self._lookup_kw_nr = 0
874 self.visit_teardown_kw(keyword)
876 self._lookup_kw_nr = 0
877 self.visit_test_kw(keyword)
878 except AttributeError:
881 def end_keyword(self, keyword):
882 """Called when keyword ends. Default implementation does nothing.
884 :param keyword: Keyword to process.
885 :type keyword: Keyword
890 def visit_test_kw(self, test_kw):
891 """Implements traversing through the test keyword and its child
894 :param test_kw: Keyword to process.
895 :type test_kw: Keyword
898 for keyword in test_kw.keywords:
899 if self.start_test_kw(keyword) is not False:
900 self.visit_test_kw(keyword)
901 self.end_test_kw(keyword)
903 def start_test_kw(self, test_kw):
904 """Called when test keyword starts. Default implementation does
907 :param test_kw: Keyword to process.
908 :type test_kw: Keyword
911 if test_kw.name.count("Show Runtime Counters On All Duts"):
912 self._lookup_kw_nr += 1
913 self._show_run_lookup_nr = 0
914 self._msg_type = "test-show-runtime"
915 elif test_kw.name.count("Install Dpdk Test") and not self._version:
916 self._msg_type = "dpdk-version"
919 test_kw.messages.visit(self)
921 def end_test_kw(self, test_kw):
922 """Called when keyword ends. Default implementation does nothing.
924 :param test_kw: Keyword to process.
925 :type test_kw: Keyword
930 def visit_setup_kw(self, setup_kw):
931 """Implements traversing through the teardown keyword and its child
934 :param setup_kw: Keyword to process.
935 :type setup_kw: Keyword
938 for keyword in setup_kw.keywords:
939 if self.start_setup_kw(keyword) is not False:
940 self.visit_setup_kw(keyword)
941 self.end_setup_kw(keyword)
943 def start_setup_kw(self, setup_kw):
944 """Called when teardown keyword starts. Default implementation does
947 :param setup_kw: Keyword to process.
948 :type setup_kw: Keyword
951 if setup_kw.name.count("Show Vpp Version On All Duts") \
952 and not self._version:
953 self._msg_type = "vpp-version"
954 elif setup_kw.name.count("Set Global Variable") \
955 and not self._timestamp:
956 self._msg_type = "timestamp"
957 elif setup_kw.name.count("Setup Framework") and not self._testbed:
958 self._msg_type = "testbed"
961 setup_kw.messages.visit(self)
963 def end_setup_kw(self, setup_kw):
964 """Called when keyword ends. Default implementation does nothing.
966 :param setup_kw: Keyword to process.
967 :type setup_kw: Keyword
972 def visit_teardown_kw(self, teardown_kw):
973 """Implements traversing through the teardown keyword and its child
976 :param teardown_kw: Keyword to process.
977 :type teardown_kw: Keyword
980 for keyword in teardown_kw.keywords:
981 if self.start_teardown_kw(keyword) is not False:
982 self.visit_teardown_kw(keyword)
983 self.end_teardown_kw(keyword)
985 def start_teardown_kw(self, teardown_kw):
986 """Called when teardown keyword starts. Default implementation does
989 :param teardown_kw: Keyword to process.
990 :type teardown_kw: Keyword
994 if teardown_kw.name.count("Show Vat History On All Duts"):
995 self._conf_history_lookup_nr = 0
996 self._msg_type = "teardown-vat-history"
997 teardown_kw.messages.visit(self)
998 elif teardown_kw.name.count("Show Papi History On All Duts"):
999 self._conf_history_lookup_nr = 0
1000 self._msg_type = "teardown-papi-history"
1001 teardown_kw.messages.visit(self)
1003 def end_teardown_kw(self, teardown_kw):
1004 """Called when keyword ends. Default implementation does nothing.
1006 :param teardown_kw: Keyword to process.
1007 :type teardown_kw: Keyword
1012 def visit_message(self, msg):
1013 """Implements visiting the message.
1015 :param msg: Message to process.
1019 if self.start_message(msg) is not False:
1020 self.end_message(msg)
1022 def start_message(self, msg):
1023 """Called when message starts. Get required information from messages:
1026 :param msg: Message to process.
1032 self.parse_msg[self._msg_type](msg)
1034 def end_message(self, msg):
1035 """Called when message ends. Default implementation does nothing.
1037 :param msg: Message to process.
1044 class InputData(object):
1047 The data is extracted from output.xml files generated by Jenkins jobs and
1048 stored in pandas' DataFrames.
1054 (as described in ExecutionChecker documentation)
1056 (as described in ExecutionChecker documentation)
1058 (as described in ExecutionChecker documentation)
1061 def __init__(self, spec):
1064 :param spec: Specification.
1065 :type spec: Specification
1072 self._input_data = pd.Series()
1076 """Getter - Input data.
1078 :returns: Input data
1079 :rtype: pandas.Series
1081 return self._input_data
1083 def metadata(self, job, build):
1084 """Getter - metadata
1086 :param job: Job which metadata we want.
1087 :param build: Build which metadata we want.
1091 :rtype: pandas.Series
1094 return self.data[job][build]["metadata"]
1096 def suites(self, job, build):
1099 :param job: Job which suites we want.
1100 :param build: Build which suites we want.
1104 :rtype: pandas.Series
1107 return self.data[job][str(build)]["suites"]
1109 def tests(self, job, build):
1112 :param job: Job which tests we want.
1113 :param build: Build which tests we want.
1117 :rtype: pandas.Series
1120 return self.data[job][build]["tests"]
    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of job which build output data will be processed.
        :param build: The build which output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """
        # NOTE(review): lines are missing from this excerpt (creation of the
        # "metadata" dict, the error-path return and the closing of the
        # ExecutionChecker call); comments annotate only the visible code.

        with open(build["file-name"], 'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(("ERROR", "Error occurred while parsing output.xml: "

        # Traverse the parsed results and collect the structured data.
        checker = ExecutionChecker(metadata, self._cfg.mapping,
        result.visit(checker)
    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :param pid: PID of the process executing this method.
        :type job: str
        :type build: dict
        :type repeat: int
        :type pid: int
        """
        # NOTE(review): lines are missing from this excerpt (initialisation
        # of "logs"/"state"/"data", the success/failure branches and parts
        # of the outdated-build removal); comments annotate visible code.

        logs.append(("INFO", "  Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        success = download_and_unzip_data_file(self._cfg, job, build, pid,
            logs.append(("ERROR", "It is not possible to download the input "
                         "data file from the job '{job}', build "
                         "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))
            logs.append(("INFO", "  Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = self._parse_tests(job, build, logs)
                logs.append(("ERROR", "Input data file from the job '{job}', "
                             "build '{build}' is damaged. Skipped.".
                             format(job=job, build=build["build"])))

            # The downloaded file is removed once parsed.
            remove(build["file-name"])
        except OSError as err:
            logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                         format(build["file-name"], repr(err))))

        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get("time-period", None)
        if timeperiod and data:
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get("metadata", None)
                generated = metadata.get("generated", None)
                generated = dt.strptime(generated, "%Y%m%d %H:%M")
                # Builds older than the configured time period are dropped.
                if (now - generated) > timeperiod:
                    # Remove the data and the file:
                        "  The build {job}/{build} is outdated, will be "
                        "removed".format(job=job, build=build["build"])))
                    logs.append(("INFO", "  Done."))

        # Replay collected log tuples through the logging module.
        for level, line in logs:
            elif level == "ERROR":
            elif level == "DEBUG":
            elif level == "CRITICAL":
                logging.critical(line)
            elif level == "WARNING":
                logging.warning(line)

        return {"data": data, "state": state, "job": job, "build": build}
    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """
        # NOTE(review): lines are missing from this excerpt (the state check
        # guarding the per-build processing branch); comments annotate only
        # the visible code.

        logging.info("Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result["build"]["build"]

                data = result["data"]
                # Wrap the parsed dicts into pandas Series keyed the same way.
                build_data = pd.Series({
                    "metadata": pd.Series(
                        data["metadata"].values(),
                        index=data["metadata"].keys()),
                    "suites": pd.Series(data["suites"].values(),
                                        index=data["suites"].keys()),
                    "tests": pd.Series(data["tests"].values(),
                                       index=data["tests"].keys())})

                if self._input_data.get(job, None) is None:
                    self._input_data[job] = pd.Series()
                self._input_data[job][str(build_nr)] = build_data

                self._cfg.set_input_file_name(
                    job, build_nr, result["build"]["file-name"])

                self._cfg.set_input_state(job, build_nr, result["state"])

        logging.info("Memory allocation: {0:,d}MB".format(
            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))

        logging.info("Done.")
1285 def _end_of_tag(tag_filter, start=0, closer="'"):
1286 """Return the index of character in the string which is the end of tag.
1288 :param tag_filter: The string where the end of tag is being searched.
1289 :param start: The index where the searching is stated.
1290 :param closer: The character which is the tag closer.
1291 :type tag_filter: str
1294 :returns: The index of the tag closer.
1299 idx_opener = tag_filter.index(closer, start)
1300 return tag_filter.index(closer, idx_opener + 1)
1305 def _condition(tag_filter):
1306 """Create a conditional statement from the given tag filter.
1308 :param tag_filter: Filter based on tags from the element specification.
1309 :type tag_filter: str
1310 :returns: Conditional statement which can be evaluated.
1316 index = InputData._end_of_tag(tag_filter, index)
1320 tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
    def filter_data(self, element, params=None, data=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              ...
            - test (or suite) n ID:
            ...

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data: If not None, this data is used instead of data specified
            in the element.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """
        # NOTE(review): lines are missing from this excerpt (the try frame,
        # the "all"/"template" shortcut body, the error returns and parts of
        # the parameter-copy branches); comments annotate visible code.

        if element["filter"] in ("all", "template"):
            # Translate the tag filter into an evaluable expression.
            cond = InputData._condition(element["filter"])
            logging.debug("   Filter: {0}".format(cond))
            logging.error("   No filter defined.")

            params = element.get("parameters", None)
            params.append("type")

        data_to_filter = data if data else element["data"]
        for job, builds in data_to_filter.items():
            data[job] = pd.Series()
            for build in builds:
                data[job][str(build)] = pd.Series()
                data_iter = self.data[job][str(build)][data_set].\
                    if continue_on_error:
                for test_ID, test_data in data_iter:
                    # NOTE(security): the filter condition is evaluated with
                    # eval(); filter strings must come only from the trusted
                    # specification file, never from untrusted input.
                    if eval(cond, {"tags": test_data.get("tags", "")}):
                        data[job][str(build)][test_ID] = pd.Series()
                        for param, val in test_data.items():
                            data[job][str(build)][test_ID][param] = val
                        for param in params:
                                data[job][str(build)][test_ID][param] =\
                                data[job][str(build)][test_ID][param] =\

        except (KeyError, IndexError, ValueError) as err:
            logging.error("   Missing mandatory parameter in the element "
                          "specification: {0}".format(err))
        except AttributeError:
            logging.error("   The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
    def filter_tests_by_name(self, element, params=None, data_set="tests",
                             continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              ...
            - test (or suite) n ID:
            ...

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """
        # NOTE(review): lines are missing from this excerpt (the try frame,
        # the early return when nothing is included and parts of the
        # parameter-copy branches); comments annotate only the visible code.

        include = element.get("include", None)
            logging.warning("No tests to include, skipping the element.")

            params = element.get("parameters", None)
            params.append("type")

        for job, builds in element["data"].items():
            data[job] = pd.Series()
            for build in builds:
                data[job][str(build)] = pd.Series()
                for test in include:
                    # Each "include" entry is matched as a regular expression
                    # against the lowercase test IDs.
                    reg_ex = re.compile(str(test).lower())
                    for test_ID in self.data[job][str(build)]\
                        if re.match(reg_ex, str(test_ID).lower()):
                            test_data = self.data[job][str(build)]\
                            data[job][str(build)][test_ID] = pd.Series()
                            for param, val in test_data.items():
                                data[job][str(build)][test_ID]\
                            for param in params:
                                data[job][str(build)][test_ID]\
                                    [param] = test_data[param]
                                data[job][str(build)][test_ID]\
                        except KeyError as err:
                            logging.error("{err!r}".format(err=err))
                            if continue_on_error:

        except (KeyError, IndexError, ValueError) as err:
            logging.error("Missing mandatory parameter in the element "
                          "specification: {err!r}".format(err=err))
        except AttributeError as err:
            logging.error("{err!r}".format(err=err))
    def merge_data(data):
        """Merge data from more jobs and builds to a simple data structure.

        The output data structure is:

        - test (suite) 1 ID:
          - param 1
          ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
        """
        # NOTE(review): the "@staticmethod" decorator and the final
        # "return merged_data" are outside this excerpt.

        logging.info("    Merging data ...")

        merged_data = pd.Series()
        # Flatten job -> build -> item into a single ID-indexed Series;
        # later builds overwrite earlier entries with the same ID.
        for _, builds in data.iteritems():
            for _, item in builds.iteritems():
                for ID, item_data in item.iteritems():
                    merged_data[ID] = item_data