1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
29 from robot.api import ExecutionResult, ResultVisitor
30 from robot import errors
31 from collections import OrderedDict
32 from string import replace
34 from datetime import datetime as dt
35 from datetime import timedelta
36 from json import loads
37 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
39 from input_data_files import download_and_unzip_data_file
42 # Separator used in file names
46 class ExecutionChecker(ResultVisitor):
47 """Class to traverse through the test suite structure.
49 The functionality implemented in this class generates a json structure:
55 "generated": "Timestamp",
56 "version": "SUT version",
57 "job": "Jenkins job name",
58 "build": "Information about the build"
61 "Suite long name 1": {
63 "doc": "Suite 1 documentation",
64 "parent": "Suite 1 parent",
65 "level": "Level of the suite in the suite hierarchy"
67 "Suite long name N": {
69 "doc": "Suite N documentation",
70 "parent": "Suite 2 parent",
71 "level": "Level of the suite in the suite hierarchy"
78 "parent": "Name of the parent of the test",
79 "doc": "Test documentation",
80 "msg": "Test message",
81 "conf-history": "DUT1 and DUT2 VAT History",
82 "show-run": "Show Run",
83 "tags": ["tag 1", "tag 2", "tag n"],
85 "status": "PASS" | "FAIL",
131 "parent": "Name of the parent of the test",
132 "doc": "Test documentation",
133 "msg": "Test message",
134 "tags": ["tag 1", "tag 2", "tag n"],
136 "status": "PASS" | "FAIL",
143 "parent": "Name of the parent of the test",
144 "doc": "Test documentation",
145 "msg": "Test message",
146 "tags": ["tag 1", "tag 2", "tag n"],
147 "type": "MRR" | "BMRR",
148 "status": "PASS" | "FAIL",
150 "receive-rate": AvgStdevMetadata,
164 "metadata": { # Optional
165 "version": "VPP version",
166 "job": "Jenkins job name",
167 "build": "Information about the build"
171 "doc": "Suite 1 documentation",
172 "parent": "Suite 1 parent",
173 "level": "Level of the suite in the suite hierarchy"
176 "doc": "Suite N documentation",
177 "parent": "Suite 2 parent",
178 "level": "Level of the suite in the suite hierarchy"
184 "parent": "Name of the parent of the test",
185 "doc": "Test documentation"
186 "msg": "Test message"
187 "tags": ["tag 1", "tag 2", "tag n"],
188 "conf-history": "DUT1 and DUT2 VAT History"
189 "show-run": "Show Run"
190 "status": "PASS" | "FAIL"
198 .. note:: ID is the lowercase full path to the test.
    # Pre-compiled regexes used to pull measurement results out of Robot
    # test messages. NOTE(review): comment lines between some constants are
    # missing from the provided excerpt; patterns reproduced verbatim.

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    # PLRsearch (soak test) lower/upper bound rates.
    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::?\s(\d+.\d+)')

    # NDR/PDR lower and upper throughput bounds (4 groups).
    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # Two LATENCY lines, two directions each (4 groups).
    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    # NOTE(review): the continuation line and closing paren of this pattern
    # are missing from the provided excerpt.
    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'

    # SUT version strings; group(2) carries the version text.
    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
    REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')

    # Test-case naming helpers: thread/core tag (e.g. 2t1c), old/new name
    # infixes, tcNN- prefix, and PAPI/CLI "(<ip> - <socket>)" suffix.
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            the generated data structure.
        :param mapping: Mapping of the old names of test cases to the new
            (currently used) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """
        # NOTE(review): several original lines of this method are missing
        # from the provided excerpt (attribute initialisations, dict-literal
        # openers); gaps are flagged below.

        # Type of message to parse out from the test messages
        self._msg_type = None

        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        # NOTE(review): the self._testbed initialisation line is missing.

        # Mapping of TCs long names
        self._mapping = mapping

        self._ignore = ignore

        # Number of VAT History messages found:
        # 1 - VAT History of DUT1
        # 2 - VAT History of DUT2
        self._lookup_kw_nr = 0
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of currently processed test- the lowercase full path to the
        # test; NOTE(review): the self._test_ID initialisation is missing.

        # The main data structure
        # NOTE(review): the opening `self._data = {` line is missing.
            "metadata": OrderedDict(),
            "suites": OrderedDict(),
            "tests": OrderedDict()

        # Save the provided metadata
        for key, val in metadata.items():
            self._data["metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages; keys match the self._msg_type values set by the visitor.
        # NOTE(review): the opening `self.parse_msg = {` line is missing.
            "timestamp": self._get_timestamp,
            "vpp-version": self._get_vpp_version,
            "dpdk-version": self._get_dpdk_version,
            "teardown-vat-history": self._get_vat_history,
            "teardown-papi-history": self._get_papi_history,
            "test-show-runtime": self._get_show_run,
            "testbed": self._get_testbed
316 """Getter - Data parsed from the XML file.
318 :returns: Data parsed from the XML file.
    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("Setup of TG node"):
            reg_tg_ip = re.compile(
                r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
            # NOTE(review): the `try:` opener is missing from the excerpt.
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                # NOTE(review): the except body is missing from the excerpt.
            # Record the testbed in metadata and stop looking for this info.
            self._data["metadata"]["testbed"] = self._testbed
            self._msg_type = None
    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("return STDOUT Version:") or \
            msg.message.count("VPP Version:") or \
            msg.message.count("VPP version:"):
            # group(2) of REGEX_VERSION_VPP carries the version text.
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
            # NOTE(review): the `group(2))` continuation line is missing
            # from the provided excerpt.
            self._data["metadata"]["version"] = self._version
            self._msg_type = None
    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("DPDK Version:"):
            # NOTE(review): a `try:` opener is missing from the excerpt.
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message). group(2))
                self._data["metadata"]["version"] = self._version
            # NOTE(review): the matching except clause is missing from the
            # excerpt.
            self._msg_type = None
    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # Keep the first 14 characters of the Robot timestamp; the result is
        # later parsed with the "%Y%m%d %H:%M" format (see the time-period
        # check in the data-download code).
        self._timestamp = msg.timestamp[:14]
        self._data["metadata"]["generated"] = self._timestamp
        self._msg_type = None
    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("VAT command history:"):
            # Each matching message corresponds to one DUT.
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            # NOTE(review): an `else:` opener appears to be missing here.
                self._msg_type = None
            # Strip the "<ip> VAT command history:" header and normalise
            # newlines/quotes into the " |br| " rST form.
            text = re.sub("\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          "VAT command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("PAPI command history:"):
            # Each matching message corresponds to one DUT.
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            # NOTE(review): an `else:` opener appears to be missing here.
                self._msg_type = None
            # Strip the "<ip> PAPI command history:" header and normalise
            # newlines/quotes into the " |br| " rST form.
            text = re.sub("\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          "PAPI command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # NOTE(review): this method is missing many original lines in the
        # provided excerpt (try/else openers, statement continuations,
        # variable initialisations); only the major gaps are flagged.

        if not "show-run" in self._data["tests"][self._test_ID].keys():
            self._data["tests"][self._test_ID]["show-run"] = str()

        if msg.message.count("stats runtime"):
            # NOTE(review): the `group(...))` continuations of the next two
            # statements are missing from the excerpt.
            host = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).\
            socket = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).\
            # Normalise the message into JSON-parseable text
            # (strip whitespace, unify quotes, drop b"/u" prefixes).
            message = str(msg.message).replace(' ', '').replace('\n', '').\
                replace("'", '"').replace('b"', '"').replace('u"', '"').\
            runtime = loads(message)
            # NOTE(review): a `try:` opener is missing here.
                threads_nr = len(runtime[0]["clocks"])
            except (IndexError, KeyError):
            tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks",
            # One table per thread, each seeded with the header row.
            table = [[tbl_hdr, ] for _ in range(threads_nr)]
            # NOTE(review): the outer loop producing `item` is missing here.
            for idx in range(threads_nr):
                name = format(item["name"])
                calls = format(item["calls"][idx])
                vectors = format(item["vectors"][idx])
                suspends = format(item["suspends"][idx])
                # Clocks per unit of work: divide by vectors, calls or
                # suspends - whichever is non-zero first.
                if item["vectors"][idx] > 0:
                    item["clocks"][idx]/item["vectors"][idx], ".2e")
                elif item["calls"][idx] > 0:
                    item["clocks"][idx]/item["calls"][idx], ".2e")
                elif item["suspends"][idx] > 0:
                    item["clocks"][idx]/item["suspends"][idx], ".2e")
                if item["calls"][idx] > 0:
                    vectors_call = format(
                        item["vectors"][idx]/item["calls"][idx], ".2f")
                # NOTE(review): an `else:` opener is missing here.
                    vectors_call = format(0, ".2f")
                # Only rows with some activity are added to the table.
                if int(calls) + int(vectors) + int(suspends):
                    name, calls, vectors, suspends, clocks, vectors_call
            # Render one pretty-printed table per thread; thread 0 is
            # vpp_main, the rest are vpp_wk_<n>.
            for idx in range(threads_nr):
                text += "Thread {idx} ".format(idx=idx)
                text += "vpp_main\n" if idx == 0 else \
                    "vpp_wk_{idx}\n".format(idx=idx-1)
                for row in table[idx]:
                    if txt_table is None:
                        txt_table = prettytable.PrettyTable(row)
                    # NOTE(review): an `else:` opener is missing here.
                        txt_table.add_row(row)
                txt_table.set_style(prettytable.MSWORD_FRIENDLY)
                txt_table.align["Name"] = "l"
                txt_table.align["Calls"] = "r"
                txt_table.align["Vectors"] = "r"
                txt_table.align["Suspends"] = "r"
                txt_table.align["Clocks"] = "r"
                txt_table.align["Vectors/Calls"] = "r"
                text += txt_table.get_string(sortby="Name") + '\n'
            text = (" \n **DUT: {host}/{socket}** \n {text}".
                    format(host=host, socket=socket, text=text))
            text = text.replace('\n', ' |br| ').replace('\r', '').\
            self._data["tests"][self._test_ID]["show-run"] += text
    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        # NOTE(review): the initialisation of `status` and the opening of the
        # `throughput` dict literal are missing from the provided excerpt;
        # -1.0 acts as the "not found" sentinel.
            "NDR": {"LOWER": -1.0, "UPPER": -1.0},
            "PDR": {"LOWER": -1.0, "UPPER": -1.0}

        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            # NOTE(review): a `try:` opener (and the line setting `status`
            # on success) is missing here.
                throughput["NDR"]["LOWER"] = float(groups.group(1))
                throughput["NDR"]["UPPER"] = float(groups.group(2))
                throughput["PDR"]["LOWER"] = float(groups.group(3))
                throughput["PDR"]["UPPER"] = float(groups.group(4))
            except (IndexError, ValueError):
                # NOTE(review): except body missing from the excerpt.

        return throughput, status
    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        # NOTE(review): the initialisation of `status` and of the
        # `throughput` dict is missing from the provided excerpt.

        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            # NOTE(review): a `try:` opener (and the success-path `status`
            # assignment) is missing here.
                throughput["LOWER"] = float(groups.group(1))
                throughput["UPPER"] = float(groups.group(2))
            except (IndexError, ValueError):
                # NOTE(review): except body missing from the excerpt.

        return throughput, status
    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        # -1.0 acts as the "not found" sentinel for each latency field.
        latency_default = {"min": -1.0, "avg": -1.0, "max": -1.0, "hdrh": ""}
        # NOTE(review): the openers of the `latency` dict (keys "NDR"/"PDR")
        # and the `status` initialisation are missing from the excerpt.
            "direction1": copy.copy(latency_default),
            "direction2": copy.copy(latency_default)
            "direction1": copy.copy(latency_default),
            "direction2": copy.copy(latency_default)

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with corresponding keys, except hdrh float values.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
            """
            in_list = in_str.split('/')

            # NOTE(review): the `rval = {` opener, the closing brace and
            # the `return rval` line are missing from the excerpt.
                "min": float(in_list[0]),
                "avg": float(in_list[1]),
                "max": float(in_list[2]),

            # Optional 4th field is the hdrh histogram payload.
            if len(in_list) == 4:
                rval["hdrh"] = str(in_list[3])

        if groups is not None:
            # NOTE(review): a `try:` opener (and the success-path `status`
            # assignment) is missing here.
                latency["NDR"]["direction1"] = process_latency(groups.group(1))
                latency["NDR"]["direction2"] = process_latency(groups.group(2))
                latency["PDR"]["direction1"] = process_latency(groups.group(3))
                latency["PDR"]["direction2"] = process_latency(groups.group(4))
            except (IndexError, ValueError):
                # NOTE(review): except body missing from the excerpt.

        return latency, status
    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        # start_suite may veto traversal by returning False.
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)
    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        # NOTE(review): a `try:` opener is missing from the excerpt.
            parent_name = suite.parent.name
        except AttributeError:
            # NOTE(review): except body missing from the excerpt.

        # Normalise the suite documentation into the " |br| " rST form.
        doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
            replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
        # NOTE(review): `replace` here is the Python 2 `string.replace`
        # imported at module level; it does not exist on Python 3.
        doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)

        # Key suites by lowercase long name with spaces replaced.
        self._data["suites"][suite.longname.lower().replace('"', "'").
            replace(" ", "_")] = {
                "name": suite.name.lower(),
                "parent": parent_name,
                "level": len(suite.longname.split("."))
        # NOTE(review): the "doc" entry and the closing brace of this dict
        # literal are missing from the provided excerpt.

        suite.keywords.visit(self)
    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        # NOTE(review): method body missing from the provided excerpt
        # (presumably `pass`).
    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        # start_test may veto traversal by returning False.
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            # NOTE(review): the end_test(test) call is missing from the
            # provided excerpt.
    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        # NOTE(review): this method is missing many original lines in the
        # provided excerpt (the `test_result = ...` initialisation, several
        # try/else openers, continuation lines, elif conditions); only the
        # major gaps are flagged below.

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            # NOTE(review): the body (presumably `return False`) is missing.

        tags = [str(tag) for tag in test.tags]
        # NOTE(review): the `test_result` initialisation is missing.

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split('.')[-1]
            logging.debug("{0}\n{1}\n{2}\n{3}".format(
                self._data["metadata"], longname_orig, longname, name))
        # NOTE(review): an `else:` opener is missing here.
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
        # Remove TC number from the TC name (not needed):
        test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result["parent"] = test.parent.name.lower()
        test_result["tags"] = tags
        doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
            replace('\r', '').replace('[', ' |br| [')
        # NOTE(review): Python 2 `string.replace` (module-level import).
        test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
        test_result["msg"] = test.message.replace('\n', ' |br| '). \
            replace('\r', '').replace('"', "'")
        test_result["type"] = "FUNC"
        test_result["status"] = test.status

        if "PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
            # NOTE(review): the `if groups is None:` guard and `tag_count`
            # bookkeeping lines are missing from the excerpt.
                for tag in test_result["tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    # NOTE(review): tag_count/tag_tc assignments missing.
                    self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
                                           "-{0}-".format(tag_tc.lower()),
                    # NOTE(review): continuation arguments missing.
                    test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                 "-{0}-".format(tag_tc.lower()),
                # NOTE(review): the branch handling zero/multiple
                # multi-threading tags begins here; its opener is missing.
                    test_result["status"] = "FAIL"
                    self._data["tests"][self._test_ID] = test_result
                    logging.debug("The test '{0}' has no or more than one "
                                  "multi-threading tags.".format(self._test_ID))
                    logging.debug("Tags: {0}".format(test_result["tags"]))
                    # NOTE(review): `return False` presumably missing.

        # Classify the test by its tags; only passed tests of known types
        # are parsed further.
        if test.status == "PASS" and ("NDRPDRDISC" in tags or
        # NOTE(review): the remainder of this compound condition is missing.

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if "NDRDISC" in tags:
                test_result["type"] = "NDR"
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            elif "PDRDISC" in tags:
                test_result["type"] = "PDR"
            elif "NDRPDR" in tags:
                test_result["type"] = "NDRPDR"
            # NOTE(review): elif conditions for SOAK/TCP/MRR are missing.
                test_result["type"] = "SOAK"
                test_result["type"] = "TCP"
                test_result["type"] = "MRR"
            elif "FRMOBL" in tags or "BMRR" in tags:
                test_result["type"] = "BMRR"
            elif "RECONF" in tags:
                test_result["type"] = "RECONF"
            # NOTE(review): the fall-through `else:` opener is missing.
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result
                # NOTE(review): `return False` presumably missing.

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if test_result["type"] in ("NDR", "PDR"):
                # NOTE(review): `try:` openers around the two searches are
                # missing from the excerpt.
                    rate_value = str(re.search(
                        self.REGEX_RATE, test.message).group(1))
                except AttributeError:
                    rate_unit = str(re.search(
                        self.REGEX_RATE, test.message).group(2))
                except AttributeError:

                test_result["throughput"] = dict()
                test_result["throughput"]["value"] = \
                    int(rate_value.split('.')[0])
                test_result["throughput"]["unit"] = rate_unit
                test_result["latency"] = \
                    self._get_latency(test.message, test_result["type"])
                if test_result["type"] == "PDR":
                    test_result["lossTolerance"] = str(re.search(
                        self.REGEX_TOLERANCE, test.message).group(1))

            elif test_result["type"] in ("NDRPDR", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result["latency"], test_result["status"] = \
                    self._get_ndrpdr_latency(test.message)

            elif test_result["type"] in ("SOAK", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_plr_throughput(test.message)

            elif test_result["type"] in ("TCP", ):
                groups = re.search(self.REGEX_TCP, test.message)
                test_result["result"] = int(groups.group(2))

            elif test_result["type"] in ("MRR", "BMRR"):
                test_result["result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    metadata = AvgStdevMetadataFactory.from_data(items_float)
                    # Next two lines have been introduced in CSIT-1179,
                    # to be removed in CSIT-1180.
                    # NOTE(review): those two lines are missing here.
                    test_result["result"]["receive-rate"] = metadata
                # NOTE(review): an `else:` opener (legacy MRR format) is
                # missing here.
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result["result"]["receive-rate"] = \
                        AvgStdevMetadataFactory.from_data([
                            float(groups.group(3)) / float(groups.group(1)), ])

            elif test_result["type"] == "RECONF":
                test_result["result"] = None
                # NOTE(review): a `try:` opener is missing here.
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result["result"] = {
                        "loss": int(grps_loss.group(1)),
                        "time": float(grps_time.group(1))
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result["status"] = "FAIL"

        self._data["tests"][self._test_ID] = test_result
    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        # NOTE(review): method body missing from the provided excerpt
        # (presumably `pass`).
    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        # start_keyword may veto further processing by returning False.
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)
    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        # NOTE(review): a `try:` opener is missing from the excerpt.
            # Dispatch by keyword type; test keywords reset the lookup
            # counter before being visited.
            if keyword.type == "setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == "teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
            # NOTE(review): an `else:` opener is missing here.
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
            # NOTE(review): except body missing from the excerpt.
    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        # NOTE(review): method body missing from the provided excerpt
        # (presumably `pass`).
    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        # Recurse depth-first over child keywords; start_test_kw may veto.
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)
    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        # Select the message parser for this keyword's messages.
        if test_kw.name.count("Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = "test-show-runtime"
        elif test_kw.name.count("Install Dpdk Test") and not self._version:
            self._msg_type = "dpdk-version"
        # NOTE(review): line(s) missing from the provided excerpt here
        # (presumably an else branch / guard before visiting messages).
        test_kw.messages.visit(self)
    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        # NOTE(review): method body missing from the provided excerpt
        # (presumably `pass`).
    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        # Recurse depth-first over child keywords; start_setup_kw may veto.
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)
    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        # Select the message parser; each piece of metadata is only
        # extracted once (guards on self._version / _timestamp / _testbed).
        if setup_kw.name.count("Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = "vpp-version"
        elif setup_kw.name.count("Set Global Variable") \
                and not self._timestamp:
            self._msg_type = "timestamp"
        elif setup_kw.name.count("Setup Framework") and not self._testbed:
            self._msg_type = "testbed"
        # NOTE(review): line(s) missing from the provided excerpt here
        # (presumably an else branch / guard before visiting messages).
        setup_kw.messages.visit(self)
    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        # NOTE(review): method body missing from the provided excerpt
        # (presumably `pass`).
    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        # Recurse depth-first over child keywords; start_teardown_kw may veto.
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)
    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts. Default implementation does
        nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        # Reset the per-test DUT counter and select the history parser
        # before visiting this keyword's messages.
        if teardown_kw.name.count("Show Vat History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count("Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-papi-history"
            teardown_kw.messages.visit(self)
    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        # NOTE(review): method body missing from the provided excerpt
        # (presumably `pass`).
    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # start_message may veto by returning False.
        if self.start_message(msg) is not False:
            self.end_message(msg)
    def start_message(self, msg):
        """Called when message starts. Get required information from messages:
        timestamp, VPP version, bit rate, latency, ...

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # NOTE(review): the guard checking that self._msg_type is set is
        # missing from the provided excerpt; dispatch to the parser
        # registered for the current message type.
            self.parse_msg[self._msg_type](msg)
    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # NOTE(review): method body missing from the provided excerpt
        # (presumably `pass`).
1065 The data is extracted from output.xml files generated by Jenkins jobs and
1066 stored in pandas' DataFrames.
1072 (as described in ExecutionChecker documentation)
1074 (as described in ExecutionChecker documentation)
1076 (as described in ExecutionChecker documentation)
    def __init__(self, spec):
        """Initialisation.

        :param spec: Specification.
        :type spec: Specification
        """
        # NOTE(review): other attribute initialisations (e.g. self._cfg,
        # taken from `spec`) are missing from the provided excerpt.

        # Data store for all parsed jobs/builds:
        self._input_data = pd.Series()
1094 """Getter - Input data.
1096 :returns: Input data
1097 :rtype: pandas.Series
1099 return self._input_data
    def metadata(self, job, build):
        """Getter - metadata

        :param job: Job which metadata we want.
        :param build: Build which metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata of the given job/build.
        :rtype: pandas.Series
        """
        return self.data[job][build]["metadata"]
    def suites(self, job, build):
        """Getter - suites.

        :param job: Job which suites we want.
        :param build: Build which suites we want.
        :type job: str
        :type build: str
        :returns: Suites of the given job/build.
        :rtype: pandas.Series
        """
        # Builds are keyed by their string form.
        return self.data[job][str(build)]["suites"]
    def tests(self, job, build):
        """Getter - tests.

        :param job: Job which tests we want.
        :param build: Build which tests we want.
        :type job: str
        :type build: str
        :returns: Tests of the given job/build.
        :rtype: pandas.Series
        """
        # NOTE(review): unlike suites(), build is not str()-wrapped here -
        # callers apparently pass it as a string already; confirm.
        return self.data[job][build]["tests"]
    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of job which build output data will be processed.
        :param build: The build which output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """
        # NOTE(review): construction of the `metadata` dict passed to the
        # checker is missing from the provided excerpt.

        with open(build["file-name"], 'r') as data_file:
            # NOTE(review): a `try:` opener is missing here.
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(("ERROR", "Error occurred while parsing output.xml: "
                # NOTE(review): the rest of this log message and the early
                # `return None` are missing from the excerpt.
        checker = ExecutionChecker(metadata, self._cfg.mapping,
        # NOTE(review): the third constructor argument (ignore list) and the
        # closing paren are missing from the excerpt.
        result.visit(checker)
        # NOTE(review): the `return checker.data` line is missing.
    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param pid: PID of the process executing this method.
        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :type pid: int
        :type job: str
        :type build: dict
        :type repeat: int
        """
        # NOTE(review): this method is missing many original lines in the
        # provided excerpt (the `logs`/`state` initialisation, success
        # checks, `now` timestamp); only the major gaps are flagged.

        logs.append(("INFO", "  Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        # Download may be retried; see `repeat`.
        success = download_and_unzip_data_file(self._cfg, job, build, pid,
        # NOTE(review): remaining call arguments / retry loop missing here.

        # NOTE(review): the `if not success:` guard is missing here.
            logs.append(("ERROR", "It is not possible to download the input "
                         "data file from the job '{job}', build "
                         "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))
        # NOTE(review): the success branch opener is missing here.
            logs.append(("INFO", "    Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = self._parse_tests(job, build, logs)
            # NOTE(review): the `if data is None:` guard is missing here.
                logs.append(("ERROR", "Input data file from the job '{job}', "
                             "build '{build}' is damaged. Skipped.".
                             format(job=job, build=build["build"])))

        # Remove the downloaded file; failure to remove is only logged.
        # NOTE(review): a `try:` opener is missing here.
            remove(build["file-name"])
        except OSError as err:
            logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                         format(build["file-name"], repr(err))))

        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get("time-period", None)
        if timeperiod and data:
            # The time period is given as a number of days.
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get("metadata", None)
            # NOTE(review): the `if metadata:` guard is missing here.
                generated = metadata.get("generated", None)
                # NOTE(review): another guard is missing here.
                    # Must match the [:14] slice stored by _get_timestamp.
                    generated = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        # NOTE(review): state/data reset lines are missing.
                            " The build {job}/{build} is outdated, will be "
                            "removed".format(job=job, build=build["build"])))
                        logs.append(("INFO", "  Done."))

        # Replay collected log records at their recorded severities.
        for level, line in logs:
            # NOTE(review): the INFO branch is missing from the excerpt.
            elif level == "ERROR":
            elif level == "DEBUG":
            elif level == "CRITICAL":
                logging.critical(line)
            elif level == "WARNING":
                logging.warning(line)

        return {"data": data, "state": state, "job": job, "build": build}
    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """
        logging.info("Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:
                # NOTE(review): line(s) missing from the provided excerpt
                # around this call (e.g. build pre-processing).
                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result["build"]["build"]

                # NOTE(review): the guard skipping failed builds is missing.
                    data = result["data"]
                    # Wrap each section in its own pandas Series keyed by
                    # the original (ordered) dict keys.
                    build_data = pd.Series({
                        "metadata": pd.Series(
                            data["metadata"].values(),
                            index=data["metadata"].keys()),
                        "suites": pd.Series(data["suites"].values(),
                                            index=data["suites"].keys()),
                        "tests": pd.Series(data["tests"].values(),
                                           index=data["tests"].keys())})

                    if self._input_data.get(job, None) is None:
                        self._input_data[job] = pd.Series()
                    self._input_data[job][str(build_nr)] = build_data

                    self._cfg.set_input_file_name(
                        job, build_nr, result["build"]["file-name"])

                self._cfg.set_input_state(job, build_nr, result["state"])

                # ru_maxrss is in kB on Linux; divided to report MB.
                logging.info("Memory allocation: {0:,d}MB".format(
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))

        logging.info("Done.")
    def _end_of_tag(tag_filter, start=0, closer="'"):
        """Return the index of character in the string which is the end of tag.

        :param tag_filter: The string where the end of tag is being searched.
        :param start: The index where the searching is stated.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer.
        :rtype: int
        """
        # NOTE(review): the @staticmethod decorator and the try/except
        # wrapper (returning None when no closer is found) appear to be
        # missing from the provided excerpt.
            # Find the opening quote, then the matching closing quote.
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
        """
        # NOTE(review): the @staticmethod decorator and the loop driving
        # repeated tag substitution are missing from the provided excerpt.
            index = InputData._end_of_tag(tag_filter, index)
            # NOTE(review): the termination check (index is None -> return)
            # is missing here.
            # Turn each quoted tag into a membership test, e.g.
            # "'TAG'" -> "'TAG' in tags".
            tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
    def filter_data(self, element, params=None, data=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
            - test (or suite) n ID:
            ...
          ...
        ...

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data: If not None, this data is used instead of data specified
            in the element.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """
        # NOTE(review): this method is missing several original lines in the
        # provided excerpt (try openers, else branches, returns); the major
        # gaps are flagged.

        # Build the eval-able condition string from the element's filter.
        # NOTE(review): a `try:` opener is missing here.
            if element["filter"] in ("all", "template"):
                # NOTE(review): the "all pass" condition assignment and the
                # `else:` opener are missing here.
                cond = InputData._condition(element["filter"])
            logging.debug("   Filter: {0}".format(cond))
        # NOTE(review): the KeyError branch opener is missing here.
            logging.error("  No filter defined.")
            # NOTE(review): `return None` presumably missing.

        # NOTE(review): the `if params is None:` guard is missing here.
            params = element.get("parameters", None)
            # NOTE(review): the `if params:` guard is missing here.
                params.append("type")

        data_to_filter = data if data else element["data"]
        # NOTE(review): `data = pd.Series()` and a `try:` opener are missing.
            for job, builds in data_to_filter.items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    # NOTE(review): a `try:` opener is missing here.
                        data_iter = self.data[job][str(build)][data_set].\
                        # NOTE(review): `.iteritems()` continuation and the
                        # except clause opener are missing here.
                        if continue_on_error:
                            # NOTE(review): continue/return lines missing.

                    for test_ID, test_data in data_iter:
                        # NOTE: `eval` runs the filter string from the
                        # element specification - trusted input is assumed.
                        if eval(cond, {"tags": test_data.get("tags", "")}):
                            data[job][str(build)][test_ID] = pd.Series()
                            # NOTE(review): the `if params is None:` guard
                            # is missing here.
                                for param, val in test_data.items():
                                    data[job][str(build)][test_ID][param] = val
                            # NOTE(review): the `else:` opener and inner
                            # try are missing here.
                                for param in params:
                                    # NOTE(review): try/except around the
                                    # next assignments is missing.
                                        data[job][str(build)][test_ID][param] =\
                                        data[job][str(build)][test_ID][param] =\
            # NOTE(review): `return data` is missing here.

        except (KeyError, IndexError, ValueError) as err:
            logging.error("   Missing mandatory parameter in the element "
                          "specification: {0}".format(err))
            # NOTE(review): `return None` presumably missing.
        except AttributeError:
            # NOTE(review): `return None` presumably missing.
        # NOTE(review): the SyntaxError handler opener is missing here.
            logging.error("   The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
    def filter_tests_by_name(self, element, params=None, data_set="tests",
                             continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
            - test (or suite) n ID:
            ...
          ...
        ...

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """
        # NOTE(review): this method is missing several original lines in the
        # provided excerpt; the major gaps are flagged.

        include = element.get("include", None)
        # NOTE(review): the `if not include:` guard is missing here.
            logging.warning("No tests to include, skipping the element.")
            # NOTE(review): `return None` presumably missing.

        # NOTE(review): the `if params is None:` guard is missing here.
            params = element.get("parameters", None)
            # NOTE(review): the `if params:` guard is missing here.
                params.append("type")

        # NOTE(review): `data = pd.Series()` and a `try:` opener are missing.
            for job, builds in element["data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    for test in include:
                        # NOTE(review): a `try:` opener is missing here.
                            # Each include entry is treated as a regex over
                            # the lowercase test ID.
                            reg_ex = re.compile(str(test).lower())
                            for test_ID in self.data[job][str(build)]\
                            # NOTE(review): `[data_set].keys():` continuation
                            # is missing here.
                                if re.match(reg_ex, str(test_ID).lower()):
                                    test_data = self.data[job][str(build)]\
                                    # NOTE(review): `[data_set][test_ID]`
                                    # continuation is missing here.
                                    data[job][str(build)][test_ID] = pd.Series()
                                    # NOTE(review): `if params is None:`
                                    # guard missing here.
                                        for param, val in test_data.items():
                                            data[job][str(build)][test_ID]\
                                    # NOTE(review): the `else:` opener is
                                    # missing here.
                                        for param in params:
                                            # NOTE(review): try opener
                                            # missing here.
                                                data[job][str(build)][test_ID]\
                                                    [param] = test_data[param]
                                            # NOTE(review): KeyError branch
                                            # opener missing here.
                                                data[job][str(build)][test_ID]\
                        except KeyError as err:
                            logging.error("{err!r}".format(err=err))
                            if continue_on_error:
                                # NOTE(review): continue/return lines are
                                # missing from the excerpt.
            # NOTE(review): `return data` is missing here.

        except (KeyError, IndexError, ValueError) as err:
            logging.error("Missing mandatory parameter in the element "
                          "specification: {err!r}".format(err=err))
            # NOTE(review): `return None` presumably missing.
        except AttributeError as err:
            logging.error("{err!r}".format(err=err))
            # NOTE(review): `return None` presumably missing.
1527 def merge_data(data):
1528 """Merge data from more jobs and builds to a simple data structure.
1530 The output data structure is:
1532 - test (suite) 1 ID:
1538 - test (suite) n ID:
1541 :param data: Data to merge.
1542 :type data: pandas.Series
1543 :returns: Merged data.
1544 :rtype: pandas.Series
1547 logging.info(" Merging data ...")
1549 merged_data = pd.Series()
1550 for _, builds in data.iteritems():
1551 for _, item in builds.iteritems():
1552 for ID, item_data in item.iteritems():
1553 merged_data[ID] = item_data