1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
29 from robot.api import ExecutionResult, ResultVisitor
30 from robot import errors
31 from collections import OrderedDict
32 from string import replace
34 from datetime import datetime as dt
35 from datetime import timedelta
36 from json import loads
38 from resources.libraries.python import jumpavg
39 from input_data_files import download_and_unzip_data_file
42 # Separator used in file names
class ExecutionChecker(ResultVisitor):
    """Traverse the Robot result structure and extract data into a
    json-like dictionary with three top-level keys:

    - "metadata": "generated" timestamp, SUT "version", Jenkins "job"
      name and "build" information;
    - "suites": per-suite "name", "doc", "parent" and "level" (level of
      the suite in the suite hierarchy), keyed by the suite long name;
    - "tests": per-test "parent", "doc", "msg", "conf-history" (DUT1 and
      DUT2 VAT/PAPI history), "show-run", "tags", "type" and "status"
      ("PASS" | "FAIL"), plus type-specific results; for "MRR" | "BMRR"
      tests the "result" holds "receive-rate" as a float — the average of
      a list computed using AvgStdevStats (in CSIT-1180 to be replaced
      with List[float]).

    .. note:: ID is the lowercase full path to the test.
    """

    # NOTE(review): this excerpt of the class docstring/constants is
    # missing some original lines (e.g. the second part of
    # REGEX_TOLERANCE) — verify against the full source.

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    # Final rate reported by the old NDR/PDR binary search.
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    # PLRsearch (soak test) lower/upper bound lines.
    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::?\s(\d+.\d+)')

    # Four NDR/PDR bounds reported by MLRsearch.
    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # Two latency lines (NDR then PDR), each with two directions.
    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    # NOTE(review): continuation line of this pattern is not visible here.
    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'

    # Accepts the three version-banner variants produced by VPP builds.
    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')

    # Legacy MRR result line: trial duration, tx and rx packet counts.
    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    # BMRR: comma-separated list of per-trial receive rates.
    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
    REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')

    # Multi-threading tag, e.g. "2T1C".
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    # Old test-case name infix, e.g. "-2t1c-".
    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    # New test-case name infix, e.g. "-1c-".
    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    # Leading test-case number, e.g. "tc01-".
    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    # PAPI CLI banner: "(<host IP> - <socket>)".
    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            the JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (current) names.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """
        # NOTE(review): this excerpt is missing some initialisation lines
        # (e.g. self._version, self._testbed, self._test_ID, and the
        # opening/closing braces of the two dict literals below) — verify
        # against the full source.

        # Type of message to parse out from the test messages
        self._msg_type = None

        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.

        # Mapping of TCs long names
        self._mapping = mapping

        self._ignore = ignore

        # Number of VAT History messages found:
        # 0 - no message found yet
        # 1 - VAT History of DUT1
        # 2 - VAT History of DUT2
        self._lookup_kw_nr = 0
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 0 - no message found yet
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of currently processed test- the lowercase full path to the
        # test

        # The main data structure
            "metadata": OrderedDict(),
            "suites": OrderedDict(),
            "tests": OrderedDict()

        # Save the provided metadata
        for key, val in metadata.items():
            self._data["metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages (dispatched from start_message via self._msg_type).
            "timestamp": self._get_timestamp,
            "vpp-version": self._get_vpp_version,
            "dpdk-version": self._get_dpdk_version,
            "teardown-vat-history": self._get_vat_history,
            "teardown-papi-history": self._get_papi_history,
            "test-show-runtime": self._get_show_run,
            "testbed": self._get_testbed
318 """Getter - Data parsed from the XML file.
320 :returns: Data parsed from the XML file.
    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # NOTE(review): the excerpt is missing the 'try:' opener and the
        # except-handler body below — verify against the full source.
        if msg.message.count("Setup of TG node"):
            reg_tg_ip = re.compile(
                r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
            self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
            # Record the testbed in metadata and stop looking for it.
            self._data["metadata"]["testbed"] = self._testbed
            self._msg_type = None
    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # Any of the three banner variants marks a version message.
        if msg.message.count("return STDOUT Version:") or \
            msg.message.count("VPP Version:") or \
            msg.message.count("VPP version:"):
            # NOTE(review): the trailing 'group(...)' part of this call is
            # not visible in the excerpt — verify against the full source.
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
            self._data["metadata"]["version"] = self._version
            self._msg_type = None
    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # NOTE(review): the enclosing try/except around the re.search is
        # not fully visible in this excerpt.
        if msg.message.count("DPDK Version:"):
            self._version = str(re.search(
                self.REGEX_VERSION_DPDK, msg.message). group(2))
            self._data["metadata"]["version"] = self._version

        self._msg_type = None
379 def _get_timestamp(self, msg):
380 """Called when extraction of timestamp is required.
382 :param msg: Message to process.
387 self._timestamp = msg.timestamp[:14]
388 self._data["metadata"]["generated"] = self._timestamp
389 self._msg_type = None
    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # NOTE(review): the excerpt is missing some lines here (e.g. the
        # 'else:' this _msg_type reset belongs under) — verify against the
        # full source.
        if msg.message.count("VAT command history:"):
            # One message per DUT; the counter selects the DUT label below.
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
                self._msg_type = None
            # Strip the "<IP> VAT command history:" prefix and normalise
            # newlines/quotes for the |br|-separated rST-ish output.
            text = re.sub("\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          "VAT command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")
            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # NOTE(review): mirrors _get_vat_history; the excerpt is missing
        # some lines (e.g. the 'else:' this _msg_type reset belongs under).
        if msg.message.count("PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
                self._msg_type = None
            # Strip the "<IP> PAPI command history:" prefix and normalise
            # newlines/quotes for the |br|-separated output.
            text = re.sub("\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          "PAPI command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")
            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # NOTE(review): this excerpt is missing many lines (group() calls,
        # 'try:' openers, else branches, clocks/text initialisation); code
        # lines below are kept verbatim — verify against the full source.
        if not "show-run" in self._data["tests"][self._test_ID].keys():
            self._data["tests"][self._test_ID]["show-run"] = str()

        if msg.message.count("stats runtime"):
            # Host IP and socket are parsed from the PAPI CLI banner.
            host = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).\
            socket = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).\
            # Normalise the python-repr-ish message into valid JSON text
            # before handing it to json.loads().
            message = str(msg.message).replace(' ', '').replace('\n', '').\
                replace("'", '"').replace('b"', '"').replace('u"', '"').\
            runtime = loads(message)
                # Number of worker threads == number of per-thread samples.
                threads_nr = len(runtime[0]["clocks"])
            except (IndexError, KeyError):
            tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks",
            # One table (starting with the header row) per thread.
            table = [[tbl_hdr, ] for _ in range(threads_nr)]
                for idx in range(threads_nr):
                    name = format(item["name"])
                    calls = format(item["calls"][idx])
                    vectors = format(item["vectors"][idx])
                    suspends = format(item["suspends"][idx])
                    # Clocks per unit of work: prefer vectors, then calls,
                    # then suspends as the denominator.
                    if item["vectors"][idx] > 0:
                            item["clocks"][idx]/item["vectors"][idx], ".2e")
                    elif item["calls"][idx] > 0:
                            item["clocks"][idx]/item["calls"][idx], ".2e")
                    elif item["suspends"][idx] > 0:
                            item["clocks"][idx]/item["suspends"][idx], ".2e")
                    if item["calls"][idx] > 0:
                        vectors_call = format(
                            item["vectors"][idx]/item["calls"][idx], ".2f")
                        vectors_call = format(0, ".2f")
                    # Skip all-zero rows.
                    if int(calls) + int(vectors) + int(suspends):
                            name, calls, vectors, suspends, clocks, vectors_call
            for idx in range(threads_nr):
                text += "Thread {idx} ".format(idx=idx)
                # Thread 0 is the main thread; workers are numbered from 0.
                text += "vpp_main\n" if idx == 0 else \
                    "vpp_wk_{idx}\n".format(idx=idx-1)
                for row in table[idx]:
                    if txt_table is None:
                        txt_table = prettytable.PrettyTable(row)
                        txt_table.add_row(row)
                txt_table.set_style(prettytable.MSWORD_FRIENDLY)
                txt_table.align["Name"] = "l"
                txt_table.align["Calls"] = "r"
                txt_table.align["Vectors"] = "r"
                txt_table.align["Suspends"] = "r"
                txt_table.align["Clocks"] = "r"
                txt_table.align["Vectors/Calls"] = "r"
                text += txt_table.get_string(sortby="Name") + '\n'
            text = (" \n **DUT: {host}/{socket}** \n {text}".
                    format(host=host, socket=socket, text=text))
            text = text.replace('\n', ' |br| ').replace('\r', '').\
            self._data["tests"][self._test_ID]["show-run"] += text
    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        # NOTE(review): the excerpt is missing the 'throughput = {' opener,
        # status initialisation, 'try:' and the handler/status updates —
        # verify against the full source. -1.0 marks "not parsed".
            "NDR": {"LOWER": -1.0, "UPPER": -1.0},
            "PDR": {"LOWER": -1.0, "UPPER": -1.0}

        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
                throughput["NDR"]["LOWER"] = float(groups.group(1))
                throughput["NDR"]["UPPER"] = float(groups.group(2))
                throughput["PDR"]["LOWER"] = float(groups.group(3))
                throughput["PDR"]["UPPER"] = float(groups.group(4))
            except (IndexError, ValueError):

        return throughput, status
    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        # NOTE(review): the excerpt is missing the throughput/status
        # initialisation, 'try:' and the handler/status updates — verify
        # against the full source.
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
                throughput["LOWER"] = float(groups.group(1))
                throughput["UPPER"] = float(groups.group(2))
            except (IndexError, ValueError):

        return throughput, status
    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        # -1.0 marks "not parsed"; hdrh holds the optional HDR histogram.
        latency_default = {"min": -1.0, "avg": -1.0, "max": -1.0, "hdrh": ""}
        # NOTE(review): the excerpt is missing the 'latency = {' opener,
        # the "NDR"/"PDR" keys, status initialisation, 'try:' and the
        # 'rval = {' opener inside process_latency — verify against the
        # full source.
            "direction1": copy.copy(latency_default),
            "direction2": copy.copy(latency_default)
            "direction1": copy.copy(latency_default),
            "direction2": copy.copy(latency_default)

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with corresponding keys, except hdrh float values.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
            """
            in_list = in_str.split('/')

                "min": float(in_list[0]),
                "avg": float(in_list[1]),
                "max": float(in_list[2]),

            # The fourth field (hdrh) is optional.
            if len(in_list) == 4:
                rval["hdrh"] = str(in_list[3])

        if groups is not None:
                latency["NDR"]["direction1"] = process_latency(groups.group(1))
                latency["NDR"]["direction2"] = process_latency(groups.group(2))
                latency["PDR"]["direction1"] = process_latency(groups.group(3))
                latency["PDR"]["direction2"] = process_latency(groups.group(4))
            except (IndexError, ValueError):

        return latency, status
631 def visit_suite(self, suite):
632 """Implements traversing through the suite and its direct children.
634 :param suite: Suite to process.
638 if self.start_suite(suite) is not False:
639 suite.suites.visit(self)
640 suite.tests.visit(self)
641 self.end_suite(suite)
643 def start_suite(self, suite):
644 """Called when suite starts.
646 :param suite: Suite to process.
652 parent_name = suite.parent.name
653 except AttributeError:
656 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
657 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
658 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
660 self._data["suites"][suite.longname.lower().replace('"', "'").
661 replace(" ", "_")] = {
662 "name": suite.name.lower(),
664 "parent": parent_name,
665 "level": len(suite.longname.split("."))
668 suite.keywords.visit(self)
    def end_suite(self, suite):
        """Called when suite ends. Default implementation does nothing.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        # NOTE(review): the excerpt is missing the closing self.end_test(test)
        # call — verify against the full source.
        if self.start_test(test) is not False:
            test.keywords.visit(self)
    def start_test(self, test):
        """Called when test starts. Build the per-test result dictionary
        (name, parent, doc, msg, tags, type, status and type-specific
        results) and store it under self._test_ID.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        # NOTE(review): this excerpt is missing many lines (test_result
        # initialisation, several 'try:'/'else:' openers, branch bodies);
        # code lines below are kept verbatim — verify against the full
        # source.
        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:

        tags = [str(tag) for tag in test.tags]

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split('.')[-1]
            logging.debug("{0}\n{1}\n{2}\n{3}".format(
                self._data["metadata"], longname_orig, longname, name))
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
        # Remove TC number from the TC name (not needed):
        test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result["parent"] = test.parent.name.lower()
        test_result["tags"] = tags
        doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
            replace('\r', '').replace('[', ' |br| [')
        # NOTE(review): string.replace() does not exist in Python 3; when
        # porting, use doc_str.replace(' |br| [', '[', 1).
        test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
        test_result["msg"] = test.message.replace('\n', ' |br| '). \
            replace('\r', '').replace('"', "'")
        test_result["type"] = "FUNC"
        test_result["status"] = test.status

        if "PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
                for tag in test_result["tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
                                           "-{0}-".format(tag_tc.lower()),
                    test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                 "-{0}-".format(tag_tc.lower()),
                    # Without exactly one multi-threading tag the name cannot
                    # be fixed; mark the test failed and keep a debug trail.
                    test_result["status"] = "FAIL"
                    self._data["tests"][self._test_ID] = test_result
                    logging.debug("The test '{0}' has no or more than one "
                                  "multi-threading tags.".format(self._test_ID))
                    logging.debug("Tags: {0}".format(test_result["tags"]))

        # Determine the test type from tags (only for passing tests).
        if test.status == "PASS" and ("NDRPDRDISC" in tags or
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if "NDRDISC" in tags:
                test_result["type"] = "NDR"
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            elif "PDRDISC" in tags:
                test_result["type"] = "PDR"
            elif "NDRPDR" in tags:
                test_result["type"] = "NDRPDR"
                test_result["type"] = "SOAK"
                test_result["type"] = "TCP"
                test_result["type"] = "MRR"
            elif "FRMOBL" in tags or "BMRR" in tags:
                test_result["type"] = "BMRR"
            elif "RECONF" in tags:
                test_result["type"] = "RECONF"
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if test_result["type"] in ("NDR", "PDR"):
                    rate_value = str(re.search(
                        self.REGEX_RATE, test.message).group(1))
                except AttributeError:
                    rate_unit = str(re.search(
                        self.REGEX_RATE, test.message).group(2))
                except AttributeError:

                test_result["throughput"] = dict()
                test_result["throughput"]["value"] = \
                    int(rate_value.split('.')[0])
                test_result["throughput"]["unit"] = rate_unit
                test_result["latency"] = \
                    self._get_latency(test.message, test_result["type"])
                if test_result["type"] == "PDR":
                    test_result["lossTolerance"] = str(re.search(
                        self.REGEX_TOLERANCE, test.message).group(1))

            elif test_result["type"] in ("NDRPDR", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result["latency"], test_result["status"] = \
                    self._get_ndrpdr_latency(test.message)

            elif test_result["type"] in ("SOAK", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_plr_throughput(test.message)

            elif test_result["type"] in ("TCP", ):
                groups = re.search(self.REGEX_TCP, test.message)
                test_result["result"] = int(groups.group(2))

            elif test_result["type"] in ("MRR", "BMRR"):
                test_result["result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result["result"]["receive-rate"] = stats.avg
                    # Fall back to the legacy MRR line: rx / trial duration.
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result["result"]["receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))

            elif test_result["type"] == "RECONF":
                test_result["result"] = None
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result["result"] = {
                        "loss": int(grps_loss.group(1)),
                        "time": float(grps_time.group(1))
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result["status"] = "FAIL"

        self._data["tests"][self._test_ID] = test_result
    def end_test(self, test):
        """Called when test ends. Default implementation does nothing.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
868 def visit_keyword(self, keyword):
869 """Implements traversing through the keyword and its child keywords.
871 :param keyword: Keyword to process.
872 :type keyword: Keyword
875 if self.start_keyword(keyword) is not False:
876 self.end_keyword(keyword)
    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        # NOTE(review): the excerpt is missing the 'try:' opener, the
        # 'else:' for the final branch and the handler body — verify
        # against the full source. keyword.type may be absent, hence the
        # AttributeError handler.
        if keyword.type == "setup":
            self.visit_setup_kw(keyword)
        elif keyword.type == "teardown":
            self._lookup_kw_nr = 0
            self.visit_teardown_kw(keyword)
            self._lookup_kw_nr = 0
            self.visit_test_kw(keyword)
        except AttributeError:
    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
906 def visit_test_kw(self, test_kw):
907 """Implements traversing through the test keyword and its child
910 :param test_kw: Keyword to process.
911 :type test_kw: Keyword
914 for keyword in test_kw.keywords:
915 if self.start_test_kw(keyword) is not False:
916 self.visit_test_kw(keyword)
917 self.end_test_kw(keyword)
    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        # NOTE(review): the excerpt is missing lines between the elif and
        # the visit call (likely an 'else: return') — verify against the
        # full source.
        if test_kw.name.count("Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = "test-show-runtime"
        elif test_kw.name.count("Install Dpdk Test") and not self._version:
            self._msg_type = "dpdk-version"
            test_kw.messages.visit(self)
    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
946 def visit_setup_kw(self, setup_kw):
947 """Implements traversing through the teardown keyword and its child
950 :param setup_kw: Keyword to process.
951 :type setup_kw: Keyword
954 for keyword in setup_kw.keywords:
955 if self.start_setup_kw(keyword) is not False:
956 self.visit_setup_kw(keyword)
957 self.end_setup_kw(keyword)
    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        # NOTE(review): the excerpt is missing one or two lines before the
        # visit call — verify against the full source. Each branch selects
        # the parser via self._msg_type, looked only for once.
        if setup_kw.name.count("Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = "vpp-version"
        elif setup_kw.name.count("Set Global Variable") \
                and not self._timestamp:
            self._msg_type = "timestamp"
        elif setup_kw.name.count("Setup Framework") and not self._testbed:
            self._msg_type = "testbed"
            setup_kw.messages.visit(self)
    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
988 def visit_teardown_kw(self, teardown_kw):
989 """Implements traversing through the teardown keyword and its child
992 :param teardown_kw: Keyword to process.
993 :type teardown_kw: Keyword
996 for keyword in teardown_kw.keywords:
997 if self.start_teardown_kw(keyword) is not False:
998 self.visit_teardown_kw(keyword)
999 self.end_teardown_kw(keyword)
    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts. Default implementation does
        nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        # Reset the per-DUT history counter before parsing either history
        # variant (VAT or PAPI).
        if teardown_kw.name.count("Show Vat History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count("Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-papi-history"
            teardown_kw.messages.visit(self)
    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
1028 def visit_message(self, msg):
1029 """Implements visiting the message.
1031 :param msg: Message to process.
1035 if self.start_message(msg) is not False:
1036 self.end_message(msg)
    def start_message(self, msg):
        """Called when message starts. Get required information from messages:
        dispatch to the parser selected by self._msg_type.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # NOTE(review): the guard 'if self._msg_type:' is not visible in
        # this excerpt; without it a None key would raise — verify against
        # the full source.
        self.parse_msg[self._msg_type](msg)
    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
1063 The data is extracted from output.xml files generated by Jenkins jobs and
1064 stored in pandas' DataFrames.
1070 (as described in ExecutionChecker documentation)
1072 (as described in ExecutionChecker documentation)
1074 (as described in ExecutionChecker documentation)
    def __init__(self, spec):
        """Initialisation.

        :param spec: Specification.
        :type spec: Specification
        """
        # NOTE(review): the excerpt is missing other attribute
        # initialisation here (e.g. self._cfg = spec) — verify against the
        # full source.

        # Data store: pandas' Series keyed job -> build -> section.
        self._input_data = pd.Series()
1092 """Getter - Input data.
1094 :returns: Input data
1095 :rtype: pandas.Series
1097 return self._input_data
1099 def metadata(self, job, build):
1100 """Getter - metadata
1102 :param job: Job which metadata we want.
1103 :param build: Build which metadata we want.
1107 :rtype: pandas.Series
1110 return self.data[job][build]["metadata"]
1112 def suites(self, job, build):
1115 :param job: Job which suites we want.
1116 :param build: Build which suites we want.
1120 :rtype: pandas.Series
1123 return self.data[job][str(build)]["suites"]
1125 def tests(self, job, build):
1128 :param job: Job which tests we want.
1129 :param build: Build which tests we want.
1133 :rtype: pandas.Series
1136 return self.data[job][build]["tests"]
    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of job which build output data will be processed.
        :param build: The build which output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """
        # NOTE(review): the excerpt is missing the metadata construction,
        # the 'try:' opener, the error-path return and the final return —
        # verify against the full source.
        with open(build["file-name"], 'r') as data_file:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(("ERROR", "Error occurred while parsing output.xml: "
        checker = ExecutionChecker(metadata, self._cfg.mapping,
        result.visit(checker)
    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param pid: PID of the process executing this method.
        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :type pid: int
        :type job: str
        :type build: dict
        :type repeat: int
        """
        # NOTE(review): this excerpt is missing many lines (logs/state
        # initialisation, several branch openers, the file-removal guard
        # and parts of the time-period pruning) — code lines below are
        # kept verbatim; verify against the full source.
        logs.append(("INFO", "  Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        success = download_and_unzip_data_file(self._cfg, job, build, pid,
            logs.append(("ERROR", "It is not possible to download the input "
                                  "data file from the job '{job}', build "
                                  "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))
            logs.append(("INFO", "    Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = self._parse_tests(job, build, logs)
                logs.append(("ERROR", "Input data file from the job '{job}', "
                                      "build '{build}' is damaged. Skipped.".
                             format(job=job, build=build["build"])))

            # Best-effort cleanup of the downloaded file.
            remove(build["file-name"])
        except OSError as err:
            logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                         format(build["file-name"], repr(err))))

        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get("time-period", None)
        if timeperiod and data:
            # time-period is given in days.
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get("metadata", None)
                generated = metadata.get("generated", None)
                    generated = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                             "  The build {job}/{build} is outdated, will be "
                             "removed".format(job=job, build=build["build"])))
                        logs.append(("INFO", "  Done."))

        # Replay the collected log messages through the logging module at
        # the recorded severity.
        for level, line in logs:
            elif level == "ERROR":
            elif level == "DEBUG":
            elif level == "CRITICAL":
                logging.critical(line)
            elif level == "WARNING":
                logging.warning(line)

        return {"data": data, "state": state, "job": job, "build": build}
    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """
        # NOTE(review): the excerpt is missing some lines (e.g. the state
        # check between parsing and storing) — verify against the full
        # source.
        logging.info("Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result["build"]["build"]

                data = result["data"]
                # Re-pack each section into a pandas Series keyed by its
                # original (ordered) keys.
                build_data = pd.Series({
                    "metadata": pd.Series(
                        data["metadata"].values(),
                        index=data["metadata"].keys()),
                    "suites": pd.Series(data["suites"].values(),
                                        index=data["suites"].keys()),
                    "tests": pd.Series(data["tests"].values(),
                                       index=data["tests"].keys())})

                if self._input_data.get(job, None) is None:
                    self._input_data[job] = pd.Series()
                # Builds are keyed by str(build_nr) — getters must match.
                self._input_data[job][str(build_nr)] = build_data

                self._cfg.set_input_file_name(
                    job, build_nr, result["build"]["file-name"])

                self._cfg.set_input_state(job, build_nr, result["state"])

                logging.info("Memory allocation: {0:,d}MB".format(
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))

        logging.info("Done.")
1301 def _end_of_tag(tag_filter, start=0, closer="'"):
1302 """Return the index of character in the string which is the end of tag.
1304 :param tag_filter: The string where the end of tag is being searched.
1305 :param start: The index where the searching is stated.
1306 :param closer: The character which is the tag closer.
1307 :type tag_filter: str
1310 :returns: The index of the tag closer.
1315 idx_opener = tag_filter.index(closer, start)
1316 return tag_filter.index(closer, idx_opener + 1)
def _condition(tag_filter):
    """Create a conditional statement from the given tag filter.

    :param tag_filter: Filter based on tags from the element specification.
    :type tag_filter: str
    :returns: Conditional statement which can be evaluated.
    :rtype: str
    """
    # NOTE(review): the excerpt is missing the surrounding loop and other
    # replacement logic; the two lines below rewrite each quoted tag
    # "'TAG'" into "'TAG' in tags" — verify against the full source.
        index = InputData._end_of_tag(tag_filter, index)
        tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
    def filter_data(self, element, params=None, data=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - ...
            - ...
            - test (or suite) n ID:
              - ...
          - ...
        - ...

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data: If not None, this data is used instead of data specified
            in the element.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """
        # NOTE(review): the excerpt is missing several lines ('try:'
        # openers, returns, loop bodies); code lines below are kept
        # verbatim — verify against the full source.

        # "all"/"template" means no tag filtering at all.
        if element["filter"] in ("all", "template"):
            cond = InputData._condition(element["filter"])
            logging.debug("   Filter: {0}".format(cond))
            logging.error("  No filter defined.")

        if params is None:
            params = element.get("parameters", None)
                params.append("type")

        data_to_filter = data if data else element["data"]
            for job, builds in data_to_filter.items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                        data_iter = self.data[job][str(build)][data_set].\
                        if continue_on_error:

                    for test_ID, test_data in data_iter:
                        # NOTE(review): eval() on the filter string — safe
                        # only because the filter comes from the local
                        # specification file, never from untrusted input.
                        if eval(cond, {"tags": test_data.get("tags", "")}):
                            data[job][str(build)][test_ID] = pd.Series()
                                for param, val in test_data.items():
                                    data[job][str(build)][test_ID][param] = val
                                for param in params:
                                        data[job][str(build)][test_ID][param] =\
                                        data[job][str(build)][test_ID][param] =\

        except (KeyError, IndexError, ValueError) as err:
            logging.error("   Missing mandatory parameter in the element "
                          "specification: {0}".format(err))
        except AttributeError:
            logging.error("   The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
    def filter_tests_by_name(self, element, params=None, data_set="tests",
                             continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - ...
            - ...
            - test (or suite) n ID:
              - ...
          - ...
        - ...

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """
        # NOTE(review): the excerpt is missing several lines ('try:'
        # openers, returns, parts of the copy loops); code lines below are
        # kept verbatim — verify against the full source.
        include = element.get("include", None)
            logging.warning("No tests to include, skipping the element.")

        if params is None:
            params = element.get("parameters", None)
                params.append("type")

            for job, builds in element["data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    for test in include:
                            # Tests are selected by case-insensitive regex
                            # match on the test ID.
                            reg_ex = re.compile(str(test).lower())
                            for test_ID in self.data[job][str(build)]\
                                if re.match(reg_ex, str(test_ID).lower()):
                                    test_data = self.data[job][str(build)]\
                                    data[job][str(build)][test_ID] = pd.Series()
                                        for param, val in test_data.items():
                                            data[job][str(build)][test_ID]\
                                        for param in params:
                                                data[job][str(build)][test_ID]\
                                                    [param] = test_data[param]
                                                data[job][str(build)][test_ID]\
                        except KeyError as err:
                            logging.error("{err!r}".format(err=err))
                            if continue_on_error:

        except (KeyError, IndexError, ValueError) as err:
            logging.error("Missing mandatory parameter in the element "
                          "specification: {err!r}".format(err=err))
        except AttributeError as err:
            logging.error("{err!r}".format(err=err))
def merge_data(data):
    """Merge data from more jobs and builds to a simple data structure.

    The output data structure is:

    - test (suite) 1 ID:
      - param 1
      - ...
    - ...
    - test (suite) n ID:
      - ...

    Tests with the same ID coming from later jobs/builds overwrite the
    earlier ones.

    :param data: Data to merge.
    :type data: pandas.Series
    :returns: Merged data.
    :rtype: pandas.Series
    """

    logging.info("    Merging data ...")

    merged_data = pd.Series()
    # FIX: Series.iteritems() was deprecated and removed in pandas 2.0;
    # .items() is the drop-in equivalent on all supported versions.
    for _, builds in data.items():
        for _, item in builds.items():
            for ID, item_data in item.items():
                merged_data[ID] = item_data
    # FIX: return the merged Series so callers actually receive the result
    # (the excerpt ended without a return).
    return merged_data