# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data pre-processing

- extract data from output.xml files generated by Jenkins jobs and store in
  pandas' Series,
- provide access to the data,
- filter the data using tags.
"""
import re
import logging
import resource

from collections import OrderedDict
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from os import remove
from os.path import join
from string import replace

import pandas as pd
import prettytable

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
from input_data_files import download_and_unzip_data_file
42 # Separator used in file names
class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "name": "Suite name",
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            ...
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "FUNC" | "TCP" | "MRR" | "BMRR" | "NDRPDR" | "SOAK"
                        | "NDR" | "PDR",
                "status": "PASS" | "FAIL",
                # Present depending on "type":
                "throughput": {...},        # NDRPDR | SOAK | NDR | PDR
                "latency": {...},           # NDRPDR | NDR | PDR
                "result": {
                    "receive-rate": AvgStdevMetadata  # MRR | BMRR
                },
                "lossTolerance": "lossTolerance",     # PDR only
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run"
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::\s(\d+.\d+)')

    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')

    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')

    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"DPDK Version: (\d*.\d*)")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
302 def __init__(self, metadata, mapping, ignore):
305 :param metadata: Key-value pairs to be included in "metadata" part of
307 :param mapping: Mapping of the old names of test cases to the new
309 :param ignore: List of TCs to be ignored.
315 # Type of message to parse out from the test messages
316 self._msg_type = None
322 self._timestamp = None
324 # Testbed. The testbed is identified by TG node IP address.
327 # Mapping of TCs long names
328 self._mapping = mapping
331 self._ignore = ignore
333 # Number of VAT History messages found:
335 # 1 - VAT History of DUT1
336 # 2 - VAT History of DUT2
337 self._lookup_kw_nr = 0
338 self._conf_history_lookup_nr = 0
340 # Number of Show Running messages found
342 # 1 - Show run message found
343 self._show_run_lookup_nr = 0
345 # Test ID of currently processed test- the lowercase full path to the
349 # The main data structure
351 "metadata": OrderedDict(),
352 "suites": OrderedDict(),
353 "tests": OrderedDict()
356 # Save the provided metadata
357 for key, val in metadata.items():
358 self._data["metadata"][key] = val
360 # Dictionary defining the methods used to parse different types of
363 "timestamp": self._get_timestamp,
364 "vpp-version": self._get_vpp_version,
365 "dpdk-version": self._get_dpdk_version,
366 "teardown-vat-history": self._get_vat_history,
367 "teardown-papi-history": self._get_papi_history,
368 "test-show-runtime": self._get_show_run,
369 "testbed": self._get_testbed
374 """Getter - Data parsed from the XML file.
376 :returns: Data parsed from the XML file.
381 def _get_testbed(self, msg):
382 """Called when extraction of testbed IP is required.
383 The testbed is identified by TG node IP address.
385 :param msg: Message to process.
390 if msg.message.count("Setup of TG node"):
391 reg_tg_ip = re.compile(
392 r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
394 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
395 except (KeyError, ValueError, IndexError, AttributeError):
398 self._data["metadata"]["testbed"] = self._testbed
399 self._msg_type = None
401 def _get_vpp_version(self, msg):
402 """Called when extraction of VPP version is required.
404 :param msg: Message to process.
409 if msg.message.count("return STDOUT Version:") or \
410 msg.message.count("VPP Version:") or \
411 msg.message.count("VPP version:"):
412 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
414 self._data["metadata"]["version"] = self._version
415 self._msg_type = None
417 def _get_dpdk_version(self, msg):
418 """Called when extraction of DPDK version is required.
420 :param msg: Message to process.
425 if msg.message.count("DPDK Version:"):
427 self._version = str(re.search(
428 self.REGEX_VERSION_DPDK, msg.message). group(1))
429 self._data["metadata"]["version"] = self._version
433 self._msg_type = None
435 def _get_timestamp(self, msg):
436 """Called when extraction of timestamp is required.
438 :param msg: Message to process.
443 self._timestamp = msg.timestamp[:14]
444 self._data["metadata"]["generated"] = self._timestamp
445 self._msg_type = None
447 def _get_vat_history(self, msg):
448 """Called when extraction of VAT command history is required.
450 :param msg: Message to process.
454 if msg.message.count("VAT command history:"):
455 self._conf_history_lookup_nr += 1
456 if self._conf_history_lookup_nr == 1:
457 self._data["tests"][self._test_ID]["conf-history"] = str()
459 self._msg_type = None
460 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
461 "VAT command history:", "", msg.message, count=1). \
462 replace("\n\n", "\n").replace('\n', ' |br| ').\
463 replace('\r', '').replace('"', "'")
465 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
466 self._data["tests"][self._test_ID]["conf-history"] += \
467 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
469 def _get_papi_history(self, msg):
470 """Called when extraction of PAPI command history is required.
472 :param msg: Message to process.
476 if msg.message.count("PAPI command history:"):
477 self._conf_history_lookup_nr += 1
478 if self._conf_history_lookup_nr == 1:
479 self._data["tests"][self._test_ID]["conf-history"] = str()
481 self._msg_type = None
482 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
483 "PAPI command history:", "", msg.message, count=1). \
484 replace("\n\n", "\n").replace('\n', ' |br| ').\
485 replace('\r', '').replace('"', "'")
487 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
488 self._data["tests"][self._test_ID]["conf-history"] += \
489 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
491 def _get_show_run(self, msg):
492 """Called when extraction of VPP operational data (output of CLI command
493 Show Runtime) is required.
495 :param msg: Message to process.
499 if msg.message.count("Runtime:"):
500 self._show_run_lookup_nr += 1
501 if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
502 self._data["tests"][self._test_ID]["show-run"] = str()
503 if self._lookup_kw_nr > 1:
504 self._msg_type = None
505 if self._show_run_lookup_nr == 1:
506 message = str(msg.message).replace(' ', '').replace('\n', '').\
507 replace("'", '"').replace('b"', '"').replace('u"', '"')[8:]
508 runtime = loads(message)
510 threads_nr = len(runtime[0]["clocks"])
511 except (IndexError, KeyError):
513 tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks"]
514 table = [[tbl_hdr, ] for _ in range(threads_nr)]
516 for idx in range(threads_nr):
520 item["vectors"][idx],
521 item["suspends"][idx],
525 for idx in range(threads_nr):
526 text += "Thread {idx} ".format(idx=idx)
527 text += "vpp_main\n" if idx == 0 else \
528 "vpp_wk_{idx}\n".format(idx=idx-1)
530 for row in table[idx]:
531 if txt_table is None:
532 txt_table = prettytable.PrettyTable(row)
535 txt_table.add_row(row)
536 txt_table.align["Name"] = "l"
537 txt_table.align["Calls"] = "r"
538 txt_table.align["Vectors"] = "r"
539 txt_table.align["Suspends"] = "r"
540 txt_table.align["Clocks"] = "r"
542 text += txt_table.get_html_string(sortby="Name") + '\n'
544 text = text.replace('\n', '').replace('\r', '')
546 self._data["tests"][self._test_ID]["show-run"] += " |br| "
547 self._data["tests"][self._test_ID]["show-run"] += \
548 "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
552 # TODO: Remove when definitely no NDRPDRDISC tests are used:
553 def _get_latency(self, msg, test_type):
554 """Get the latency data from the test message.
556 :param msg: Message to be parsed.
557 :param test_type: Type of the test - NDR or PDR.
560 :returns: Latencies parsed from the message.
564 if test_type == "NDR":
565 groups = re.search(self.REGEX_LAT_NDR, msg)
566 groups_range = range(1, 7)
567 elif test_type == "PDR":
568 groups = re.search(self.REGEX_LAT_PDR, msg)
569 groups_range = range(1, 3)
574 for idx in groups_range:
576 lat = [int(item) for item in str(groups.group(idx)).split('/')]
577 except (AttributeError, ValueError):
579 latencies.append(lat)
581 keys = ("min", "avg", "max")
589 latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
590 latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
591 if test_type == "NDR":
592 latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
593 latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
594 latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
595 latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
599 def _get_ndrpdr_throughput(self, msg):
600 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
603 :param msg: The test message to be parsed.
605 :returns: Parsed data as a dict and the status (PASS/FAIL).
606 :rtype: tuple(dict, str)
610 "NDR": {"LOWER": -1.0, "UPPER": -1.0},
611 "PDR": {"LOWER": -1.0, "UPPER": -1.0}
614 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
616 if groups is not None:
618 throughput["NDR"]["LOWER"] = float(groups.group(1))
619 throughput["NDR"]["UPPER"] = float(groups.group(2))
620 throughput["PDR"]["LOWER"] = float(groups.group(3))
621 throughput["PDR"]["UPPER"] = float(groups.group(4))
623 except (IndexError, ValueError):
626 return throughput, status
628 def _get_plr_throughput(self, msg):
629 """Get PLRsearch lower bound and PLRsearch upper bound from the test
632 :param msg: The test message to be parsed.
634 :returns: Parsed data as a dict and the status (PASS/FAIL).
635 :rtype: tuple(dict, str)
643 groups = re.search(self.REGEX_PLR_RATE, msg)
645 if groups is not None:
647 throughput["LOWER"] = float(groups.group(1))
648 throughput["UPPER"] = float(groups.group(2))
650 except (IndexError, ValueError):
653 return throughput, status
655 def _get_ndrpdr_latency(self, msg):
656 """Get LATENCY from the test message.
658 :param msg: The test message to be parsed.
660 :returns: Parsed data as a dict and the status (PASS/FAIL).
661 :rtype: tuple(dict, str)
666 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
667 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
670 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
671 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
675 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
677 if groups is not None:
678 keys = ("min", "avg", "max")
680 latency["NDR"]["direction1"] = dict(
681 zip(keys, [float(l) for l in groups.group(1).split('/')]))
682 latency["NDR"]["direction2"] = dict(
683 zip(keys, [float(l) for l in groups.group(2).split('/')]))
684 latency["PDR"]["direction1"] = dict(
685 zip(keys, [float(l) for l in groups.group(3).split('/')]))
686 latency["PDR"]["direction2"] = dict(
687 zip(keys, [float(l) for l in groups.group(4).split('/')]))
689 except (IndexError, ValueError):
692 return latency, status
694 def visit_suite(self, suite):
695 """Implements traversing through the suite and its direct children.
697 :param suite: Suite to process.
701 if self.start_suite(suite) is not False:
702 suite.suites.visit(self)
703 suite.tests.visit(self)
704 self.end_suite(suite)
706 def start_suite(self, suite):
707 """Called when suite starts.
709 :param suite: Suite to process.
715 parent_name = suite.parent.name
716 except AttributeError:
719 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
720 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
721 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
723 self._data["suites"][suite.longname.lower().replace('"', "'").
724 replace(" ", "_")] = {
725 "name": suite.name.lower(),
727 "parent": parent_name,
728 "level": len(suite.longname.split("."))
731 suite.keywords.visit(self)
733 def end_suite(self, suite):
734 """Called when suite ends.
736 :param suite: Suite to process.
742 def visit_test(self, test):
743 """Implements traversing through the test.
745 :param test: Test to process.
749 if self.start_test(test) is not False:
750 test.keywords.visit(self)
753 def start_test(self, test):
754 """Called when test starts.
756 :param test: Test to process.
761 longname_orig = test.longname.lower()
763 # Check the ignore list
764 if longname_orig in self._ignore:
767 tags = [str(tag) for tag in test.tags]
770 # Change the TC long name and name if defined in the mapping table
771 longname = self._mapping.get(longname_orig, None)
772 if longname is not None:
773 name = longname.split('.')[-1]
774 logging.debug("{0}\n{1}\n{2}\n{3}".format(
775 self._data["metadata"], longname_orig, longname, name))
777 longname = longname_orig
778 name = test.name.lower()
780 # Remove TC number from the TC long name (backward compatibility):
781 self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
782 # Remove TC number from the TC name (not needed):
783 test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
785 test_result["parent"] = test.parent.name.lower()
786 test_result["tags"] = tags
787 doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
788 replace('\r', '').replace('[', ' |br| [')
789 test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
790 test_result["msg"] = test.message.replace('\n', ' |br| '). \
791 replace('\r', '').replace('"', "'")
792 test_result["type"] = "FUNC"
793 test_result["status"] = test.status
795 if "PERFTEST" in tags:
796 # Replace info about cores (e.g. -1c-) with the info about threads
797 # and cores (e.g. -1t1c-) in the long test case names and in the
798 # test case names if necessary.
799 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
802 for tag in test_result["tags"]:
803 groups = re.search(self.REGEX_TC_TAG, tag)
809 self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
810 "-{0}-".format(tag_tc.lower()),
813 test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
814 "-{0}-".format(tag_tc.lower()),
818 test_result["status"] = "FAIL"
819 self._data["tests"][self._test_ID] = test_result
820 logging.debug("The test '{0}' has no or more than one "
821 "multi-threading tags.".format(self._test_ID))
822 logging.debug("Tags: {0}".format(test_result["tags"]))
825 if test.status == "PASS" and ("NDRPDRDISC" in tags or
831 # TODO: Remove when definitely no NDRPDRDISC tests are used:
832 if "NDRDISC" in tags:
833 test_result["type"] = "NDR"
834 # TODO: Remove when definitely no NDRPDRDISC tests are used:
835 elif "PDRDISC" in tags:
836 test_result["type"] = "PDR"
837 elif "NDRPDR" in tags:
838 test_result["type"] = "NDRPDR"
840 test_result["type"] = "SOAK"
842 test_result["type"] = "TCP"
844 test_result["type"] = "MRR"
845 elif "FRMOBL" in tags or "BMRR" in tags:
846 test_result["type"] = "BMRR"
848 test_result["status"] = "FAIL"
849 self._data["tests"][self._test_ID] = test_result
852 # TODO: Remove when definitely no NDRPDRDISC tests are used:
853 if test_result["type"] in ("NDR", "PDR"):
855 rate_value = str(re.search(
856 self.REGEX_RATE, test.message).group(1))
857 except AttributeError:
860 rate_unit = str(re.search(
861 self.REGEX_RATE, test.message).group(2))
862 except AttributeError:
865 test_result["throughput"] = dict()
866 test_result["throughput"]["value"] = \
867 int(rate_value.split('.')[0])
868 test_result["throughput"]["unit"] = rate_unit
869 test_result["latency"] = \
870 self._get_latency(test.message, test_result["type"])
871 if test_result["type"] == "PDR":
872 test_result["lossTolerance"] = str(re.search(
873 self.REGEX_TOLERANCE, test.message).group(1))
875 elif test_result["type"] in ("NDRPDR", ):
876 test_result["throughput"], test_result["status"] = \
877 self._get_ndrpdr_throughput(test.message)
878 test_result["latency"], test_result["status"] = \
879 self._get_ndrpdr_latency(test.message)
881 elif test_result["type"] in ("SOAK", ):
882 test_result["throughput"], test_result["status"] = \
883 self._get_plr_throughput(test.message)
885 elif test_result["type"] in ("TCP", ):
886 groups = re.search(self.REGEX_TCP, test.message)
887 test_result["result"] = int(groups.group(2))
889 elif test_result["type"] in ("MRR", "BMRR"):
890 test_result["result"] = dict()
891 groups = re.search(self.REGEX_BMRR, test.message)
892 if groups is not None:
893 items_str = groups.group(1)
894 items_float = [float(item.strip()) for item
895 in items_str.split(",")]
896 metadata = AvgStdevMetadataFactory.from_data(items_float)
897 # Next two lines have been introduced in CSIT-1179,
898 # to be removed in CSIT-1180.
901 test_result["result"]["receive-rate"] = metadata
903 groups = re.search(self.REGEX_MRR, test.message)
904 test_result["result"]["receive-rate"] = \
905 AvgStdevMetadataFactory.from_data([
906 float(groups.group(3)) / float(groups.group(1)), ])
908 self._data["tests"][self._test_ID] = test_result
910 def end_test(self, test):
911 """Called when test ends.
913 :param test: Test to process.
919 def visit_keyword(self, keyword):
920 """Implements traversing through the keyword and its child keywords.
922 :param keyword: Keyword to process.
923 :type keyword: Keyword
926 if self.start_keyword(keyword) is not False:
927 self.end_keyword(keyword)
929 def start_keyword(self, keyword):
930 """Called when keyword starts. Default implementation does nothing.
932 :param keyword: Keyword to process.
933 :type keyword: Keyword
937 if keyword.type == "setup":
938 self.visit_setup_kw(keyword)
939 elif keyword.type == "teardown":
940 self._lookup_kw_nr = 0
941 self.visit_teardown_kw(keyword)
943 self._lookup_kw_nr = 0
944 self.visit_test_kw(keyword)
945 except AttributeError:
948 def end_keyword(self, keyword):
949 """Called when keyword ends. Default implementation does nothing.
951 :param keyword: Keyword to process.
952 :type keyword: Keyword
957 def visit_test_kw(self, test_kw):
958 """Implements traversing through the test keyword and its child
961 :param test_kw: Keyword to process.
962 :type test_kw: Keyword
965 for keyword in test_kw.keywords:
966 if self.start_test_kw(keyword) is not False:
967 self.visit_test_kw(keyword)
968 self.end_test_kw(keyword)
970 def start_test_kw(self, test_kw):
971 """Called when test keyword starts. Default implementation does
974 :param test_kw: Keyword to process.
975 :type test_kw: Keyword
978 if test_kw.name.count("Show Runtime Counters On All Duts"):
979 self._lookup_kw_nr += 1
980 self._show_run_lookup_nr = 0
981 self._msg_type = "test-show-runtime"
982 elif test_kw.name.count("Install Dpdk Test") and not self._version:
983 self._msg_type = "dpdk-version"
986 test_kw.messages.visit(self)
988 def end_test_kw(self, test_kw):
989 """Called when keyword ends. Default implementation does nothing.
991 :param test_kw: Keyword to process.
992 :type test_kw: Keyword
997 def visit_setup_kw(self, setup_kw):
998 """Implements traversing through the teardown keyword and its child
1001 :param setup_kw: Keyword to process.
1002 :type setup_kw: Keyword
1005 for keyword in setup_kw.keywords:
1006 if self.start_setup_kw(keyword) is not False:
1007 self.visit_setup_kw(keyword)
1008 self.end_setup_kw(keyword)
1010 def start_setup_kw(self, setup_kw):
1011 """Called when teardown keyword starts. Default implementation does
1014 :param setup_kw: Keyword to process.
1015 :type setup_kw: Keyword
1018 if setup_kw.name.count("Show Vpp Version On All Duts") \
1019 and not self._version:
1020 self._msg_type = "vpp-version"
1021 elif setup_kw.name.count("Set Global Variable") \
1022 and not self._timestamp:
1023 self._msg_type = "timestamp"
1024 elif setup_kw.name.count("Setup Framework") and not self._testbed:
1025 self._msg_type = "testbed"
1028 setup_kw.messages.visit(self)
1030 def end_setup_kw(self, setup_kw):
1031 """Called when keyword ends. Default implementation does nothing.
1033 :param setup_kw: Keyword to process.
1034 :type setup_kw: Keyword
1039 def visit_teardown_kw(self, teardown_kw):
1040 """Implements traversing through the teardown keyword and its child
1043 :param teardown_kw: Keyword to process.
1044 :type teardown_kw: Keyword
1047 for keyword in teardown_kw.keywords:
1048 if self.start_teardown_kw(keyword) is not False:
1049 self.visit_teardown_kw(keyword)
1050 self.end_teardown_kw(keyword)
1052 def start_teardown_kw(self, teardown_kw):
1053 """Called when teardown keyword starts. Default implementation does
1056 :param teardown_kw: Keyword to process.
1057 :type teardown_kw: Keyword
1061 if teardown_kw.name.count("Show Vat History On All Duts"):
1062 self._conf_history_lookup_nr = 0
1063 self._msg_type = "teardown-vat-history"
1064 teardown_kw.messages.visit(self)
1065 elif teardown_kw.name.count("Show Papi History On All Duts"):
1066 self._conf_history_lookup_nr = 0
1067 self._msg_type = "teardown-papi-history"
1068 teardown_kw.messages.visit(self)
1070 def end_teardown_kw(self, teardown_kw):
1071 """Called when keyword ends. Default implementation does nothing.
1073 :param teardown_kw: Keyword to process.
1074 :type teardown_kw: Keyword
1079 def visit_message(self, msg):
1080 """Implements visiting the message.
1082 :param msg: Message to process.
1086 if self.start_message(msg) is not False:
1087 self.end_message(msg)
1089 def start_message(self, msg):
1090 """Called when message starts. Get required information from messages:
1093 :param msg: Message to process.
1099 self.parse_msg[self._msg_type](msg)
1101 def end_message(self, msg):
1102 """Called when message ends. Default implementation does nothing.
1104 :param msg: Message to process.
class InputData(object):
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """
1128 def __init__(self, spec):
1131 :param spec: Specification.
1132 :type spec: Specification
1139 self._input_data = pd.Series()
1143 """Getter - Input data.
1145 :returns: Input data
1146 :rtype: pandas.Series
1148 return self._input_data
1150 def metadata(self, job, build):
1151 """Getter - metadata
1153 :param job: Job which metadata we want.
1154 :param build: Build which metadata we want.
1158 :rtype: pandas.Series
1161 return self.data[job][build]["metadata"]
1163 def suites(self, job, build):
1166 :param job: Job which suites we want.
1167 :param build: Build which suites we want.
1171 :rtype: pandas.Series
1174 return self.data[job][str(build)]["suites"]
1176 def tests(self, job, build):
1179 :param job: Job which tests we want.
1180 :param build: Build which tests we want.
1184 :rtype: pandas.Series
1187 return self.data[job][build]["tests"]
1189 def _parse_tests(self, job, build, log):
1190 """Process data from robot output.xml file and return JSON structured
1193 :param job: The name of job which build output data will be processed.
1194 :param build: The build which output data will be processed.
1195 :param log: List of log messages.
1198 :type log: list of tuples (severity, msg)
1199 :returns: JSON data structure.
1208 with open(build["file-name"], 'r') as data_file:
1210 result = ExecutionResult(data_file)
1211 except errors.DataError as err:
1212 log.append(("ERROR", "Error occurred while parsing output.xml: "
1215 checker = ExecutionChecker(metadata, self._cfg.mapping,
1217 result.visit(checker)
1221 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1222 """Download and parse the input data file.
1224 :param pid: PID of the process executing this method.
1225 :param job: Name of the Jenkins job which generated the processed input
1227 :param build: Information about the Jenkins build which generated the
1228 processed input file.
1229 :param repeat: Repeat the download specified number of times if not
1239 logs.append(("INFO", " Processing the job/build: {0}: {1}".
1240 format(job, build["build"])))
1247 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1253 logs.append(("ERROR", "It is not possible to download the input "
1254 "data file from the job '{job}', build "
1255 "'{build}', or it is damaged. Skipped.".
1256 format(job=job, build=build["build"])))
1258 logs.append(("INFO", " Processing data from the build '{0}' ...".
1259 format(build["build"])))
1260 data = self._parse_tests(job, build, logs)
1262 logs.append(("ERROR", "Input data file from the job '{job}', "
1263 "build '{build}' is damaged. Skipped.".
1264 format(job=job, build=build["build"])))
1269 remove(build["file-name"])
1270 except OSError as err:
1271 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
1272 format(build["file-name"], repr(err))))
1274 # If the time-period is defined in the specification file, remove all
1275 # files which are outside the time period.
1276 timeperiod = self._cfg.input.get("time-period", None)
1277 if timeperiod and data:
1279 timeperiod = timedelta(int(timeperiod))
1280 metadata = data.get("metadata", None)
1282 generated = metadata.get("generated", None)
1284 generated = dt.strptime(generated, "%Y%m%d %H:%M")
1285 if (now - generated) > timeperiod:
1286 # Remove the data and the file:
1291 " The build {job}/{build} is outdated, will be "
1292 "removed".format(job=job, build=build["build"])))
1293 file_name = self._cfg.input["file-name"]
1295 self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
1296 "{job}{sep}{build}{sep}{name}".format(
1299 build=build["build"],
1303 logs.append(("INFO",
1304 " The file {name} has been removed".
1305 format(name=full_name)))
1306 except OSError as err:
1307 logs.append(("ERROR",
1308 "Cannot remove the file '{0}': {1}".
1309 format(full_name, repr(err))))
1310 logs.append(("INFO", " Done."))
1312 for level, line in logs:
1315 elif level == "ERROR":
1317 elif level == "DEBUG":
1319 elif level == "CRITICAL":
1320 logging.critical(line)
1321 elif level == "WARNING":
1322 logging.warning(line)
1324 return {"data": data, "state": state, "job": job, "build": build}
1326 def download_and_parse_data(self, repeat=1):
1327 """Download the input data files, parse input data from input files and
1328 store in pandas' Series.
1330 :param repeat: Repeat the download specified number of times if not
1335 logging.info("Downloading and parsing input files ...")
1337 for job, builds in self._cfg.builds.items():
1338 for build in builds:
1340 result = self._download_and_parse_build(job, build, repeat)
1341 build_nr = result["build"]["build"]
1344 data = result["data"]
1345 build_data = pd.Series({
1346 "metadata": pd.Series(
1347 data["metadata"].values(),
1348 index=data["metadata"].keys()),
1349 "suites": pd.Series(data["suites"].values(),
1350 index=data["suites"].keys()),
1351 "tests": pd.Series(data["tests"].values(),
1352 index=data["tests"].keys())})
1354 if self._input_data.get(job, None) is None:
1355 self._input_data[job] = pd.Series()
1356 self._input_data[job][str(build_nr)] = build_data
1358 self._cfg.set_input_file_name(
1359 job, build_nr, result["build"]["file-name"])
1361 self._cfg.set_input_state(job, build_nr, result["state"])
1363 logging.info("Memory allocation: {0:,d}MB".format(
1364 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
1366 logging.info("Done.")
1369 def _end_of_tag(tag_filter, start=0, closer="'"):
1370 """Return the index of character in the string which is the end of tag.
1372 :param tag_filter: The string where the end of tag is being searched.
1373 :param start: The index where the searching is stated.
1374 :param closer: The character which is the tag closer.
1375 :type tag_filter: str
1378 :returns: The index of the tag closer.
1383 idx_opener = tag_filter.index(closer, start)
1384 return tag_filter.index(closer, idx_opener + 1)
1389 def _condition(tag_filter):
1390 """Create a conditional statement from the given tag filter.
1392 :param tag_filter: Filter based on tags from the element specification.
1393 :type tag_filter: str
1394 :returns: Conditional statement which can be evaluated.
1400 index = InputData._end_of_tag(tag_filter, index)
1404 tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
1406 def filter_data(self, element, params=None, data_set="tests",
1407 continue_on_error=False):
1408 """Filter required data from the given jobs and builds.
1410 The output data structure is:
1414 - test (or suite) 1 ID:
1420 - test (or suite) n ID:
1427 :param element: Element which will use the filtered data.
1428 :param params: Parameters which will be included in the output. If None,
1429 all parameters are included.
1430 :param data_set: The set of data to be filtered: tests, suites,
1432 :param continue_on_error: Continue if there is error while reading the
1433 data. The Item will be empty then
1434 :type element: pandas.Series
1437 :type continue_on_error: bool
1438 :returns: Filtered data.
1439 :rtype pandas.Series
1443 if element["filter"] in ("all", "template"):
1446 cond = InputData._condition(element["filter"])
1447 logging.debug(" Filter: {0}".format(cond))
1449 logging.error(" No filter defined.")
1453 params = element.get("parameters", None)
1455 params.append("type")
1459 for job, builds in element["data"].items():
1460 data[job] = pd.Series()
1461 for build in builds:
1462 data[job][str(build)] = pd.Series()
1464 data_iter = self.data[job][str(build)][data_set].\
1467 if continue_on_error:
1471 for test_ID, test_data in data_iter:
1472 if eval(cond, {"tags": test_data.get("tags", "")}):
1473 data[job][str(build)][test_ID] = pd.Series()
1475 for param, val in test_data.items():
1476 data[job][str(build)][test_ID][param] = val
1478 for param in params:
1480 data[job][str(build)][test_ID][param] =\
1483 data[job][str(build)][test_ID][param] =\
1487 except (KeyError, IndexError, ValueError) as err:
1488 logging.error(" Missing mandatory parameter in the element "
1489 "specification: {0}".format(err))
1491 except AttributeError:
1494 logging.error(" The filter '{0}' is not correct. Check if all "
1495 "tags are enclosed by apostrophes.".format(cond))
1499 def merge_data(data):
1500 """Merge data from more jobs and builds to a simple data structure.
1502 The output data structure is:
1504 - test (suite) 1 ID:
1510 - test (suite) n ID:
1513 :param data: Data to merge.
1514 :type data: pandas.Series
1515 :returns: Merged data.
1516 :rtype: pandas.Series
1519 logging.info(" Merging data ...")
1521 merged_data = pd.Series()
1522 for _, builds in data.iteritems():
1523 for _, item in builds.iteritems():
1524 for ID, item_data in item.iteritems():
1525 merged_data[ID] = item_data