1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from robot.api import ExecutionResult, ResultVisitor
28 from robot import errors
29 from collections import OrderedDict
30 from string import replace
32 from os.path import join
33 from datetime import datetime as dt
34 from datetime import timedelta
35 from json import loads
36 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
38 from input_data_files import download_and_unzip_data_file
41 # Separator used in file names
class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure
    with three top-level sections:

    - "metadata": information about the run -- "generated" timestamp,
      SUT "version", Jenkins "job" name and "build" information;
    - "suites": one entry per suite long name, each with its "doc",
      "parent" and "level" (depth of the suite in the suite hierarchy);
    - "tests": one entry per test ID, each with "parent", "doc", "msg",
      "tags", "status" ("PASS" | "FAIL") and, depending on the test "type"
      ("NDR" | "PDR" | "NDRPDR" | "SOAK" | "TCP" | "MRR" | "BMRR" | "FUNC"),
      type-specific results such as "throughput" (with "unit":
      "pps" | "bps" | "percentage"), "latency" (per direction, and for NDR
      also the "50" and "10" rates), "lossTolerance" (PDR only),
      "result"/"receive-rate" (AvgStdevMetadata, MRR/BMRR),
      "conf-history" (DUT1 and DUT2 VAT history) and "show-run".

    .. note:: ID is the lowercase full path to the test.
    """

    # Pre-compiled regular expressions used to parse out values from the
    # test messages; compiled once at class-definition time.

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    # PLRsearch (soak test) lower / upper bound rates.
    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::\s(\d+.\d+)')

    # NDR / PDR lower and upper throughput bounds.
    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')

    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')

    # Two "min/avg/max" latency strings per line, one line for NDR and one
    # for PDR.
    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    # NOTE(review): the continuation line(s) of this pattern are not visible
    # in this view of the file.
    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
                                    r"(RTE Version: 'DPDK )(.*)(')")

    # Total rps/cps/throughput reported by TCP tests.
    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    # Tags / names carrying thread-and-core information, e.g. "2t1c".
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    # Leading test case number, e.g. "tc01-".
    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.

        # Mapping of TCs long names
        self._mapping = mapping

        self._ignore = ignore

        # Number of VAT History messages found:
        # 1 - VAT History of DUT1
        # 2 - VAT History of DUT2
        self._lookup_kw_nr = 0
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of currently processed test- the lowercase full path to the

        # The main data structure
        # NOTE(review): the "self._data = {" opener is not visible in this
        # view of the file.
            "metadata": OrderedDict(),
            "suites": OrderedDict(),
            "tests": OrderedDict()

        # Save the provided metadata
        for key, val in metadata.items():
            self._data["metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages (dispatched on self._msg_type in start_message()).
            "timestamp": self._get_timestamp,
            "vpp-version": self._get_vpp_version,
            "dpdk-version": self._get_dpdk_version,
            "teardown-vat-history": self._get_vat_history,
            "teardown-papi-history": self._get_papi_history,
            "test-show-runtime": self._get_show_run,
            "testbed": self._get_testbed
374 """Getter - Data parsed from the XML file.
376 :returns: Data parsed from the XML file.
    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("Setup of TG node"):
            reg_tg_ip = re.compile(
                r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
            self._data["metadata"]["testbed"] = self._testbed
            self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT Version:") or \
                msg.message.count("VPP Version:") or \
                msg.message.count("VPP version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
            self._data["metadata"]["version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT testpmd"):
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message). group(4))
                self._data["metadata"]["version"] = self._version
            self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        # Keep only the leading "YYYYMMDD HH:MM" part of the timestamp.
        self._timestamp = msg.timestamp[:14]
        self._data["metadata"]["generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            self._msg_type = None
            # Strip the "<ip> VAT command history:" header and re-format the
            # rest for the |br|-separated report output.
            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                          "VAT command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")
            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        Mirrors _get_vat_history, only the message header differs.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            self._msg_type = None
            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                          "PAPI command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")
            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("Thread 0 vpp_main"):
            self._show_run_lookup_nr += 1
            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
                self._data["tests"][self._test_ID]["show-run"] = str()
            if self._lookup_kw_nr > 1:
                self._msg_type = None
            if self._show_run_lookup_nr == 1:
                text = msg.message.replace("vat# ", "").\
                    replace("return STDOUT ", "").replace("\n\n", "\n").\
                    replace('\n', ' |br| ').\
                    replace('\r', '').replace('"', "'")
                self._data["tests"][self._test_ID]["show-run"] += " |br| "
                self._data["tests"][self._test_ID]["show-run"] += \
                    "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    def _get_latency(self, msg, test_type):
        """Get the latency data from the test message.

        :param msg: Message to be parsed.
        :param test_type: Type of the test - NDR or PDR.
        :type msg: str
        :type test_type: str
        :returns: Latencies parsed from the message.
        :rtype: dict
        """

        if test_type == "NDR":
            groups = re.search(self.REGEX_LAT_NDR, msg)
            groups_range = range(1, 7)
        elif test_type == "PDR":
            groups = re.search(self.REGEX_LAT_PDR, msg)
            groups_range = range(1, 3)

        # Each regex group is a "min/avg/max" triple of integers.
        for idx in groups_range:
                lat = [int(item) for item in str(groups.group(idx)).split('/')]
            except (AttributeError, ValueError):
            latencies.append(lat)

        keys = ("min", "avg", "max")
        latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
        latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
        if test_type == "NDR":
            latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
            latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
            latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
            latency["direction2"]["10"] = dict(zip(keys, latencies[5]))

    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        # -1.0 marks "not found" values.
            "NDR": {"LOWER": -1.0, "UPPER": -1.0},
            "PDR": {"LOWER": -1.0, "UPPER": -1.0}

        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
                throughput["NDR"]["LOWER"] = float(groups.group(1))
                throughput["NDR"]["UPPER"] = float(groups.group(2))
                throughput["PDR"]["LOWER"] = float(groups.group(3))
                throughput["PDR"]["UPPER"] = float(groups.group(4))
            except (IndexError, ValueError):

        return throughput, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
                throughput["LOWER"] = float(groups.group(1))
                throughput["UPPER"] = float(groups.group(2))
            except (IndexError, ValueError):

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        # -1.0 marks "not found" values.
            "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
            "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
            "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        if groups is not None:
            keys = ("min", "avg", "max")
                latency["NDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(1).split('/')]))
                latency["NDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(2).split('/')]))
                latency["PDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(3).split('/')]))
                latency["PDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(4).split('/')]))
            except (IndexError, ValueError):

        return latency, status
    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        # Depth-first: process this suite, then child suites, then its tests.
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)
671 def start_suite(self, suite):
672 """Called when suite starts.
674 :param suite: Suite to process.
680 parent_name = suite.parent.name
681 except AttributeError:
684 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
685 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
686 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
688 self._data["suites"][suite.longname.lower().replace('"', "'").
689 replace(" ", "_")] = {
690 "name": suite.name.lower(),
692 "parent": parent_name,
693 "level": len(suite.longname.split("."))
696 suite.keywords.visit(self)
    def end_suite(self, suite):
        """Called when suite ends. Default implementation does nothing.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        if self.start_test(test) is not False:
            test.keywords.visit(self)
    def start_test(self, test):
        """Called when test starts: build the result entry for this test
        (name, parent, doc, msg, tags, type, status, plus type-specific
        results parsed from the test message) and store it under the test ID.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:

        tags = [str(tag) for tag in test.tags]

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split('.')[-1]
            logging.debug("{0}\n{1}\n{2}\n{3}".format(
                self._data["metadata"], longname_orig, longname, name))
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
        # Remove TC number from the TC name (not needed):
        test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result["parent"] = test.parent.name.lower()
        test_result["tags"] = tags
        doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
            replace('\r', '').replace('[', ' |br| [')
        # NOTE(review): replace() below is the Python 2-only string.replace
        # (from string import replace); doc_str.replace(' |br| [', '[', 1)
        # would be the Python 2/3-compatible form.
        test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
        test_result["msg"] = test.message.replace('\n', ' |br| '). \
            replace('\r', '').replace('"', "'")
        test_result["type"] = "FUNC"
        test_result["status"] = test.status

        if "PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
            for tag in test_result["tags"]:
                groups = re.search(self.REGEX_TC_TAG, tag)
                self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
                                       "-{0}-".format(tag_tc.lower()),
                test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                             "-{0}-".format(tag_tc.lower()),
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result
                logging.debug("The test '{0}' has no or more than one "
                              "multi-threading tags.".format(self._test_ID))
                logging.debug("Tags: {0}".format(test_result["tags"]))

        # Determine the test type from the tags; only passed tests of known
        # types get their message parsed for results.
        if test.status == "PASS" and ("NDRPDRDISC" in tags or
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if "NDRDISC" in tags:
                test_result["type"] = "NDR"
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            elif "PDRDISC" in tags:
                test_result["type"] = "PDR"
            elif "NDRPDR" in tags:
                test_result["type"] = "NDRPDR"
                test_result["type"] = "SOAK"
                test_result["type"] = "TCP"
                test_result["type"] = "MRR"
            elif "FRMOBL" in tags or "BMRR" in tags:
                test_result["type"] = "BMRR"
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if test_result["type"] in ("NDR", "PDR"):
                    rate_value = str(re.search(
                        self.REGEX_RATE, test.message).group(1))
                except AttributeError:
                    rate_unit = str(re.search(
                        self.REGEX_RATE, test.message).group(2))
                except AttributeError:
                test_result["throughput"] = dict()
                test_result["throughput"]["value"] = \
                    int(rate_value.split('.')[0])
                test_result["throughput"]["unit"] = rate_unit
                test_result["latency"] = \
                    self._get_latency(test.message, test_result["type"])
                if test_result["type"] == "PDR":
                    test_result["lossTolerance"] = str(re.search(
                        self.REGEX_TOLERANCE, test.message).group(1))

            elif test_result["type"] in ("NDRPDR", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result["latency"], test_result["status"] = \
                    self._get_ndrpdr_latency(test.message)

            elif test_result["type"] in ("SOAK", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_plr_throughput(test.message)

            elif test_result["type"] in ("TCP", ):
                groups = re.search(self.REGEX_TCP, test.message)
                test_result["result"] = int(groups.group(2))

            elif test_result["type"] in ("MRR", "BMRR"):
                test_result["result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    metadata = AvgStdevMetadataFactory.from_data(items_float)
                    # Next two lines have been introduced in CSIT-1179,
                    # to be removed in CSIT-1180.
                    test_result["result"]["receive-rate"] = metadata
                    # Fallback: older MRR message format (tx/rx counters).
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result["result"]["receive-rate"] = \
                        AvgStdevMetadataFactory.from_data([
                            float(groups.group(3)) / float(groups.group(1)), ])

        self._data["tests"][self._test_ID] = test_result
    def end_test(self, test):
        """Called when test ends. Default implementation does nothing.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts: dispatch to the setup / teardown / test
        keyword visitor based on the keyword type.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

        if keyword.type == "setup":
            self.visit_setup_kw(keyword)
        elif keyword.type == "teardown":
            self._lookup_kw_nr = 0
            self.visit_teardown_kw(keyword)
            self._lookup_kw_nr = 0
            self.visit_test_kw(keyword)
        except AttributeError:

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts: arm the message parser for the
        keywords of interest.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

        if test_kw.name.count("Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = "test-show-runtime"
        elif test_kw.name.count("Start The L2fwd Test") and not self._version:
            self._msg_type = "dpdk-version"
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts: arm the version / timestamp /
        testbed extractors (each only until its value is known).

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

        if setup_kw.name.count("Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = "vpp-version"
        elif setup_kw.name.count("Set Global Variable") \
                and not self._timestamp:
            self._msg_type = "timestamp"
        elif setup_kw.name.count("Setup Framework") and not self._testbed:
            self._msg_type = "testbed"
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts: collect the VAT / PAPI command
        history from the teardown messages.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count("Show Vat History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count("Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Get required information from messages
        by dispatching on the currently armed message type.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
class InputData(object):
    """Input data.

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames. The data structure is:
    - job name
      - build number
        - metadata (as described in ExecutionChecker documentation)
        - suites (as described in ExecutionChecker documentation)
        - tests (as described in ExecutionChecker documentation)
    """

    def __init__(self, spec):
        """Initialisation.

        :param spec: Specification.
        :type spec: Specification
        """

        # NOTE(review): self._cfg (used throughout this class) is assigned on
        # a line not visible in this view of the file.

        # Data store: job name -> build number (str) -> parsed data.
        self._input_data = pd.Series()
1108 """Getter - Input data.
1110 :returns: Input data
1111 :rtype: pandas.Series
1113 return self._input_data
1115 def metadata(self, job, build):
1116 """Getter - metadata
1118 :param job: Job which metadata we want.
1119 :param build: Build which metadata we want.
1123 :rtype: pandas.Series
1126 return self.data[job][build]["metadata"]
1128 def suites(self, job, build):
1131 :param job: Job which suites we want.
1132 :param build: Build which suites we want.
1136 :rtype: pandas.Series
1139 return self.data[job][str(build)]["suites"]
1141 def tests(self, job, build):
1144 :param job: Job which tests we want.
1145 :param build: Build which tests we want.
1149 :rtype: pandas.Series
1152 return self.data[job][build]["tests"]
    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of job which build output data will be processed.
        :param build: The build which output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """

        with open(build["file-name"], 'r') as data_file:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(("ERROR", "Error occurred while parsing output.xml: "

        checker = ExecutionChecker(metadata, self._cfg.mapping,
        result.visit(checker)
    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :param pid: PID of the process executing this method.
        :type job: str
        :type build: dict
        :type repeat: int
        :type pid: int
        """

        # Log messages are collected locally and emitted at the end so output
        # from parallel workers is not interleaved.
        logs.append(("INFO", " Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        success = download_and_unzip_data_file(self._cfg, job, build, pid,
            logs.append(("ERROR", "It is not possible to download the input "
                         "data file from the job '{job}', build "
                         "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))
            logs.append(("INFO", " Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = self._parse_tests(job, build, logs)
                logs.append(("ERROR", "Input data file from the job '{job}', "
                             "build '{build}' is damaged. Skipped.".
                             format(job=job, build=build["build"])))

            remove(build["file-name"])
        except OSError as err:
            logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                         format(build["file-name"], repr(err))))

        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get("time-period", None)
        if timeperiod and data:
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get("metadata", None)
                generated = metadata.get("generated", None)
                    generated = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        logs.append(("INFO",
                            " The build {job}/{build} is outdated, will be "
                            "removed".format(job=job, build=build["build"])))
                        file_name = self._cfg.input["file-name"]
                            self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
                            "{job}{sep}{build}{sep}{name}".format(
                                build=build["build"],
                        logs.append(("INFO",
                                     " The file {name} has been removed".
                                     format(name=full_name)))
                        except OSError as err:
                            logs.append(("ERROR",
                                         "Cannot remove the file '{0}': {1}".
                                         format(full_name, repr(err))))
        logs.append(("INFO", " Done."))

        # Flush the collected log messages at their severity levels.
        for level, line in logs:
            elif level == "ERROR":
            elif level == "DEBUG":
            elif level == "CRITICAL":
                logging.critical(line)
            elif level == "WARNING":
                logging.warning(line)

        return {"data": data, "state": state, "job": job, "build": build}
    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """

        logging.info("Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result["build"]["build"]

                data = result["data"]
                build_data = pd.Series({
                    "metadata": pd.Series(
                        data["metadata"].values(),
                        index=data["metadata"].keys()),
                    "suites": pd.Series(data["suites"].values(),
                                        index=data["suites"].keys()),
                    "tests": pd.Series(data["tests"].values(),
                                       index=data["tests"].keys())})

                if self._input_data.get(job, None) is None:
                    self._input_data[job] = pd.Series()
                # Builds are keyed by their string build number.
                self._input_data[job][str(build_nr)] = build_data

                self._cfg.set_input_file_name(
                    job, build_nr, result["build"]["file-name"])

                self._cfg.set_input_state(job, build_nr, result["state"])

        logging.info("Memory allocation: {0:,d}MB".format(
            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))

        logging.info("Done.")
    def _end_of_tag(tag_filter, start=0, closer="'"):
        """Return the index of character in the string which is the end of tag.

        :param tag_filter: The string where the end of tag is being searched.
        :param start: The index where the searching is stated.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer.
        :rtype: int
        """

        # Find the opening closer-character, then the matching closing one.
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)

    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
        """

            index = InputData._end_of_tag(tag_filter, index)
            # Turn each quoted tag into a membership test against "tags".
            tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
    def filter_data(self, element, params=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
            ...
            - test (or suite) n ID:
              ...
          ...

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """

        if element["filter"] in ("all", "template"):
            cond = InputData._condition(element["filter"])
            logging.debug(" Filter: {0}".format(cond))
            logging.error(" No filter defined.")

            params = element.get("parameters", None)
                params.append("type")

        for job, builds in element["data"].items():
            data[job] = pd.Series()
            for build in builds:
                data[job][str(build)] = pd.Series()
                    data_iter = self.data[job][str(build)][data_set].\
                    if continue_on_error:

                for test_ID, test_data in data_iter:
                    # NOTE(review): eval() executes text derived from the
                    # element specification; acceptable only because the
                    # specification is trusted (operator-supplied) input.
                    if eval(cond, {"tags": test_data.get("tags", "")}):
                        data[job][str(build)][test_ID] = pd.Series()
                        for param, val in test_data.items():
                            data[job][str(build)][test_ID][param] = val
                            for param in params:
                                data[job][str(build)][test_ID][param] =\
                                data[job][str(build)][test_ID][param] =\

        except (KeyError, IndexError, ValueError) as err:
            logging.error(" Missing mandatory parameter in the element "
                          "specification: {0}".format(err))
        except AttributeError:
            logging.error(" The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
1464 def merge_data(data):
1465 """Merge data from more jobs and builds to a simple data structure.
1467 The output data structure is:
1469 - test (suite) 1 ID:
1475 - test (suite) n ID:
1478 :param data: Data to merge.
1479 :type data: pandas.Series
1480 :returns: Merged data.
1481 :rtype: pandas.Series
1484 logging.info(" Merging data ...")
1486 merged_data = pd.Series()
1487 for _, builds in data.iteritems():
1488 for _, item in builds.iteritems():
1489 for ID, item_data in item.iteritems():
1490 merged_data[ID] = item_data