1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in pandas' DataFrames,
18 - provide access to the data.
19 - filter the data using tags,
27 from robot.api import ExecutionResult, ResultVisitor
28 from robot import errors
29 from collections import OrderedDict
30 from string import replace
32 from os.path import join
33 from datetime import datetime as dt
34 from datetime import timedelta
35 from json import loads
36 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
38 from input_data_files import download_and_unzip_data_file
41 # Separator used in file names
# NOTE(review): this is an elided listing — the internal line numbers skip, so
# parts of the class docstring (the json-structure example) are missing here.
45 class ExecutionChecker(ResultVisitor):
46 """Class to traverse through the test suite structure.
48 The functionality implemented in this class generates a json structure:
54 "generated": "Timestamp",
55 "version": "SUT version",
56 "job": "Jenkins job name",
57 "build": "Information about the build"
60 "Suite long name 1": {
62 "doc": "Suite 1 documentation",
63 "parent": "Suite 1 parent",
64 "level": "Level of the suite in the suite hierarchy"
66 "Suite long name N": {
68 "doc": "Suite N documentation",
69 "parent": "Suite 2 parent",
70 "level": "Level of the suite in the suite hierarchy"
77 "parent": "Name of the parent of the test",
78 "doc": "Test documentation",
79 "msg": "Test message",
80 "conf-history": "DUT1 and DUT2 VAT History",
81 "show-run": "Show Run",
82 "tags": ["tag 1", "tag 2", "tag n"],
84 "status": "PASS" | "FAIL",
126 "parent": "Name of the parent of the test",
127 "doc": "Test documentation",
128 "msg": "Test message",
129 "tags": ["tag 1", "tag 2", "tag n"],
131 "status": "PASS" | "FAIL",
138 "parent": "Name of the parent of the test",
139 "doc": "Test documentation",
140 "msg": "Test message",
141 "tags": ["tag 1", "tag 2", "tag n"],
142 "type": "MRR" | "BMRR",
143 "status": "PASS" | "FAIL",
145 "receive-rate": AvgStdevMetadata,
149 # TODO: Remove when definitely no NDRPDRDISC tests are used:
153 "parent": "Name of the parent of the test",
154 "doc": "Test documentation",
155 "msg": "Test message",
156 "tags": ["tag 1", "tag 2", "tag n"],
157 "type": "PDR" | "NDR",
158 "status": "PASS" | "FAIL",
159 "throughput": { # Only type: "PDR" | "NDR"
161 "unit": "pps" | "bps" | "percentage"
163 "latency": { # Only type: "PDR" | "NDR"
170 "50": { # Only for NDR
175 "10": { # Only for NDR
187 "50": { # Only for NDR
192 "10": { # Only for NDR
199 "lossTolerance": "lossTolerance", # Only type: "PDR"
200 "conf-history": "DUT1 and DUT2 VAT History"
201 "show-run": "Show Run"
213 "metadata": { # Optional
214 "version": "VPP version",
215 "job": "Jenkins job name",
216 "build": "Information about the build"
220 "doc": "Suite 1 documentation",
221 "parent": "Suite 1 parent",
222 "level": "Level of the suite in the suite hierarchy"
225 "doc": "Suite N documentation",
226 "parent": "Suite 2 parent",
227 "level": "Level of the suite in the suite hierarchy"
233 "parent": "Name of the parent of the test",
234 "doc": "Test documentation"
235 "msg": "Test message"
236 "tags": ["tag 1", "tag 2", "tag n"],
237 "conf-history": "DUT1 and DUT2 VAT History"
238 "show-run": "Show Run"
239 "status": "PASS" | "FAIL"
247 .. note:: ID is the lowercase full path to the test.
# NOTE(review): class-level compiled regexes used to parse rates, latencies,
# versions and tags out of Robot test messages. Listing is elided; the
# REGEX_TOLERANCE pattern at line 277 is visibly missing its continuation line.
250 # TODO: Remove when definitely no NDRPDRDISC tests are used:
251 REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
253 REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
254 r'PLRsearch upper bound::\s(\d+.\d+)')
256 REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
257 r'NDR_UPPER:\s(\d+.\d+).*\n'
258 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
259 r'PDR_UPPER:\s(\d+.\d+)')
261 # TODO: Remove when definitely no NDRPDRDISC tests are used:
262 REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
263 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
264 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
265 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
266 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
267 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
268 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')
270 REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
271 r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
272 r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')
274 REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
275 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
277 REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
280 REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
281 r"VPP Version:\s*|VPP version:\s*)(.*)")
283 REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
284 r"(RTE Version: 'DPDK )(.*)(')")
286 REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
288 REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
289 r'tx\s(\d*),\srx\s(\d*)')
291 REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
292 r' in packets per second: \[(.*)\]')
294 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
296 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
298 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
300 REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
# NOTE(review): constructor — initializes parser state, copies caller-provided
# metadata into self._data, and builds the msg-type -> parser-method dispatch
# table. Elided listing: several assignments (e.g. self._data = {...},
# self.parse_msg = {...} openers) fall in the numbering gaps below.
302 def __init__(self, metadata, mapping, ignore):
305 :param metadata: Key-value pairs to be included in "metadata" part of
307 :param mapping: Mapping of the old names of test cases to the new
309 :param ignore: List of TCs to be ignored.
315 # Type of message to parse out from the test messages
316 self._msg_type = None
322 self._timestamp = None
324 # Testbed. The testbed is identified by TG node IP address.
327 # Mapping of TCs long names
328 self._mapping = mapping
331 self._ignore = ignore
333 # Number of VAT History messages found:
335 # 1 - VAT History of DUT1
336 # 2 - VAT History of DUT2
337 self._lookup_kw_nr = 0
338 self._conf_history_lookup_nr = 0
340 # Number of Show Running messages found
342 # 1 - Show run message found
343 self._show_run_lookup_nr = 0
345 # Test ID of currently processed test- the lowercase full path to the
349 # The main data structure
351 "metadata": OrderedDict(),
352 "suites": OrderedDict(),
353 "tests": OrderedDict()
356 # Save the provided metadata
357 for key, val in metadata.items():
358 self._data["metadata"][key] = val
360 # Dictionary defining the methods used to parse different types of
# maps the _msg_type strings set by start_*_kw() to the parser methods below
363 "timestamp": self._get_timestamp,
364 "vpp-version": self._get_vpp_version,
365 "dpdk-version": self._get_dpdk_version,
366 "teardown-vat-history": self._get_vat_history,
367 "teardown-papi-history": self._get_papi_history,
368 "test-show-runtime": self._get_show_run,
369 "testbed": self._get_testbed
# NOTE(review): fragment of the `data` property docstring; the @property /
# def line (source lines 371-373) is elided from this listing.
374 """Getter - Data parsed from the XML file.
376 :returns: Data parsed from the XML file.
# NOTE(review): parses the TG host IP out of a "Arguments:" setup message
# (after normalizing it to JSON) and stores it under metadata["testbed"].
# Elided listing: the try: (line 395) and the except fallback body
# (lines 398-399) are missing here.
381 def _get_testbed(self, msg):
382 """Called when extraction of testbed IP is required.
383 The testbed is identified by TG node IP address.
385 :param msg: Message to process.
390 if msg.message.count("Arguments:"):
391 message = str(msg.message).replace(' ', '').replace('\n', '').\
392 replace("'", '"').replace('b"', '"').\
393 replace("honeycom", "honeycomb")
394 message = loads(message[11:-1])
396 self._testbed = message["TG"]["host"]
397 except (KeyError, ValueError):
400 self._data["metadata"]["testbed"] = self._testbed
401 self._msg_type = None
# NOTE(review): extracts the VPP version string via REGEX_VERSION_VPP and
# stores it in metadata["version"]. Line 415 (the `group(2))` continuation of
# the re.search call) is elided from this listing.
403 def _get_vpp_version(self, msg):
404 """Called when extraction of VPP version is required.
406 :param msg: Message to process.
411 if msg.message.count("return STDOUT Version:") or \
412 msg.message.count("VPP Version:") or \
413 msg.message.count("VPP version:"):
414 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
416 self._data["metadata"]["version"] = self._version
417 self._msg_type = None
# NOTE(review): extracts the DPDK version (group 4 of REGEX_VERSION_DPDK) from
# a testpmd message. The try:/except lines (428, 432-434) are elided here.
419 def _get_dpdk_version(self, msg):
420 """Called when extraction of DPDK version is required.
422 :param msg: Message to process.
427 if msg.message.count("return STDOUT testpmd"):
429 self._version = str(re.search(
430 self.REGEX_VERSION_DPDK, msg.message). group(4))
431 self._data["metadata"]["version"] = self._version
435 self._msg_type = None
437 def _get_timestamp(self, msg):
438 """Called when extraction of timestamp is required.
440 :param msg: Message to process.
445 self._timestamp = msg.timestamp[:14]
446 self._data["metadata"]["generated"] = self._timestamp
447 self._msg_type = None
# NOTE(review): collects per-DUT VAT command history into
# tests[test_ID]["conf-history"], numbering DUTs by occurrence order.
# Elided listing: line 460 (presumably the else: branch before
# `self._msg_type = None`) is missing — verify against the full source.
449 def _get_vat_history(self, msg):
450 """Called when extraction of VAT command history is required.
452 :param msg: Message to process.
456 if msg.message.count("VAT command history:"):
457 self._conf_history_lookup_nr += 1
458 if self._conf_history_lookup_nr == 1:
459 self._data["tests"][self._test_ID]["conf-history"] = str()
461 self._msg_type = None
462 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
463 "VAT command history:", "", msg.message, count=1). \
464 replace("\n\n", "\n").replace('\n', ' |br| ').\
465 replace('\r', '').replace('"', "'")
467 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
468 self._data["tests"][self._test_ID]["conf-history"] += \
469 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
# NOTE(review): PAPI twin of _get_vat_history — same accumulation logic with
# the "PAPI command history:" marker. Line 482 is elided (likely the else:
# branch, mirroring line 460 in the VAT variant).
471 def _get_papi_history(self, msg):
472 """Called when extraction of PAPI command history is required.
474 :param msg: Message to process.
478 if msg.message.count("PAPI command history:"):
479 self._conf_history_lookup_nr += 1
480 if self._conf_history_lookup_nr == 1:
481 self._data["tests"][self._test_ID]["conf-history"] = str()
483 self._msg_type = None
484 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
485 "PAPI command history:", "", msg.message, count=1). \
486 replace("\n\n", "\n").replace('\n', ' |br| ').\
487 replace('\r', '').replace('"', "'")
489 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
490 self._data["tests"][self._test_ID]["conf-history"] += \
491 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
# NOTE(review): accumulates "show runtime" output per DUT into
# tests[test_ID]["show-run"], keyed on the "Thread 0 vpp_main" marker.
# Elided listing: lines 512 and 516-518 are missing (control-flow around the
# accumulation) — do not infer the exact branch structure from this excerpt.
493 def _get_show_run(self, msg):
494 """Called when extraction of VPP operational data (output of CLI command
495 Show Runtime) is required.
497 :param msg: Message to process.
501 if msg.message.count("Thread 0 vpp_main"):
502 self._show_run_lookup_nr += 1
503 if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
504 self._data["tests"][self._test_ID]["show-run"] = str()
505 if self._lookup_kw_nr > 1:
506 self._msg_type = None
507 if self._show_run_lookup_nr == 1:
508 text = msg.message.replace("vat# ", "").\
509 replace("return STDOUT ", "").replace("\n\n", "\n").\
510 replace('\n', ' |br| ').\
511 replace('\r', '').replace('"', "'")
513 self._data["tests"][self._test_ID]["show-run"] += " |br| "
514 self._data["tests"][self._test_ID]["show-run"] += \
515 "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
# NOTE(review): legacy NDR/PDR latency parser. NDR messages carry three
# latency pairs (100/50/10 percent rates), PDR one pair. Elided listing:
# the early-return for unknown test_type, the try:, the except body, and the
# latency dict initialization (lines ~537-555) are missing here.
519 # TODO: Remove when definitely no NDRPDRDISC tests are used:
520 def _get_latency(self, msg, test_type):
521 """Get the latency data from the test message.
523 :param msg: Message to be parsed.
524 :param test_type: Type of the test - NDR or PDR.
527 :returns: Latencies parsed from the message.
531 if test_type == "NDR":
532 groups = re.search(self.REGEX_LAT_NDR, msg)
533 groups_range = range(1, 7)
534 elif test_type == "PDR":
535 groups = re.search(self.REGEX_LAT_PDR, msg)
536 groups_range = range(1, 3)
541 for idx in groups_range:
543 lat = [int(item) for item in str(groups.group(idx)).split('/')]
544 except (AttributeError, ValueError):
546 latencies.append(lat)
548 keys = ("min", "avg", "max")
556 latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
557 latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
558 if test_type == "NDR":
559 latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
560 latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
561 latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
562 latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
# NOTE(review): parses NDR/PDR lower/upper bounds out of the test message via
# REGEX_NDRPDR_RATE; -1.0 sentinels mark missing values. The status variable
# initialization and its "PASS" assignment (lines ~578-592) are elided —
# presumably status flips to "PASS" when all four groups parse.
566 def _get_ndrpdr_throughput(self, msg):
567 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
570 :param msg: The test message to be parsed.
572 :returns: Parsed data as a dict and the status (PASS/FAIL).
573 :rtype: tuple(dict, str)
577 "NDR": {"LOWER": -1.0, "UPPER": -1.0},
578 "PDR": {"LOWER": -1.0, "UPPER": -1.0}
581 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
583 if groups is not None:
585 throughput["NDR"]["LOWER"] = float(groups.group(1))
586 throughput["NDR"]["UPPER"] = float(groups.group(2))
587 throughput["PDR"]["LOWER"] = float(groups.group(3))
588 throughput["PDR"]["UPPER"] = float(groups.group(4))
590 except (IndexError, ValueError):
593 return throughput, status
# NOTE(review): parses PLRsearch lower/upper bounds via REGEX_PLR_RATE.
# The throughput dict initialization and status handling (lines ~603-619)
# are elided from this listing.
595 def _get_plr_throughput(self, msg):
596 """Get PLRsearch lower bound and PLRsearch upper bound from the test
599 :param msg: The test message to be parsed.
601 :returns: Parsed data as a dict and the status (PASS/FAIL).
602 :rtype: tuple(dict, str)
610 groups = re.search(self.REGEX_PLR_RATE, msg)
612 if groups is not None:
614 throughput["LOWER"] = float(groups.group(1))
615 throughput["UPPER"] = float(groups.group(2))
617 except (IndexError, ValueError):
619 return throughput, status
# NOTE(review): parses the two LATENCY lines (NDR then PDR) into
# min/avg/max dicts per direction; -1.0 sentinels mark missing values.
# The status init, the NDR/PDR wrapper dict, try:, and the PASS assignment
# (lines ~629-658) are elided here.
622 def _get_ndrpdr_latency(self, msg):
623 """Get LATENCY from the test message.
625 :param msg: The test message to be parsed.
627 :returns: Parsed data as a dict and the status (PASS/FAIL).
628 :rtype: tuple(dict, str)
633 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
634 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
637 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
638 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
642 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
644 if groups is not None:
645 keys = ("min", "avg", "max")
647 latency["NDR"]["direction1"] = dict(
648 zip(keys, [float(l) for l in groups.group(1).split('/')]))
649 latency["NDR"]["direction2"] = dict(
650 zip(keys, [float(l) for l in groups.group(2).split('/')]))
651 latency["PDR"]["direction1"] = dict(
652 zip(keys, [float(l) for l in groups.group(3).split('/')]))
653 latency["PDR"]["direction2"] = dict(
654 zip(keys, [float(l) for l in groups.group(4).split('/')]))
656 except (IndexError, ValueError):
659 return latency, status
661 def visit_suite(self, suite):
662 """Implements traversing through the suite and its direct children.
664 :param suite: Suite to process.
668 if self.start_suite(suite) is not False:
669 suite.suites.visit(self)
670 suite.tests.visit(self)
671 self.end_suite(suite)
# NOTE(review): records the suite under self._data["suites"] keyed by its
# normalized long name, then visits its keywords. Elided listing: the try:
# before line 682, the AttributeError fallback (684-685), and the "doc" entry
# of the suite dict are missing. Line 688 uses the Python 2-only
# string.replace function (imported at module top) — not available on Py3.
673 def start_suite(self, suite):
674 """Called when suite starts.
676 :param suite: Suite to process.
682 parent_name = suite.parent.name
683 except AttributeError:
686 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
687 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
688 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
690 self._data["suites"][suite.longname.lower().replace('"', "'").
691 replace(" ", "_")] = {
692 "name": suite.name.lower(),
694 "parent": parent_name,
695 "level": len(suite.longname.split("."))
698 suite.keywords.visit(self)
# NOTE(review): the method body (presumably `pass`, lines 704-707) is elided.
700 def end_suite(self, suite):
701 """Called when suite ends.
703 :param suite: Suite to process.
# NOTE(review): line 718 (presumably `self.end_test(test)`) is elided, so the
# tail of the traversal is not visible in this excerpt.
709 def visit_test(self, test):
710 """Implements traversing through the test.
712 :param test: Test to process.
716 if self.start_test(test) is not False:
717 test.keywords.visit(self)
# NOTE(review): the core of the parser — builds the per-test result dict
# (name, parent, tags, doc, msg, type, status) and dispatches on test type
# (NDR/PDR legacy, NDRPDR, SOAK, TCP, MRR/BMRR) to fill throughput/latency/
# result fields. This listing is heavily elided: the test_result init, the
# `return` for ignored TCs, several try:/else: lines and the tag-count guard
# around lines 767-791 are missing — do not infer exact branch structure
# from this excerpt. Line 756 uses the Python 2-only string.replace function.
720 def start_test(self, test):
721 """Called when test starts.
723 :param test: Test to process.
728 longname_orig = test.longname.lower()
730 # Check the ignore list
731 if longname_orig in self._ignore:
734 tags = [str(tag) for tag in test.tags]
737 # Change the TC long name and name if defined in the mapping table
738 longname = self._mapping.get(longname_orig, None)
739 if longname is not None:
740 name = longname.split('.')[-1]
741 logging.debug("{0}\n{1}\n{2}\n{3}".format(
742 self._data["metadata"], longname_orig, longname, name))
744 longname = longname_orig
745 name = test.name.lower()
747 # Remove TC number from the TC long name (backward compatibility):
748 self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
749 # Remove TC number from the TC name (not needed):
750 test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
752 test_result["parent"] = test.parent.name.lower()
753 test_result["tags"] = tags
754 doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
755 replace('\r', '').replace('[', ' |br| [')
756 test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
757 test_result["msg"] = test.message.replace('\n', ' |br| '). \
758 replace('\r', '').replace('"', "'")
759 test_result["type"] = "FUNC"
760 test_result["status"] = test.status
762 if "PERFTEST" in tags:
763 # Replace info about cores (e.g. -1c-) with the info about threads
764 # and cores (e.g. -1t1c-) in the long test case names and in the
765 # test case names if necessary.
766 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
769 for tag in test_result["tags"]:
770 groups = re.search(self.REGEX_TC_TAG, tag)
776 self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
777 "-{0}-".format(tag_tc.lower()),
780 test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
781 "-{0}-".format(tag_tc.lower()),
785 test_result["status"] = "FAIL"
786 self._data["tests"][self._test_ID] = test_result
787 logging.debug("The test '{0}' has no or more than one "
788 "multi-threading tags.".format(self._test_ID))
789 logging.debug("Tags: {0}".format(test_result["tags"]))
792 if test.status == "PASS" and ("NDRPDRDISC" in tags or
798 # TODO: Remove when definitely no NDRPDRDISC tests are used:
799 if "NDRDISC" in tags:
800 test_result["type"] = "NDR"
801 # TODO: Remove when definitely no NDRPDRDISC tests are used:
802 elif "PDRDISC" in tags:
803 test_result["type"] = "PDR"
804 elif "NDRPDR" in tags:
805 test_result["type"] = "NDRPDR"
807 test_result["type"] = "SOAK"
809 test_result["type"] = "TCP"
811 test_result["type"] = "MRR"
812 elif "FRMOBL" in tags or "BMRR" in tags:
813 test_result["type"] = "BMRR"
815 test_result["status"] = "FAIL"
816 self._data["tests"][self._test_ID] = test_result
819 # TODO: Remove when definitely no NDRPDRDISC tests are used:
820 if test_result["type"] in ("NDR", "PDR"):
822 rate_value = str(re.search(
823 self.REGEX_RATE, test.message).group(1))
824 except AttributeError:
827 rate_unit = str(re.search(
828 self.REGEX_RATE, test.message).group(2))
829 except AttributeError:
832 test_result["throughput"] = dict()
833 test_result["throughput"]["value"] = \
834 int(rate_value.split('.')[0])
835 test_result["throughput"]["unit"] = rate_unit
836 test_result["latency"] = \
837 self._get_latency(test.message, test_result["type"])
838 if test_result["type"] == "PDR":
839 test_result["lossTolerance"] = str(re.search(
840 self.REGEX_TOLERANCE, test.message).group(1))
842 elif test_result["type"] in ("NDRPDR", ):
843 test_result["throughput"], test_result["status"] = \
844 self._get_ndrpdr_throughput(test.message)
845 test_result["latency"], test_result["status"] = \
846 self._get_ndrpdr_latency(test.message)
848 elif test_result["type"] in ("SOAK", ):
849 test_result["throughput"], test_result["status"] = \
850 self._get_plr_throughput(test.message)
852 elif test_result["type"] in ("TCP", ):
853 groups = re.search(self.REGEX_TCP, test.message)
854 test_result["result"] = int(groups.group(2))
856 elif test_result["type"] in ("MRR", "BMRR"):
857 test_result["result"] = dict()
858 groups = re.search(self.REGEX_BMRR, test.message)
859 if groups is not None:
860 items_str = groups.group(1)
861 items_float = [float(item.strip()) for item
862 in items_str.split(",")]
863 metadata = AvgStdevMetadataFactory.from_data(items_float)
864 # Next two lines have been introduced in CSIT-1179,
865 # to be removed in CSIT-1180.
868 test_result["result"]["receive-rate"] = metadata
870 groups = re.search(self.REGEX_MRR, test.message)
871 test_result["result"]["receive-rate"] = \
872 AvgStdevMetadataFactory.from_data([
873 float(groups.group(3)) / float(groups.group(1)), ])
875 self._data["tests"][self._test_ID] = test_result
# NOTE(review): the method body (presumably `pass`) is elided from this listing.
877 def end_test(self, test):
878 """Called when test ends.
880 :param test: Test to process.
886 def visit_keyword(self, keyword):
887 """Implements traversing through the keyword and its child keywords.
889 :param keyword: Keyword to process.
890 :type keyword: Keyword
893 if self.start_keyword(keyword) is not False:
894 self.end_keyword(keyword)
# NOTE(review): routes setup/teardown/other keywords to the matching
# visit_*_kw helper, resetting the show-run keyword counter for teardown and
# test keywords. Elided listing: line 909 (presumably else:) and the
# AttributeError handler body (913) are missing.
896 def start_keyword(self, keyword):
897 """Called when keyword starts. Default implementation does nothing.
899 :param keyword: Keyword to process.
900 :type keyword: Keyword
904 if keyword.type == "setup":
905 self.visit_setup_kw(keyword)
906 elif keyword.type == "teardown":
907 self._lookup_kw_nr = 0
908 self.visit_teardown_kw(keyword)
910 self._lookup_kw_nr = 0
911 self.visit_test_kw(keyword)
912 except AttributeError:
# NOTE(review): the method body (presumably `pass`) is elided from this listing.
915 def end_keyword(self, keyword):
916 """Called when keyword ends. Default implementation does nothing.
918 :param keyword: Keyword to process.
919 :type keyword: Keyword
924 def visit_test_kw(self, test_kw):
925 """Implements traversing through the test keyword and its child
928 :param test_kw: Keyword to process.
929 :type test_kw: Keyword
932 for keyword in test_kw.keywords:
933 if self.start_test_kw(keyword) is not False:
934 self.visit_test_kw(keyword)
935 self.end_test_kw(keyword)
# NOTE(review): flags keywords whose messages should be parsed ("show runtime"
# counters, DPDK/L2fwd version). Elided listing: lines 951-952 (presumably the
# else:/guard before visiting messages) are missing, so the condition under
# which line 953 runs is not visible in this excerpt.
937 def start_test_kw(self, test_kw):
938 """Called when test keyword starts. Default implementation does
941 :param test_kw: Keyword to process.
942 :type test_kw: Keyword
945 if test_kw.name.count("Show Runtime Counters On All Duts"):
946 self._lookup_kw_nr += 1
947 self._show_run_lookup_nr = 0
948 self._msg_type = "test-show-runtime"
949 elif test_kw.name.count("Start The L2fwd Test") and not self._version:
950 self._msg_type = "dpdk-version"
953 test_kw.messages.visit(self)
# NOTE(review): the method body (presumably `pass`) is elided from this listing.
955 def end_test_kw(self, test_kw):
956 """Called when keyword ends. Default implementation does nothing.
958 :param test_kw: Keyword to process.
959 :type test_kw: Keyword
964 def visit_setup_kw(self, setup_kw):
965 """Implements traversing through the teardown keyword and its child
968 :param setup_kw: Keyword to process.
969 :type setup_kw: Keyword
972 for keyword in setup_kw.keywords:
973 if self.start_setup_kw(keyword) is not False:
974 self.visit_setup_kw(keyword)
975 self.end_setup_kw(keyword)
# NOTE(review): flags setup keywords that carry the VPP version, the
# timestamp or the testbed identification; messages are then parsed by the
# matching parse_msg handler. Elided listing: lines 993-994 (presumably the
# `if self._msg_type:` guard before line 995) are missing.
977 def start_setup_kw(self, setup_kw):
978 """Called when teardown keyword starts. Default implementation does
981 :param setup_kw: Keyword to process.
982 :type setup_kw: Keyword
985 if setup_kw.name.count("Show Vpp Version On All Duts") \
986 and not self._version:
987 self._msg_type = "vpp-version"
988 elif setup_kw.name.count("Set Global Variable") \
989 and not self._timestamp:
990 self._msg_type = "timestamp"
991 elif setup_kw.name.count("Setup Framework") and not self._testbed:
992 self._msg_type = "testbed"
995 setup_kw.messages.visit(self)
# NOTE(review): the method body (presumably `pass`) is elided from this listing.
997 def end_setup_kw(self, setup_kw):
998 """Called when keyword ends. Default implementation does nothing.
1000 :param setup_kw: Keyword to process.
1001 :type setup_kw: Keyword
1006 def visit_teardown_kw(self, teardown_kw):
1007 """Implements traversing through the teardown keyword and its child
1010 :param teardown_kw: Keyword to process.
1011 :type teardown_kw: Keyword
1014 for keyword in teardown_kw.keywords:
1015 if self.start_teardown_kw(keyword) is not False:
1016 self.visit_teardown_kw(keyword)
1017 self.end_teardown_kw(keyword)
1019 def start_teardown_kw(self, teardown_kw):
1020 """Called when teardown keyword starts. Default implementation does
1023 :param teardown_kw: Keyword to process.
1024 :type teardown_kw: Keyword
1028 if teardown_kw.name.count("Show Vat History On All Duts"):
1029 self._conf_history_lookup_nr = 0
1030 self._msg_type = "teardown-vat-history"
1031 teardown_kw.messages.visit(self)
1032 elif teardown_kw.name.count("Show Papi History On All Duts"):
1033 self._conf_history_lookup_nr = 0
1034 self._msg_type = "teardown-papi-history"
1035 teardown_kw.messages.visit(self)
# NOTE(review): the method body (presumably `pass`) is elided from this listing.
1037 def end_teardown_kw(self, teardown_kw):
1038 """Called when keyword ends. Default implementation does nothing.
1040 :param teardown_kw: Keyword to process.
1041 :type teardown_kw: Keyword
1046 def visit_message(self, msg):
1047 """Implements visiting the message.
1049 :param msg: Message to process.
1053 if self.start_message(msg) is not False:
1054 self.end_message(msg)
# NOTE(review): dispatches the message to the parser selected via
# self._msg_type (see the parse_msg table built in __init__). Elided listing:
# lines 1061-1065 (presumably the `if self._msg_type:` guard) are missing.
1056 def start_message(self, msg):
1057 """Called when message starts. Get required information from messages:
1060 :param msg: Message to process.
1066 self.parse_msg[self._msg_type](msg)
# NOTE(review): the method body (presumably `pass`) is elided from this listing.
1068 def end_message(self, msg):
1069 """Called when message ends. Default implementation does nothing.
1071 :param msg: Message to process.
# NOTE(review): aggregator class — downloads output.xml files per job/build,
# runs ExecutionChecker over them and stores the results in nested
# pandas.Series. Parts of the class docstring are elided in this listing.
1078 class InputData(object):
1081 The data is extracted from output.xml files generated by Jenkins jobs and
1082 stored in pandas' DataFrames.
1088 (as described in ExecutionChecker documentation)
1090 (as described in ExecutionChecker documentation)
1092 (as described in ExecutionChecker documentation)
# NOTE(review): constructor plus the start of the `data` property; the
# property's def line (source ~1108-1109) and the `self._cfg = spec`
# assignment fall in the elided numbering gaps.
1095 def __init__(self, spec):
1098 :param spec: Specification.
1099 :type spec: Specification
1106 self._input_data = pd.Series()
1110 """Getter - Input data.
1112 :returns: Input data
1113 :rtype: pandas.Series
1115 return self._input_data
# NOTE(review): accessor for the "metadata" section of one job/build.
1117 def metadata(self, job, build):
1118 """Getter - metadata
1120 :param job: Job which metadata we want.
1121 :param build: Build which metadata we want.
1125 :rtype: pandas.Series
1128 return self.data[job][build]["metadata"]
# NOTE(review): accessor for the "suites" section; note the build key is
# stringified here (str(build)) unlike metadata()/tests() — presumably
# intentional, but worth confirming against callers.
1130 def suites(self, job, build):
1133 :param job: Job which suites we want.
1134 :param build: Build which suites we want.
1138 :rtype: pandas.Series
1141 return self.data[job][str(build)]["suites"]
# NOTE(review): accessor for the "tests" section of one job/build.
1143 def tests(self, job, build):
1146 :param job: Job which tests we want.
1147 :param build: Build which tests we want.
1151 :rtype: pandas.Series
1154 return self.data[job][build]["tests"]
# NOTE(review): parses one downloaded output.xml with Robot's ExecutionResult
# and runs ExecutionChecker over it. Elided listing: the metadata dict
# construction, the try:, the error-path return and the final
# `return checker.data` (lines ~1167-1187) are missing here.
1156 def _parse_tests(self, job, build, log):
1157 """Process data from robot output.xml file and return JSON structured
1160 :param job: The name of job which build output data will be processed.
1161 :param build: The build which output data will be processed.
1162 :param log: List of log messages.
1165 :type log: list of tuples (severity, msg)
1166 :returns: JSON data structure.
1175 with open(build["file-name"], 'r') as data_file:
1177 result = ExecutionResult(data_file)
1178 except errors.DataError as err:
1179 log.append(("ERROR", "Error occurred while parsing output.xml: "
1182 checker = ExecutionChecker(metadata, self._cfg.mapping,
1184 result.visit(checker)
# NOTE(review): downloads one build's data file (with retries), parses it,
# removes the local file, and prunes builds older than the configured
# time-period; returns a result dict consumed by download_and_parse_data().
# Elided listing: the retry loop, `state` assignments, several try:/else:
# lines and the full_name construction are missing — treat the visible lines
# as an outline, not complete control flow.
1188 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1189 """Download and parse the input data file.
1191 :param pid: PID of the process executing this method.
1192 :param job: Name of the Jenkins job which generated the processed input
1194 :param build: Information about the Jenkins build which generated the
1195 processed input file.
1196 :param repeat: Repeat the download specified number of times if not
1206 logs.append(("INFO", " Processing the job/build: {0}: {1}".
1207 format(job, build["build"])))
1214 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1220 logs.append(("ERROR", "It is not possible to download the input "
1221 "data file from the job '{job}', build "
1222 "'{build}', or it is damaged. Skipped.".
1223 format(job=job, build=build["build"])))
1225 logs.append(("INFO", " Processing data from the build '{0}' ...".
1226 format(build["build"])))
1227 data = self._parse_tests(job, build, logs)
1229 logs.append(("ERROR", "Input data file from the job '{job}', "
1230 "build '{build}' is damaged. Skipped.".
1231 format(job=job, build=build["build"])))
1236 remove(build["file-name"])
1237 except OSError as err:
1238 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
1239 format(build["file-name"], repr(err))))
1241 # If the time-period is defined in the specification file, remove all
1242 # files which are outside the time period.
1243 timeperiod = self._cfg.input.get("time-period", None)
1244 if timeperiod and data:
1246 timeperiod = timedelta(int(timeperiod))
1247 metadata = data.get("metadata", None)
1249 generated = metadata.get("generated", None)
1251 generated = dt.strptime(generated, "%Y%m%d %H:%M")
1252 if (now - generated) > timeperiod:
1253 # Remove the data and the file:
1258 " The build {job}/{build} is outdated, will be "
1259 "removed".format(job=job, build=build["build"])))
1260 file_name = self._cfg.input["file-name"]
1262 self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
1263 "{job}{sep}{build}{sep}{name}".format(
1266 build=build["build"],
1270 logs.append(("INFO",
1271 " The file {name} has been removed".
1272 format(name=full_name)))
1273 except OSError as err:
1274 logs.append(("ERROR",
1275 "Cannot remove the file '{0}': {1}".
1276 format(full_name, repr(err))))
1277 logs.append(("INFO", " Done."))
1279 for level, line in logs:
1282 elif level == "ERROR":
1284 elif level == "DEBUG":
1286 elif level == "CRITICAL":
1287 logging.critical(line)
1288 elif level == "WARNING":
1289 logging.warning(line)
1291 return {"data": data, "state": state, "job": job, "build": build}
# NOTE(review): iterates over all configured job/builds, downloads and parses
# each one, and stores the parsed sections as nested pandas.Series keyed by
# job and stringified build number. Elided listing: the guard deciding when
# `result["data"]` is used (lines ~1309-1310) is missing.
1293 def download_and_parse_data(self, repeat=1):
1294 """Download the input data files, parse input data from input files and
1295 store in pandas' Series.
1297 :param repeat: Repeat the download specified number of times if not
1302 logging.info("Downloading and parsing input files ...")
1304 for job, builds in self._cfg.builds.items():
1305 for build in builds:
1307 result = self._download_and_parse_build(job, build, repeat)
1308 build_nr = result["build"]["build"]
1311 data = result["data"]
1312 build_data = pd.Series({
1313 "metadata": pd.Series(
1314 data["metadata"].values(),
1315 index=data["metadata"].keys()),
1316 "suites": pd.Series(data["suites"].values(),
1317 index=data["suites"].keys()),
1318 "tests": pd.Series(data["tests"].values(),
1319 index=data["tests"].keys())})
1321 if self._input_data.get(job, None) is None:
1322 self._input_data[job] = pd.Series()
1323 self._input_data[job][str(build_nr)] = build_data
1325 self._cfg.set_input_file_name(
1326 job, build_nr, result["build"]["file-name"])
1328 self._cfg.set_input_state(job, build_nr, result["state"])
1330 logging.info("Memory allocation: {0:,d}MB".format(
1331 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
1333 logging.info("Done.")
# NOTE(review): helper for _condition() — finds the closing quote of a tag
# token inside a filter string. The try: before line 1350 and the
# except/fallback return (ValueError path) are elided from this listing.
1336 def _end_of_tag(tag_filter, start=0, closer="'"):
1337 """Return the index of character in the string which is the end of tag.
1339 :param tag_filter: The string where the end of tag is being searched.
1340 :param start: The index where the searching is stated.
1341 :param closer: The character which is the tag closer.
1342 :type tag_filter: str
1345 :returns: The index of the tag closer.
1350 idx_opener = tag_filter.index(closer, start)
1351 return tag_filter.index(closer, idx_opener + 1)
# NOTE(review): rewrites a tag-filter expression into a Python boolean
# expression evaluated later with eval(cond, {"tags": ...}) in filter_data().
# The surrounding loop and the return statement (lines ~1362-1372) are elided.
1356 def _condition(tag_filter):
1357 """Create a conditional statement from the given tag filter.
1359 :param tag_filter: Filter based on tags from the element specification.
1360 :type tag_filter: str
1361 :returns: Conditional statement which can be evaluated.
1367 index = InputData._end_of_tag(tag_filter, index)
1371 tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
# NOTE(review): filters the parsed data per job/build/test by evaluating the
# element's tag filter with eval() against each test's tags — eval on
# specification-provided input; safe only while specifications are trusted.
# Elided listing: the try: openers, the "all"/"template" branch body, the
# params loop internals and the return statements are missing here.
1373 def filter_data(self, element, params=None, data_set="tests",
1374 continue_on_error=False):
1375 """Filter required data from the given jobs and builds.
1377 The output data structure is:
1381 - test (or suite) 1 ID:
1387 - test (or suite) n ID:
1394 :param element: Element which will use the filtered data.
1395 :param params: Parameters which will be included in the output. If None,
1396 all parameters are included.
1397 :param data_set: The set of data to be filtered: tests, suites,
1399 :param continue_on_error: Continue if there is error while reading the
1400 data. The Item will be empty then
1401 :type element: pandas.Series
1404 :type continue_on_error: bool
1405 :returns: Filtered data.
1406 :rtype pandas.Series
1410 if element["filter"] in ("all", "template"):
1413 cond = InputData._condition(element["filter"])
1414 logging.debug(" Filter: {0}".format(cond))
1416 logging.error(" No filter defined.")
1420 params = element.get("parameters", None)
1422 params.append("type")
1426 for job, builds in element["data"].items():
1427 data[job] = pd.Series()
1428 for build in builds:
1429 data[job][str(build)] = pd.Series()
1431 data_iter = self.data[job][str(build)][data_set].\
1434 if continue_on_error:
1438 for test_ID, test_data in data_iter:
1439 if eval(cond, {"tags": test_data.get("tags", "")}):
1440 data[job][str(build)][test_ID] = pd.Series()
1442 for param, val in test_data.items():
1443 data[job][str(build)][test_ID][param] = val
1445 for param in params:
1447 data[job][str(build)][test_ID][param] =\
1450 data[job][str(build)][test_ID][param] =\
1454 except (KeyError, IndexError, ValueError) as err:
1455 logging.error(" Missing mandatory parameter in the element "
1456 "specification: {0}".format(err))
1458 except AttributeError:
1461 logging.error(" The filter '{0}' is not correct. Check if all "
1462 "tags are enclosed by apostrophes.".format(cond))
# NOTE(review): flattens the job -> build -> ID nesting into a single Series
# keyed by test/suite ID; later builds overwrite earlier entries with the
# same ID. The trailing return (presumably `return merged_data`) runs past
# the end of this excerpt.
1466 def merge_data(data):
1467 """Merge data from more jobs and builds to a simple data structure.
1469 The output data structure is:
1471 - test (suite) 1 ID:
1477 - test (suite) n ID:
1480 :param data: Data to merge.
1481 :type data: pandas.Series
1482 :returns: Merged data.
1483 :rtype: pandas.Series
1486 logging.info(" Merging data ...")
1488 merged_data = pd.Series()
1489 for _, builds in data.iteritems():
1490 for _, item in builds.iteritems():
1491 for ID, item_data in item.iteritems():
1492 merged_data[ID] = item_data