1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
22 import multiprocessing
30 from robot.api import ExecutionResult, ResultVisitor
31 from robot import errors
32 from collections import OrderedDict
33 from string import replace
35 from os.path import join
36 from datetime import datetime as dt
37 from datetime import timedelta
38 from json import loads
39 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
41 from input_data_files import download_and_unzip_data_file
42 from utils import Worker
45 # Separator used in file names
class ExecutionChecker(ResultVisitor):
    """Traverse the robot test-suite structure and extract data.

    Visiting a parsed output.xml produces a json-like dict with three parts:

    - "metadata": generated timestamp, SUT version, Jenkins job name and
      information about the build;
    - "suites": for every suite its name, documentation, parent and level in
      the suite hierarchy;
    - "tests": for every test its name, parent, documentation, message, tags,
      configuration (VAT/PAPI) history, show-run output, type and status.
      Performance tests additionally carry measured results depending on the
      test type: NDR/PDR (throughput, latency, lossTolerance), NDRPDR
      (throughput and latency with PASS/FAIL status), SOAK (PLRsearch lower
      and upper bounds), TCP (rps/cps/throughput), MRR/BMRR (receive-rate as
      AvgStdevMetadata).

    .. note:: ID is the lowercase full path to the test.
    """

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::\s(\d+.\d+)')

    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')

    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')

    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    # NOTE(review): the second argument line of this pattern was missing from
    # the reviewed listing; the trailing r'[\D\d]*' is reconstructed - confirm
    # against the repository history.
    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*|VPP version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
                                    r"(RTE Version: 'DPDK )(.*)(')")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
def __init__(self, metadata, mapping, ignore):
    """Initialisation.

    :param metadata: Key-value pairs to be included in "metadata" part of
        the generated JSON structure.
    :param mapping: Mapping of the old names of test cases to the new
        (actual) ones.
    :param ignore: List of TCs to be ignored.
    :type metadata: dict
    :type mapping: dict
    :type ignore: list
    """

    # Type of message to parse out from the test messages
    self._msg_type = None

    # SUT version reported by the setup keywords
    self._version = None

    # Timestamp of the build
    self._timestamp = None

    # Testbed. The testbed is identified by TG node IP address.
    self._testbed = None

    # Mapping of TCs long names
    self._mapping = mapping

    # Ignore list
    self._ignore = ignore

    # Number of VAT History messages found:
    # 0 - no message
    # 1 - VAT History of DUT1
    # 2 - VAT History of DUT2
    self._lookup_kw_nr = 0
    self._conf_history_lookup_nr = 0

    # Number of Show Running messages found
    # 0 - no message
    # 1 - Show run message found
    self._show_run_lookup_nr = 0

    # Test ID of currently processed test- the lowercase full path to the
    # test
    self._test_ID = None

    # The main data structure
    self._data = {
        "metadata": OrderedDict(),
        "suites": OrderedDict(),
        "tests": OrderedDict()
    }

    # Save the provided metadata
    for key, val in metadata.items():
        self._data["metadata"][key] = val

    # Dispatch table: message type -> parser method
    self.parse_msg = {
        "timestamp": self._get_timestamp,
        "vpp-version": self._get_vpp_version,
        "dpdk-version": self._get_dpdk_version,
        "teardown-vat-history": self._get_vat_history,
        "teardown-papi-history": self._get_papi_history,
        "test-show-runtime": self._get_show_run,
        "testbed": self._get_testbed
    }

@property
def data(self):
    """Getter - Data parsed from the XML file.

    :returns: Data parsed from the XML file.
    :rtype: dict
    """
    return self._data
def _get_testbed(self, msg):
    """Called when extraction of testbed IP is required.
    The testbed is identified by TG node IP address.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """

    if msg.message.count("Arguments:"):
        # Normalise the printed arguments into parseable JSON text.
        raw = str(msg.message)
        for old, new in ((' ', ''), ('\n', ''), ("'", '"'),
                         ('b"', '"'), ("honeycom", "honeycomb")):
            raw = raw.replace(old, new)
        # Strip the "Arguments:[" prefix and the trailing "]".
        parsed = loads(raw[11:-1])
        try:
            self._testbed = parsed["TG"]["host"]
        except (KeyError, ValueError):
            pass
        self._data["metadata"]["testbed"] = self._testbed
        self._msg_type = None
def _get_vpp_version(self, msg):
    """Called when extraction of VPP version is required.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """

    markers = ("return STDOUT Version:", "VPP Version:", "VPP version:")
    if any(msg.message.count(marker) for marker in markers):
        self._version = str(
            re.search(self.REGEX_VERSION_VPP, msg.message).group(2))
        self._data["metadata"]["version"] = self._version
        self._msg_type = None
def _get_dpdk_version(self, msg):
    """Called when extraction of DPDK version is required.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """

    if msg.message.count("return STDOUT testpmd"):
        try:
            self._version = str(re.search(
                self.REGEX_VERSION_DPDK, msg.message).group(4))
        except IndexError:
            # No DPDK version in the message; leave state untouched.
            return
        self._data["metadata"]["version"] = self._version
        self._msg_type = None
def _get_timestamp(self, msg):
    """Called when extraction of timestamp is required.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """

    # Keep only the leading "YYYYMMDD HH:MM" part of the timestamp.
    self._timestamp = msg.timestamp[:14]
    self._data["metadata"]["generated"] = self._timestamp
    self._msg_type = None
def _get_vat_history(self, msg):
    """Called when extraction of VAT command history is required.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """

    if msg.message.count("VAT command history:"):
        self._conf_history_lookup_nr += 1
        if self._conf_history_lookup_nr == 1:
            # First DUT: start a fresh history string.
            self._data["tests"][self._test_ID]["conf-history"] = str()
        else:
            # Second DUT seen: stop parsing further history messages.
            self._msg_type = None
        # Drop the leading "<ip> VAT command history:" header once.
        text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                      "VAT command history:", "", msg.message, count=1)
        for old, new in (("\n\n", "\n"), ('\n', ' |br| '),
                         ('\r', ''), ('"', "'")):
            text = text.replace(old, new)
        self._data["tests"][self._test_ID]["conf-history"] += " |br| "
        self._data["tests"][self._test_ID]["conf-history"] += \
            "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
def _get_papi_history(self, msg):
    """Called when extraction of PAPI command history is required.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """

    if msg.message.count("PAPI command history:"):
        self._conf_history_lookup_nr += 1
        if self._conf_history_lookup_nr == 1:
            # First DUT: start a fresh history string.
            self._data["tests"][self._test_ID]["conf-history"] = str()
        else:
            # Second DUT seen: stop parsing further history messages.
            self._msg_type = None
        # Drop the leading "<ip> PAPI command history:" header once.
        text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                      "PAPI command history:", "", msg.message, count=1)
        for old, new in (("\n\n", "\n"), ('\n', ' |br| '),
                         ('\r', ''), ('"', "'")):
            text = text.replace(old, new)
        self._data["tests"][self._test_ID]["conf-history"] += " |br| "
        self._data["tests"][self._test_ID]["conf-history"] += \
            "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
def _get_show_run(self, msg):
    """Called when extraction of VPP operational data (output of CLI command
    Show Runtime) is required.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """

    if msg.message.count("Thread 0 vpp_main"):
        self._show_run_lookup_nr += 1
        if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
            # First show-run of the first keyword: start a fresh string.
            self._data["tests"][self._test_ID]["show-run"] = str()
        if self._lookup_kw_nr > 1:
            self._msg_type = None
        if self._show_run_lookup_nr == 1:
            text = msg.message
            for old, new in (("vat# ", ""), ("return STDOUT ", ""),
                             ("\n\n", "\n"), ('\n', ' |br| '),
                             ('\r', ''), ('"', "'")):
                text = text.replace(old, new)
            self._data["tests"][self._test_ID]["show-run"] += " |br| "
            self._data["tests"][self._test_ID]["show-run"] += \
                "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
# TODO: Remove when definitely no NDRPDRDISC tests are used:
def _get_latency(self, msg, test_type):
    """Get the latency data from the test message.

    :param msg: Message to be parsed.
    :param test_type: Type of the test - NDR or PDR.
    :type msg: str
    :type test_type: str
    :returns: Latencies parsed from the message.
    :rtype: dict
    """

    if test_type == "NDR":
        groups = re.search(self.REGEX_LAT_NDR, msg)
        groups_range = range(1, 7)
    elif test_type == "PDR":
        groups = re.search(self.REGEX_LAT_PDR, msg)
        groups_range = range(1, 3)
    else:
        return None

    latencies = list()
    for idx in groups_range:
        try:
            lat = [int(item) for item in str(groups.group(idx)).split('/')]
        except (AttributeError, ValueError):
            # Missing or malformed group: mark the triple as unknown.
            lat = [-1, -1, -1]
        latencies.append(lat)

    keys = ("min", "avg", "max")
    # "50" and "10" stay empty for PDR tests; only NDR fills them below.
    latency = {
        "direction1": {"100": dict(), "50": dict(), "10": dict()},
        "direction2": {"100": dict(), "50": dict(), "10": dict()}
    }

    latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
    latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
    if test_type == "NDR":
        latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
        latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
        latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
        latency["direction2"]["10"] = dict(zip(keys, latencies[5]))

    return latency
def _get_ndrpdr_throughput(self, msg):
    """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
    message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """

    throughput = {
        "NDR": {"LOWER": -1.0, "UPPER": -1.0},
        "PDR": {"LOWER": -1.0, "UPPER": -1.0}
    }
    status = "FAIL"
    groups = re.search(self.REGEX_NDRPDR_RATE, msg)

    if groups is not None:
        try:
            throughput["NDR"]["LOWER"] = float(groups.group(1))
            throughput["NDR"]["UPPER"] = float(groups.group(2))
            throughput["PDR"]["LOWER"] = float(groups.group(3))
            throughput["PDR"]["UPPER"] = float(groups.group(4))
            status = "PASS"
        except (IndexError, ValueError):
            pass

    return throughput, status
def _get_plr_throughput(self, msg):
    """Get PLRsearch lower bound and PLRsearch upper bound from the test
    message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """

    throughput = {
        "LOWER": -1.0,
        "UPPER": -1.0
    }
    status = "FAIL"
    groups = re.search(self.REGEX_PLR_RATE, msg)

    if groups is not None:
        try:
            throughput["LOWER"] = float(groups.group(1))
            throughput["UPPER"] = float(groups.group(2))
            status = "PASS"
        except (IndexError, ValueError):
            pass

    return throughput, status
def _get_ndrpdr_latency(self, msg):
    """Get LATENCY from the test message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """

    latency = {
        "NDR": {
            "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
            "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
        },
        "PDR": {
            "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
            "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
        }
    }
    status = "FAIL"
    groups = re.search(self.REGEX_NDRPDR_LAT, msg)

    if groups is not None:
        keys = ("min", "avg", "max")
        try:
            latency["NDR"]["direction1"] = dict(
                zip(keys, [float(l) for l in groups.group(1).split('/')]))
            latency["NDR"]["direction2"] = dict(
                zip(keys, [float(l) for l in groups.group(2).split('/')]))
            latency["PDR"]["direction1"] = dict(
                zip(keys, [float(l) for l in groups.group(3).split('/')]))
            latency["PDR"]["direction2"] = dict(
                zip(keys, [float(l) for l in groups.group(4).split('/')]))
            status = "PASS"
        except (IndexError, ValueError):
            pass

    return latency, status
def visit_suite(self, suite):
    """Implements traversing through the suite and its direct children.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """
    if self.start_suite(suite) is not False:
        suite.suites.visit(self)
        suite.tests.visit(self)
        self.end_suite(suite)

def start_suite(self, suite):
    """Called when suite starts.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """

    try:
        parent_name = suite.parent.name
    except AttributeError:
        # Top-level suite has no parent.
        parent_name = ""

    doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
        replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
    # Undo the very first line-break marker inserted above.
    doc_str = doc_str.replace(' |br| *[', '*[', 1)

    self._data["suites"][suite.longname.lower().replace('"', "'").
        replace(" ", "_")] = {
            "name": suite.name.lower(),
            "doc": doc_str,
            "parent": parent_name,
            "level": len(suite.longname.split("."))
        }

    suite.keywords.visit(self)

def end_suite(self, suite):
    """Called when suite ends.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """
    pass
def visit_test(self, test):
    """Implements traversing through the test.

    :param test: Test to process.
    :type test: Test
    :returns: Nothing.
    """
    if self.start_test(test) is not False:
        test.keywords.visit(self)
        self.end_test(test)
# NOTE(review): this listing is sampled - many original lines (returns,
# else-branches, blank lines) are missing and each line keeps its original
# file line number as a prefix. Preserved verbatim; only comments added.
# Purpose: build the per-test result dict (name, parent, doc, msg, tags,
# type, status, and type-specific measurements) and store it under
# self._data["tests"][self._test_ID].
724 def start_test(self, test):
725 """Called when test starts.
727 :param test: Test to process.
732 longname_orig = test.longname.lower()
734 # Check the ignore list
735 if longname_orig in self._ignore:
738 tags = [str(tag) for tag in test.tags]
741 # Change the TC long name and name if defined in the mapping table
742 longname = self._mapping.get(longname_orig, None)
743 if longname is not None:
744 name = longname.split('.')[-1]
745 logging.debug("{0}\n{1}\n{2}\n{3}".format(
746 self._data["metadata"], longname_orig, longname, name))
748 longname = longname_orig
749 name = test.name.lower()
751 # Remove TC number from the TC long name (backward compatibility):
752 self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
753 # Remove TC number from the TC name (not needed):
754 test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
756 test_result["parent"] = test.parent.name.lower()
757 test_result["tags"] = tags
758 doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
759 replace('\r', '').replace('[', ' |br| [')
760 test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
761 test_result["msg"] = test.message.replace('\n', ' |br| '). \
762 replace('\r', '').replace('"', "'")
763 test_result["type"] = "FUNC"
764 test_result["status"] = test.status
# Performance tests: normalise -Nc- names into -NtNc- using the TC tag.
766 if "PERFTEST" in tags:
767 # Replace info about cores (e.g. -1c-) with the info about threads
768 # and cores (e.g. -1t1c-) in the long test case names and in the
769 # test case names if necessary.
770 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
773 for tag in test_result["tags"]:
774 groups = re.search(self.REGEX_TC_TAG, tag)
780 self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
781 "-{0}-".format(tag_tc.lower()),
784 test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
785 "-{0}-".format(tag_tc.lower()),
# Zero or multiple multi-threading tags: the test cannot be renamed.
789 test_result["status"] = "FAIL"
790 self._data["tests"][self._test_ID] = test_result
791 logging.debug("The test '{0}' has no or more than one "
792 "multi-threading tags.".format(self._test_ID))
793 logging.debug("Tags: {0}".format(test_result["tags"]))
# Determine the test type from tags (performance types first).
796 if test.status == "PASS" and ("NDRPDRDISC" in tags or
802 # TODO: Remove when definitely no NDRPDRDISC tests are used:
803 if "NDRDISC" in tags:
804 test_result["type"] = "NDR"
805 # TODO: Remove when definitely no NDRPDRDISC tests are used:
806 elif "PDRDISC" in tags:
807 test_result["type"] = "PDR"
808 elif "NDRPDR" in tags:
809 test_result["type"] = "NDRPDR"
811 test_result["type"] = "SOAK"
813 test_result["type"] = "TCP"
815 test_result["type"] = "MRR"
816 elif "FRMOBL" in tags or "BMRR" in tags:
817 test_result["type"] = "BMRR"
819 test_result["status"] = "FAIL"
820 self._data["tests"][self._test_ID] = test_result
# Extract the type-specific measurement results from the test message.
823 # TODO: Remove when definitely no NDRPDRDISC tests are used:
824 if test_result["type"] in ("NDR", "PDR"):
826 rate_value = str(re.search(
827 self.REGEX_RATE, test.message).group(1))
828 except AttributeError:
831 rate_unit = str(re.search(
832 self.REGEX_RATE, test.message).group(2))
833 except AttributeError:
836 test_result["throughput"] = dict()
837 test_result["throughput"]["value"] = \
838 int(rate_value.split('.')[0])
839 test_result["throughput"]["unit"] = rate_unit
840 test_result["latency"] = \
841 self._get_latency(test.message, test_result["type"])
842 if test_result["type"] == "PDR":
843 test_result["lossTolerance"] = str(re.search(
844 self.REGEX_TOLERANCE, test.message).group(1))
846 elif test_result["type"] in ("NDRPDR", ):
847 test_result["throughput"], test_result["status"] = \
848 self._get_ndrpdr_throughput(test.message)
849 test_result["latency"], test_result["status"] = \
850 self._get_ndrpdr_latency(test.message)
852 elif test_result["type"] in ("SOAK", ):
853 test_result["throughput"], test_result["status"] = \
854 self._get_plr_throughput(test.message)
856 elif test_result["type"] in ("TCP", ):
857 groups = re.search(self.REGEX_TCP, test.message)
858 test_result["result"] = int(groups.group(2))
860 elif test_result["type"] in ("MRR", "BMRR"):
861 test_result["result"] = dict()
862 groups = re.search(self.REGEX_BMRR, test.message)
863 if groups is not None:
864 items_str = groups.group(1)
865 items_float = [float(item.strip()) for item
866 in items_str.split(",")]
867 metadata = AvgStdevMetadataFactory.from_data(items_float)
868 # Next two lines have been introduced in CSIT-1179,
869 # to be removed in CSIT-1180.
872 test_result["result"]["receive-rate"] = metadata
# Fallback to the legacy MRR message format.
874 groups = re.search(self.REGEX_MRR, test.message)
875 test_result["result"]["receive-rate"] = \
876 AvgStdevMetadataFactory.from_data([
877 float(groups.group(3)) / float(groups.group(1)), ])
879 self._data["tests"][self._test_ID] = test_result
def end_test(self, test):
    """Called when test ends.

    :param test: Test to process.
    :type test: Test
    :returns: Nothing.
    """
    pass

def visit_keyword(self, keyword):
    """Implements traversing through the keyword and its child keywords.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
    if self.start_keyword(keyword) is not False:
        self.end_keyword(keyword)

def start_keyword(self, keyword):
    """Called when keyword starts. Default implementation does nothing.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
    try:
        if keyword.type == "setup":
            self.visit_setup_kw(keyword)
        elif keyword.type == "teardown":
            self._lookup_kw_nr = 0
            self.visit_teardown_kw(keyword)
        else:
            self._lookup_kw_nr = 0
            self.visit_test_kw(keyword)
    except AttributeError:
        pass

def end_keyword(self, keyword):
    """Called when keyword ends. Default implementation does nothing.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
    pass
def visit_test_kw(self, test_kw):
    """Implements traversing through the test keyword and its child
    keywords.

    :param test_kw: Keyword to process.
    :type test_kw: Keyword
    :returns: Nothing.
    """
    for keyword in test_kw.keywords:
        if self.start_test_kw(keyword) is not False:
            self.visit_test_kw(keyword)
            self.end_test_kw(keyword)

def start_test_kw(self, test_kw):
    """Called when test keyword starts. Default implementation does
    nothing.

    :param test_kw: Keyword to process.
    :type test_kw: Keyword
    :returns: Nothing.
    """
    if test_kw.name.count("Show Runtime Counters On All Duts"):
        self._lookup_kw_nr += 1
        self._show_run_lookup_nr = 0
        self._msg_type = "test-show-runtime"
    elif test_kw.name.count("Start The L2fwd Test") and not self._version:
        self._msg_type = "dpdk-version"
    else:
        return
    test_kw.messages.visit(self)

def end_test_kw(self, test_kw):
    """Called when keyword ends. Default implementation does nothing.

    :param test_kw: Keyword to process.
    :type test_kw: Keyword
    :returns: Nothing.
    """
    pass
def visit_setup_kw(self, setup_kw):
    """Implements traversing through the setup keyword and its child
    keywords.

    :param setup_kw: Keyword to process.
    :type setup_kw: Keyword
    :returns: Nothing.
    """
    for keyword in setup_kw.keywords:
        if self.start_setup_kw(keyword) is not False:
            self.visit_setup_kw(keyword)
            self.end_setup_kw(keyword)

def start_setup_kw(self, setup_kw):
    """Called when setup keyword starts. Default implementation does
    nothing.

    :param setup_kw: Keyword to process.
    :type setup_kw: Keyword
    :returns: Nothing.
    """
    if setup_kw.name.count("Show Vpp Version On All Duts") \
            and not self._version:
        self._msg_type = "vpp-version"
    elif setup_kw.name.count("Set Global Variable") \
            and not self._timestamp:
        self._msg_type = "timestamp"
    elif setup_kw.name.count("Setup Framework") and not self._testbed:
        self._msg_type = "testbed"
    else:
        return
    setup_kw.messages.visit(self)

def end_setup_kw(self, setup_kw):
    """Called when keyword ends. Default implementation does nothing.

    :param setup_kw: Keyword to process.
    :type setup_kw: Keyword
    :returns: Nothing.
    """
    pass
def visit_teardown_kw(self, teardown_kw):
    """Implements traversing through the teardown keyword and its child
    keywords.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
    for keyword in teardown_kw.keywords:
        if self.start_teardown_kw(keyword) is not False:
            self.visit_teardown_kw(keyword)
            self.end_teardown_kw(keyword)

def start_teardown_kw(self, teardown_kw):
    """Called when teardown keyword starts. Default implementation does
    nothing.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
    if teardown_kw.name.count("Show Vat History On All Duts"):
        self._conf_history_lookup_nr = 0
        self._msg_type = "teardown-vat-history"
        teardown_kw.messages.visit(self)
    elif teardown_kw.name.count("Show Papi History On All Duts"):
        self._conf_history_lookup_nr = 0
        self._msg_type = "teardown-papi-history"
        teardown_kw.messages.visit(self)

def end_teardown_kw(self, teardown_kw):
    """Called when keyword ends. Default implementation does nothing.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
    pass
def visit_message(self, msg):
    """Implements visiting the message.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    if self.start_message(msg) is not False:
        self.end_message(msg)

def start_message(self, msg):
    """Called when message starts. Get required information from messages:
    it depends on the message type.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    # Dispatch to the parser registered for the current message type.
    if self._msg_type:
        self.parse_msg[self._msg_type](msg)

def end_message(self, msg):
    """Called when message ends. Default implementation does nothing.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    pass
class InputData(object):
    """Input data.

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' DataFrames/Series:

    - job name
      - build number
        - metadata (as described in ExecutionChecker documentation)
        - suites (as described in ExecutionChecker documentation)
        - tests (as described in ExecutionChecker documentation)
    """

    def __init__(self, spec):
        """Initialisation.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data
        :rtype: pandas.Series
        """
        return self._input_data
1121 def metadata(self, job, build):
1122 """Getter - metadata
1124 :param job: Job which metadata we want.
1125 :param build: Build which metadata we want.
1129 :rtype: pandas.Series
1132 return self.data[job][build]["metadata"]
1134 def suites(self, job, build):
1137 :param job: Job which suites we want.
1138 :param build: Build which suites we want.
1142 :rtype: pandas.Series
1145 return self.data[job][str(build)]["suites"]
1147 def tests(self, job, build):
1150 :param job: Job which tests we want.
1151 :param build: Build which tests we want.
1155 :rtype: pandas.Series
1158 return self.data[job][build]["tests"]
def _parse_tests(self, job, build, log):
    """Process data from robot output.xml file and return JSON structured
    data.

    :param job: The name of job which build output data will be processed.
    :param build: The build which output data will be processed.
    :param log: List of log messages.
    :type job: str
    :type build: dict
    :type log: list of tuples (severity, msg)
    :returns: JSON data structure.
    :rtype: dict
    """

    metadata = {
        "job": job,
        "build": build
    }

    with open(build["file-name"], 'r') as data_file:
        try:
            result = ExecutionResult(data_file)
        except errors.DataError as err:
            log.append(("ERROR", "Error occurred while parsing output.xml: "
                                 "{0}".format(err)))
            return None

    checker = ExecutionChecker(metadata, self._cfg.mapping,
                               self._cfg.ignore)
    result.visit(checker)

    return checker.data
# NOTE(review): sampled listing with missing original lines preserved
# verbatim; only comments added. Purpose: download one job/build input
# file, parse it, optionally delete it (always, or when older than the
# configured time period), flush collected log messages, and return a
# result dict for the caller.
1192 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1193 """Download and parse the input data file.
1195 :param pid: PID of the process executing this method.
1196 :param job: Name of the Jenkins job which generated the processed input
1198 :param build: Information about the Jenkins build which generated the
1199 processed input file.
1200 :param repeat: Repeat the download specified number of times if not
1210 logs.append(("INFO", " Processing the job/build: {0}: {1}".
1211 format(job, build["build"])))
1218 success = download_and_unzip_data_file(self._cfg, job, build, pid,
# Download failed: log and skip this build.
1224 logs.append(("ERROR", "It is not possible to download the input "
1225 "data file from the job '{job}', build "
1226 "'{build}', or it is damaged. Skipped.".
1227 format(job=job, build=build["build"])))
1229 logs.append(("INFO", " Processing data from the build '{0}' ...".
1230 format(build["build"])))
1231 data = self._parse_tests(job, build, logs)
1233 logs.append(("ERROR", "Input data file from the job '{job}', "
1234 "build '{build}' is damaged. Skipped.".
1235 format(job=job, build=build["build"])))
# Best-effort removal of the downloaded file.
1240 remove(build["file-name"])
1241 except OSError as err:
1242 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
1243 format(build["file-name"], repr(err))))
1245 # If the time-period is defined in the specification file, remove all
1246 # files which are outside the time period.
1247 timeperiod = self._cfg.input.get("time-period", None)
1248 if timeperiod and data:
1250 timeperiod = timedelta(int(timeperiod))
1251 metadata = data.get("metadata", None)
1253 generated = metadata.get("generated", None)
1255 generated = dt.strptime(generated, "%Y%m%d %H:%M")
1256 if (now - generated) > timeperiod:
1257 # Remove the data and the file:
1262 " The build {job}/{build} is outdated, will be "
1263 "removed".format(job=job, build=build["build"])))
1264 file_name = self._cfg.input["file-name"]
1266 self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
1267 "{job}{sep}{build}{sep}{name}".format(
1270 build=build["build"],
1274 logs.append(("INFO",
1275 " The file {name} has been removed".
1276 format(name=full_name)))
1277 except OSError as err:
1278 logs.append(("ERROR",
1279 "Cannot remove the file '{0}': {1}".
1280 format(full_name, repr(err))))
1281 logs.append(("INFO", " Done."))
# Flush the buffered log messages at their recorded severities.
1283 for level, line in logs:
1286 elif level == "ERROR":
1288 elif level == "DEBUG":
1290 elif level == "CRITICAL":
1291 logging.critical(line)
1292 elif level == "WARNING":
1293 logging.warning(line)
1295 return {"data": data, "state": state, "job": job, "build": build}
# NOTE(review): sampled listing with missing original lines preserved
# verbatim; only comments added. Purpose: iterate over all configured
# jobs/builds, download and parse each one, and store the results in
# self._input_data keyed by job and build number.
1297 def download_and_parse_data(self, repeat=1):
1298 """Download the input data files, parse input data from input files and
1299 store in pandas' Series.
1301 :param repeat: Repeat the download specified number of times if not
1306 logging.info("Downloading and parsing input files ...")
1308 for job, builds in self._cfg.builds.items():
1309 for build in builds:
1311 result = self._download_and_parse_build(job, build, repeat)
1312 build_nr = result["build"]["build"]
1315 data = result["data"]
# Wrap the parsed dicts in pandas Series, preserving insertion order.
1316 build_data = pd.Series({
1317 "metadata": pd.Series(
1318 data["metadata"].values(),
1319 index=data["metadata"].keys()),
1320 "suites": pd.Series(data["suites"].values(),
1321 index=data["suites"].keys()),
1322 "tests": pd.Series(data["tests"].values(),
1323 index=data["tests"].keys())})
1325 if self._input_data.get(job, None) is None:
1326 self._input_data[job] = pd.Series()
1327 self._input_data[job][str(build_nr)] = build_data
1329 self._cfg.set_input_file_name(
1330 job, build_nr, result["build"]["file-name"])
1332 self._cfg.set_input_state(job, build_nr, result["state"])
# Diagnostic memory reporting after processing all builds.
1334 logging.info("ru_maxrss = {0}".format(
1335 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
1337 logging.info(objgraph.most_common_types())
1339 logging.info("Done.")
def _end_of_tag(tag_filter, start=0, closer="'"):
    """Return the index of character in the string which is the end of tag.

    :param tag_filter: The string where the end of tag is being searched.
    :param start: The index where the searching is stated.
    :param closer: The character which is the tag closer.
    :type tag_filter: str
    :type start: int
    :type closer: str
    :returns: The index of the tag closer.
    :rtype: int
    """

    try:
        # Find the opening quote first, then the matching closing one.
        opener_idx = tag_filter.index(closer, start)
        return tag_filter.index(closer, opener_idx + 1)
    except ValueError:
        # No (more) complete tag found.
        return None
def _condition(tag_filter):
    """Create a conditional statement from the given tag filter.

    Every quoted tag in the filter is turned into a membership test, e.g.
    "'TAG'" becomes "'TAG' in tags", so the result can be eval()-uated
    against a "tags" collection.

    :param tag_filter: Filter based on tags from the element specification.
    :type tag_filter: str
    :returns: Conditional statement which can be evaluated.
    :rtype: str
    """

    index = 0
    while True:
        index = InputData._end_of_tag(tag_filter, index)
        if index is None:
            return tag_filter
        index += 1
        tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
# NOTE(review): sampled listing with missing original lines preserved
# verbatim; only comments added. Purpose: evaluate the element's tag
# filter against every test/suite of the requested jobs/builds and return
# the matching items (optionally restricted to selected parameters) as
# nested pandas Series. Uses eval() on the constructed condition - the
# filter string comes from the specification file, i.e. trusted input.
1379 def filter_data(self, element, params=None, data_set="tests",
1380 continue_on_error=False):
1381 """Filter required data from the given jobs and builds.
1383 The output data structure is:
1387 - test (or suite) 1 ID:
1393 - test (or suite) n ID:
1400 :param element: Element which will use the filtered data.
1401 :param params: Parameters which will be included in the output. If None,
1402 all parameters are included.
1403 :param data_set: The set of data to be filtered: tests, suites,
1405 :param continue_on_error: Continue if there is error while reading the
1406 data. The Item will be empty then
1407 :type element: pandas.Series
1410 :type continue_on_error: bool
1411 :returns: Filtered data.
1412 :rtype pandas.Series
1416 if element["filter"] in ("all", "template"):
1419 cond = InputData._condition(element["filter"])
1420 logging.debug(" Filter: {0}".format(cond))
1422 logging.error(" No filter defined.")
1426 params = element.get("parameters", None)
1428 params.append("type")
1432 for job, builds in element["data"].items():
1433 data[job] = pd.Series()
1434 for build in builds:
1435 data[job][str(build)] = pd.Series()
1437 data_iter = self.data[job][str(build)][data_set].\
1440 if continue_on_error:
1444 for test_ID, test_data in data_iter:
1445 if eval(cond, {"tags": test_data.get("tags", "")}):
1446 data[job][str(build)][test_ID] = pd.Series()
# With params=None copy everything; otherwise copy the listed params only.
1448 for param, val in test_data.items():
1449 data[job][str(build)][test_ID][param] = val
1451 for param in params:
1453 data[job][str(build)][test_ID][param] =\
1456 data[job][str(build)][test_ID][param] =\
1460 except (KeyError, IndexError, ValueError) as err:
1461 logging.error(" Missing mandatory parameter in the element "
1462 "specification: {0}".format(err))
1464 except AttributeError:
1467 logging.error(" The filter '{0}' is not correct. Check if all "
1468 "tags are enclosed by apostrophes.".format(cond))
# NOTE(review): sampled listing - the trailing lines of this function
# (presumably "return merged_data") fall outside the reviewed excerpt.
# Preserved verbatim; only comments added. Purpose: flatten the nested
# job -> build -> item Series into a single Series keyed by item ID
# (later builds/jobs overwrite earlier ones for a duplicate ID).
1472 def merge_data(data):
1473 """Merge data from more jobs and builds to a simple data structure.
1475 The output data structure is:
1477 - test (suite) 1 ID:
1483 - test (suite) n ID:
1486 :param data: Data to merge.
1487 :type data: pandas.Series
1488 :returns: Merged data.
1489 :rtype: pandas.Series
1492 logging.info(" Merging data ...")
1494 merged_data = pd.Series()
1495 for _, builds in data.iteritems():
1496 for _, item in builds.iteritems():
1497 for ID, item_data in item.iteritems():
1498 merged_data[ID] = item_data