1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
22 import multiprocessing
28 from robot.api import ExecutionResult, ResultVisitor
29 from robot import errors
30 from collections import OrderedDict
31 from string import replace
33 from os.path import join
34 from datetime import datetime as dt
35 from datetime import timedelta
36 from json import loads
37 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
39 from input_data_files import download_and_unzip_data_file
40 from utils import Worker
43 # Separator used in file names
47 class ExecutionChecker(ResultVisitor):
48 """Class to traverse through the test suite structure.
50 The functionality implemented in this class generates a json structure:
56 "generated": "Timestamp",
57 "version": "SUT version",
58 "job": "Jenkins job name",
59 "build": "Information about the build"
62 "Suite long name 1": {
64 "doc": "Suite 1 documentation",
65 "parent": "Suite 1 parent",
66 "level": "Level of the suite in the suite hierarchy"
68 "Suite long name N": {
70 "doc": "Suite N documentation",
71 "parent": "Suite 2 parent",
72 "level": "Level of the suite in the suite hierarchy"
79 "parent": "Name of the parent of the test",
80 "doc": "Test documentation",
81 "msg": "Test message",
82 "vat-history": "DUT1 and DUT2 VAT History",
83 "show-run": "Show Run",
84 "tags": ["tag 1", "tag 2", "tag n"],
86 "status": "PASS" | "FAIL",
128 "parent": "Name of the parent of the test",
129 "doc": "Test documentation",
130 "msg": "Test message",
131 "tags": ["tag 1", "tag 2", "tag n"],
133 "status": "PASS" | "FAIL",
140 "parent": "Name of the parent of the test",
141 "doc": "Test documentation",
142 "msg": "Test message",
143 "tags": ["tag 1", "tag 2", "tag n"],
144 "type": "MRR" | "BMRR",
145 "status": "PASS" | "FAIL",
147 "receive-rate": AvgStdevMetadata,
151 # TODO: Remove when definitely no NDRPDRDISC tests are used:
155 "parent": "Name of the parent of the test",
156 "doc": "Test documentation",
157 "msg": "Test message",
158 "tags": ["tag 1", "tag 2", "tag n"],
159 "type": "PDR" | "NDR",
160 "status": "PASS" | "FAIL",
161 "throughput": { # Only type: "PDR" | "NDR"
163 "unit": "pps" | "bps" | "percentage"
165 "latency": { # Only type: "PDR" | "NDR"
172 "50": { # Only for NDR
177 "10": { # Only for NDR
189 "50": { # Only for NDR
194 "10": { # Only for NDR
201 "lossTolerance": "lossTolerance", # Only type: "PDR"
202 "vat-history": "DUT1 and DUT2 VAT History"
203 "show-run": "Show Run"
215 "metadata": { # Optional
216 "version": "VPP version",
217 "job": "Jenkins job name",
218 "build": "Information about the build"
222 "doc": "Suite 1 documentation",
223 "parent": "Suite 1 parent",
224 "level": "Level of the suite in the suite hierarchy"
227 "doc": "Suite N documentation",
228 "parent": "Suite 2 parent",
229 "level": "Level of the suite in the suite hierarchy"
235 "parent": "Name of the parent of the test",
236 "doc": "Test documentation"
237 "msg": "Test message"
238 "tags": ["tag 1", "tag 2", "tag n"],
239 "vat-history": "DUT1 and DUT2 VAT History"
240 "show-run": "Show Run"
241 "status": "PASS" | "FAIL"
249 .. note:: ID is the lowercase full path to the test.
# NOTE(review): this listing is a line-numbered extract with interleaved
# original lines elided (the embedded numbering jumps); restore the full
# docstring and constants from VCS before editing.
# The REGEX_* class attributes below extract measured values (rates,
# latencies, versions) from the free-text Robot test/keyword messages.
252 # TODO: Remove when definitely no NDRPDRDISC tests are used:
253 REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
255 REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
256 r'PLRsearch upper bound::\s(\d+.\d+)')
258 REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
259 r'NDR_UPPER:\s(\d+.\d+).*\n'
260 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
261 r'PDR_UPPER:\s(\d+.\d+)')
263 # TODO: Remove when definitely no NDRPDRDISC tests are used:
264 REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
265 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
266 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
267 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
268 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
269 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
270 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')
272 REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
273 r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
274 r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')
276 REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
277 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
279 REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
282 REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
283 r"VPP Version:\s*)(.*)")
285 REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
286 r"(RTE Version: 'DPDK )(.*)(')")
288 REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
290 REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
291 r'tx\s(\d*),\srx\s(\d*)')
293 REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
294 r' in packets per second: \[(.*)\]')
296 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
298 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
300 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
302 REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
304 def __init__(self, metadata, mapping, ignore):
307 :param metadata: Key-value pairs to be included in "metadata" part of
309 :param mapping: Mapping of the old names of test cases to the new
311 :param ignore: List of TCs to be ignored.
# NOTE(review): lines are elided in this listing (numbering jumps);
# several attribute initialisations (e.g. self._version, self._testbed,
# self._test_ID, self._data literal opening) are not visible here.
317 # Type of message to parse out from the test messages
318 self._msg_type = None
324 self._timestamp = None
326 # Testbed. The testbed is identified by TG node IP address.
329 # Mapping of TCs long names
330 self._mapping = mapping
333 self._ignore = ignore
335 # Number of VAT History messages found:
337 # 1 - VAT History of DUT1
338 # 2 - VAT History of DUT2
339 self._lookup_kw_nr = 0
340 self._vat_history_lookup_nr = 0
342 # Number of Show Running messages found
344 # 1 - Show run message found
345 self._show_run_lookup_nr = 0
347 # Test ID of currently processed test- the lowercase full path to the
351 # The main data structure
353 "metadata": OrderedDict(),
354 "suites": OrderedDict(),
355 "tests": OrderedDict()
358 # Save the provided metadata
359 for key, val in metadata.items():
360 self._data["metadata"][key] = val
362 # Dictionary defining the methods used to parse different types of
# Dispatch table: self._msg_type selects which parser start_message()
# applies to an incoming Robot message.
365 "timestamp": self._get_timestamp,
366 "vpp-version": self._get_vpp_version,
367 "dpdk-version": self._get_dpdk_version,
368 "teardown-vat-history": self._get_vat_history,
369 "test-show-runtime": self._get_show_run,
370 "testbed": self._get_testbed
# NOTE(review): the "data" @property definition line is elided here;
# only its docstring fragment survives below.
375 """Getter - Data parsed from the XML file.
377 :returns: Data parsed from the XML file.
382 def _get_testbed(self, msg):
383 """Called when extraction of testbed IP is required.
384 The testbed is identified by TG node IP address.
386 :param msg: Message to process.
391 if msg.message.count("Arguments:"):
392 message = str(msg.message).replace(' ', '').replace('\n', '').\
393 replace("'", '"').replace('b"', '"').\
394 replace("honeycom", "honeycomb")
395 message = loads(message[11:-1])
397 self._testbed = message["TG"]["host"]
398 except (KeyError, ValueError):
401 self._data["metadata"]["testbed"] = self._testbed
402 self._msg_type = None
404 def _get_vpp_version(self, msg):
405 """Called when extraction of VPP version is required.
407 :param msg: Message to process.
412 if msg.message.count("return STDOUT Version:") or \
413 msg.message.count("VPP Version:"):
414 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
416 self._data["metadata"]["version"] = self._version
417 self._msg_type = None
419 def _get_dpdk_version(self, msg):
420 """Called when extraction of DPDK version is required.
422 :param msg: Message to process.
# NOTE(review): the error-handling lines around the assignment below
# (original lines 428, 432-434) are elided in this listing — presumably
# a try/except; confirm against VCS before editing.
427 if msg.message.count("return STDOUT testpmd"):
# Group 4 of REGEX_VERSION_DPDK is the DPDK version string.
429 self._version = str(re.search(
430 self.REGEX_VERSION_DPDK, msg.message). group(4))
431 self._data["metadata"]["version"] = self._version
435 self._msg_type = None
437 def _get_timestamp(self, msg):
438 """Called when extraction of timestamp is required.
440 :param msg: Message to process.
445 self._timestamp = msg.timestamp[:14]
446 self._data["metadata"]["generated"] = self._timestamp
447 self._msg_type = None
449 def _get_vat_history(self, msg):
450 """Called when extraction of VAT command history is required.
452 :param msg: Message to process.
456 if msg.message.count("VAT command history:"):
# Counts DUTs: 1 -> DUT1 history, 2 -> DUT2 history (see __init__).
457 self._vat_history_lookup_nr += 1
458 if self._vat_history_lookup_nr == 1:
459 self._data["tests"][self._test_ID]["vat-history"] = str()
# NOTE(review): original line 460 is elided here — the branch structure
# around the next statement is not visible; confirm against VCS.
461 self._msg_type = None
# Strip the leading "a.b.c.d VAT command history:" header and convert
# newlines to the " |br| " rST line-break marker used by the reports.
462 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
463 "VAT command history:", "", msg.message, count=1). \
464 replace("\n\n", "\n").replace('\n', ' |br| ').\
465 replace('\r', '').replace('"', "'")
467 self._data["tests"][self._test_ID]["vat-history"] += " |br| "
468 self._data["tests"][self._test_ID]["vat-history"] += \
469 "**DUT" + str(self._vat_history_lookup_nr) + ":** " + text
471 def _get_show_run(self, msg):
472 """Called when extraction of VPP operational data (output of CLI command
473 Show Runtime) is required.
475 :param msg: Message to process.
479 if msg.message.count("return STDOUT Thread "):
480 self._show_run_lookup_nr += 1
# First "show runtime" of the first lookup keyword initialises the field.
481 if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
482 self._data["tests"][self._test_ID]["show-run"] = str()
483 if self._lookup_kw_nr > 1:
484 self._msg_type = None
485 if self._show_run_lookup_nr == 1:
# Normalise CLI noise and convert newlines to " |br| " markers.
486 text = msg.message.replace("vat# ", "").\
487 replace("return STDOUT ", "").replace("\n\n", "\n").\
488 replace('\n', ' |br| ').\
489 replace('\r', '').replace('"', "'")
# NOTE(review): lines between original 489 and 491 are elided here.
491 self._data["tests"][self._test_ID]["show-run"] += " |br| "
492 self._data["tests"][self._test_ID]["show-run"] += \
493 "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
497 # TODO: Remove when definitely no NDRPDRDISC tests are used:
498 def _get_latency(self, msg, test_type):
499 """Get the latency data from the test message.
501 :param msg: Message to be parsed.
502 :param test_type: Type of the test - NDR or PDR.
505 :returns: Latencies parsed from the message.
509 if test_type == "NDR":
# NDR reports three rate levels (100/50/10%), two directions each.
510 groups = re.search(self.REGEX_LAT_NDR, msg)
511 groups_range = range(1, 7)
512 elif test_type == "PDR":
# PDR reports a single rate level, two directions.
513 groups = re.search(self.REGEX_LAT_PDR, msg)
514 groups_range = range(1, 3)
# NOTE(review): initialisation of `latencies` and the surrounding
# structure (original lines 515-518) are elided in this listing.
519 for idx in groups_range:
521 lat = [int(item) for item in str(groups.group(idx)).split('/')]
522 except (AttributeError, ValueError):
524 latencies.append(lat)
526 keys = ("min", "avg", "max")
# NOTE(review): the `latency` dict skeleton (original 527-533) and the
# final return (after 540) are elided; restore from VCS before editing.
534 latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
535 latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
536 if test_type == "NDR":
537 latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
538 latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
539 latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
540 latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
544 def _get_ndrpdr_throughput(self, msg):
545 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
548 :param msg: The test message to be parsed.
550 :returns: Parsed data as a dict and the status (PASS/FAIL).
551 :rtype: tuple(dict, str)
555 "NDR": {"LOWER": -1.0, "UPPER": -1.0},
556 "PDR": {"LOWER": -1.0, "UPPER": -1.0}
559 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
561 if groups is not None:
563 throughput["NDR"]["LOWER"] = float(groups.group(1))
564 throughput["NDR"]["UPPER"] = float(groups.group(2))
565 throughput["PDR"]["LOWER"] = float(groups.group(3))
566 throughput["PDR"]["UPPER"] = float(groups.group(4))
568 except (IndexError, ValueError):
571 return throughput, status
573 def _get_plr_throughput(self, msg):
574 """Get PLRsearch lower bound and PLRsearch upper bound from the test
577 :param msg: The test message to be parsed.
579 :returns: Parsed data as a dict and the status (PASS/FAIL).
580 :rtype: tuple(dict, str)
588 groups = re.search(self.REGEX_PLR_RATE, msg)
590 if groups is not None:
592 throughput["LOWER"] = float(groups.group(1))
593 throughput["UPPER"] = float(groups.group(2))
595 except (IndexError, ValueError):
598 return throughput, status
600 def _get_ndrpdr_latency(self, msg):
601 """Get LATENCY from the test message.
603 :param msg: The test message to be parsed.
605 :returns: Parsed data as a dict and the status (PASS/FAIL).
606 :rtype: tuple(dict, str)
611 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
612 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
615 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
616 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
620 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
622 if groups is not None:
623 keys = ("min", "avg", "max")
625 latency["NDR"]["direction1"] = dict(
626 zip(keys, [float(l) for l in groups.group(1).split('/')]))
627 latency["NDR"]["direction2"] = dict(
628 zip(keys, [float(l) for l in groups.group(2).split('/')]))
629 latency["PDR"]["direction1"] = dict(
630 zip(keys, [float(l) for l in groups.group(3).split('/')]))
631 latency["PDR"]["direction2"] = dict(
632 zip(keys, [float(l) for l in groups.group(4).split('/')]))
634 except (IndexError, ValueError):
637 return latency, status
def visit_suite(self, suite):
    """Traverse the suite: descend into child suites first, then the
    tests, then close the suite.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """

    # A False return from start_suite() would prune the whole subtree.
    if self.start_suite(suite) is not False:
        suite.suites.visit(self)
        suite.tests.visit(self)
        self.end_suite(suite)
651 def start_suite(self, suite):
652 """Called when suite starts.
654 :param suite: Suite to process.
# NOTE(review): the try/default-value lines around parent_name
# (original 655-659, 662-663) are elided in this listing.
660 parent_name = suite.parent.name
661 except AttributeError:
# Escape quotes, collapse newlines and insert " |br| " markers before
# "*[" anchors; the first inserted marker is removed again below.
664 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
665 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
# `replace` here is the Python-2-only string.replace(s, old, new,
# maxreplace) imported at the top of the file.
666 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
668 self._data["suites"][suite.longname.lower().replace('"', "'").
669 replace(" ", "_")] = {
670 "name": suite.name.lower(),
672 "parent": parent_name,
673 "level": len(suite.longname.split("."))
676 suite.keywords.visit(self)
678 def end_suite(self, suite):
679 """Called when suite ends.
681 :param suite: Suite to process.
687 def visit_test(self, test):
688 """Implements traversing through the test.
690 :param test: Test to process.
694 if self.start_test(test) is not False:
695 test.keywords.visit(self)
# NOTE(review): the closing end_test() call (original line 696) is
# elided in this listing.
698 def start_test(self, test):
699 """Called when test starts.
701 :param test: Test to process.
# NOTE(review): many lines of this method are elided in this listing
# (the embedded numbering jumps); restore from VCS before editing.
706 longname_orig = test.longname.lower()
708 # Check the ignore list
709 if longname_orig in self._ignore:
712 tags = [str(tag) for tag in test.tags]
715 # Change the TC long name and name if defined in the mapping table
716 longname = self._mapping.get(longname_orig, None)
717 if longname is not None:
718 name = longname.split('.')[-1]
719 logging.debug("{0}\n{1}\n{2}\n{3}".format(
720 self._data["metadata"], longname_orig, longname, name))
722 longname = longname_orig
723 name = test.name.lower()
725 # Remove TC number from the TC long name (backward compatibility):
726 self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
727 # Remove TC number from the TC name (not needed):
728 test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
730 test_result["parent"] = test.parent.name.lower()
731 test_result["tags"] = tags
732 doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
733 replace('\r', '').replace('[', ' |br| [')
# Py2-only string.replace(s, old, new, maxreplace) — see module imports.
734 test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
735 test_result["msg"] = test.message.replace('\n', ' |br| '). \
736 replace('\r', '').replace('"', "'")
737 test_result["type"] = "FUNC"
738 test_result["status"] = test.status
740 if "PERFTEST" in tags:
741 # Replace info about cores (e.g. -1c-) with the info about threads
742 # and cores (e.g. -1t1c-) in the long test case names and in the
743 # test case names if necessary.
744 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
747 for tag in test_result["tags"]:
748 groups = re.search(self.REGEX_TC_TAG, tag)
754 self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
755 "-{0}-".format(tag_tc.lower()),
758 test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
759 "-{0}-".format(tag_tc.lower()),
# A test without exactly one NtNc threading tag cannot be renamed and
# is stored as FAILed.
763 test_result["status"] = "FAIL"
764 self._data["tests"][self._test_ID] = test_result
765 logging.debug("The test '{0}' has no or more than one "
766 "multi-threading tags.".format(self._test_ID))
767 logging.debug("Tags: {0}".format(test_result["tags"]))
# Classify the passed test by its tags and parse the matching results.
770 if test.status == "PASS" and ("NDRPDRDISC" in tags or
776 # TODO: Remove when definitely no NDRPDRDISC tests are used:
777 if "NDRDISC" in tags:
778 test_result["type"] = "NDR"
779 # TODO: Remove when definitely no NDRPDRDISC tests are used:
780 elif "PDRDISC" in tags:
781 test_result["type"] = "PDR"
782 elif "NDRPDR" in tags:
783 test_result["type"] = "NDRPDR"
785 test_result["type"] = "SOAK"
787 test_result["type"] = "TCP"
789 test_result["type"] = "MRR"
790 elif "FRMOBL" in tags or "BMRR" in tags:
791 test_result["type"] = "BMRR"
793 test_result["status"] = "FAIL"
794 self._data["tests"][self._test_ID] = test_result
797 # TODO: Remove when definitely no NDRPDRDISC tests are used:
798 if test_result["type"] in ("NDR", "PDR"):
800 rate_value = str(re.search(
801 self.REGEX_RATE, test.message).group(1))
802 except AttributeError:
805 rate_unit = str(re.search(
806 self.REGEX_RATE, test.message).group(2))
807 except AttributeError:
810 test_result["throughput"] = dict()
811 test_result["throughput"]["value"] = \
812 int(rate_value.split('.')[0])
813 test_result["throughput"]["unit"] = rate_unit
814 test_result["latency"] = \
815 self._get_latency(test.message, test_result["type"])
816 if test_result["type"] == "PDR":
817 test_result["lossTolerance"] = str(re.search(
818 self.REGEX_TOLERANCE, test.message).group(1))
820 elif test_result["type"] in ("NDRPDR", ):
821 test_result["throughput"], test_result["status"] = \
822 self._get_ndrpdr_throughput(test.message)
823 test_result["latency"], test_result["status"] = \
824 self._get_ndrpdr_latency(test.message)
826 elif test_result["type"] in ("SOAK", ):
827 test_result["throughput"], test_result["status"] = \
828 self._get_plr_throughput(test.message)
830 elif test_result["type"] in ("TCP", ):
831 groups = re.search(self.REGEX_TCP, test.message)
832 test_result["result"] = int(groups.group(2))
834 elif test_result["type"] in ("MRR", "BMRR"):
835 test_result["result"] = dict()
836 groups = re.search(self.REGEX_BMRR, test.message)
837 if groups is not None:
838 items_str = groups.group(1)
839 items_float = [float(item.strip()) for item
840 in items_str.split(",")]
841 metadata = AvgStdevMetadataFactory.from_data(items_float)
842 # Next two lines have been introduced in CSIT-1179,
843 # to be removed in CSIT-1180.
846 test_result["result"]["receive-rate"] = metadata
# Fallback for the older single-trial MRR message format.
848 groups = re.search(self.REGEX_MRR, test.message)
849 test_result["result"]["receive-rate"] = \
850 AvgStdevMetadataFactory.from_data([
851 float(groups.group(3)) / float(groups.group(1)), ])
853 self._data["tests"][self._test_ID] = test_result
855 def end_test(self, test):
856 """Called when test ends.
858 :param test: Test to process.
864 def visit_keyword(self, keyword):
865 """Implements traversing through the keyword and its child keywords.
867 :param keyword: Keyword to process.
868 :type keyword: Keyword
# A False return from start_keyword() would skip the keyword entirely.
871 if self.start_keyword(keyword) is not False:
872 self.end_keyword(keyword)
874 def start_keyword(self, keyword):
875 """Called when keyword starts. Default implementation does nothing.
877 :param keyword: Keyword to process.
878 :type keyword: Keyword
# Dispatch on the keyword type: setup / teardown / (presumably) test.
882 if keyword.type == "setup":
883 self.visit_setup_kw(keyword)
884 elif keyword.type == "teardown":
885 self._lookup_kw_nr = 0
886 self.visit_teardown_kw(keyword)
# NOTE(review): the branch header before the next two lines (original
# line 887, presumably an `else:`) is elided in this listing.
888 self._lookup_kw_nr = 0
889 self.visit_test_kw(keyword)
890 except AttributeError:
893 def end_keyword(self, keyword):
894 """Called when keyword ends. Default implementation does nothing.
896 :param keyword: Keyword to process.
897 :type keyword: Keyword
902 def visit_test_kw(self, test_kw):
903 """Implements traversing through the test keyword and its child
906 :param test_kw: Keyword to process.
907 :type test_kw: Keyword
# Depth-first traversal of the keyword subtree.
910 for keyword in test_kw.keywords:
911 if self.start_test_kw(keyword) is not False:
912 self.visit_test_kw(keyword)
913 self.end_test_kw(keyword)
915 def start_test_kw(self, test_kw):
916 """Called when test keyword starts. Default implementation does
919 :param test_kw: Keyword to process.
920 :type test_kw: Keyword
# Arm the message parser for the keywords whose output we harvest.
923 if test_kw.name.count("Show Runtime Counters On All Duts"):
924 self._lookup_kw_nr += 1
925 self._show_run_lookup_nr = 0
926 self._msg_type = "test-show-runtime"
927 elif test_kw.name.count("Start The L2fwd Test") and not self._version:
928 self._msg_type = "dpdk-version"
931 test_kw.messages.visit(self)
933 def end_test_kw(self, test_kw):
934 """Called when keyword ends. Default implementation does nothing.
936 :param test_kw: Keyword to process.
937 :type test_kw: Keyword
942 def visit_setup_kw(self, setup_kw):
943 """Implements traversing through the teardown keyword and its child
946 :param setup_kw: Keyword to process.
947 :type setup_kw: Keyword
950 for keyword in setup_kw.keywords:
951 if self.start_setup_kw(keyword) is not False:
952 self.visit_setup_kw(keyword)
953 self.end_setup_kw(keyword)
955 def start_setup_kw(self, setup_kw):
956 """Called when teardown keyword starts. Default implementation does
959 :param setup_kw: Keyword to process.
960 :type setup_kw: Keyword
# Arm the message parser for version / timestamp / testbed extraction;
# each is extracted at most once per output file.
963 if setup_kw.name.count("Show Vpp Version On All Duts") \
964 and not self._version:
965 self._msg_type = "vpp-version"
967 elif setup_kw.name.count("Setup performance global Variables") \
968 and not self._timestamp:
969 self._msg_type = "timestamp"
970 elif setup_kw.name.count("Setup Framework") and not self._testbed:
971 self._msg_type = "testbed"
974 setup_kw.messages.visit(self)
976 def end_setup_kw(self, setup_kw):
977 """Called when keyword ends. Default implementation does nothing.
979 :param setup_kw: Keyword to process.
980 :type setup_kw: Keyword
985 def visit_teardown_kw(self, teardown_kw):
986 """Implements traversing through the teardown keyword and its child
989 :param teardown_kw: Keyword to process.
990 :type teardown_kw: Keyword
993 for keyword in teardown_kw.keywords:
994 if self.start_teardown_kw(keyword) is not False:
995 self.visit_teardown_kw(keyword)
996 self.end_teardown_kw(keyword)
998 def start_teardown_kw(self, teardown_kw):
999 """Called when teardown keyword starts. Default implementation does
1002 :param teardown_kw: Keyword to process.
1003 :type teardown_kw: Keyword
# Reset the DUT counter so _get_vat_history() numbers DUTs from 1.
1007 if teardown_kw.name.count("Show Vat History On All Duts"):
1008 self._vat_history_lookup_nr = 0
1009 self._msg_type = "teardown-vat-history"
1010 teardown_kw.messages.visit(self)
1012 def end_teardown_kw(self, teardown_kw):
1013 """Called when keyword ends. Default implementation does nothing.
1015 :param teardown_kw: Keyword to process.
1016 :type teardown_kw: Keyword
1021 def visit_message(self, msg):
1022 """Implements visiting the message.
1024 :param msg: Message to process.
1028 if self.start_message(msg) is not False:
1029 self.end_message(msg)
1031 def start_message(self, msg):
1032 """Called when message starts. Get required information from messages:
1035 :param msg: Message to process.
# Dispatch to the parser selected by the currently armed _msg_type
# (see the parse_msg table built in __init__).
1041 self.parse_msg[self._msg_type](msg)
1043 def end_message(self, msg):
1044 """Called when message ends. Default implementation does nothing.
1046 :param msg: Message to process.
1053 class InputData(object):
1056 The data is extracted from output.xml files generated by Jenkins jobs and
1057 stored in pandas' DataFrames.
1063 (as described in ExecutionChecker documentation)
1065 (as described in ExecutionChecker documentation)
1067 (as described in ExecutionChecker documentation)
1070 def __init__(self, spec):
1073 :param spec: Specification.
1074 :type spec: Specification
# NOTE(review): lines elided here; presumably self._cfg is initialised
# from `spec` — confirm against VCS.
1081 self._input_data = pd.Series()
# NOTE(review): the "data" @property definition line is elided; only
# its docstring fragment and return survive below.
1085 """Getter - Input data.
1087 :returns: Input data
1088 :rtype: pandas.Series
1090 return self._input_data
1092 def metadata(self, job, build):
1093 """Getter - metadata
1095 :param job: Job which metadata we want.
1096 :param build: Build which metadata we want.
1100 :rtype: pandas.Series
1103 return self.data[job][build]["metadata"]
1105 def suites(self, job, build):
1108 :param job: Job which suites we want.
1109 :param build: Build which suites we want.
1113 :rtype: pandas.Series
1116 return self.data[job][str(build)]["suites"]
1118 def tests(self, job, build):
1121 :param job: Job which tests we want.
1122 :param build: Build which tests we want.
1126 :rtype: pandas.Series
1129 return self.data[job][build]["tests"]
1131 def _parse_tests(self, job, build, log):
1132 """Process data from robot output.xml file and return JSON structured
1135 :param job: The name of job which build output data will be processed.
1136 :param build: The build which output data will be processed.
1137 :param log: List of log messages.
1140 :type log: list of tuples (severity, msg)
1141 :returns: JSON data structure.
# NOTE(review): lines elided in this listing (metadata construction,
# try:, error return, final return of checker.data); restore from VCS.
1150 with open(build["file-name"], 'r') as data_file:
1152 result = ExecutionResult(data_file)
1153 except errors.DataError as err:
1154 log.append(("ERROR", "Error occurred while parsing output.xml: "
1157 checker = ExecutionChecker(metadata, self._cfg.mapping,
1159 result.visit(checker)
1163 def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
1164 """Download and parse the input data file.
1166 :param pid: PID of the process executing this method.
1167 :param data_queue: Shared memory between processes. Queue which keeps
1168 the result data. This data is then read by the main process and used
1169 in further processing.
1170 :param job: Name of the Jenkins job which generated the processed input
1172 :param build: Information about the Jenkins build which generated the
1173 processed input file.
1174 :param repeat: Repeat the download specified number of times if not
1177 :type data_queue: multiprocessing.Manager().Queue()
# NOTE(review): many lines of this worker method are elided in this
# listing (logs list init, state handling, result dict assembly);
# restore from VCS before editing.
1185 logging.info(" Processing the job/build: {0}: {1}".
1186 format(job, build["build"]))
1188 logs.append(("INFO", " Processing the job/build: {0}: {1}".
1189 format(job, build["build"])))
1196 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1202 logs.append(("ERROR", "It is not possible to download the input "
1203 "data file from the job '{job}', build "
1204 "'{build}', or it is damaged. Skipped.".
1205 format(job=job, build=build["build"])))
1207 logs.append(("INFO", " Processing data from the build '{0}' ...".
1208 format(build["build"])))
1209 data = self._parse_tests(job, build, logs)
1211 logs.append(("ERROR", "Input data file from the job '{job}', "
1212 "build '{build}' is damaged. Skipped.".
1213 format(job=job, build=build["build"])))
# Best-effort cleanup of the downloaded file.
1218 remove(build["file-name"])
1219 except OSError as err:
1220 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
1221 format(build["file-name"], repr(err))))
1223 # If the time-period is defined in the specification file, remove all
1224 # files which are outside the time period.
1225 timeperiod = self._cfg.input.get("time-period", None)
1226 if timeperiod and data:
# time-period is interpreted as a number of days.
1228 timeperiod = timedelta(int(timeperiod))
1229 metadata = data.get("metadata", None)
1231 generated = metadata.get("generated", None)
1233 generated = dt.strptime(generated, "%Y%m%d %H:%M")
1234 if (now - generated) > timeperiod:
1235 # Remove the data and the file:
1240 " The build {job}/{build} is outdated, will be "
1241 "removed".format(job=job, build=build["build"])))
1242 file_name = self._cfg.input["file-name"]
1244 self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
1245 "{job}{sep}{build}{sep}{name}".
1248 build=build["build"],
1252 logs.append(("INFO",
1253 " The file {name} has been removed".
1254 format(name=full_name)))
1255 except OSError as err:
1256 logs.append(("ERROR",
1257 "Cannot remove the file '{0}': {1}".
1258 format(full_name, repr(err))))
1260 logs.append(("INFO", " Done."))
# The assembled result (state, build info, data, logs) is handed back
# to the parent process through the shared queue.
1269 data_queue.put(result)
1271 def download_and_parse_data(self, repeat=1):
1272 """Download the input data files, parse input data from input files and
1273 store in pandas' Series.
1275 :param repeat: Repeat the download specified number of times if not
# NOTE(review): lines elided in this listing (worker start/join,
# result state checks); restore from VCS before editing.
1280 logging.info("Downloading and parsing input files ...")
# One worker process per CPU, fed from a joinable work queue; results
# come back through a managed queue.
1282 work_queue = multiprocessing.JoinableQueue()
1283 manager = multiprocessing.Manager()
1284 data_queue = manager.Queue()
1285 cpus = multiprocessing.cpu_count()
1288 for cpu in range(cpus):
1289 worker = Worker(work_queue,
1291 self._download_and_parse_build)
1292 worker.daemon = True
1294 workers.append(worker)
# Pin each worker to one CPU; output is deliberately discarded.
1295 os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
1296 format(cpu, worker.pid))
1298 for job, builds in self._cfg.builds.items():
1299 for build in builds:
1300 work_queue.put((job, build, repeat))
1304 logging.info("Done.")
# Drain the result queue and fold each build's data into the
# job/build-indexed pandas Series.
1306 while not data_queue.empty():
1307 result = data_queue.get()
1310 build_nr = result["build"]["build"]
1313 data = result["data"]
1314 build_data = pd.Series({
1315 "metadata": pd.Series(data["metadata"].values(),
1316 index=data["metadata"].keys()),
1317 "suites": pd.Series(data["suites"].values(),
1318 index=data["suites"].keys()),
1319 "tests": pd.Series(data["tests"].values(),
1320 index=data["tests"].keys())})
1322 if self._input_data.get(job, None) is None:
1323 self._input_data[job] = pd.Series()
1324 self._input_data[job][str(build_nr)] = build_data
1326 self._cfg.set_input_file_name(job, build_nr,
1327 result["build"]["file-name"])
1329 self._cfg.set_input_state(job, build_nr, result["state"])
# Replay the worker's buffered log records at their original severity.
1331 for item in result["logs"]:
1332 if item[0] == "INFO":
1333 logging.info(item[1])
1334 elif item[0] == "ERROR":
1335 logging.error(item[1])
1336 elif item[0] == "DEBUG":
1337 logging.debug(item[1])
1338 elif item[0] == "CRITICAL":
1339 logging.critical(item[1])
1340 elif item[0] == "WARNING":
1341 logging.warning(item[1])
1345 # Terminate all workers
1346 for worker in workers:
1350 logging.info("Done.")
1353 def _end_of_tag(tag_filter, start=0, closer="'"):
1354 """Return the index of character in the string which is the end of tag.
1356 :param tag_filter: The string where the end of tag is being searched.
1357 :param start: The index where the searching is stated.
1358 :param closer: The character which is the tag closer.
1359 :type tag_filter: str
1362 :returns: The index of the tag closer.
# NOTE(review): error-handling lines (original 1363-1366, 1369-1371 —
# presumably a try/except ValueError) are elided in this listing.
# Find the opening quote at/after `start`, then return the index of
# the matching closing quote.
1367 idx_opener = tag_filter.index(closer, start)
1368 return tag_filter.index(closer, idx_opener + 1)
1373 def _condition(tag_filter):
1374 """Create a conditional statement from the given tag filter.
1376 :param tag_filter: Filter based on tags from the element specification.
1377 :type tag_filter: str
1378 :returns: Conditional statement which can be evaluated.
# NOTE(review): most of this method's body (loop structure, index
# initialisation, return) is elided in this listing; restore from VCS.
# Visible fragment: rewrite each 'tag' literal into "'tag' in tags".
1384 index = InputData._end_of_tag(tag_filter, index)
1388 tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
1390 def filter_data(self, element, params=None, data_set="tests",
1391 continue_on_error=False):
1392 """Filter required data from the given jobs and builds.
1394 The output data structure is:
1398 - test (or suite) 1 ID:
1404 - test (or suite) n ID:
1411 :param element: Element which will use the filtered data.
1412 :param params: Parameters which will be included in the output. If None,
1413 all parameters are included.
1414 :param data_set: The set of data to be filtered: tests, suites,
1416 :param continue_on_error: Continue if there is error while reading the
1417 data. The Item will be empty then
1418 :type element: pandas.Series
1421 :type continue_on_error: bool
1422 :returns: Filtered data.
1423 :rtype pandas.Series
# NOTE(review): several lines of this method are elided in this listing;
# restore from VCS before editing.
1427 if element["filter"] in ("all", "template"):
1430 cond = InputData._condition(element["filter"])
1431 logging.debug(" Filter: {0}".format(cond))
1433 logging.error(" No filter defined.")
1437 params = element.get("parameters", None)
1439 params.append("type")
1443 for job, builds in element["data"].items():
1444 data[job] = pd.Series()
1445 for build in builds:
1446 data[job][str(build)] = pd.Series()
1448 data_iter = self.data[job][str(build)][data_set].\
1451 if continue_on_error:
1455 for test_ID, test_data in data_iter:
# HACK: the tag filter is evaluated with eval() on a string assembled
# by _condition(); the filter comes from the specification file, so it
# must never be sourced from untrusted input.
1456 if eval(cond, {"tags": test_data.get("tags", "")}):
1457 data[job][str(build)][test_ID] = pd.Series()
1459 for param, val in test_data.items():
1460 data[job][str(build)][test_ID][param] = val
1462 for param in params:
1464 data[job][str(build)][test_ID][param] =\
1467 data[job][str(build)][test_ID][param] =\
1471 except (KeyError, IndexError, ValueError) as err:
1472 logging.error(" Missing mandatory parameter in the element "
1473 "specification: {0}".format(err))
1475 except AttributeError:
1478 logging.error(" The filter '{0}' is not correct. Check if all "
1479 "tags are enclosed by apostrophes.".format(cond))
1483 def merge_data(data):
1484 """Merge data from more jobs and builds to a simple data structure.
1486 The output data structure is:
1488 - test (suite) 1 ID:
1494 - test (suite) n ID:
1497 :param data: Data to merge.
1498 :type data: pandas.Series
1499 :returns: Merged data.
1500 :rtype: pandas.Series
1503 logging.info(" Merging data ...")
# Flatten job -> build -> item into a single ID-indexed Series; later
# builds overwrite earlier entries with the same ID.
1505 merged_data = pd.Series()
1506 for _, builds in data.iteritems():
1507 for _, item in builds.iteritems():
1508 for ID, item_data in item.iteritems():
1509 merged_data[ID] = item_data
# NOTE(review): the final `return merged_data` is not visible in this
# listing (the file appears truncated here); confirm against VCS.