1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
import logging
import re
import resource

from collections import OrderedDict
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from os import remove
from os.path import join
from string import replace

import pandas as pd

from robot import errors
from robot.api import ExecutionResult, ResultVisitor

from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory

from input_data_files import download_and_unzip_data_file
41 # Separator used in file names
class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure
    with these top-level sections:

    - "metadata": information about the data source, e.g.:
        "generated": "Timestamp",
        "version": "SUT version",
        "job": "Jenkins job name",
        "build": "Information about the build"
    - "suites": dictionary indexed by suite long name; each item holds:
        "doc": "Suite documentation",
        "parent": "Suite parent",
        "level": "Level of the suite in the suite hierarchy"
    - "tests": dictionary indexed by test ID; each item holds:
        "name": "Test name",
        "parent": "Name of the parent of the test",
        "doc": "Test documentation",
        "msg": "Test message",
        "tags": ["tag 1", "tag 2", "tag n"],
        "type": "NDRPDR" | "NDR" | "PDR" | "MRR" | "BMRR" | "TCP" | "SOAK"
                | "FUNC",
        "status": "PASS" | "FAIL",
        plus type-specific items such as "throughput", "latency",
        "receive-rate", "result", "lossTolerance", "vat-history" and
        "show-run".

    .. note:: ID is the lowercase full path to the test.
    """

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::\s(\d+.\d+)')

    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')

    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')

    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    # NOTE(review): the tail of this pattern was truncated in the reviewed
    # listing; r'[\D\d]*' restores a balanced terminator - confirm against VCS.
    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
                                    r"(RTE Version: 'DPDK )(.*)(')")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
302 def __init__(self, metadata, mapping, ignore):
305 :param metadata: Key-value pairs to be included in "metadata" part of
307 :param mapping: Mapping of the old names of test cases to the new
309 :param ignore: List of TCs to be ignored.
315 # Type of message to parse out from the test messages
316 self._msg_type = None
322 self._timestamp = None
324 # Testbed. The testbed is identified by TG node IP address.
327 # Mapping of TCs long names
328 self._mapping = mapping
331 self._ignore = ignore
333 # Number of VAT History messages found:
335 # 1 - VAT History of DUT1
336 # 2 - VAT History of DUT2
337 self._lookup_kw_nr = 0
338 self._vat_history_lookup_nr = 0
340 # Number of Show Running messages found
342 # 1 - Show run message found
343 self._show_run_lookup_nr = 0
345 # Test ID of currently processed test- the lowercase full path to the
349 # The main data structure
351 "metadata": OrderedDict(),
352 "suites": OrderedDict(),
353 "tests": OrderedDict()
356 # Save the provided metadata
357 for key, val in metadata.items():
358 self._data["metadata"][key] = val
360 # Dictionary defining the methods used to parse different types of
363 "timestamp": self._get_timestamp,
364 "vpp-version": self._get_vpp_version,
365 "dpdk-version": self._get_dpdk_version,
366 "teardown-vat-history": self._get_vat_history,
367 "test-show-runtime": self._get_show_run,
368 "testbed": self._get_testbed
373 """Getter - Data parsed from the XML file.
375 :returns: Data parsed from the XML file.
380 def _get_testbed(self, msg):
381 """Called when extraction of testbed IP is required.
382 The testbed is identified by TG node IP address.
384 :param msg: Message to process.
389 if msg.message.count("Arguments:"):
390 message = str(msg.message).replace(' ', '').replace('\n', '').\
391 replace("'", '"').replace('b"', '"').\
392 replace("honeycom", "honeycomb")
393 message = loads(message[11:-1])
395 self._testbed = message["TG"]["host"]
396 except (KeyError, ValueError):
399 self._data["metadata"]["testbed"] = self._testbed
400 self._msg_type = None
402 def _get_vpp_version(self, msg):
403 """Called when extraction of VPP version is required.
405 :param msg: Message to process.
410 if msg.message.count("return STDOUT Version:") or \
411 msg.message.count("VPP Version:"):
412 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
414 self._data["metadata"]["version"] = self._version
415 self._msg_type = None
417 def _get_dpdk_version(self, msg):
418 """Called when extraction of DPDK version is required.
420 :param msg: Message to process.
425 if msg.message.count("return STDOUT testpmd"):
427 self._version = str(re.search(
428 self.REGEX_VERSION_DPDK, msg.message). group(4))
429 self._data["metadata"]["version"] = self._version
433 self._msg_type = None
435 def _get_timestamp(self, msg):
436 """Called when extraction of timestamp is required.
438 :param msg: Message to process.
443 self._timestamp = msg.timestamp[:14]
444 self._data["metadata"]["generated"] = self._timestamp
445 self._msg_type = None
447 def _get_vat_history(self, msg):
448 """Called when extraction of VAT command history is required.
450 :param msg: Message to process.
454 if msg.message.count("VAT command history:"):
455 self._vat_history_lookup_nr += 1
456 if self._vat_history_lookup_nr == 1:
457 self._data["tests"][self._test_ID]["vat-history"] = str()
459 self._msg_type = None
460 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
461 "VAT command history:", "", msg.message, count=1). \
462 replace("\n\n", "\n").replace('\n', ' |br| ').\
463 replace('\r', '').replace('"', "'")
465 self._data["tests"][self._test_ID]["vat-history"] += " |br| "
466 self._data["tests"][self._test_ID]["vat-history"] += \
467 "**DUT" + str(self._vat_history_lookup_nr) + ":** " + text
    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("return STDOUT Thread "):
            self._show_run_lookup_nr += 1
            # First "Show Runtime" of the first lookup keyword starts a fresh
            # "show-run" entry for the current test.
            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
                self._data["tests"][self._test_ID]["show-run"] = str()
            # Only the first lookup keyword keeps this parser armed.
            if self._lookup_kw_nr > 1:
                self._msg_type = None
            if self._show_run_lookup_nr == 1:
                # Normalise CLI output for reST rendering (" |br| " is the
                # line-break marker used downstream).
                text = msg.message.replace("vat# ", "").\
                    replace("return STDOUT ", "").replace("\n\n", "\n").\
                    replace('\n', ' |br| ').\
                    replace('\r', '').replace('"', "'")
                self._data["tests"][self._test_ID]["show-run"] += " |br| "
                self._data["tests"][self._test_ID]["show-run"] += \
                    "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
495 # TODO: Remove when definitely no NDRPDRDISC tests are used:
496 def _get_latency(self, msg, test_type):
497 """Get the latency data from the test message.
499 :param msg: Message to be parsed.
500 :param test_type: Type of the test - NDR or PDR.
503 :returns: Latencies parsed from the message.
507 if test_type == "NDR":
508 groups = re.search(self.REGEX_LAT_NDR, msg)
509 groups_range = range(1, 7)
510 elif test_type == "PDR":
511 groups = re.search(self.REGEX_LAT_PDR, msg)
512 groups_range = range(1, 3)
517 for idx in groups_range:
519 lat = [int(item) for item in str(groups.group(idx)).split('/')]
520 except (AttributeError, ValueError):
522 latencies.append(lat)
524 keys = ("min", "avg", "max")
532 latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
533 latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
534 if test_type == "NDR":
535 latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
536 latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
537 latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
538 latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
542 def _get_ndrpdr_throughput(self, msg):
543 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
546 :param msg: The test message to be parsed.
548 :returns: Parsed data as a dict and the status (PASS/FAIL).
549 :rtype: tuple(dict, str)
553 "NDR": {"LOWER": -1.0, "UPPER": -1.0},
554 "PDR": {"LOWER": -1.0, "UPPER": -1.0}
557 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
559 if groups is not None:
561 throughput["NDR"]["LOWER"] = float(groups.group(1))
562 throughput["NDR"]["UPPER"] = float(groups.group(2))
563 throughput["PDR"]["LOWER"] = float(groups.group(3))
564 throughput["PDR"]["UPPER"] = float(groups.group(4))
566 except (IndexError, ValueError):
569 return throughput, status
571 def _get_plr_throughput(self, msg):
572 """Get PLRsearch lower bound and PLRsearch upper bound from the test
575 :param msg: The test message to be parsed.
577 :returns: Parsed data as a dict and the status (PASS/FAIL).
578 :rtype: tuple(dict, str)
586 groups = re.search(self.REGEX_PLR_RATE, msg)
588 if groups is not None:
590 throughput["LOWER"] = float(groups.group(1))
591 throughput["UPPER"] = float(groups.group(2))
593 except (IndexError, ValueError):
596 return throughput, status
598 def _get_ndrpdr_latency(self, msg):
599 """Get LATENCY from the test message.
601 :param msg: The test message to be parsed.
603 :returns: Parsed data as a dict and the status (PASS/FAIL).
604 :rtype: tuple(dict, str)
609 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
610 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
613 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
614 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
618 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
620 if groups is not None:
621 keys = ("min", "avg", "max")
623 latency["NDR"]["direction1"] = dict(
624 zip(keys, [float(l) for l in groups.group(1).split('/')]))
625 latency["NDR"]["direction2"] = dict(
626 zip(keys, [float(l) for l in groups.group(2).split('/')]))
627 latency["PDR"]["direction1"] = dict(
628 zip(keys, [float(l) for l in groups.group(3).split('/')]))
629 latency["PDR"]["direction2"] = dict(
630 zip(keys, [float(l) for l in groups.group(4).split('/')]))
632 except (IndexError, ValueError):
635 return latency, status
637 def visit_suite(self, suite):
638 """Implements traversing through the suite and its direct children.
640 :param suite: Suite to process.
644 if self.start_suite(suite) is not False:
645 suite.suites.visit(self)
646 suite.tests.visit(self)
647 self.end_suite(suite)
649 def start_suite(self, suite):
650 """Called when suite starts.
652 :param suite: Suite to process.
658 parent_name = suite.parent.name
659 except AttributeError:
662 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
663 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
664 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
666 self._data["suites"][suite.longname.lower().replace('"', "'").
667 replace(" ", "_")] = {
668 "name": suite.name.lower(),
670 "parent": parent_name,
671 "level": len(suite.longname.split("."))
674 suite.keywords.visit(self)
676 def end_suite(self, suite):
677 """Called when suite ends.
679 :param suite: Suite to process.
685 def visit_test(self, test):
686 """Implements traversing through the test.
688 :param test: Test to process.
692 if self.start_test(test) is not False:
693 test.keywords.visit(self)
696 def start_test(self, test):
697 """Called when test starts.
699 :param test: Test to process.
704 longname_orig = test.longname.lower()
706 # Check the ignore list
707 if longname_orig in self._ignore:
710 tags = [str(tag) for tag in test.tags]
713 # Change the TC long name and name if defined in the mapping table
714 longname = self._mapping.get(longname_orig, None)
715 if longname is not None:
716 name = longname.split('.')[-1]
717 logging.debug("{0}\n{1}\n{2}\n{3}".format(
718 self._data["metadata"], longname_orig, longname, name))
720 longname = longname_orig
721 name = test.name.lower()
723 # Remove TC number from the TC long name (backward compatibility):
724 self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
725 # Remove TC number from the TC name (not needed):
726 test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
728 test_result["parent"] = test.parent.name.lower()
729 test_result["tags"] = tags
730 doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
731 replace('\r', '').replace('[', ' |br| [')
732 test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
733 test_result["msg"] = test.message.replace('\n', ' |br| '). \
734 replace('\r', '').replace('"', "'")
735 test_result["type"] = "FUNC"
736 test_result["status"] = test.status
738 if "PERFTEST" in tags:
739 # Replace info about cores (e.g. -1c-) with the info about threads
740 # and cores (e.g. -1t1c-) in the long test case names and in the
741 # test case names if necessary.
742 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
745 for tag in test_result["tags"]:
746 groups = re.search(self.REGEX_TC_TAG, tag)
752 self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
753 "-{0}-".format(tag_tc.lower()),
756 test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
757 "-{0}-".format(tag_tc.lower()),
761 test_result["status"] = "FAIL"
762 self._data["tests"][self._test_ID] = test_result
763 logging.debug("The test '{0}' has no or more than one "
764 "multi-threading tags.".format(self._test_ID))
765 logging.debug("Tags: {0}".format(test_result["tags"]))
768 if test.status == "PASS" and ("NDRPDRDISC" in tags or
774 # TODO: Remove when definitely no NDRPDRDISC tests are used:
775 if "NDRDISC" in tags:
776 test_result["type"] = "NDR"
777 # TODO: Remove when definitely no NDRPDRDISC tests are used:
778 elif "PDRDISC" in tags:
779 test_result["type"] = "PDR"
780 elif "NDRPDR" in tags:
781 test_result["type"] = "NDRPDR"
783 test_result["type"] = "SOAK"
785 test_result["type"] = "TCP"
787 test_result["type"] = "MRR"
788 elif "FRMOBL" in tags or "BMRR" in tags:
789 test_result["type"] = "BMRR"
791 test_result["status"] = "FAIL"
792 self._data["tests"][self._test_ID] = test_result
795 # TODO: Remove when definitely no NDRPDRDISC tests are used:
796 if test_result["type"] in ("NDR", "PDR"):
798 rate_value = str(re.search(
799 self.REGEX_RATE, test.message).group(1))
800 except AttributeError:
803 rate_unit = str(re.search(
804 self.REGEX_RATE, test.message).group(2))
805 except AttributeError:
808 test_result["throughput"] = dict()
809 test_result["throughput"]["value"] = \
810 int(rate_value.split('.')[0])
811 test_result["throughput"]["unit"] = rate_unit
812 test_result["latency"] = \
813 self._get_latency(test.message, test_result["type"])
814 if test_result["type"] == "PDR":
815 test_result["lossTolerance"] = str(re.search(
816 self.REGEX_TOLERANCE, test.message).group(1))
818 elif test_result["type"] in ("NDRPDR", ):
819 test_result["throughput"], test_result["status"] = \
820 self._get_ndrpdr_throughput(test.message)
821 test_result["latency"], test_result["status"] = \
822 self._get_ndrpdr_latency(test.message)
824 elif test_result["type"] in ("SOAK", ):
825 test_result["throughput"], test_result["status"] = \
826 self._get_plr_throughput(test.message)
828 elif test_result["type"] in ("TCP", ):
829 groups = re.search(self.REGEX_TCP, test.message)
830 test_result["result"] = int(groups.group(2))
832 elif test_result["type"] in ("MRR", "BMRR"):
833 test_result["result"] = dict()
834 groups = re.search(self.REGEX_BMRR, test.message)
835 if groups is not None:
836 items_str = groups.group(1)
837 items_float = [float(item.strip()) for item
838 in items_str.split(",")]
839 metadata = AvgStdevMetadataFactory.from_data(items_float)
840 # Next two lines have been introduced in CSIT-1179,
841 # to be removed in CSIT-1180.
844 test_result["result"]["receive-rate"] = metadata
846 groups = re.search(self.REGEX_MRR, test.message)
847 test_result["result"]["receive-rate"] = \
848 AvgStdevMetadataFactory.from_data([
849 float(groups.group(3)) / float(groups.group(1)), ])
851 self._data["tests"][self._test_ID] = test_result
853 def end_test(self, test):
854 """Called when test ends.
856 :param test: Test to process.
862 def visit_keyword(self, keyword):
863 """Implements traversing through the keyword and its child keywords.
865 :param keyword: Keyword to process.
866 :type keyword: Keyword
869 if self.start_keyword(keyword) is not False:
870 self.end_keyword(keyword)
872 def start_keyword(self, keyword):
873 """Called when keyword starts. Default implementation does nothing.
875 :param keyword: Keyword to process.
876 :type keyword: Keyword
880 if keyword.type == "setup":
881 self.visit_setup_kw(keyword)
882 elif keyword.type == "teardown":
883 self._lookup_kw_nr = 0
884 self.visit_teardown_kw(keyword)
886 self._lookup_kw_nr = 0
887 self.visit_test_kw(keyword)
888 except AttributeError:
891 def end_keyword(self, keyword):
892 """Called when keyword ends. Default implementation does nothing.
894 :param keyword: Keyword to process.
895 :type keyword: Keyword
900 def visit_test_kw(self, test_kw):
901 """Implements traversing through the test keyword and its child
904 :param test_kw: Keyword to process.
905 :type test_kw: Keyword
908 for keyword in test_kw.keywords:
909 if self.start_test_kw(keyword) is not False:
910 self.visit_test_kw(keyword)
911 self.end_test_kw(keyword)
913 def start_test_kw(self, test_kw):
914 """Called when test keyword starts. Default implementation does
917 :param test_kw: Keyword to process.
918 :type test_kw: Keyword
921 if test_kw.name.count("Show Runtime Counters On All Duts"):
922 self._lookup_kw_nr += 1
923 self._show_run_lookup_nr = 0
924 self._msg_type = "test-show-runtime"
925 elif test_kw.name.count("Start The L2fwd Test") and not self._version:
926 self._msg_type = "dpdk-version"
929 test_kw.messages.visit(self)
931 def end_test_kw(self, test_kw):
932 """Called when keyword ends. Default implementation does nothing.
934 :param test_kw: Keyword to process.
935 :type test_kw: Keyword
940 def visit_setup_kw(self, setup_kw):
941 """Implements traversing through the teardown keyword and its child
944 :param setup_kw: Keyword to process.
945 :type setup_kw: Keyword
948 for keyword in setup_kw.keywords:
949 if self.start_setup_kw(keyword) is not False:
950 self.visit_setup_kw(keyword)
951 self.end_setup_kw(keyword)
953 def start_setup_kw(self, setup_kw):
954 """Called when teardown keyword starts. Default implementation does
957 :param setup_kw: Keyword to process.
958 :type setup_kw: Keyword
961 if setup_kw.name.count("Show Vpp Version On All Duts") \
962 and not self._version:
963 self._msg_type = "vpp-version"
965 elif setup_kw.name.count("Setup performance global Variables") \
966 and not self._timestamp:
967 self._msg_type = "timestamp"
968 elif setup_kw.name.count("Setup Framework") and not self._testbed:
969 self._msg_type = "testbed"
972 setup_kw.messages.visit(self)
974 def end_setup_kw(self, setup_kw):
975 """Called when keyword ends. Default implementation does nothing.
977 :param setup_kw: Keyword to process.
978 :type setup_kw: Keyword
983 def visit_teardown_kw(self, teardown_kw):
984 """Implements traversing through the teardown keyword and its child
987 :param teardown_kw: Keyword to process.
988 :type teardown_kw: Keyword
991 for keyword in teardown_kw.keywords:
992 if self.start_teardown_kw(keyword) is not False:
993 self.visit_teardown_kw(keyword)
994 self.end_teardown_kw(keyword)
    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts. Default implementation does
        nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        if teardown_kw.name.count("Show Vat History On All Duts"):
            # Reset the per-test DUT counter before parsing the histories.
            self._vat_history_lookup_nr = 0
            self._msg_type = "teardown-vat-history"
        # Messages are visited unconditionally here; start_message() is
        # expected to gate on self._msg_type.
        teardown_kw.messages.visit(self)
1010 def end_teardown_kw(self, teardown_kw):
1011 """Called when keyword ends. Default implementation does nothing.
1013 :param teardown_kw: Keyword to process.
1014 :type teardown_kw: Keyword
1019 def visit_message(self, msg):
1020 """Implements visiting the message.
1022 :param msg: Message to process.
1026 if self.start_message(msg) is not False:
1027 self.end_message(msg)
1029 def start_message(self, msg):
1030 """Called when message starts. Get required information from messages:
1033 :param msg: Message to process.
1039 self.parse_msg[self._msg_type](msg)
1041 def end_message(self, msg):
1042 """Called when message ends. Default implementation does nothing.
1044 :param msg: Message to process.
class InputData(object):
    """Input data.

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' Series.

    The data structure:
    - job name
      - build number
        - metadata (as described in ExecutionChecker documentation)
        - suites (as described in ExecutionChecker documentation)
        - tests (as described in ExecutionChecker documentation)
    """
1068 def __init__(self, spec):
1071 :param spec: Specification.
1072 :type spec: Specification
1079 self._input_data = pd.Series()
1083 """Getter - Input data.
1085 :returns: Input data
1086 :rtype: pandas.Series
1088 return self._input_data
1090 def metadata(self, job, build):
1091 """Getter - metadata
1093 :param job: Job which metadata we want.
1094 :param build: Build which metadata we want.
1098 :rtype: pandas.Series
1101 return self.data[job][build]["metadata"]
1103 def suites(self, job, build):
1106 :param job: Job which suites we want.
1107 :param build: Build which suites we want.
1111 :rtype: pandas.Series
1114 return self.data[job][str(build)]["suites"]
1116 def tests(self, job, build):
1119 :param job: Job which tests we want.
1120 :param build: Build which tests we want.
1124 :rtype: pandas.Series
1127 return self.data[job][build]["tests"]
1129 def _parse_tests(self, job, build, log):
1130 """Process data from robot output.xml file and return JSON structured
1133 :param job: The name of job which build output data will be processed.
1134 :param build: The build which output data will be processed.
1135 :param log: List of log messages.
1138 :type log: list of tuples (severity, msg)
1139 :returns: JSON data structure.
1148 with open(build["file-name"], 'r') as data_file:
1150 result = ExecutionResult(data_file)
1151 except errors.DataError as err:
1152 log.append(("ERROR", "Error occurred while parsing output.xml: "
1155 checker = ExecutionChecker(metadata, self._cfg.mapping,
1157 result.visit(checker)
1161 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1162 """Download and parse the input data file.
1164 :param pid: PID of the process executing this method.
1165 :param job: Name of the Jenkins job which generated the processed input
1167 :param build: Information about the Jenkins build which generated the
1168 processed input file.
1169 :param repeat: Repeat the download specified number of times if not
1179 logs.append(("INFO", " Processing the job/build: {0}: {1}".
1180 format(job, build["build"])))
1187 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1193 logs.append(("ERROR", "It is not possible to download the input "
1194 "data file from the job '{job}', build "
1195 "'{build}', or it is damaged. Skipped.".
1196 format(job=job, build=build["build"])))
1198 logs.append(("INFO", " Processing data from the build '{0}' ...".
1199 format(build["build"])))
1200 data = self._parse_tests(job, build, logs)
1202 logs.append(("ERROR", "Input data file from the job '{job}', "
1203 "build '{build}' is damaged. Skipped.".
1204 format(job=job, build=build["build"])))
1209 remove(build["file-name"])
1210 except OSError as err:
1211 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
1212 format(build["file-name"], repr(err))))
1214 # If the time-period is defined in the specification file, remove all
1215 # files which are outside the time period.
1216 timeperiod = self._cfg.input.get("time-period", None)
1217 if timeperiod and data:
1219 timeperiod = timedelta(int(timeperiod))
1220 metadata = data.get("metadata", None)
1222 generated = metadata.get("generated", None)
1224 generated = dt.strptime(generated, "%Y%m%d %H:%M")
1225 if (now - generated) > timeperiod:
1226 # Remove the data and the file:
1231 " The build {job}/{build} is outdated, will be "
1232 "removed".format(job=job, build=build["build"])))
1233 file_name = self._cfg.input["file-name"]
1235 self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
1236 "{job}{sep}{build}{sep}{name}".format(
1239 build=build["build"],
1243 logs.append(("INFO",
1244 " The file {name} has been removed".
1245 format(name=full_name)))
1246 except OSError as err:
1247 logs.append(("ERROR",
1248 "Cannot remove the file '{0}': {1}".
1249 format(full_name, repr(err))))
1250 logs.append(("INFO", " Done."))
1252 for level, line in logs:
1255 elif level == "ERROR":
1257 elif level == "DEBUG":
1259 elif level == "CRITICAL":
1260 logging.critical(line)
1261 elif level == "WARNING":
1262 logging.warning(line)
1264 return {"data": data, "state": state, "job": job, "build": build}
1266 def download_and_parse_data(self, repeat=1):
1267 """Download the input data files, parse input data from input files and
1268 store in pandas' Series.
1270 :param repeat: Repeat the download specified number of times if not
1275 logging.info("Downloading and parsing input files ...")
1277 for job, builds in self._cfg.builds.items():
1278 for build in builds:
1280 result = self._download_and_parse_build(job, build, repeat)
1281 build_nr = result["build"]["build"]
1284 data = result["data"]
1285 build_data = pd.Series({
1286 "metadata": pd.Series(
1287 data["metadata"].values(),
1288 index=data["metadata"].keys()),
1289 "suites": pd.Series(data["suites"].values(),
1290 index=data["suites"].keys()),
1291 "tests": pd.Series(data["tests"].values(),
1292 index=data["tests"].keys())})
1294 if self._input_data.get(job, None) is None:
1295 self._input_data[job] = pd.Series()
1296 self._input_data[job][str(build_nr)] = build_data
1298 self._cfg.set_input_file_name(
1299 job, build_nr, result["build"]["file-name"])
1301 self._cfg.set_input_state(job, build_nr, result["state"])
1303 logging.info("Memory allocation: {0:,d}MB".format(
1304 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
1306 logging.info("Done.")
1309 def _end_of_tag(tag_filter, start=0, closer="'"):
1310 """Return the index of character in the string which is the end of tag.
1312 :param tag_filter: The string where the end of tag is being searched.
1313 :param start: The index where the searching is stated.
1314 :param closer: The character which is the tag closer.
1315 :type tag_filter: str
1318 :returns: The index of the tag closer.
1323 idx_opener = tag_filter.index(closer, start)
1324 return tag_filter.index(closer, idx_opener + 1)
1329 def _condition(tag_filter):
1330 """Create a conditional statement from the given tag filter.
1332 :param tag_filter: Filter based on tags from the element specification.
1333 :type tag_filter: str
1334 :returns: Conditional statement which can be evaluated.
1340 index = InputData._end_of_tag(tag_filter, index)
1344 tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
1346 def filter_data(self, element, params=None, data_set="tests",
1347 continue_on_error=False):
1348 """Filter required data from the given jobs and builds.
1350 The output data structure is:
1354 - test (or suite) 1 ID:
1360 - test (or suite) n ID:
1367 :param element: Element which will use the filtered data.
1368 :param params: Parameters which will be included in the output. If None,
1369 all parameters are included.
1370 :param data_set: The set of data to be filtered: tests, suites,
1372 :param continue_on_error: Continue if there is error while reading the
1373 data. The Item will be empty then
1374 :type element: pandas.Series
1377 :type continue_on_error: bool
1378 :returns: Filtered data.
1379 :rtype pandas.Series
1383 if element["filter"] in ("all", "template"):
1386 cond = InputData._condition(element["filter"])
1387 logging.debug(" Filter: {0}".format(cond))
1389 logging.error(" No filter defined.")
1393 params = element.get("parameters", None)
1395 params.append("type")
1399 for job, builds in element["data"].items():
1400 data[job] = pd.Series()
1401 for build in builds:
1402 data[job][str(build)] = pd.Series()
1404 data_iter = self.data[job][str(build)][data_set].\
1407 if continue_on_error:
1411 for test_ID, test_data in data_iter:
1412 if eval(cond, {"tags": test_data.get("tags", "")}):
1413 data[job][str(build)][test_ID] = pd.Series()
1415 for param, val in test_data.items():
1416 data[job][str(build)][test_ID][param] = val
1418 for param in params:
1420 data[job][str(build)][test_ID][param] =\
1423 data[job][str(build)][test_ID][param] =\
1427 except (KeyError, IndexError, ValueError) as err:
1428 logging.error(" Missing mandatory parameter in the element "
1429 "specification: {0}".format(err))
1431 except AttributeError:
1434 logging.error(" The filter '{0}' is not correct. Check if all "
1435 "tags are enclosed by apostrophes.".format(cond))
    def merge_data(data):
        """Merge data from more jobs and builds to a simple data structure.

        The output data structure is:
        - test (suite) 1 ID:
          - param 1
          - ...
        - ...
        - test (suite) n ID:
          - ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
        """

        logging.info("    Merging data ...")

        # Later builds overwrite earlier ones for the same test/suite ID.
        merged_data = pd.Series()
        for _, builds in data.iteritems():
            for _, item in builds.iteritems():
                for ID, item_data in item.iteritems():
                    merged_data[ID] = item_data
        # NOTE(review): the listing ends here; the trailing
        # "return merged_data" (and a probable @staticmethod decorator above)
        # appear to be cut off - confirm against VCS.