1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
27 from robot.api import ExecutionResult, ResultVisitor
28 from robot import errors
29 from collections import OrderedDict
30 from string import replace
32 from os.path import join
33 from datetime import datetime as dt
34 from datetime import timedelta
35 from json import loads
36 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
38 from input_data_files import download_and_unzip_data_file
41 # Separator used in file names
45 class ExecutionChecker(ResultVisitor):
46 """Class to traverse through the test suite structure.
48 The functionality implemented in this class generates a json structure:
54 "generated": "Timestamp",
55 "version": "SUT version",
56 "job": "Jenkins job name",
57 "build": "Information about the build"
60 "Suite long name 1": {
62 "doc": "Suite 1 documentation",
63 "parent": "Suite 1 parent",
64 "level": "Level of the suite in the suite hierarchy"
66 "Suite long name N": {
68 "doc": "Suite N documentation",
69 "parent": "Suite 2 parent",
70 "level": "Level of the suite in the suite hierarchy"
77 "parent": "Name of the parent of the test",
78 "doc": "Test documentation",
79 "msg": "Test message",
80 "conf-history": "DUT1 and DUT2 VAT History",
81 "show-run": "Show Run",
82 "tags": ["tag 1", "tag 2", "tag n"],
84 "status": "PASS" | "FAIL",
126 "parent": "Name of the parent of the test",
127 "doc": "Test documentation",
128 "msg": "Test message",
129 "tags": ["tag 1", "tag 2", "tag n"],
131 "status": "PASS" | "FAIL",
138 "parent": "Name of the parent of the test",
139 "doc": "Test documentation",
140 "msg": "Test message",
141 "tags": ["tag 1", "tag 2", "tag n"],
142 "type": "MRR" | "BMRR",
143 "status": "PASS" | "FAIL",
145 "receive-rate": AvgStdevMetadata,
149 # TODO: Remove when definitely no NDRPDRDISC tests are used:
153 "parent": "Name of the parent of the test",
154 "doc": "Test documentation",
155 "msg": "Test message",
156 "tags": ["tag 1", "tag 2", "tag n"],
157 "type": "PDR" | "NDR",
158 "status": "PASS" | "FAIL",
159 "throughput": { # Only type: "PDR" | "NDR"
161 "unit": "pps" | "bps" | "percentage"
163 "latency": { # Only type: "PDR" | "NDR"
170 "50": { # Only for NDR
175 "10": { # Only for NDR
187 "50": { # Only for NDR
192 "10": { # Only for NDR
199 "lossTolerance": "lossTolerance", # Only type: "PDR"
200 "conf-history": "DUT1 and DUT2 VAT History"
201 "show-run": "Show Run"
213 "metadata": { # Optional
214 "version": "VPP version",
215 "job": "Jenkins job name",
216 "build": "Information about the build"
220 "doc": "Suite 1 documentation",
221 "parent": "Suite 1 parent",
222 "level": "Level of the suite in the suite hierarchy"
225 "doc": "Suite N documentation",
226 "parent": "Suite 2 parent",
227 "level": "Level of the suite in the suite hierarchy"
233 "parent": "Name of the parent of the test",
234 "doc": "Test documentation"
235 "msg": "Test message"
236 "tags": ["tag 1", "tag 2", "tag n"],
237 "conf-history": "DUT1 and DUT2 VAT History"
238 "show-run": "Show Run"
239 "status": "PASS" | "FAIL"
247 .. note:: ID is the lowercase full path to the test.
250 # TODO: Remove when definitely no NDRPDRDISC tests are used:
251 REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
253 REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
254 r'PLRsearch upper bound::\s(\d+.\d+)')
256 REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
257 r'NDR_UPPER:\s(\d+.\d+).*\n'
258 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
259 r'PDR_UPPER:\s(\d+.\d+)')
261 # TODO: Remove when definitely no NDRPDRDISC tests are used:
262 REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
263 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
264 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
265 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
266 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
267 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
268 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')
270 REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
271 r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
272 r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')
274 REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
275 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
277 REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
280 REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
281 r"VPP Version:\s*|VPP version:\s*)(.*)")
283 REGEX_VERSION_DPDK = re.compile(r"DPDK Version: (\d*.\d*)")
285 REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
287 REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
288 r'tx\s(\d*),\srx\s(\d*)')
290 REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
291 r' in packets per second: \[(.*)\]')
293 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
295 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
297 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
299 REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
301 def __init__(self, metadata, mapping, ignore):
304 :param metadata: Key-value pairs to be included in "metadata" part of
306 :param mapping: Mapping of the old names of test cases to the new
308 :param ignore: List of TCs to be ignored.
314 # Type of message to parse out from the test messages
315 self._msg_type = None
321 self._timestamp = None
323 # Testbed. The testbed is identified by TG node IP address.
326 # Mapping of TCs long names
327 self._mapping = mapping
330 self._ignore = ignore
332 # Number of VAT History messages found:
334 # 1 - VAT History of DUT1
335 # 2 - VAT History of DUT2
336 self._lookup_kw_nr = 0
337 self._conf_history_lookup_nr = 0
339 # Number of Show Running messages found
341 # 1 - Show run message found
342 self._show_run_lookup_nr = 0
344 # Test ID of currently processed test- the lowercase full path to the
348 # The main data structure
350 "metadata": OrderedDict(),
351 "suites": OrderedDict(),
352 "tests": OrderedDict()
355 # Save the provided metadata
356 for key, val in metadata.items():
357 self._data["metadata"][key] = val
359 # Dictionary defining the methods used to parse different types of
362 "timestamp": self._get_timestamp,
363 "vpp-version": self._get_vpp_version,
364 "dpdk-version": self._get_dpdk_version,
365 "teardown-vat-history": self._get_vat_history,
366 "teardown-papi-history": self._get_papi_history,
367 "test-show-runtime": self._get_show_run,
368 "testbed": self._get_testbed
373 """Getter - Data parsed from the XML file.
375 :returns: Data parsed from the XML file.
    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("Setup of TG node"):
            # NOTE(review): the dots in the pattern are unescaped so they match
            # any character, not only '.'; harmless for IPv4-looking strings
            # but worth confirming.
            reg_tg_ip = re.compile(
                r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                # Record the testbed (possibly still None) and stop looking
                # for further messages of this type.
                self._data["metadata"]["testbed"] = self._testbed
                self._msg_type = None
    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT Version:") or \
            msg.message.count("VPP Version:") or \
            msg.message.count("VPP version:"):
            # Group 2 of REGEX_VERSION_VPP captures the version string itself
            # (group 1 is the matched prefix).
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data["metadata"]["version"] = self._version
            self._msg_type = None
    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message). group(1))
                self._data["metadata"]["version"] = self._version
            except IndexError:
                pass
            finally:
                # Whether or not the version was found, stop looking for
                # further messages of this type.
                self._msg_type = None
    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        # Keep only the leading 14 characters of the Robot timestamp
        # ('YYYYMMDD HH:MM'), dropping seconds and milliseconds.
        self._timestamp = msg.timestamp[:14]
        self._data["metadata"]["generated"] = self._timestamp
        self._msg_type = None
    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                # First DUT's history: initialize the accumulated string.
                self._data["tests"][self._test_ID]["conf-history"] = str()
            else:
                # Subsequent DUT's history: this is the last message of
                # interest for this test.
                self._msg_type = None
            # Strip the '<ip> VAT command history:' header, then normalize
            # newlines to ' |br| ' markers and double quotes to single ones
            # (the output is embedded in reST/JSON later).
            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                          "VAT command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                # First DUT's history: initialize the accumulated string.
                self._data["tests"][self._test_ID]["conf-history"] = str()
            else:
                # Subsequent DUT's history: this is the last message of
                # interest for this test.
                self._msg_type = None
            # Strip the '<ip> PAPI command history:' header, then normalize
            # newlines to ' |br| ' markers and double quotes to single ones.
            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                          "PAPI command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("Thread 0 vpp_main"):
            self._show_run_lookup_nr += 1
            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
                # First show-run of the first keyword: initialize the entry.
                self._data["tests"][self._test_ID]["show-run"] = str()
            if self._lookup_kw_nr > 1:
                # Only the first 'Show Runtime' keyword per DUT is parsed.
                self._msg_type = None
            if self._show_run_lookup_nr == 1:
                # Drop CLI echo/prefixes, normalize newlines to ' |br| '
                # markers and double quotes to single ones.
                text = msg.message.replace("vat# ", "").\
                    replace("return STDOUT ", "").replace("\n\n", "\n").\
                    replace('\n', ' |br| ').\
                    replace('\r', '').replace('"', "'")
                self._data["tests"][self._test_ID]["show-run"] += " |br| "
                self._data["tests"][self._test_ID]["show-run"] += \
                    "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
516 # TODO: Remove when definitely no NDRPDRDISC tests are used:
517 def _get_latency(self, msg, test_type):
518 """Get the latency data from the test message.
520 :param msg: Message to be parsed.
521 :param test_type: Type of the test - NDR or PDR.
524 :returns: Latencies parsed from the message.
528 if test_type == "NDR":
529 groups = re.search(self.REGEX_LAT_NDR, msg)
530 groups_range = range(1, 7)
531 elif test_type == "PDR":
532 groups = re.search(self.REGEX_LAT_PDR, msg)
533 groups_range = range(1, 3)
538 for idx in groups_range:
540 lat = [int(item) for item in str(groups.group(idx)).split('/')]
541 except (AttributeError, ValueError):
543 latencies.append(lat)
545 keys = ("min", "avg", "max")
553 latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
554 latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
555 if test_type == "NDR":
556 latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
557 latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
558 latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
559 latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        # -1.0 marks 'not found'; status stays FAIL unless all four bounds
        # are successfully parsed.
        throughput = {
            "NDR": {"LOWER": -1.0, "UPPER": -1.0},
            "PDR": {"LOWER": -1.0, "UPPER": -1.0}
        }
        status = "FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput["NDR"]["LOWER"] = float(groups.group(1))
                throughput["NDR"]["UPPER"] = float(groups.group(2))
                throughput["PDR"]["LOWER"] = float(groups.group(3))
                throughput["PDR"]["UPPER"] = float(groups.group(4))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status
    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        # -1.0 marks 'not found'; status stays FAIL unless both bounds parse.
        throughput = {
            "LOWER": -1.0,
            "UPPER": -1.0
        }
        status = "FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput["LOWER"] = float(groups.group(1))
                throughput["UPPER"] = float(groups.group(2))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status
    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

        # -1.0 marks 'not found'; status stays FAIL unless all four
        # min/avg/max triplets are successfully parsed.
        latency = {
            "NDR": {
                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            },
            "PDR": {
                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            }
        }
        status = "FAIL"
        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        if groups is not None:
            keys = ("min", "avg", "max")
            try:
                # Each captured group is a 'min/avg/max' string.
                latency["NDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(1).split('/')]))
                latency["NDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(2).split('/')]))
                latency["PDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(3).split('/')]))
                latency["PDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(4).split('/')]))
                status = "PASS"
            except (IndexError, ValueError):
                pass

        return latency, status
658 def visit_suite(self, suite):
659 """Implements traversing through the suite and its direct children.
661 :param suite: Suite to process.
665 if self.start_suite(suite) is not False:
666 suite.suites.visit(self)
667 suite.tests.visit(self)
668 self.end_suite(suite)
    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            # The root suite has no parent; nothing to record for it.
            return

        # Normalize the documentation for reST: single quotes, ' |br| '
        # line-break markers and bold markup.
        doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
            replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
        doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)

        # Key is the lowercase long name with spaces replaced by underscores.
        self._data["suites"][suite.longname.lower().replace('"', "'").
                             replace(" ", "_")] = {
                                 "name": suite.name.lower(),
                                 "doc": doc_str,
                                 "parent": parent_name,
                                 "level": len(suite.longname.split("."))
                             }

        suite.keywords.visit(self)
    def end_suite(self, suite):
        """Called when suite ends. Default implementation does nothing.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        pass
    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        # start_test() may veto the traversal by returning False.
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)
717 def start_test(self, test):
718 """Called when test starts.
720 :param test: Test to process.
725 longname_orig = test.longname.lower()
727 # Check the ignore list
728 if longname_orig in self._ignore:
731 tags = [str(tag) for tag in test.tags]
734 # Change the TC long name and name if defined in the mapping table
735 longname = self._mapping.get(longname_orig, None)
736 if longname is not None:
737 name = longname.split('.')[-1]
738 logging.debug("{0}\n{1}\n{2}\n{3}".format(
739 self._data["metadata"], longname_orig, longname, name))
741 longname = longname_orig
742 name = test.name.lower()
744 # Remove TC number from the TC long name (backward compatibility):
745 self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
746 # Remove TC number from the TC name (not needed):
747 test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
749 test_result["parent"] = test.parent.name.lower()
750 test_result["tags"] = tags
751 doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
752 replace('\r', '').replace('[', ' |br| [')
753 test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
754 test_result["msg"] = test.message.replace('\n', ' |br| '). \
755 replace('\r', '').replace('"', "'")
756 test_result["type"] = "FUNC"
757 test_result["status"] = test.status
759 if "PERFTEST" in tags:
760 # Replace info about cores (e.g. -1c-) with the info about threads
761 # and cores (e.g. -1t1c-) in the long test case names and in the
762 # test case names if necessary.
763 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
766 for tag in test_result["tags"]:
767 groups = re.search(self.REGEX_TC_TAG, tag)
773 self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
774 "-{0}-".format(tag_tc.lower()),
777 test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
778 "-{0}-".format(tag_tc.lower()),
782 test_result["status"] = "FAIL"
783 self._data["tests"][self._test_ID] = test_result
784 logging.debug("The test '{0}' has no or more than one "
785 "multi-threading tags.".format(self._test_ID))
786 logging.debug("Tags: {0}".format(test_result["tags"]))
789 if test.status == "PASS" and ("NDRPDRDISC" in tags or
795 # TODO: Remove when definitely no NDRPDRDISC tests are used:
796 if "NDRDISC" in tags:
797 test_result["type"] = "NDR"
798 # TODO: Remove when definitely no NDRPDRDISC tests are used:
799 elif "PDRDISC" in tags:
800 test_result["type"] = "PDR"
801 elif "NDRPDR" in tags:
802 test_result["type"] = "NDRPDR"
804 test_result["type"] = "SOAK"
806 test_result["type"] = "TCP"
808 test_result["type"] = "MRR"
809 elif "FRMOBL" in tags or "BMRR" in tags:
810 test_result["type"] = "BMRR"
812 test_result["status"] = "FAIL"
813 self._data["tests"][self._test_ID] = test_result
816 # TODO: Remove when definitely no NDRPDRDISC tests are used:
817 if test_result["type"] in ("NDR", "PDR"):
819 rate_value = str(re.search(
820 self.REGEX_RATE, test.message).group(1))
821 except AttributeError:
824 rate_unit = str(re.search(
825 self.REGEX_RATE, test.message).group(2))
826 except AttributeError:
829 test_result["throughput"] = dict()
830 test_result["throughput"]["value"] = \
831 int(rate_value.split('.')[0])
832 test_result["throughput"]["unit"] = rate_unit
833 test_result["latency"] = \
834 self._get_latency(test.message, test_result["type"])
835 if test_result["type"] == "PDR":
836 test_result["lossTolerance"] = str(re.search(
837 self.REGEX_TOLERANCE, test.message).group(1))
839 elif test_result["type"] in ("NDRPDR", ):
840 test_result["throughput"], test_result["status"] = \
841 self._get_ndrpdr_throughput(test.message)
842 test_result["latency"], test_result["status"] = \
843 self._get_ndrpdr_latency(test.message)
845 elif test_result["type"] in ("SOAK", ):
846 test_result["throughput"], test_result["status"] = \
847 self._get_plr_throughput(test.message)
849 elif test_result["type"] in ("TCP", ):
850 groups = re.search(self.REGEX_TCP, test.message)
851 test_result["result"] = int(groups.group(2))
853 elif test_result["type"] in ("MRR", "BMRR"):
854 test_result["result"] = dict()
855 groups = re.search(self.REGEX_BMRR, test.message)
856 if groups is not None:
857 items_str = groups.group(1)
858 items_float = [float(item.strip()) for item
859 in items_str.split(",")]
860 metadata = AvgStdevMetadataFactory.from_data(items_float)
861 # Next two lines have been introduced in CSIT-1179,
862 # to be removed in CSIT-1180.
865 test_result["result"]["receive-rate"] = metadata
867 groups = re.search(self.REGEX_MRR, test.message)
868 test_result["result"]["receive-rate"] = \
869 AvgStdevMetadataFactory.from_data([
870 float(groups.group(3)) / float(groups.group(1)), ])
872 self._data["tests"][self._test_ID] = test_result
    def end_test(self, test):
        """Called when test ends. Default implementation does nothing.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        pass
883 def visit_keyword(self, keyword):
884 """Implements traversing through the keyword and its child keywords.
886 :param keyword: Keyword to process.
887 :type keyword: Keyword
890 if self.start_keyword(keyword) is not False:
891 self.end_keyword(keyword)
    def start_keyword(self, keyword):
        """Called when keyword starts. Dispatch to the setup / teardown / test
        keyword visitor according to the keyword type.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == "setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == "teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
            else:
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
            pass
    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        pass
921 def visit_test_kw(self, test_kw):
922 """Implements traversing through the test keyword and its child
925 :param test_kw: Keyword to process.
926 :type test_kw: Keyword
929 for keyword in test_kw.keywords:
930 if self.start_test_kw(keyword) is not False:
931 self.visit_test_kw(keyword)
932 self.end_test_kw(keyword)
    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Sets the message type to parse for
        keywords of interest, then visits the keyword's messages.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count("Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = "test-show-runtime"
        elif test_kw.name.count("Install Dpdk Test") and not self._version:
            self._msg_type = "dpdk-version"
        if self._msg_type:
            test_kw.messages.visit(self)
    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        pass
961 def visit_setup_kw(self, setup_kw):
962 """Implements traversing through the teardown keyword and its child
965 :param setup_kw: Keyword to process.
966 :type setup_kw: Keyword
969 for keyword in setup_kw.keywords:
970 if self.start_setup_kw(keyword) is not False:
971 self.visit_setup_kw(keyword)
972 self.end_setup_kw(keyword)
    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Sets the message type to parse
        (VPP version / timestamp / testbed) for keywords of interest, then
        visits the keyword's messages.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count("Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = "vpp-version"
        elif setup_kw.name.count("Set Global Variable") \
                and not self._timestamp:
            self._msg_type = "timestamp"
        elif setup_kw.name.count("Setup Framework") and not self._testbed:
            self._msg_type = "testbed"
        if self._msg_type:
            setup_kw.messages.visit(self)
    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        pass
1003 def visit_teardown_kw(self, teardown_kw):
1004 """Implements traversing through the teardown keyword and its child
1007 :param teardown_kw: Keyword to process.
1008 :type teardown_kw: Keyword
1011 for keyword in teardown_kw.keywords:
1012 if self.start_teardown_kw(keyword) is not False:
1013 self.visit_teardown_kw(keyword)
1014 self.end_teardown_kw(keyword)
    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts. Sets the message type to parse
        (VAT or PAPI command history) for keywords of interest and visits the
        keyword's messages.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count("Show Vat History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count("Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-papi-history"
            teardown_kw.messages.visit(self)
    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        pass
1043 def visit_message(self, msg):
1044 """Implements visiting the message.
1046 :param msg: Message to process.
1050 if self.start_message(msg) is not False:
1051 self.end_message(msg)
    def start_message(self, msg):
        """Called when message starts. Get required information from messages:
        dispatch to the parser registered for the current message type.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self._msg_type:
            self.parse_msg[self._msg_type](msg)
    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        pass
1075 class InputData(object):
1078 The data is extracted from output.xml files generated by Jenkins jobs and
1079 stored in pandas' DataFrames.
1085 (as described in ExecutionChecker documentation)
1087 (as described in ExecutionChecker documentation)
1089 (as described in ExecutionChecker documentation)
1092 def __init__(self, spec):
1095 :param spec: Specification.
1096 :type spec: Specification
1103 self._input_data = pd.Series()
1107 """Getter - Input data.
1109 :returns: Input data
1110 :rtype: pandas.Series
1112 return self._input_data
1114 def metadata(self, job, build):
1115 """Getter - metadata
1117 :param job: Job which metadata we want.
1118 :param build: Build which metadata we want.
1122 :rtype: pandas.Series
1125 return self.data[job][build]["metadata"]
    def suites(self, job, build):
        """Getter - suites

        :param job: Job which suites we want.
        :param build: Build which suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """

        # Builds are stored under string keys, hence str(build).
        return self.data[job][str(build)]["suites"]
1140 def tests(self, job, build):
1143 :param job: Job which tests we want.
1144 :param build: Build which tests we want.
1148 :rtype: pandas.Series
1151 return self.data[job][build]["tests"]
1153 def _parse_tests(self, job, build, log):
1154 """Process data from robot output.xml file and return JSON structured
1157 :param job: The name of job which build output data will be processed.
1158 :param build: The build which output data will be processed.
1159 :param log: List of log messages.
1162 :type log: list of tuples (severity, msg)
1163 :returns: JSON data structure.
1172 with open(build["file-name"], 'r') as data_file:
1174 result = ExecutionResult(data_file)
1175 except errors.DataError as err:
1176 log.append(("ERROR", "Error occurred while parsing output.xml: "
1179 checker = ExecutionChecker(metadata, self._cfg.mapping,
1181 result.visit(checker)
1185 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1186 """Download and parse the input data file.
1188 :param pid: PID of the process executing this method.
1189 :param job: Name of the Jenkins job which generated the processed input
1191 :param build: Information about the Jenkins build which generated the
1192 processed input file.
1193 :param repeat: Repeat the download specified number of times if not
1203 logs.append(("INFO", " Processing the job/build: {0}: {1}".
1204 format(job, build["build"])))
1211 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1217 logs.append(("ERROR", "It is not possible to download the input "
1218 "data file from the job '{job}', build "
1219 "'{build}', or it is damaged. Skipped.".
1220 format(job=job, build=build["build"])))
1222 logs.append(("INFO", " Processing data from the build '{0}' ...".
1223 format(build["build"])))
1224 data = self._parse_tests(job, build, logs)
1226 logs.append(("ERROR", "Input data file from the job '{job}', "
1227 "build '{build}' is damaged. Skipped.".
1228 format(job=job, build=build["build"])))
1233 remove(build["file-name"])
1234 except OSError as err:
1235 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
1236 format(build["file-name"], repr(err))))
1238 # If the time-period is defined in the specification file, remove all
1239 # files which are outside the time period.
1240 timeperiod = self._cfg.input.get("time-period", None)
1241 if timeperiod and data:
1243 timeperiod = timedelta(int(timeperiod))
1244 metadata = data.get("metadata", None)
1246 generated = metadata.get("generated", None)
1248 generated = dt.strptime(generated, "%Y%m%d %H:%M")
1249 if (now - generated) > timeperiod:
1250 # Remove the data and the file:
1255 " The build {job}/{build} is outdated, will be "
1256 "removed".format(job=job, build=build["build"])))
1257 file_name = self._cfg.input["file-name"]
1259 self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
1260 "{job}{sep}{build}{sep}{name}".format(
1263 build=build["build"],
1267 logs.append(("INFO",
1268 " The file {name} has been removed".
1269 format(name=full_name)))
1270 except OSError as err:
1271 logs.append(("ERROR",
1272 "Cannot remove the file '{0}': {1}".
1273 format(full_name, repr(err))))
1274 logs.append(("INFO", " Done."))
1276 for level, line in logs:
1279 elif level == "ERROR":
1281 elif level == "DEBUG":
1283 elif level == "CRITICAL":
1284 logging.critical(line)
1285 elif level == "WARNING":
1286 logging.warning(line)
1288 return {"data": data, "state": state, "job": job, "build": build}
1290 def download_and_parse_data(self, repeat=1):
1291 """Download the input data files, parse input data from input files and
1292 store in pandas' Series.
1294 :param repeat: Repeat the download specified number of times if not
1299 logging.info("Downloading and parsing input files ...")
1301 for job, builds in self._cfg.builds.items():
1302 for build in builds:
1304 result = self._download_and_parse_build(job, build, repeat)
1305 build_nr = result["build"]["build"]
1308 data = result["data"]
1309 build_data = pd.Series({
1310 "metadata": pd.Series(
1311 data["metadata"].values(),
1312 index=data["metadata"].keys()),
1313 "suites": pd.Series(data["suites"].values(),
1314 index=data["suites"].keys()),
1315 "tests": pd.Series(data["tests"].values(),
1316 index=data["tests"].keys())})
1318 if self._input_data.get(job, None) is None:
1319 self._input_data[job] = pd.Series()
1320 self._input_data[job][str(build_nr)] = build_data
1322 self._cfg.set_input_file_name(
1323 job, build_nr, result["build"]["file-name"])
1325 self._cfg.set_input_state(job, build_nr, result["state"])
1327 logging.info("Memory allocation: {0:,d}MB".format(
1328 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
1330 logging.info("Done.")
    def _end_of_tag(tag_filter, start=0, closer="'"):
        """Return the index of character in the string which is the end of tag.

        :param tag_filter: The string where the end of tag is being searched.
        :param start: The index where the searching is stated.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer, or None when no complete tag
            (opener + closer pair) is found after 'start'.
        :rtype: int
        """

        try:
            # The first occurrence of 'closer' opens the tag, the second
            # one closes it.
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None
1353 def _condition(tag_filter):
1354 """Create a conditional statement from the given tag filter.
1356 :param tag_filter: Filter based on tags from the element specification.
1357 :type tag_filter: str
1358 :returns: Conditional statement which can be evaluated.
1364 index = InputData._end_of_tag(tag_filter, index)
1368 tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
1370 def filter_data(self, element, params=None, data_set="tests",
1371 continue_on_error=False):
1372 """Filter required data from the given jobs and builds.
1374 The output data structure is:
1378 - test (or suite) 1 ID:
1384 - test (or suite) n ID:
1391 :param element: Element which will use the filtered data.
1392 :param params: Parameters which will be included in the output. If None,
1393 all parameters are included.
1394 :param data_set: The set of data to be filtered: tests, suites,
1396 :param continue_on_error: Continue if there is error while reading the
1397 data. The Item will be empty then
1398 :type element: pandas.Series
1401 :type continue_on_error: bool
1402 :returns: Filtered data.
1403 :rtype pandas.Series
1407 if element["filter"] in ("all", "template"):
1410 cond = InputData._condition(element["filter"])
1411 logging.debug(" Filter: {0}".format(cond))
1413 logging.error(" No filter defined.")
1417 params = element.get("parameters", None)
1419 params.append("type")
1423 for job, builds in element["data"].items():
1424 data[job] = pd.Series()
1425 for build in builds:
1426 data[job][str(build)] = pd.Series()
1428 data_iter = self.data[job][str(build)][data_set].\
1431 if continue_on_error:
1435 for test_ID, test_data in data_iter:
1436 if eval(cond, {"tags": test_data.get("tags", "")}):
1437 data[job][str(build)][test_ID] = pd.Series()
1439 for param, val in test_data.items():
1440 data[job][str(build)][test_ID][param] = val
1442 for param in params:
1444 data[job][str(build)][test_ID][param] =\
1447 data[job][str(build)][test_ID][param] =\
1451 except (KeyError, IndexError, ValueError) as err:
1452 logging.error(" Missing mandatory parameter in the element "
1453 "specification: {0}".format(err))
1455 except AttributeError:
1458 logging.error(" The filter '{0}' is not correct. Check if all "
1459 "tags are enclosed by apostrophes.".format(cond))
1463 def merge_data(data):
1464 """Merge data from more jobs and builds to a simple data structure.
1466 The output data structure is:
1468 - test (suite) 1 ID:
1474 - test (suite) n ID:
1477 :param data: Data to merge.
1478 :type data: pandas.Series
1479 :returns: Merged data.
1480 :rtype: pandas.Series
1483 logging.info(" Merging data ...")
1485 merged_data = pd.Series()
1486 for _, builds in data.iteritems():
1487 for _, item in builds.iteritems():
1488 for ID, item_data in item.iteritems():
1489 merged_data[ID] = item_data