1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in pandas' DataFrames,
18 - provide access to the data.
19 - filter the data using tags,
22 import multiprocessing
29 from robot.api import ExecutionResult, ResultVisitor
30 from robot import errors
31 from collections import OrderedDict
32 from string import replace
34 from os.path import join
35 from datetime import datetime as dt
36 from datetime import timedelta
37 from json import loads
38 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
40 from input_data_files import download_and_unzip_data_file
41 from utils import Worker
44 # Separator used in file names
48 class ExecutionChecker(ResultVisitor):
49 """Class to traverse through the test suite structure.
51 The functionality implemented in this class generates a json structure:
57 "generated": "Timestamp",
58 "version": "SUT version",
59 "job": "Jenkins job name",
60 "build": "Information about the build"
63 "Suite long name 1": {
65 "doc": "Suite 1 documentation",
66 "parent": "Suite 1 parent",
67 "level": "Level of the suite in the suite hierarchy"
69 "Suite long name N": {
71 "doc": "Suite N documentation",
72 "parent": "Suite 2 parent",
73 "level": "Level of the suite in the suite hierarchy"
80 "parent": "Name of the parent of the test",
81 "doc": "Test documentation",
82 "msg": "Test message",
83 "conf-history": "DUT1 and DUT2 VAT History",
84 "show-run": "Show Run",
85 "tags": ["tag 1", "tag 2", "tag n"],
87 "status": "PASS" | "FAIL",
129 "parent": "Name of the parent of the test",
130 "doc": "Test documentation",
131 "msg": "Test message",
132 "tags": ["tag 1", "tag 2", "tag n"],
134 "status": "PASS" | "FAIL",
141 "parent": "Name of the parent of the test",
142 "doc": "Test documentation",
143 "msg": "Test message",
144 "tags": ["tag 1", "tag 2", "tag n"],
145 "type": "MRR" | "BMRR",
146 "status": "PASS" | "FAIL",
148 "receive-rate": AvgStdevMetadata,
152 # TODO: Remove when definitely no NDRPDRDISC tests are used:
156 "parent": "Name of the parent of the test",
157 "doc": "Test documentation",
158 "msg": "Test message",
159 "tags": ["tag 1", "tag 2", "tag n"],
160 "type": "PDR" | "NDR",
161 "status": "PASS" | "FAIL",
162 "throughput": { # Only type: "PDR" | "NDR"
164 "unit": "pps" | "bps" | "percentage"
166 "latency": { # Only type: "PDR" | "NDR"
173 "50": { # Only for NDR
178 "10": { # Only for NDR
190 "50": { # Only for NDR
195 "10": { # Only for NDR
202 "lossTolerance": "lossTolerance", # Only type: "PDR"
203 "conf-history": "DUT1 and DUT2 VAT History"
204 "show-run": "Show Run"
216 "metadata": { # Optional
217 "version": "VPP version",
218 "job": "Jenkins job name",
219 "build": "Information about the build"
223 "doc": "Suite 1 documentation",
224 "parent": "Suite 1 parent",
225 "level": "Level of the suite in the suite hierarchy"
228 "doc": "Suite N documentation",
229 "parent": "Suite 2 parent",
230 "level": "Level of the suite in the suite hierarchy"
236 "parent": "Name of the parent of the test",
237 "doc": "Test documentation"
238 "msg": "Test message"
239 "tags": ["tag 1", "tag 2", "tag n"],
240 "conf-history": "DUT1 and DUT2 VAT History"
241 "show-run": "Show Run"
242 "status": "PASS" | "FAIL"
250 .. note:: ID is the lowercase full path to the test.
253 # TODO: Remove when definitely no NDRPDRDISC tests are used:
254 REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
256 REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
257 r'PLRsearch upper bound::\s(\d+.\d+)')
259 REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
260 r'NDR_UPPER:\s(\d+.\d+).*\n'
261 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
262 r'PDR_UPPER:\s(\d+.\d+)')
264 # TODO: Remove when definitely no NDRPDRDISC tests are used:
265 REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
266 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
267 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
268 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
269 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
270 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
271 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')
273 REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
274 r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
275 r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')
277 REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
278 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
280 REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
283 REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
284 r"VPP Version:\s*|VPP version:\s*)(.*)")
286 REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
287 r"(RTE Version: 'DPDK )(.*)(')")
289 REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
291 REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
292 r'tx\s(\d*),\srx\s(\d*)')
294 REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
295 r' in packets per second: \[(.*)\]')
297 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
299 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
301 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
303 REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
305 def __init__(self, metadata, mapping, ignore):
308 :param metadata: Key-value pairs to be included in "metadata" part of
310 :param mapping: Mapping of the old names of test cases to the new
312 :param ignore: List of TCs to be ignored.
318 # Type of message to parse out from the test messages
319 self._msg_type = None
325 self._timestamp = None
327 # Testbed. The testbed is identified by TG node IP address.
330 # Mapping of TCs long names
331 self._mapping = mapping
334 self._ignore = ignore
336 # Number of VAT History messages found:
338 # 1 - VAT History of DUT1
339 # 2 - VAT History of DUT2
340 self._lookup_kw_nr = 0
341 self._conf_history_lookup_nr = 0
343 # Number of Show Running messages found
345 # 1 - Show run message found
346 self._show_run_lookup_nr = 0
348 # Test ID of currently processed test- the lowercase full path to the
352 # The main data structure
354 "metadata": OrderedDict(),
355 "suites": OrderedDict(),
356 "tests": OrderedDict()
359 # Save the provided metadata
360 for key, val in metadata.items():
361 self._data["metadata"][key] = val
363 # Dictionary defining the methods used to parse different types of
366 "timestamp": self._get_timestamp,
367 "vpp-version": self._get_vpp_version,
368 "dpdk-version": self._get_dpdk_version,
369 "teardown-vat-history": self._get_vat_history,
370 "teardown-papi-history": self._get_papi_history,
371 "test-show-runtime": self._get_show_run,
372 "testbed": self._get_testbed
377 """Getter - Data parsed from the XML file.
379 :returns: Data parsed from the XML file.
def _get_testbed(self, msg):
    """Called when extraction of testbed IP is required.
    The testbed is identified by TG node IP address.

    :param msg: Message to process.
    :type msg: Message
    """
    # NOTE(review): this listing omits a few original lines (e.g. the
    # 'try:' that pairs with the 'except' below); comments only added.
    if msg.message.count("Arguments:"):
        # Normalize the logged topology repr into strict JSON text: drop
        # all whitespace, single -> double quotes, strip b"..." prefixes.
        # 'honeycom' -> 'honeycomb' presumably repairs a truncated token
        # in the message -- TODO confirm against the producer.
        message = str(msg.message).replace(' ', '').replace('\n', '').\
            replace("'", '"').replace('b"', '"').\
            replace("honeycom", "honeycomb")
        # Slice off the "Arguments:[" prefix and the trailing "]" before
        # JSON-decoding (offsets assumed from the message format -- verify).
        message = loads(message[11:-1])
        # ('try:' line not visible in this listing)
            self._testbed = message["TG"]["host"]
        except (KeyError, ValueError):
        # (handler body not visible in this listing)
        # Record the testbed and reset the message-parser dispatch state.
        self._data["metadata"]["testbed"] = self._testbed
        self._msg_type = None
def _get_vpp_version(self, msg):
    """Called when extraction of VPP version is required.

    :param msg: Message to process.
    :type msg: Message
    """
    # Three known phrasings of the version line are recognized; all are
    # covered by REGEX_VERSION_VPP, whose second group is the version text.
    if msg.message.count("return STDOUT Version:") or \
        msg.message.count("VPP Version:") or \
        msg.message.count("VPP version:"):
        # (tail of this statement is not visible in this listing)
        self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
        self._data["metadata"]["version"] = self._version
        # Version found; stop routing further messages to this parser.
        self._msg_type = None
def _get_dpdk_version(self, msg):
    """Called when extraction of DPDK version is required.

    :param msg: Message to process.
    :type msg: Message
    """
    if msg.message.count("return STDOUT testpmd"):
        # Group 4 of REGEX_VERSION_DPDK captures the text between
        # "RTE Version: 'DPDK " and the closing "'".
        # (a guarding 'try:'/'except' appears to be missing from this
        # listing around the extraction below)
        self._version = str(re.search(
            self.REGEX_VERSION_DPDK, msg.message). group(4))
        self._data["metadata"]["version"] = self._version
        self._msg_type = None
def _get_timestamp(self, msg):
    """Called when extraction of timestamp is required.

    :param msg: Message to process.
    :type msg: Message
    """
    # Keep only the first 14 characters ("YYYYMMDD HH:MM"); the same
    # format is later parsed with strptime("%Y%m%d %H:%M") by the
    # time-period check in _download_and_parse_build.
    self._timestamp = msg.timestamp[:14]
    self._data["metadata"]["generated"] = self._timestamp
    self._msg_type = None
def _get_vat_history(self, msg):
    """Called when extraction of VAT command history is required.

    :param msg: Message to process.
    :type msg: Message
    """
    if msg.message.count("VAT command history:"):
        # One history message per DUT; the counter doubles as the DUT
        # number in the "**DUTn:**" label below.
        self._conf_history_lookup_nr += 1
        if self._conf_history_lookup_nr == 1:
            self._data["tests"][self._test_ID]["conf-history"] = str()
        # (a line is missing from this listing here -- likely 'else:')
        self._msg_type = None
        # Strip the leading "<IP> VAT command history:" header, then
        # re-encode newlines as the ' |br| ' rST line-break marker.
        # NOTE(review): the dots in the IP pattern are unescaped so they
        # match any character -- harmless here, but worth tightening.
        text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                      "VAT command history:", "", msg.message, count=1). \
            replace("\n\n", "\n").replace('\n', ' |br| ').\
            replace('\r', '').replace('"', "'")
        self._data["tests"][self._test_ID]["conf-history"] += " |br| "
        self._data["tests"][self._test_ID]["conf-history"] += \
            "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
def _get_papi_history(self, msg):
    """Called when extraction of PAPI command history is required.

    :param msg: Message to process.
    :type msg: Message
    """
    # Mirror image of _get_vat_history for PAPI-based jobs; both write
    # into the same "conf-history" field of the current test.
    if msg.message.count("PAPI command history:"):
        self._conf_history_lookup_nr += 1
        if self._conf_history_lookup_nr == 1:
            self._data["tests"][self._test_ID]["conf-history"] = str()
        # (a line is missing from this listing here -- likely 'else:')
        self._msg_type = None
        # Strip the leading "<IP> PAPI command history:" header, then
        # re-encode newlines as the ' |br| ' rST line-break marker.
        text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                      "PAPI command history:", "", msg.message, count=1). \
            replace("\n\n", "\n").replace('\n', ' |br| ').\
            replace('\r', '').replace('"', "'")
        self._data["tests"][self._test_ID]["conf-history"] += " |br| "
        self._data["tests"][self._test_ID]["conf-history"] += \
            "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
def _get_show_run(self, msg):
    """Called when extraction of VPP operational data (output of CLI command
    Show Runtime) is required.

    :param msg: Message to process.
    :type msg: Message
    """
    if msg.message.count("Thread 0 vpp_main"):
        self._show_run_lookup_nr += 1
        # First "show runtime" message of the first lookup keyword
        # initialises the accumulator string for the current test.
        if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
            self._data["tests"][self._test_ID]["show-run"] = str()
        # After the first lookup keyword has been handled, stop routing
        # messages to this parser.
        if self._lookup_kw_nr > 1:
            self._msg_type = None
        if self._show_run_lookup_nr == 1:
            # Strip CLI prompts/prefixes and re-encode newlines as the
            # ' |br| ' rST line-break marker.
            text = msg.message.replace("vat# ", "").\
                replace("return STDOUT ", "").replace("\n\n", "\n").\
                replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")
            self._data["tests"][self._test_ID]["show-run"] += " |br| "
            self._data["tests"][self._test_ID]["show-run"] += \
                "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
# TODO: Remove when definitely no NDRPDRDISC tests are used:
def _get_latency(self, msg, test_type):
    """Get the latency data from the test message.

    :param msg: Message to be parsed.
    :param test_type: Type of the test - NDR or PDR.
    :type msg: str
    :type test_type: str
    :returns: Latencies parsed from the message.
    :rtype: dict
    """
    # NDR reports three load levels (100/50/10) x two directions = six
    # 'min/avg/max' triplet groups; PDR reports only the 100% pair.
    if test_type == "NDR":
        groups = re.search(self.REGEX_LAT_NDR, msg)
        groups_range = range(1, 7)
    elif test_type == "PDR":
        groups = re.search(self.REGEX_LAT_PDR, msg)
        groups_range = range(1, 3)
    # (several lines missing from this listing here, incl. the 'try:'
    # paired with the 'except' below and the 'latencies'/'latency'
    # initialisation)
    for idx in groups_range:
            # Each group is a 'min/avg/max' triplet encoded as "a/b/c".
            lat = [int(item) for item in str(groups.group(idx)).split('/')]
        except (AttributeError, ValueError):
        # (handler body not visible in this listing)
        latencies.append(lat)

    keys = ("min", "avg", "max")
    latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
    latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
    if test_type == "NDR":
        latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
        latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
        latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
        latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
def _get_ndrpdr_throughput(self, msg):
    """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
    message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """
    # -1.0 sentinels mean "not parsed"; they survive a regex miss or a
    # malformed number. (the 'throughput = {' opener and the 'status'
    # default are missing from this listing)
        "NDR": {"LOWER": -1.0, "UPPER": -1.0},
        "PDR": {"LOWER": -1.0, "UPPER": -1.0}

    groups = re.search(self.REGEX_NDRPDR_RATE, msg)
    if groups is not None:
        # ('try:' line not visible in this listing)
            throughput["NDR"]["LOWER"] = float(groups.group(1))
            throughput["NDR"]["UPPER"] = float(groups.group(2))
            throughput["PDR"]["LOWER"] = float(groups.group(3))
            throughput["PDR"]["UPPER"] = float(groups.group(4))
        except (IndexError, ValueError):
        # (handler body and 'status' bookkeeping not visible)
    return throughput, status
def _get_plr_throughput(self, msg):
    """Get PLRsearch lower bound and PLRsearch upper bound from the test
    message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """
    # (the 'throughput' initialisation and 'status' default lines are
    # missing from this listing)
    groups = re.search(self.REGEX_PLR_RATE, msg)
    if groups is not None:
        # ('try:' line not visible in this listing)
            throughput["LOWER"] = float(groups.group(1))
            throughput["UPPER"] = float(groups.group(2))
        except (IndexError, ValueError):
        # (handler body and 'status' bookkeeping not visible)
    return throughput, status
def _get_ndrpdr_latency(self, msg):
    """Get LATENCY from the test message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """
    # Defaults: -1.0 sentinels mean "not parsed". (the enclosing
    # 'latency = {"NDR": {...' / '"PDR": {...' opener lines are missing
    # from this listing)
        "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
        "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
        "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
        "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}

    groups = re.search(self.REGEX_NDRPDR_LAT, msg)
    if groups is not None:
        keys = ("min", "avg", "max")
        # Each captured group is a 'min/avg/max' triplet: groups 1-2 are
        # NDR dir1/dir2, groups 3-4 are PDR dir1/dir2.
        # ('try:' line not visible in this listing)
            latency["NDR"]["direction1"] = dict(
                zip(keys, [float(l) for l in groups.group(1).split('/')]))
            latency["NDR"]["direction2"] = dict(
                zip(keys, [float(l) for l in groups.group(2).split('/')]))
            latency["PDR"]["direction1"] = dict(
                zip(keys, [float(l) for l in groups.group(3).split('/')]))
            latency["PDR"]["direction2"] = dict(
                zip(keys, [float(l) for l in groups.group(4).split('/')]))
        except (IndexError, ValueError):
        # (handler body and 'status' bookkeeping not visible)
    return latency, status
664 def visit_suite(self, suite):
665 """Implements traversing through the suite and its direct children.
667 :param suite: Suite to process.
671 if self.start_suite(suite) is not False:
672 suite.suites.visit(self)
673 suite.tests.visit(self)
674 self.end_suite(suite)
676 def start_suite(self, suite):
677 """Called when suite starts.
679 :param suite: Suite to process.
685 parent_name = suite.parent.name
686 except AttributeError:
689 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
690 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
691 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
693 self._data["suites"][suite.longname.lower().replace('"', "'").
694 replace(" ", "_")] = {
695 "name": suite.name.lower(),
697 "parent": parent_name,
698 "level": len(suite.longname.split("."))
701 suite.keywords.visit(self)
703 def end_suite(self, suite):
704 """Called when suite ends.
706 :param suite: Suite to process.
712 def visit_test(self, test):
713 """Implements traversing through the test.
715 :param test: Test to process.
719 if self.start_test(test) is not False:
720 test.keywords.visit(self)
723 def start_test(self, test):
724 """Called when test starts.
726 :param test: Test to process.
731 longname_orig = test.longname.lower()
733 # Check the ignore list
734 if longname_orig in self._ignore:
737 tags = [str(tag) for tag in test.tags]
740 # Change the TC long name and name if defined in the mapping table
741 longname = self._mapping.get(longname_orig, None)
742 if longname is not None:
743 name = longname.split('.')[-1]
744 logging.debug("{0}\n{1}\n{2}\n{3}".format(
745 self._data["metadata"], longname_orig, longname, name))
747 longname = longname_orig
748 name = test.name.lower()
750 # Remove TC number from the TC long name (backward compatibility):
751 self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
752 # Remove TC number from the TC name (not needed):
753 test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
755 test_result["parent"] = test.parent.name.lower()
756 test_result["tags"] = tags
757 doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
758 replace('\r', '').replace('[', ' |br| [')
759 test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
760 test_result["msg"] = test.message.replace('\n', ' |br| '). \
761 replace('\r', '').replace('"', "'")
762 test_result["type"] = "FUNC"
763 test_result["status"] = test.status
765 if "PERFTEST" in tags:
766 # Replace info about cores (e.g. -1c-) with the info about threads
767 # and cores (e.g. -1t1c-) in the long test case names and in the
768 # test case names if necessary.
769 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
772 for tag in test_result["tags"]:
773 groups = re.search(self.REGEX_TC_TAG, tag)
779 self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
780 "-{0}-".format(tag_tc.lower()),
783 test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
784 "-{0}-".format(tag_tc.lower()),
788 test_result["status"] = "FAIL"
789 self._data["tests"][self._test_ID] = test_result
790 logging.debug("The test '{0}' has no or more than one "
791 "multi-threading tags.".format(self._test_ID))
792 logging.debug("Tags: {0}".format(test_result["tags"]))
795 if test.status == "PASS" and ("NDRPDRDISC" in tags or
801 # TODO: Remove when definitely no NDRPDRDISC tests are used:
802 if "NDRDISC" in tags:
803 test_result["type"] = "NDR"
804 # TODO: Remove when definitely no NDRPDRDISC tests are used:
805 elif "PDRDISC" in tags:
806 test_result["type"] = "PDR"
807 elif "NDRPDR" in tags:
808 test_result["type"] = "NDRPDR"
810 test_result["type"] = "SOAK"
812 test_result["type"] = "TCP"
814 test_result["type"] = "MRR"
815 elif "FRMOBL" in tags or "BMRR" in tags:
816 test_result["type"] = "BMRR"
818 test_result["status"] = "FAIL"
819 self._data["tests"][self._test_ID] = test_result
822 # TODO: Remove when definitely no NDRPDRDISC tests are used:
823 if test_result["type"] in ("NDR", "PDR"):
825 rate_value = str(re.search(
826 self.REGEX_RATE, test.message).group(1))
827 except AttributeError:
830 rate_unit = str(re.search(
831 self.REGEX_RATE, test.message).group(2))
832 except AttributeError:
835 test_result["throughput"] = dict()
836 test_result["throughput"]["value"] = \
837 int(rate_value.split('.')[0])
838 test_result["throughput"]["unit"] = rate_unit
839 test_result["latency"] = \
840 self._get_latency(test.message, test_result["type"])
841 if test_result["type"] == "PDR":
842 test_result["lossTolerance"] = str(re.search(
843 self.REGEX_TOLERANCE, test.message).group(1))
845 elif test_result["type"] in ("NDRPDR", ):
846 test_result["throughput"], test_result["status"] = \
847 self._get_ndrpdr_throughput(test.message)
848 test_result["latency"], test_result["status"] = \
849 self._get_ndrpdr_latency(test.message)
851 elif test_result["type"] in ("SOAK", ):
852 test_result["throughput"], test_result["status"] = \
853 self._get_plr_throughput(test.message)
855 elif test_result["type"] in ("TCP", ):
856 groups = re.search(self.REGEX_TCP, test.message)
857 test_result["result"] = int(groups.group(2))
859 elif test_result["type"] in ("MRR", "BMRR"):
860 test_result["result"] = dict()
861 groups = re.search(self.REGEX_BMRR, test.message)
862 if groups is not None:
863 items_str = groups.group(1)
864 items_float = [float(item.strip()) for item
865 in items_str.split(",")]
866 metadata = AvgStdevMetadataFactory.from_data(items_float)
867 # Next two lines have been introduced in CSIT-1179,
868 # to be removed in CSIT-1180.
871 test_result["result"]["receive-rate"] = metadata
873 groups = re.search(self.REGEX_MRR, test.message)
874 test_result["result"]["receive-rate"] = \
875 AvgStdevMetadataFactory.from_data([
876 float(groups.group(3)) / float(groups.group(1)), ])
878 self._data["tests"][self._test_ID] = test_result
880 def end_test(self, test):
881 """Called when test ends.
883 :param test: Test to process.
889 def visit_keyword(self, keyword):
890 """Implements traversing through the keyword and its child keywords.
892 :param keyword: Keyword to process.
893 :type keyword: Keyword
896 if self.start_keyword(keyword) is not False:
897 self.end_keyword(keyword)
899 def start_keyword(self, keyword):
900 """Called when keyword starts. Default implementation does nothing.
902 :param keyword: Keyword to process.
903 :type keyword: Keyword
907 if keyword.type == "setup":
908 self.visit_setup_kw(keyword)
909 elif keyword.type == "teardown":
910 self._lookup_kw_nr = 0
911 self.visit_teardown_kw(keyword)
913 self._lookup_kw_nr = 0
914 self.visit_test_kw(keyword)
915 except AttributeError:
918 def end_keyword(self, keyword):
919 """Called when keyword ends. Default implementation does nothing.
921 :param keyword: Keyword to process.
922 :type keyword: Keyword
927 def visit_test_kw(self, test_kw):
928 """Implements traversing through the test keyword and its child
931 :param test_kw: Keyword to process.
932 :type test_kw: Keyword
935 for keyword in test_kw.keywords:
936 if self.start_test_kw(keyword) is not False:
937 self.visit_test_kw(keyword)
938 self.end_test_kw(keyword)
940 def start_test_kw(self, test_kw):
941 """Called when test keyword starts. Default implementation does
944 :param test_kw: Keyword to process.
945 :type test_kw: Keyword
948 if test_kw.name.count("Show Runtime Counters On All Duts"):
949 self._lookup_kw_nr += 1
950 self._show_run_lookup_nr = 0
951 self._msg_type = "test-show-runtime"
952 elif test_kw.name.count("Start The L2fwd Test") and not self._version:
953 self._msg_type = "dpdk-version"
956 test_kw.messages.visit(self)
958 def end_test_kw(self, test_kw):
959 """Called when keyword ends. Default implementation does nothing.
961 :param test_kw: Keyword to process.
962 :type test_kw: Keyword
967 def visit_setup_kw(self, setup_kw):
968 """Implements traversing through the teardown keyword and its child
971 :param setup_kw: Keyword to process.
972 :type setup_kw: Keyword
975 for keyword in setup_kw.keywords:
976 if self.start_setup_kw(keyword) is not False:
977 self.visit_setup_kw(keyword)
978 self.end_setup_kw(keyword)
980 def start_setup_kw(self, setup_kw):
981 """Called when teardown keyword starts. Default implementation does
984 :param setup_kw: Keyword to process.
985 :type setup_kw: Keyword
988 if setup_kw.name.count("Show Vpp Version On All Duts") \
989 and not self._version:
990 self._msg_type = "vpp-version"
991 elif setup_kw.name.count("Set Global Variable") \
992 and not self._timestamp:
993 self._msg_type = "timestamp"
994 elif setup_kw.name.count("Setup Framework") and not self._testbed:
995 self._msg_type = "testbed"
998 setup_kw.messages.visit(self)
1000 def end_setup_kw(self, setup_kw):
1001 """Called when keyword ends. Default implementation does nothing.
1003 :param setup_kw: Keyword to process.
1004 :type setup_kw: Keyword
1009 def visit_teardown_kw(self, teardown_kw):
1010 """Implements traversing through the teardown keyword and its child
1013 :param teardown_kw: Keyword to process.
1014 :type teardown_kw: Keyword
1017 for keyword in teardown_kw.keywords:
1018 if self.start_teardown_kw(keyword) is not False:
1019 self.visit_teardown_kw(keyword)
1020 self.end_teardown_kw(keyword)
1022 def start_teardown_kw(self, teardown_kw):
1023 """Called when teardown keyword starts. Default implementation does
1026 :param teardown_kw: Keyword to process.
1027 :type teardown_kw: Keyword
1031 if teardown_kw.name.count("Show Vat History On All Duts"):
1032 self._conf_history_lookup_nr = 0
1033 self._msg_type = "teardown-vat-history"
1034 teardown_kw.messages.visit(self)
1035 elif teardown_kw.name.count("Show Papi History On All Duts"):
1036 self._conf_history_lookup_nr = 0
1037 self._msg_type = "teardown-papi-history"
1038 teardown_kw.messages.visit(self)
1040 def end_teardown_kw(self, teardown_kw):
1041 """Called when keyword ends. Default implementation does nothing.
1043 :param teardown_kw: Keyword to process.
1044 :type teardown_kw: Keyword
1049 def visit_message(self, msg):
1050 """Implements visiting the message.
1052 :param msg: Message to process.
1056 if self.start_message(msg) is not False:
1057 self.end_message(msg)
1059 def start_message(self, msg):
1060 """Called when message starts. Get required information from messages:
1063 :param msg: Message to process.
1069 self.parse_msg[self._msg_type](msg)
1071 def end_message(self, msg):
1072 """Called when message ends. Default implementation does nothing.
1074 :param msg: Message to process.
1081 class InputData(object):
1084 The data is extracted from output.xml files generated by Jenkins jobs and
1085 stored in pandas' DataFrames.
1091 (as described in ExecutionChecker documentation)
1093 (as described in ExecutionChecker documentation)
1095 (as described in ExecutionChecker documentation)
1098 def __init__(self, spec):
1101 :param spec: Specification.
1102 :type spec: Specification
1109 self._input_data = pd.Series()
1113 """Getter - Input data.
1115 :returns: Input data
1116 :rtype: pandas.Series
1118 return self._input_data
def metadata(self, job, build):
    """Getter - metadata

    :param job: Job which metadata we want.
    :param build: Build which metadata we want.
    :type job: str
    :type build: str
    :returns: Metadata of the specified job and build.
    :rtype: pandas.Series
    """
    # Build data is stored under string keys (download_and_parse_data
    # indexes with str(build_nr)), so coerce here the same way suites()
    # already does; previously an integer build number raised KeyError.
    return self.data[job][str(build)]["metadata"]
1133 def suites(self, job, build):
1136 :param job: Job which suites we want.
1137 :param build: Build which suites we want.
1141 :rtype: pandas.Series
1144 return self.data[job][str(build)]["suites"]
def tests(self, job, build):
    """Getter - tests

    :param job: Job which tests we want.
    :param build: Build which tests we want.
    :type job: str
    :type build: str
    :returns: Tests of the specified job and build.
    :rtype: pandas.Series
    """
    # Build data is stored under string keys (download_and_parse_data
    # indexes with str(build_nr)), so coerce here the same way suites()
    # already does; previously an integer build number raised KeyError.
    return self.data[job][str(build)]["tests"]
def _parse_tests(self, job, build, log):
    """Process data from robot output.xml file and return JSON structured
    data.

    :param job: The name of job which build output data will be processed.
    :param build: The build which output data will be processed.
    :param log: List of log messages.
    :type job: str
    :type build: dict
    :type log: list of tuples (severity, msg)
    :returns: JSON data structure.
    :rtype: dict
    """
    # ('metadata' construction lines are missing from this listing)
    with open(build["file-name"], 'r') as data_file:
        # ('try:' line not visible in this listing)
            result = ExecutionResult(data_file)
        except errors.DataError as err:
            # On a malformed output.xml, record the error for the caller;
            # the tail of this handler is not visible in this listing.
            log.append(("ERROR", "Error occurred while parsing output.xml: "
    # The visitor walks the whole robot result tree and accumulates the
    # parsed data on the checker instance.
    checker = ExecutionChecker(metadata, self._cfg.mapping,
    result.visit(checker)
    # (return of the checker's data not visible in this listing)
1191 def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
1192 """Download and parse the input data file.
1194 :param pid: PID of the process executing this method.
1195 :param data_queue: Shared memory between processes. Queue which keeps
1196 the result data. This data is then read by the main process and used
1197 in further processing.
1198 :param job: Name of the Jenkins job which generated the processed input
1200 :param build: Information about the Jenkins build which generated the
1201 processed input file.
1202 :param repeat: Repeat the download specified number of times if not
1205 :type data_queue: multiprocessing.Manager().Queue()
1213 logs.append(("INFO", " Processing the job/build: {0}: {1}".
1214 format(job, build["build"])))
1221 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1227 logs.append(("ERROR", "It is not possible to download the input "
1228 "data file from the job '{job}', build "
1229 "'{build}', or it is damaged. Skipped.".
1230 format(job=job, build=build["build"])))
1232 logs.append(("INFO", " Processing data from the build '{0}' ...".
1233 format(build["build"])))
1234 data = self._parse_tests(job, build, logs)
1236 logs.append(("ERROR", "Input data file from the job '{job}', "
1237 "build '{build}' is damaged. Skipped.".
1238 format(job=job, build=build["build"])))
1243 remove(build["file-name"])
1244 except OSError as err:
1245 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
1246 format(build["file-name"], repr(err))))
1248 # If the time-period is defined in the specification file, remove all
1249 # files which are outside the time period.
1250 timeperiod = self._cfg.input.get("time-period", None)
1251 if timeperiod and data:
1253 timeperiod = timedelta(int(timeperiod))
1254 metadata = data.get("metadata", None)
1256 generated = metadata.get("generated", None)
1258 generated = dt.strptime(generated, "%Y%m%d %H:%M")
1259 if (now - generated) > timeperiod:
1260 # Remove the data and the file:
1265 " The build {job}/{build} is outdated, will be "
1266 "removed".format(job=job, build=build["build"])))
1267 file_name = self._cfg.input["file-name"]
1269 self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
1270 "{job}{sep}{build}{sep}{name}".format(
1273 build=build["build"],
1277 logs.append(("INFO",
1278 " The file {name} has been removed".
1279 format(name=full_name)))
1280 except OSError as err:
1281 logs.append(("ERROR",
1282 "Cannot remove the file '{0}': {1}".
1283 format(full_name, repr(err))))
1284 logs.append(("INFO", " Done."))
1292 data_queue.put(result)
1294 for level, line in logs:
1297 elif level == "ERROR":
1299 elif level == "DEBUG":
1301 elif level == "CRITICAL":
1302 logging.critical(line)
1303 elif level == "WARNING":
1304 logging.warning(line)
1306 logging.info("Memory allocation: {0:,d}MB".format(
1307 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
1309 def download_and_parse_data(self, repeat=1):
1310 """Download the input data files, parse input data from input files and
1311 store in pandas' Series.
1313 :param repeat: Repeat the download specified number of times if not
1318 logging.info("Downloading and parsing input files ...")
1320 work_queue = multiprocessing.JoinableQueue()
1321 manager = multiprocessing.Manager()
1322 data_queue = manager.Queue()
1323 cpus = multiprocessing.cpu_count()
1326 for cpu in range(cpus):
1327 worker = Worker(work_queue,
1329 self._download_and_parse_build)
1330 worker.daemon = True
1332 workers.append(worker)
1333 os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
1334 format(cpu, worker.pid))
1336 for job, builds in self._cfg.builds.items():
1337 for build in builds:
1338 work_queue.put((job, build, repeat))
1342 logging.info("Done.")
1343 logging.info("Collecting data:")
1345 while not data_queue.empty():
1346 result = data_queue.get()
1349 build_nr = result["build"]["build"]
1350 logging.info(" {job}-{build}".format(job=job, build=build_nr))
1353 data = result["data"]
1354 build_data = pd.Series({
1355 "metadata": pd.Series(
1356 data["metadata"].values(),
1357 index=data["metadata"].keys()),
1358 "suites": pd.Series(data["suites"].values(),
1359 index=data["suites"].keys()),
1360 "tests": pd.Series(data["tests"].values(),
1361 index=data["tests"].keys())})
1363 if self._input_data.get(job, None) is None:
1364 self._input_data[job] = pd.Series()
1365 self._input_data[job][str(build_nr)] = build_data
1367 self._cfg.set_input_file_name(
1368 job, build_nr, result["build"]["file-name"])
1370 self._cfg.set_input_state(job, build_nr, result["state"])
1372 logging.info("Memory allocation: {0:,d}MB".format(
1373 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
1377 # Terminate all workers
1378 for worker in workers:
1382 logging.info("Done.")
def _end_of_tag(tag_filter, start=0, closer="'"):
    """Return the index of character in the string which is the end of tag.

    :param tag_filter: The string where the end of tag is being searched.
    :param start: The index where the searching is stated.
    :param closer: The character which is the tag closer.
    :type tag_filter: str
    :type start: int
    :type closer: str
    :returns: The index of the tag closer.
    :rtype: int
    """
    # Find the opening quote at/after 'start', then return the index of
    # its matching closing quote.
    # (the original guard around these two lines -- presumably
    # 'try:'/'except ValueError' -- is not visible in this listing)
    idx_opener = tag_filter.index(closer, start)
    return tag_filter.index(closer, idx_opener + 1)
def _condition(tag_filter):
    """Create a conditional statement from the given tag filter.

    :param tag_filter: Filter based on tags from the element specification.
    :type tag_filter: str
    :returns: Conditional statement which can be evaluated.
    :rtype: str
    """
    # Walk the filter string and, after each quoted tag name, splice in
    # " in tags" so the result can be eval()-ed against a test's tag set
    # (see filter_data). (the loop header, index advance and termination
    # lines are missing from this listing)
    index = InputData._end_of_tag(tag_filter, index)
    # (intervening lines not visible in this listing)
    tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
1422 def filter_data(self, element, params=None, data_set="tests",
1423 continue_on_error=False):
1424 """Filter required data from the given jobs and builds.
1426 The output data structure is:
1430 - test (or suite) 1 ID:
1436 - test (or suite) n ID:
1443 :param element: Element which will use the filtered data.
1444 :param params: Parameters which will be included in the output. If None,
1445 all parameters are included.
1446 :param data_set: The set of data to be filtered: tests, suites,
1448 :param continue_on_error: Continue if there is error while reading the
1449 data. The Item will be empty then
1450 :type element: pandas.Series
1453 :type continue_on_error: bool
1454 :returns: Filtered data.
1455 :rtype pandas.Series
1459 if element["filter"] in ("all", "template"):
1462 cond = InputData._condition(element["filter"])
1463 logging.debug(" Filter: {0}".format(cond))
1465 logging.error(" No filter defined.")
1469 params = element.get("parameters", None)
1471 params.append("type")
1475 for job, builds in element["data"].items():
1476 data[job] = pd.Series()
1477 for build in builds:
1478 data[job][str(build)] = pd.Series()
1480 data_iter = self.data[job][str(build)][data_set].\
1483 if continue_on_error:
1487 for test_ID, test_data in data_iter:
1488 if eval(cond, {"tags": test_data.get("tags", "")}):
1489 data[job][str(build)][test_ID] = pd.Series()
1491 for param, val in test_data.items():
1492 data[job][str(build)][test_ID][param] = val
1494 for param in params:
1496 data[job][str(build)][test_ID][param] =\
1499 data[job][str(build)][test_ID][param] =\
1503 except (KeyError, IndexError, ValueError) as err:
1504 logging.error(" Missing mandatory parameter in the element "
1505 "specification: {0}".format(err))
1507 except AttributeError:
1510 logging.error(" The filter '{0}' is not correct. Check if all "
1511 "tags are enclosed by apostrophes.".format(cond))
1515 def merge_data(data):
1516 """Merge data from more jobs and builds to a simple data structure.
1518 The output data structure is:
1520 - test (suite) 1 ID:
1526 - test (suite) n ID:
1529 :param data: Data to merge.
1530 :type data: pandas.Series
1531 :returns: Merged data.
1532 :rtype: pandas.Series
1535 logging.info(" Merging data ...")
1537 merged_data = pd.Series()
1538 for _, builds in data.iteritems():
1539 for _, item in builds.iteritems():
1540 for ID, item_data in item.iteritems():
1541 merged_data[ID] = item_data